1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_dump.h"
58 #define DRV_MODULE_VERSION "1.48.105"
59 #define DRV_MODULE_RELDATE "2009/03/02"
60 #define BNX2X_BC_VER 0x040200
62 /* Time in jiffies before concluding the transmitter is hung */
63 #define TX_TIMEOUT (5*HZ)
65 static char version[] __devinitdata =
66 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
67 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69 MODULE_AUTHOR("Eliezer Tamir");
70 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
71 MODULE_LICENSE("GPL");
72 MODULE_VERSION(DRV_MODULE_VERSION);
74 static int multi_mode = 1;
75 module_param(multi_mode, int, 0);
76 MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");
78 static int disable_tpa;
79 module_param(disable_tpa, int, 0);
80 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
83 module_param(int_mode, int, 0);
84 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
87 module_param(poll, int, 0);
88 MODULE_PARM_DESC(poll, " Use polling (for debug)");
91 module_param(mrrs, int, 0);
92 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
95 module_param(debug, int, 0);
96 MODULE_PARM_DESC(debug, " Default debug msglevel");
98 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
100 static struct workqueue_struct *bnx2x_wq;
102 enum bnx2x_board_type {
108 /* indexed by board_type, above */
111 } board_info[] __devinitdata = {
112 { "Broadcom NetXtreme II BCM57710 XGb" },
113 { "Broadcom NetXtreme II BCM57711 XGb" },
114 { "Broadcom NetXtreme II BCM57711E XGb" }
118 static const struct pci_device_id bnx2x_pci_tbl[] = {
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
123 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
128 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
130 /****************************************************************************
131 * General service functions
132 ****************************************************************************/
135 * locking is done by mcp
137 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
139 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
140 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
141 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
142 PCICFG_VENDOR_ID_OFFSET);
145 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
149 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
150 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
151 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
152 PCICFG_VENDOR_ID_OFFSET);
157 static const u32 dmae_reg_go_c[] = {
158 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
159 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
160 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
161 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
164 /* copy command into DMAE command memory and set DMAE command go */
165 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
171 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
172 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
173 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
175 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
176 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
178 REG_WR(bp, dmae_reg_go_c[idx], 1);
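/* Copy len32 dwords from host memory (dma_addr) to device GRC space (dst_addr)
 * via the DMAE engine, busy-waiting on the completion word; falls back to
 * indirect register writes while DMAE is not yet ready.
 */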
181 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
184 struct dmae_command *dmae = &bp->init_dmae;
185 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
188 if (!bp->dmae_ready) {
189 u32 *data = bnx2x_sp(bp, wb_data[0]);
191 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
192 " using indirect\n", dst_addr, len32);
193 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
197 mutex_lock(&bp->dmae_mutex);
199 memset(dmae, 0, sizeof(struct dmae_command));
201 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
202 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
203 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
205 DMAE_CMD_ENDIANITY_B_DW_SWAP |
207 DMAE_CMD_ENDIANITY_DW_SWAP |
209 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
210 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
211 dmae->src_addr_lo = U64_LO(dma_addr);
212 dmae->src_addr_hi = U64_HI(dma_addr);
213 dmae->dst_addr_lo = dst_addr >> 2;
214 dmae->dst_addr_hi = 0;
216 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
217 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
218 dmae->comp_val = DMAE_COMP_VAL;
220 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
221 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
222 "dst_addr [%x:%08x (%08x)]\n"
223 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
224 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
225 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
226 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
227 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
228 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
229 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
233 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
237 while (*wb_comp != DMAE_COMP_VAL) {
238 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
241 BNX2X_ERR("DMAE timeout!\n");
245 /* adjust delay for emulation/FPGA */
246 if (CHIP_REV_IS_SLOW(bp))
252 mutex_unlock(&bp->dmae_mutex);
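/* Copy len32 dwords from device GRC space (src_addr) into the slowpath wb_data
 * buffer via DMAE; falls back to indirect register reads while DMAE is not ready.
 */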
255 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
257 struct dmae_command *dmae = &bp->init_dmae;
258 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
261 if (!bp->dmae_ready) {
262 u32 *data = bnx2x_sp(bp, wb_data[0]);
265 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
266 " using indirect\n", src_addr, len32);
267 for (i = 0; i < len32; i++)
268 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
272 mutex_lock(&bp->dmae_mutex);
274 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
275 memset(dmae, 0, sizeof(struct dmae_command));
277 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
278 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
279 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
281 DMAE_CMD_ENDIANITY_B_DW_SWAP |
283 DMAE_CMD_ENDIANITY_DW_SWAP |
285 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
286 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
287 dmae->src_addr_lo = src_addr >> 2;
288 dmae->src_addr_hi = 0;
289 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
290 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
292 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
293 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
294 dmae->comp_val = DMAE_COMP_VAL;
296 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
297 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
298 "dst_addr [%x:%08x (%08x)]\n"
299 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
300 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
301 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
302 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
306 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
310 while (*wb_comp != DMAE_COMP_VAL) {
313 BNX2X_ERR("DMAE timeout!\n");
317 /* adjust delay for emulation/FPGA */
318 if (CHIP_REV_IS_SLOW(bp))
323 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
324 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
325 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
327 mutex_unlock(&bp->dmae_mutex);
330 /* used only for slowpath so not inlined */
331 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
335 wb_write[0] = val_hi;
336 wb_write[1] = val_lo;
337 REG_WR_DMAE(bp, reg, wb_write, 2);
341 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
345 REG_RD_DMAE(bp, reg, wb_data, 2);
347 return HILO_U64(wb_data[0], wb_data[1]);
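/* Walk the XSTORM/TSTORM/CSTORM/USTORM assert lists and print any valid entries. */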
351 static int bnx2x_mc_assert(struct bnx2x *bp)
355 u32 row0, row1, row2, row3;
358 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
359 XSTORM_ASSERT_LIST_INDEX_OFFSET);
361 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
363 /* print the asserts */
364 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
366 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i));
368 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
370 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
372 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
373 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
375 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
376 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
377 " 0x%08x 0x%08x 0x%08x\n",
378 i, row3, row2, row1, row0);
386 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
387 TSTORM_ASSERT_LIST_INDEX_OFFSET);
389 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
391 /* print the asserts */
392 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
394 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i));
396 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
398 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
400 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
401 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
403 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
404 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
405 " 0x%08x 0x%08x 0x%08x\n",
406 i, row3, row2, row1, row0);
414 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
415 CSTORM_ASSERT_LIST_INDEX_OFFSET);
417 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
419 /* print the asserts */
420 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
422 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i));
424 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
426 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
428 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
429 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
431 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
432 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
433 " 0x%08x 0x%08x 0x%08x\n",
434 i, row3, row2, row1, row0);
442 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
443 USTORM_ASSERT_LIST_INDEX_OFFSET);
445 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
447 /* print the asserts */
448 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
450 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i));
452 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 4);
454 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
455 USTORM_ASSERT_LIST_OFFSET(i) + 8);
456 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
457 USTORM_ASSERT_LIST_OFFSET(i) + 12);
459 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
460 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
461 " 0x%08x 0x%08x 0x%08x\n",
462 i, row3, row2, row1, row0);
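/* Print the MCP (management firmware) scratchpad trace buffer around the current mark. */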
472 static void bnx2x_fw_dump(struct bnx2x *bp)
478 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
479 mark = ((mark + 0x3) & ~0x3);
480 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
482 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
483 for (word = 0; word < 8; word++)
484 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
487 printk(KERN_CONT "%s", (char *)data);
489 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
490 for (word = 0; word < 8; word++)
491 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
494 printk(KERN_CONT "%s", (char *)data);
496 printk("\n" KERN_ERR PFX "end of fw dump\n");
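/* Dump driver and queue state (indices, Rx/Tx BD, SGE and CQE rings) for
 * post-mortem debugging.
 */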
499 static void bnx2x_panic_dump(struct bnx2x *bp)
504 bp->stats_state = STATS_STATE_DISABLED;
505 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
507 BNX2X_ERR("begin crash dump -----------------\n");
511 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
512 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
513 " spq_prod_idx(%u)\n",
514 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
515 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
518 for_each_rx_queue(bp, i) {
519 struct bnx2x_fastpath *fp = &bp->fp[i];
521 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
522 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
523 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
524 i, fp->rx_bd_prod, fp->rx_bd_cons,
525 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
526 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
527 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
528 " fp_u_idx(%x) *sb_u_idx(%x)\n",
529 fp->rx_sge_prod, fp->last_max_sge,
530 le16_to_cpu(fp->fp_u_idx),
531 fp->status_blk->u_status_block.status_block_index);
535 for_each_tx_queue(bp, i) {
536 struct bnx2x_fastpath *fp = &bp->fp[i];
537 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
539 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
540 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
541 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
542 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
543 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
544 " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
545 fp->status_blk->c_status_block.status_block_index,
546 hw_prods->packets_prod, hw_prods->bds_prod);
551 for_each_rx_queue(bp, i) {
552 struct bnx2x_fastpath *fp = &bp->fp[i];
554 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
555 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
556 for (j = start; j != end; j = RX_BD(j + 1)) {
557 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
558 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
560 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
561 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
564 start = RX_SGE(fp->rx_sge_prod);
565 end = RX_SGE(fp->last_max_sge);
566 for (j = start; j != end; j = RX_SGE(j + 1)) {
567 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
568 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
570 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
571 i, j, rx_sge[1], rx_sge[0], sw_page->page);
574 start = RCQ_BD(fp->rx_comp_cons - 10);
575 end = RCQ_BD(fp->rx_comp_cons + 503);
576 for (j = start; j != end; j = RCQ_BD(j + 1)) {
577 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
579 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
580 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
585 for_each_tx_queue(bp, i) {
586 struct bnx2x_fastpath *fp = &bp->fp[i];
588 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
589 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
590 for (j = start; j != end; j = TX_BD(j + 1)) {
591 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
593 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
594 i, j, sw_bd->skb, sw_bd->first_bd);
597 start = TX_BD(fp->tx_bd_cons - 10);
598 end = TX_BD(fp->tx_bd_cons + 254);
599 for (j = start; j != end; j = TX_BD(j + 1)) {
600 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
602 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
603 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
609 BNX2X_ERR("end crash dump -----------------\n");
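/* Program the HC configuration for the active interrupt mode (MSI-X, MSI or
 * INTx) and, on E1H, the leading/trailing edge attention registers.
 */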
612 static void bnx2x_int_enable(struct bnx2x *bp)
614 int port = BP_PORT(bp);
615 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
616 u32 val = REG_RD(bp, addr);
617 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
618 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
621 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
622 HC_CONFIG_0_REG_INT_LINE_EN_0);
623 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
624 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
626 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
627 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
628 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
629 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
631 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
632 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
633 HC_CONFIG_0_REG_INT_LINE_EN_0 |
634 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
636 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
639 REG_WR(bp, addr, val);
641 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
644 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
645 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
647 REG_WR(bp, addr, val);
649 if (CHIP_IS_E1H(bp)) {
650 /* init leading/trailing edge */
652 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
654 /* enable nig and gpio3 attention */
659 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
660 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
664 static void bnx2x_int_disable(struct bnx2x *bp)
666 int port = BP_PORT(bp);
667 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
668 u32 val = REG_RD(bp, addr);
670 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
671 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
672 HC_CONFIG_0_REG_INT_LINE_EN_0 |
673 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
675 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
678 /* flush all outstanding writes */
681 REG_WR(bp, addr, val);
682 if (REG_RD(bp, addr) != val)
683 BNX2X_ERR("BUG! proper val not read from IGU!\n");
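/* Mask HW interrupts and wait for all ISRs and the slowpath task to complete. */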
687 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
689 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
692 /* disable interrupt handling */
693 atomic_inc(&bp->intr_sem);
695 /* prevent the HW from sending interrupts */
696 bnx2x_int_disable(bp);
698 /* make sure all ISRs are done */
700 synchronize_irq(bp->msix_table[0].vector);
702 for_each_queue(bp, i)
703 synchronize_irq(bp->msix_table[i + offset].vector);
705 synchronize_irq(bp->pdev->irq);
707 /* make sure sp_task is not running */
708 cancel_delayed_work(&bp->sp_task);
709 flush_workqueue(bnx2x_wq);
715 * General service functions
718 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
719 u8 storm, u16 index, u8 op, u8 update)
721 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722 COMMAND_REG_INT_ACK);
723 struct igu_ack_register igu_ack;
725 igu_ack.status_block_index = index;
726 igu_ack.sb_id_and_flags =
727 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
728 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
729 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
730 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
732 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
733 (*(u32 *)&igu_ack), hc_addr);
734 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
737 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
739 struct host_status_block *fpsb = fp->status_blk;
742 barrier(); /* status block is written to by the chip */
743 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
744 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
747 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
748 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
754 static u16 bnx2x_ack_int(struct bnx2x *bp)
756 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
757 COMMAND_REG_SIMD_MASK);
758 u32 result = REG_RD(bp, hc_addr);
760 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
768 * fast path service functions
771 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
775 /* Tell compiler that status block fields can change */
777 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
778 return (fp->tx_pkt_cons != tx_cons_sb);
781 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
783 /* Tell compiler that consumer and producer can change */
785 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
788 /* free skb in the packet ring at pos idx
789 * return idx of last bd freed */
791 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
794 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
795 struct eth_tx_bd *tx_bd;
796 struct sk_buff *skb = tx_buf->skb;
797 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
800 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
804 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
805 tx_bd = &fp->tx_desc_ring[bd_idx];
806 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
807 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
809 nbd = le16_to_cpu(tx_bd->nbd) - 1;
810 new_cons = nbd + tx_buf->first_bd;
811 #ifdef BNX2X_STOP_ON_ERROR
812 if (nbd > (MAX_SKB_FRAGS + 2)) {
813 BNX2X_ERR("BAD nbd!\n");
818 /* Skip a parse bd and the TSO split header bd
819 since they have no mapping */
821 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
823 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
824 ETH_TX_BD_FLAGS_TCP_CSUM |
825 ETH_TX_BD_FLAGS_SW_LSO)) {
827 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
828 tx_bd = &fp->tx_desc_ring[bd_idx];
829 /* is this a TSO split header bd? */
830 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
832 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
839 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
840 tx_bd = &fp->tx_desc_ring[bd_idx];
841 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
842 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
844 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
850 tx_buf->first_bd = 0;
856 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
862 barrier(); /* Tell compiler that prod and cons can change */
863 prod = fp->tx_bd_prod;
864 cons = fp->tx_bd_cons;
866 /* NUM_TX_RINGS = number of "next-page" entries
867 It will be used as a threshold */
868 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
870 #ifdef BNX2X_STOP_ON_ERROR
872 WARN_ON(used > fp->bp->tx_ring_size);
873 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
876 return (s16)(fp->bp->tx_ring_size) - used;
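/* Reclaim Tx packets completed by the chip and wake the Tx queue once enough
 * BDs are free again.
 */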
879 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
881 struct bnx2x *bp = fp->bp;
882 struct netdev_queue *txq;
883 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
886 #ifdef BNX2X_STOP_ON_ERROR
887 if (unlikely(bp->panic))
891 txq = netdev_get_tx_queue(bp->dev, fp->index);
892 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
893 sw_cons = fp->tx_pkt_cons;
895 while (sw_cons != hw_cons) {
898 pkt_cons = TX_BD(sw_cons);
900 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
902 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
903 hw_cons, sw_cons, pkt_cons);
905 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
907 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
910 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
915 fp->tx_pkt_cons = sw_cons;
916 fp->tx_bd_cons = bd_cons;
918 /* TBD need a thresh? */
919 if (unlikely(netif_tx_queue_stopped(txq))) {
921 __netif_tx_lock(txq, smp_processor_id());
923 /* Need to make the tx_bd_cons update visible to start_xmit()
924 * before checking for netif_tx_queue_stopped(). Without the
925 * memory barrier, there is a small possibility that
926 * start_xmit() will miss it and cause the queue to be stopped forever. */
931 if ((netif_tx_queue_stopped(txq)) &&
932 (bp->state == BNX2X_STATE_OPEN) &&
933 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
934 netif_tx_wake_queue(txq);
936 __netif_tx_unlock(txq);
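/* Handle a slowpath (ramrod) completion CQE and advance the fastpath or driver
 * state machine accordingly.
 */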
941 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
942 union eth_rx_cqe *rr_cqe)
944 struct bnx2x *bp = fp->bp;
945 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
946 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
949 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
950 fp->index, cid, command, bp->state,
951 rr_cqe->ramrod_cqe.ramrod_type);
956 switch (command | fp->state) {
957 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
958 BNX2X_FP_STATE_OPENING):
959 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
961 fp->state = BNX2X_FP_STATE_OPEN;
964 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
965 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
967 fp->state = BNX2X_FP_STATE_HALTED;
971 BNX2X_ERR("unexpected MC reply (%d) "
972 "fp->state is %x\n", command, fp->state);
975 mb(); /* force bnx2x_wait_ramrod() to see the change */
979 switch (command | bp->state) {
980 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
981 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
982 bp->state = BNX2X_STATE_OPEN;
985 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
986 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
987 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
988 fp->state = BNX2X_FP_STATE_HALTED;
991 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
992 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
993 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
997 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
998 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
999 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1000 bp->set_mac_pending = 0;
1003 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1004 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1008 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1009 command, bp->state);
1012 mb(); /* force bnx2x_wait_ramrod() to see the change */
1015 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1016 struct bnx2x_fastpath *fp, u16 index)
1018 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1019 struct page *page = sw_buf->page;
1020 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1022 /* Skip "next page" elements */
1026 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1027 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1028 __free_pages(page, PAGES_PER_SGE_SHIFT);
1030 sw_buf->page = NULL;
1035 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1036 struct bnx2x_fastpath *fp, int last)
1040 for (i = 0; i < last; i++)
1041 bnx2x_free_rx_sge(bp, fp, i);
1044 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1045 struct bnx2x_fastpath *fp, u16 index)
1047 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1048 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1049 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1052 if (unlikely(page == NULL))
1055 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1056 PCI_DMA_FROMDEVICE);
1057 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1058 __free_pages(page, PAGES_PER_SGE_SHIFT);
1062 sw_buf->page = page;
1063 pci_unmap_addr_set(sw_buf, mapping, mapping);
1065 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1066 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1071 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1072 struct bnx2x_fastpath *fp, u16 index)
1074 struct sk_buff *skb;
1075 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1076 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1079 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1080 if (unlikely(skb == NULL))
1083 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1084 PCI_DMA_FROMDEVICE);
1085 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1091 pci_unmap_addr_set(rx_buf, mapping, mapping);
1093 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1094 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1099 /* note that we are not allocating a new skb,
1100 * we are just moving one from cons to prod;
1101 * we are not creating a new mapping,
1102 * so there is no need to check for dma_mapping_error(). */
1104 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1105 struct sk_buff *skb, u16 cons, u16 prod)
1107 struct bnx2x *bp = fp->bp;
1108 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1109 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1110 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1111 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1113 pci_dma_sync_single_for_device(bp->pdev,
1114 pci_unmap_addr(cons_rx_buf, mapping),
1115 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1117 prod_rx_buf->skb = cons_rx_buf->skb;
1118 pci_unmap_addr_set(prod_rx_buf, mapping,
1119 pci_unmap_addr(cons_rx_buf, mapping));
1120 *prod_bd = *cons_bd;
1123 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1126 u16 last_max = fp->last_max_sge;
1128 if (SUB_S16(idx, last_max) > 0)
1129 fp->last_max_sge = idx;
1132 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1136 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1137 int idx = RX_SGE_CNT * i - 1;
1139 for (j = 0; j < 2; j++) {
1140 SGE_MASK_CLEAR_BIT(fp, idx);
1146 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1147 struct eth_fast_path_rx_cqe *fp_cqe)
1149 struct bnx2x *bp = fp->bp;
1150 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1151 le16_to_cpu(fp_cqe->len_on_bd)) >>
1153 u16 last_max, last_elem, first_elem;
1160 /* First mark all used pages */
1161 for (i = 0; i < sge_len; i++)
1162 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1164 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1165 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1167 /* Here we assume that the last SGE index is the biggest */
1168 prefetch((void *)(fp->sge_mask));
1169 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1171 last_max = RX_SGE(fp->last_max_sge);
1172 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1173 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1175 /* If ring is not full */
1176 if (last_elem + 1 != first_elem)
1179 /* Now update the prod */
1180 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1181 if (likely(fp->sge_mask[i]))
1184 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1185 delta += RX_SGE_MASK_ELEM_SZ;
1189 fp->rx_sge_prod += delta;
1190 /* clear page-end entries */
1191 bnx2x_clear_sge_mask_next_elems(fp);
1194 DP(NETIF_MSG_RX_STATUS,
1195 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1196 fp->last_max_sge, fp->rx_sge_prod);
1199 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1201 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1202 memset(fp->sge_mask, 0xff,
1203 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1205 /* Clear the two last indices in the page to 1:
1206 these are the indices that correspond to the "next" element,
1207 hence will never be indicated and should be removed from
1208 the calculations. */
1209 bnx2x_clear_sge_mask_next_elems(fp);
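/* Begin a TPA (LRO) aggregation: map the spare skb from the per-queue pool into
 * the producer BD and park the consumer skb (holding the packet start) in the pool.
 */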
1212 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1213 struct sk_buff *skb, u16 cons, u16 prod)
1215 struct bnx2x *bp = fp->bp;
1216 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1217 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1218 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1221 /* move empty skb from pool to prod and map it */
1222 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1223 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1224 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1225 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1227 /* move partial skb from cons to pool (don't unmap yet) */
1228 fp->tpa_pool[queue] = *cons_rx_buf;
1230 /* mark bin state as start - print error if current state != stop */
1231 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1232 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1234 fp->tpa_state[queue] = BNX2X_TPA_START;
1236 /* point prod_bd to new skb */
1237 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1238 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1240 #ifdef BNX2X_STOP_ON_ERROR
1241 fp->tpa_queue_used |= (1 << queue);
1242 #ifdef __powerpc64__
1243 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1245 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1247 fp->tpa_queue_used);
1251 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1252 struct sk_buff *skb,
1253 struct eth_fast_path_rx_cqe *fp_cqe,
1256 struct sw_rx_page *rx_pg, old_rx_pg;
1257 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1258 u32 i, frag_len, frag_size, pages;
1262 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1263 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1265 /* This is needed in order to enable forwarding support */
1267 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1268 max(frag_size, (u32)len_on_bd));
1270 #ifdef BNX2X_STOP_ON_ERROR
1272 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1273 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1275 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1276 fp_cqe->pkt_len, len_on_bd);
1282 /* Run through the SGL and compose the fragmented skb */
1283 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1284 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1286 /* FW gives the indices of the SGE as if the ring is an array
1287 (meaning that "next" element will consume 2 indices) */
1288 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1289 rx_pg = &fp->rx_page_ring[sge_idx];
1292 /* If we fail to allocate a substitute page, we simply stop
1293 where we are and drop the whole packet */
1294 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1295 if (unlikely(err)) {
1296 fp->eth_q_stats.rx_skb_alloc_failed++;
1300 /* Unmap the page as we're going to pass it to the stack */
1301 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1302 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1304 /* Add one frag and update the appropriate fields in the skb */
1305 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1307 skb->data_len += frag_len;
1308 skb->truesize += frag_len;
1309 skb->len += frag_len;
1311 frag_size -= frag_len;
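/* Complete a TPA aggregation: fix the IP checksum, attach the SGE pages as
 * fragments and pass the skb to the stack; drop the packet if a replacement
 * skb cannot be allocated.
 */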
1317 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1318 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1321 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1322 struct sk_buff *skb = rx_buf->skb;
1324 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1326 /* Unmap skb in the pool anyway, as we are going to change
1327 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1328 fails. */
1329 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1330 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1332 if (likely(new_skb)) {
1333 /* fix ip xsum and give it to the stack */
1334 /* (no need to map the new skb) */
1337 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1338 PARSING_FLAGS_VLAN);
1339 int is_not_hwaccel_vlan_cqe =
1340 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1344 prefetch(((char *)(skb)) + 128);
1346 #ifdef BNX2X_STOP_ON_ERROR
1347 if (pad + len > bp->rx_buf_size) {
1348 BNX2X_ERR("skb_put is about to fail... "
1349 "pad %d len %d rx_buf_size %d\n",
1350 pad, len, bp->rx_buf_size);
1356 skb_reserve(skb, pad);
1359 skb->protocol = eth_type_trans(skb, bp->dev);
1360 skb->ip_summed = CHECKSUM_UNNECESSARY;
1365 iph = (struct iphdr *)skb->data;
1367 /* If there is no Rx VLAN offloading -
1368 take the VLAN tag into account */
1369 if (unlikely(is_not_hwaccel_vlan_cqe))
1370 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1373 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1376 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1377 &cqe->fast_path_cqe, cqe_idx)) {
1379 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1380 (!is_not_hwaccel_vlan_cqe))
1381 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1382 le16_to_cpu(cqe->fast_path_cqe.
1386 netif_receive_skb(skb);
1388 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1389 " - dropping packet!\n");
1394 /* put new skb in bin */
1395 fp->tpa_pool[queue].skb = new_skb;
1398 /* else drop the packet and keep the buffer in the bin */
1399 DP(NETIF_MSG_RX_STATUS,
1400 "Failed to allocate new skb - dropping packet!\n");
1401 fp->eth_q_stats.rx_skb_alloc_failed++;
1404 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1407 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1408 struct bnx2x_fastpath *fp,
1409 u16 bd_prod, u16 rx_comp_prod,
1412 struct ustorm_eth_rx_producers rx_prods = {0};
1415 /* Update producers */
1416 rx_prods.bd_prod = bd_prod;
1417 rx_prods.cqe_prod = rx_comp_prod;
1418 rx_prods.sge_prod = rx_sge_prod;
1421 * Make sure that the BD and SGE data is updated before updating the
1422 * producers since FW might read the BD/SGE right after the producer
1423 * is updated.
1424 * This is only applicable for weak-ordered memory model archs such
1425 * as IA-64. The following barrier is also mandatory since FW will
1426 * assume BDs must have buffers. */
1430 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1431 REG_WR(bp, BAR_USTRORM_INTMEM +
1432 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1433 ((u32 *)&rx_prods)[i]);
1435 mmiowb(); /* keep prod updates ordered */
1437 DP(NETIF_MSG_RX_STATUS,
1438 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1439 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
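/* Rx NAPI poll work: process up to 'budget' completions, dispatching slowpath
 * CQEs, TPA start/stop events and regular packets.
 */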
1442 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1444 struct bnx2x *bp = fp->bp;
1445 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1446 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1449 #ifdef BNX2X_STOP_ON_ERROR
1450 if (unlikely(bp->panic))
1454 /* CQ "next element" is of the size of the regular element,
1455 that's why it's ok here */
1456 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1457 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1460 bd_cons = fp->rx_bd_cons;
1461 bd_prod = fp->rx_bd_prod;
1462 bd_prod_fw = bd_prod;
1463 sw_comp_cons = fp->rx_comp_cons;
1464 sw_comp_prod = fp->rx_comp_prod;
1466 /* Memory barrier necessary as speculative reads of the rx
1467 * buffer can be ahead of the index in the status block */
1471 DP(NETIF_MSG_RX_STATUS,
1472 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1473 fp->index, hw_comp_cons, sw_comp_cons);
1475 while (sw_comp_cons != hw_comp_cons) {
1476 struct sw_rx_bd *rx_buf = NULL;
1477 struct sk_buff *skb;
1478 union eth_rx_cqe *cqe;
1482 comp_ring_cons = RCQ_BD(sw_comp_cons);
1483 bd_prod = RX_BD(bd_prod);
1484 bd_cons = RX_BD(bd_cons);
1486 cqe = &fp->rx_comp_ring[comp_ring_cons];
1487 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1489 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1490 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1491 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1492 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1493 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1494 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1496 /* is this a slowpath msg? */
1497 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1498 bnx2x_sp_event(fp, cqe);
1501 /* this is an rx packet */
1503 rx_buf = &fp->rx_buf_ring[bd_cons];
1505 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1506 pad = cqe->fast_path_cqe.placement_offset;
1508 /* If CQE is marked both TPA_START and TPA_END
1509 it is a non-TPA CQE */
1510 if ((!fp->disable_tpa) &&
1511 (TPA_TYPE(cqe_fp_flags) !=
1512 (TPA_TYPE_START | TPA_TYPE_END))) {
1513 u16 queue = cqe->fast_path_cqe.queue_index;
1515 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1516 DP(NETIF_MSG_RX_STATUS,
1517 "calling tpa_start on queue %d\n",
1520 bnx2x_tpa_start(fp, queue, skb,
1525 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1526 DP(NETIF_MSG_RX_STATUS,
1527 "calling tpa_stop on queue %d\n",
1530 if (!BNX2X_RX_SUM_FIX(cqe))
1531 BNX2X_ERR("STOP on none TCP "
1534 /* This is the size of the linear data on this skb */
1536 len = le16_to_cpu(cqe->fast_path_cqe.
1537 len_on_bd);
1538 bnx2x_tpa_stop(bp, fp, queue, pad,
1539 len, cqe, comp_ring_cons);
1540 #ifdef BNX2X_STOP_ON_ERROR
1545 bnx2x_update_sge_prod(fp,
1546 &cqe->fast_path_cqe);
1551 pci_dma_sync_single_for_device(bp->pdev,
1552 pci_unmap_addr(rx_buf, mapping),
1553 pad + RX_COPY_THRESH,
1554 PCI_DMA_FROMDEVICE);
1556 prefetch(((char *)(skb)) + 128);
1558 /* is this an error packet? */
1559 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1560 DP(NETIF_MSG_RX_ERR,
1561 "ERROR flags %x rx packet %u\n",
1562 cqe_fp_flags, sw_comp_cons);
1563 fp->eth_q_stats.rx_err_discard_pkt++;
1567 /* Since we don't have a jumbo ring
1568 * copy small packets if mtu > 1500 */
1570 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1571 (len <= RX_COPY_THRESH)) {
1572 struct sk_buff *new_skb;
1574 new_skb = netdev_alloc_skb(bp->dev,
1576 if (new_skb == NULL) {
1577 DP(NETIF_MSG_RX_ERR,
1578 "ERROR packet dropped "
1579 "because of alloc failure\n");
1580 fp->eth_q_stats.rx_skb_alloc_failed++;
1585 skb_copy_from_linear_data_offset(skb, pad,
1586 new_skb->data + pad, len);
1587 skb_reserve(new_skb, pad);
1588 skb_put(new_skb, len);
1590 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1594 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1595 pci_unmap_single(bp->pdev,
1596 pci_unmap_addr(rx_buf, mapping),
1598 PCI_DMA_FROMDEVICE);
1599 skb_reserve(skb, pad);
1603 DP(NETIF_MSG_RX_ERR,
1604 "ERROR packet dropped because "
1605 "of alloc failure\n");
1606 fp->eth_q_stats.rx_skb_alloc_failed++;
1608 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1612 skb->protocol = eth_type_trans(skb, bp->dev);
1614 skb->ip_summed = CHECKSUM_NONE;
1616 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1617 skb->ip_summed = CHECKSUM_UNNECESSARY;
1619 fp->eth_q_stats.hw_csum_err++;
1623 skb_record_rx_queue(skb, fp->index);
1625 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1626 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1627 PARSING_FLAGS_VLAN))
1628 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1629 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1632 netif_receive_skb(skb);
1638 bd_cons = NEXT_RX_IDX(bd_cons);
1639 bd_prod = NEXT_RX_IDX(bd_prod);
1640 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1643 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1644 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1646 if (rx_pkt == budget)
1650 fp->rx_bd_cons = bd_cons;
1651 fp->rx_bd_prod = bd_prod_fw;
1652 fp->rx_comp_cons = sw_comp_cons;
1653 fp->rx_comp_prod = sw_comp_prod;
1655 /* Update producers */
1656 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1659 fp->rx_pkt += rx_pkt;
1665 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1667 struct bnx2x_fastpath *fp = fp_cookie;
1668 struct bnx2x *bp = fp->bp;
1669 int index = fp->index;
1671 /* Return here if interrupt is disabled */
1672 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1673 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1677 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1679 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1681 #ifdef BNX2X_STOP_ON_ERROR
1682 if (unlikely(bp->panic))
1686 prefetch(fp->rx_cons_sb);
1687 prefetch(fp->tx_cons_sb);
1688 prefetch(&fp->status_blk->c_status_block.status_block_index);
1689 prefetch(&fp->status_blk->u_status_block.status_block_index);
1691 napi_schedule(&bnx2x_fp(bp, index, napi));
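/* INTx/MSI interrupt handler: ack the status, schedule NAPI for fastpath work
 * and queue the slowpath task for attentions.
 */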
1696 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1698 struct bnx2x *bp = netdev_priv(dev_instance);
1699 u16 status = bnx2x_ack_int(bp);
1702 /* Return here if interrupt is shared and it's not for us */
1703 if (unlikely(status == 0)) {
1704 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1707 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1709 /* Return here if interrupt is disabled */
1710 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1715 #ifdef BNX2X_STOP_ON_ERROR
1716 if (unlikely(bp->panic))
1720 mask = 0x2 << bp->fp[0].sb_id;
1721 if (status & mask) {
1722 struct bnx2x_fastpath *fp = &bp->fp[0];
1724 prefetch(fp->rx_cons_sb);
1725 prefetch(fp->tx_cons_sb);
1726 prefetch(&fp->status_blk->c_status_block.status_block_index);
1727 prefetch(&fp->status_blk->u_status_block.status_block_index);
1729 napi_schedule(&bnx2x_fp(bp, 0, napi));
1735 if (unlikely(status & 0x1)) {
1736 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1744 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1750 /* end of fast path */
1752 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1757 * General service functions
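/* Acquire a driver/MCP HW resource lock, polling for up to five seconds. */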
1760 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1763 u32 resource_bit = (1 << resource);
1764 int func = BP_FUNC(bp);
1765 u32 hw_lock_control_reg;
1768 /* Validating that the resource is within range */
1769 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1771 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1772 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1777 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1779 hw_lock_control_reg =
1780 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1783 /* Validating that the resource is not already taken */
1784 lock_status = REG_RD(bp, hw_lock_control_reg);
1785 if (lock_status & resource_bit) {
1786 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1787 lock_status, resource_bit);
1791 /* Try for 5 seconds, every 5ms */
1792 for (cnt = 0; cnt < 1000; cnt++) {
1793 /* Try to acquire the lock */
1794 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1795 lock_status = REG_RD(bp, hw_lock_control_reg);
1796 if (lock_status & resource_bit)
1801 DP(NETIF_MSG_HW, "Timeout\n");
1805 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1808 u32 resource_bit = (1 << resource);
1809 int func = BP_FUNC(bp);
1810 u32 hw_lock_control_reg;
1812 /* Validating that the resource is within range */
1813 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1815 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1816 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1821 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1823 hw_lock_control_reg =
1824 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1827 /* Validating that the resource is currently taken */
1828 lock_status = REG_RD(bp, hw_lock_control_reg);
1829 if (!(lock_status & resource_bit)) {
1830 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1831 lock_status, resource_bit);
1835 REG_WR(bp, hw_lock_control_reg, resource_bit);
1839 /* HW Lock for shared dual port PHYs */
1840 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1842 mutex_lock(&bp->port.phy_mutex);
1844 if (bp->port.need_hw_lock)
1845 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1848 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1850 if (bp->port.need_hw_lock)
1851 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1853 mutex_unlock(&bp->port.phy_mutex);
1856 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1858 /* The GPIO should be swapped if swap register is set and active */
1859 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1860 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1861 int gpio_shift = gpio_num +
1862 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1863 u32 gpio_mask = (1 << gpio_shift);
1867 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1868 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1872 /* read GPIO value */
1873 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1875 /* get the requested pin value */
1876 if ((gpio_reg & gpio_mask) == gpio_mask)
1881 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1886 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1888 /* The GPIO should be swapped if swap register is set and active */
1889 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1890 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1891 int gpio_shift = gpio_num +
1892 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1893 u32 gpio_mask = (1 << gpio_shift);
1896 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1897 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1901 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1902 /* read GPIO and mask except the float bits */
1903 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1906 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1907 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1908 gpio_num, gpio_shift);
1909 /* clear FLOAT and set CLR */
1910 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1911 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1914 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1915 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1916 gpio_num, gpio_shift);
1917 /* clear FLOAT and set SET */
1918 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1919 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1922 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1923 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1924 gpio_num, gpio_shift);
1926 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1933 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1934 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1939 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1941 /* The GPIO should be swapped if swap register is set and active */
1942 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1943 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1944 int gpio_shift = gpio_num +
1945 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1946 u32 gpio_mask = (1 << gpio_shift);
1949 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1950 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1954 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1956 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1959 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1960 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1961 "output low\n", gpio_num, gpio_shift);
1962 /* clear SET and set CLR */
1963 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1964 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1967 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1968 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1969 "output high\n", gpio_num, gpio_shift);
1970 /* clear CLR and set SET */
1971 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1972 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1979 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1980 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
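/* Drive one of the shared SPIO pins (4..7) to output low, output high or
 * input (hi-Z).
 */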
1985 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1987 u32 spio_mask = (1 << spio_num);
1990 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1991 (spio_num > MISC_REGISTERS_SPIO_7)) {
1992 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1996 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1997 /* read SPIO and mask except the float bits */
1998 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2001 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2002 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2003 /* clear FLOAT and set CLR */
2004 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2005 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2008 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2009 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2010 /* clear FLOAT and set SET */
2011 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2012 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2015 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2016 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2018 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2025 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2026 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
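/* Translate the negotiated IEEE pause settings into ethtool advertising flags. */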
2031 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2033 switch (bp->link_vars.ieee_fc &
2034 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2035 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2036 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2040 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2041 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2045 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2046 bp->port.advertising |= ADVERTISED_Asym_Pause;
2050 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
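/* Report link status (speed, duplex, flow control) to the log and update the
 * net device carrier state.
 */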
2056 static void bnx2x_link_report(struct bnx2x *bp)
2058 if (bp->link_vars.link_up) {
2059 if (bp->state == BNX2X_STATE_OPEN)
2060 netif_carrier_on(bp->dev);
2061 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2063 printk("%d Mbps ", bp->link_vars.line_speed);
2065 if (bp->link_vars.duplex == DUPLEX_FULL)
2066 printk("full duplex");
2068 printk("half duplex");
2070 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2071 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2072 printk(", receive ");
2073 if (bp->link_vars.flow_ctrl &
2075 printk("& transmit ");
2077 printk(", transmit ");
2079 printk("flow control ON");
2083 } else { /* link_down */
2084 netif_carrier_off(bp->dev);
2085 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2089 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2091 if (!BP_NOMCP(bp)) {
2094 /* Initialize link parameters structure variables */
2095 /* It is recommended to turn off RX FC for jumbo frames
2096 for better performance */
2098 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2099 else if (bp->dev->mtu > 5000)
2100 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2102 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2104 bnx2x_acquire_phy_lock(bp);
2106 if (load_mode == LOAD_DIAG)
2107 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2109 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2111 bnx2x_release_phy_lock(bp);
2113 bnx2x_calc_fc_adv(bp);
2115 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2116 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2117 bnx2x_link_report(bp);
2122 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2126 static void bnx2x_link_set(struct bnx2x *bp)
2128 if (!BP_NOMCP(bp)) {
2129 bnx2x_acquire_phy_lock(bp);
2130 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2131 bnx2x_release_phy_lock(bp);
2133 bnx2x_calc_fc_adv(bp);
2135 BNX2X_ERR("Bootcode is missing - can not set link\n");
2138 static void bnx2x__link_reset(struct bnx2x *bp)
2140 if (!BP_NOMCP(bp)) {
2141 bnx2x_acquire_phy_lock(bp);
2142 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2143 bnx2x_release_phy_lock(bp);
2145 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2148 static u8 bnx2x_link_test(struct bnx2x *bp)
2152 bnx2x_acquire_phy_lock(bp);
2153 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2154 bnx2x_release_phy_lock(bp);
2159 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2161 u32 r_param = bp->link_vars.line_speed / 8;
2162 u32 fair_periodic_timeout_usec;
2165 memset(&(bp->cmng.rs_vars), 0,
2166 sizeof(struct rate_shaping_vars_per_port));
2167 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2169 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2170 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2172 /* this is the threshold below which no timer arming will occur;
2173 the 1.25 coefficient makes the threshold a little bigger
2174 than the real time, to compensate for timer inaccuracy */
2175 bp->cmng.rs_vars.rs_threshold =
2176 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2178 /* resolution of fairness timer */
2179 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2180 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2181 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2183 /* this is the threshold below which we won't arm the timer anymore */
2184 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2186 /* we multiply by 1e3/8 to get bytes/msec.
2187 We don't want the credits to pass a credit
2188 of the t_fair*FAIR_MEM (algorithm resolution) */
2189 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2190 /* since each tick is 4 usec */
2191 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
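/* Program per-VN rate shaping and fairness parameters into XSTORM internal
 * memory, based on the multi-function min/max BW configuration.
 */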
2194 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2196 struct rate_shaping_vars_per_vn m_rs_vn;
2197 struct fairness_vars_per_vn m_fair_vn;
2198 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2199 u16 vn_min_rate, vn_max_rate;
2202 /* If function is hidden - set min and max to zeroes */
2203 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2208 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2209 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2210 /* If fairness is enabled (not all min rates are zeroes) and
2211 if current min rate is zero - set it to 1.
2212 This is a requirement of the algorithm. */
2213 if (bp->vn_weight_sum && (vn_min_rate == 0))
2214 vn_min_rate = DEF_MIN_RATE;
2215 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2216 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2220 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2221 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2223 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2224 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2226 /* global vn counter - maximal Mbps for this vn */
2227 m_rs_vn.vn_counter.rate = vn_max_rate;
2229 /* quota - number of bytes transmitted in this period */
2230 m_rs_vn.vn_counter.quota =
2231 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
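/* Illustrative only: quota = rate * period / 8 bytes, so a 10000 Mbps
   vn_max_rate over the 100 usec rate-shaping period yields
   10000 * 100 / 8 = 125000 bytes per period */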
2233 if (bp->vn_weight_sum) {
2234 /* credit for each period of the fairness algorithm:
2235 number of bytes in T_FAIR (the VNs share the port rate).
2236 vn_weight_sum should not be larger than 10000, thus
2237 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2239 m_fair_vn.vn_credit_delta =
2240 max((u32)(vn_min_rate * (T_FAIR_COEF /
2241 (8 * bp->vn_weight_sum))),
2242 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
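/* The max() above keeps vn_credit_delta from dropping below twice the
   fair_threshold programmed in bnx2x_init_port_minmax(), presumably so
   that a VN with a very small min rate still accumulates enough credit
   per period to stay above the no-arm threshold */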
2243 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2244 m_fair_vn.vn_credit_delta);
2247 /* Store it to internal memory */
2248 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2249 REG_WR(bp, BAR_XSTRORM_INTMEM +
2250 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2251 ((u32 *)(&m_rs_vn))[i]);
2253 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2254 REG_WR(bp, BAR_XSTRORM_INTMEM +
2255 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2256 ((u32 *)(&m_fair_vn))[i]);
2260 /* This function is called upon link interrupt */
2261 static void bnx2x_link_attn(struct bnx2x *bp)
2263 /* Make sure that we are synced with the current statistics */
2264 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2266 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2268 if (bp->link_vars.link_up) {
2270 /* dropless flow control */
2271 if (CHIP_IS_E1H(bp)) {
2272 int port = BP_PORT(bp);
2273 u32 pause_enabled = 0;
2275 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2278 REG_WR(bp, BAR_USTRORM_INTMEM +
2279 USTORM_PAUSE_ENABLED_OFFSET(port),
2283 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2284 struct host_port_stats *pstats;
2286 pstats = bnx2x_sp(bp, port_stats);
2287 /* reset old bmac stats */
2288 memset(&(pstats->mac_stx[0]), 0,
2289 sizeof(struct mac_stx));
2291 if ((bp->state == BNX2X_STATE_OPEN) ||
2292 (bp->state == BNX2X_STATE_DISABLED))
2293 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2296 /* indicate link status */
2297 bnx2x_link_report(bp);
2300 int port = BP_PORT(bp);
2304 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2305 if (vn == BP_E1HVN(bp))
2308 func = ((vn << 1) | port);
2310 /* Set the attention towards other drivers
2312 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2313 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2316 if (bp->link_vars.link_up) {
2319 /* Init rate shaping and fairness contexts */
2320 bnx2x_init_port_minmax(bp);
2322 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2323 bnx2x_init_vn_minmax(bp, 2*vn + port);
2325 /* Store it to internal memory */
2327 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2328 REG_WR(bp, BAR_XSTRORM_INTMEM +
2329 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2330 ((u32 *)(&bp->cmng))[i]);
2335 static void bnx2x__link_status_update(struct bnx2x *bp)
2337 if (bp->state != BNX2X_STATE_OPEN)
2340 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2342 if (bp->link_vars.link_up)
2343 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2345 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2347 /* indicate link status */
2348 bnx2x_link_report(bp);
2351 static void bnx2x_pmf_update(struct bnx2x *bp)
2353 int port = BP_PORT(bp);
2357 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2359 /* enable nig attention */
2360 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2361 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2362 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2364 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2372 * General service functions
2375 /* the slow path queue is odd since completions arrive on the fastpath ring */
2376 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2377 u32 data_hi, u32 data_lo, int common)
2379 int func = BP_FUNC(bp);
2381 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2382 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2383 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2384 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2385 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2387 #ifdef BNX2X_STOP_ON_ERROR
2388 if (unlikely(bp->panic))
2392 spin_lock_bh(&bp->spq_lock);
2394 if (!bp->spq_left) {
2395 BNX2X_ERR("BUG! SPQ ring full!\n");
2396 spin_unlock_bh(&bp->spq_lock);
2401 /* CID needs the port number to be encoded in it */
2402 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2403 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2405 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2407 bp->spq_prod_bd->hdr.type |=
2408 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2410 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2411 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2415 if (bp->spq_prod_bd == bp->spq_last_bd) {
2416 bp->spq_prod_bd = bp->spq;
2417 bp->spq_prod_idx = 0;
2418 DP(NETIF_MSG_TIMER, "end of spq\n");
2425 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2428 spin_unlock_bh(&bp->spq_lock);
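/* Usage note (illustrative): the statistics code below posts its query
   ramrod as bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, data_hi,
   data_lo, 0); passing common = 1 additionally sets the COMMON ramrod
   bit in the SPE header as shown above */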
2432 /* acquire split MCP access lock register */
2433 static int bnx2x_acquire_alr(struct bnx2x *bp)
2440 for (j = 0; j < i*10; j++) {
2442 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2443 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2444 if (val & (1L << 31))
2449 if (!(val & (1L << 31))) {
2450 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2457 /* release split MCP access lock register */
2458 static void bnx2x_release_alr(struct bnx2x *bp)
2462 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2465 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2467 struct host_def_status_block *def_sb = bp->def_status_blk;
2470 barrier(); /* status block is written to by the chip */
2471 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2472 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2475 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2476 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2479 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2480 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2483 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2484 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2487 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2488 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
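/* Each comparison above checks a cached index against the copy the chip
   writes into the default status block; any mismatch means new slow-path
   work (attention bits or a per-storm completion) is pending */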
2495 * slow path service functions
2498 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2500 int port = BP_PORT(bp);
2501 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2502 COMMAND_REG_ATTN_BITS_SET);
2503 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2504 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2505 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2506 NIG_REG_MASK_INTERRUPT_PORT0;
2510 if (bp->attn_state & asserted)
2511 BNX2X_ERR("IGU ERROR\n");
2513 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2514 aeu_mask = REG_RD(bp, aeu_addr);
2516 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2517 aeu_mask, asserted);
2518 aeu_mask &= ~(asserted & 0xff);
2519 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2521 REG_WR(bp, aeu_addr, aeu_mask);
2522 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2524 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2525 bp->attn_state |= asserted;
2526 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2528 if (asserted & ATTN_HARD_WIRED_MASK) {
2529 if (asserted & ATTN_NIG_FOR_FUNC) {
2531 bnx2x_acquire_phy_lock(bp);
2533 /* save nig interrupt mask */
2534 nig_mask = REG_RD(bp, nig_int_mask_addr);
2535 REG_WR(bp, nig_int_mask_addr, 0);
2537 bnx2x_link_attn(bp);
2539 /* handle unicore attn? */
2541 if (asserted & ATTN_SW_TIMER_4_FUNC)
2542 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2544 if (asserted & GPIO_2_FUNC)
2545 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2547 if (asserted & GPIO_3_FUNC)
2548 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2550 if (asserted & GPIO_4_FUNC)
2551 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2554 if (asserted & ATTN_GENERAL_ATTN_1) {
2555 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2556 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2558 if (asserted & ATTN_GENERAL_ATTN_2) {
2559 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2560 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2562 if (asserted & ATTN_GENERAL_ATTN_3) {
2563 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2564 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2567 if (asserted & ATTN_GENERAL_ATTN_4) {
2568 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2569 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2571 if (asserted & ATTN_GENERAL_ATTN_5) {
2572 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2573 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2575 if (asserted & ATTN_GENERAL_ATTN_6) {
2576 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2577 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2581 } /* if hardwired */
2583 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2585 REG_WR(bp, hc_addr, asserted);
2587 /* now set back the mask */
2588 if (asserted & ATTN_NIG_FOR_FUNC) {
2589 REG_WR(bp, nig_int_mask_addr, nig_mask);
2590 bnx2x_release_phy_lock(bp);
2594 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2596 int port = BP_PORT(bp);
2600 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2601 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2603 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2605 val = REG_RD(bp, reg_offset);
2606 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2607 REG_WR(bp, reg_offset, val);
2609 BNX2X_ERR("SPIO5 hw attention\n");
2611 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2612 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2613 /* Fan failure attention */
2615 /* The PHY reset is controlled by GPIO 1 */
2616 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2617 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2618 /* Low power mode is controlled by GPIO 2 */
2619 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2620 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2621 /* mark the failure */
2622 bp->link_params.ext_phy_config &=
2623 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2624 bp->link_params.ext_phy_config |=
2625 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2627 dev_info.port_hw_config[port].
2628 external_phy_config,
2629 bp->link_params.ext_phy_config);
2630 /* log the failure */
2631 printk(KERN_ERR PFX "Fan Failure on Network"
2632 " Controller %s has caused the driver to"
2633 " shutdown the card to prevent permanent"
2634 " damage. Please contact Dell Support for"
2635 " assistance\n", bp->dev->name);
2643 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2644 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2645 bnx2x_acquire_phy_lock(bp);
2646 bnx2x_handle_module_detect_int(&bp->link_params);
2647 bnx2x_release_phy_lock(bp);
2650 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2652 val = REG_RD(bp, reg_offset);
2653 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2654 REG_WR(bp, reg_offset, val);
2656 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2657 (attn & HW_INTERRUT_ASSERT_SET_0));
2662 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2666 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2668 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2669 BNX2X_ERR("DB hw attention 0x%x\n", val);
2670 /* DORQ discard attention */
2672 BNX2X_ERR("FATAL error from DORQ\n");
2675 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2677 int port = BP_PORT(bp);
2680 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2681 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2683 val = REG_RD(bp, reg_offset);
2684 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2685 REG_WR(bp, reg_offset, val);
2687 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2688 (attn & HW_INTERRUT_ASSERT_SET_1));
2693 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2697 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2699 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2700 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2701 /* CFC error attention */
2703 BNX2X_ERR("FATAL error from CFC\n");
2706 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2708 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2709 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2710 /* RQ_USDMDP_FIFO_OVERFLOW */
2712 BNX2X_ERR("FATAL error from PXP\n");
2715 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2717 int port = BP_PORT(bp);
2720 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2721 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2723 val = REG_RD(bp, reg_offset);
2724 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2725 REG_WR(bp, reg_offset, val);
2727 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2728 (attn & HW_INTERRUT_ASSERT_SET_2));
2733 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2737 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2739 if (attn & BNX2X_PMF_LINK_ASSERT) {
2740 int func = BP_FUNC(bp);
2742 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2743 bnx2x__link_status_update(bp);
2744 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2746 bnx2x_pmf_update(bp);
2748 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2750 BNX2X_ERR("MC assert!\n");
2751 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2752 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2753 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2754 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2757 } else if (attn & BNX2X_MCP_ASSERT) {
2759 BNX2X_ERR("MCP assert!\n");
2760 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2764 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2767 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2768 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2769 if (attn & BNX2X_GRC_TIMEOUT) {
2770 val = CHIP_IS_E1H(bp) ?
2771 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2772 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2774 if (attn & BNX2X_GRC_RSV) {
2775 val = CHIP_IS_E1H(bp) ?
2776 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2777 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2779 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2783 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2785 struct attn_route attn;
2786 struct attn_route group_mask;
2787 int port = BP_PORT(bp);
2793 /* need to take the HW lock because the MCP or the other port
2794 might also try to handle this event */
2795 bnx2x_acquire_alr(bp);
2797 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2798 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2799 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2800 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2801 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2802 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2804 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2805 if (deasserted & (1 << index)) {
2806 group_mask = bp->attn_group[index];
2808 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2809 index, group_mask.sig[0], group_mask.sig[1],
2810 group_mask.sig[2], group_mask.sig[3]);
2812 bnx2x_attn_int_deasserted3(bp,
2813 attn.sig[3] & group_mask.sig[3]);
2814 bnx2x_attn_int_deasserted1(bp,
2815 attn.sig[1] & group_mask.sig[1]);
2816 bnx2x_attn_int_deasserted2(bp,
2817 attn.sig[2] & group_mask.sig[2]);
2818 bnx2x_attn_int_deasserted0(bp,
2819 attn.sig[0] & group_mask.sig[0]);
2821 if ((attn.sig[0] & group_mask.sig[0] &
2822 HW_PRTY_ASSERT_SET_0) ||
2823 (attn.sig[1] & group_mask.sig[1] &
2824 HW_PRTY_ASSERT_SET_1) ||
2825 (attn.sig[2] & group_mask.sig[2] &
2826 HW_PRTY_ASSERT_SET_2))
2827 BNX2X_ERR("FATAL HW block parity attention\n");
2831 bnx2x_release_alr(bp);
2833 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2836 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2838 REG_WR(bp, reg_addr, val);
2840 if (~bp->attn_state & deasserted)
2841 BNX2X_ERR("IGU ERROR\n");
2843 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2844 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2846 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2847 aeu_mask = REG_RD(bp, reg_addr);
2849 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2850 aeu_mask, deasserted);
2851 aeu_mask |= (deasserted & 0xff);
2852 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2854 REG_WR(bp, reg_addr, aeu_mask);
2855 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2857 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2858 bp->attn_state &= ~deasserted;
2859 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2862 static void bnx2x_attn_int(struct bnx2x *bp)
2864 /* read local copy of bits */
2865 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2867 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2869 u32 attn_state = bp->attn_state;
2871 /* look for changed bits */
2872 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2873 u32 deasserted = ~attn_bits & attn_ack & attn_state;
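/* Edge detection (illustrative): a bit is newly asserted when it is set
   in attn_bits but not yet acked or tracked in attn_state, and newly
   deasserted when it has cleared while still acked and tracked; e.g.
   bits = 0x2, ack = 0x0, state = 0x0 gives asserted = 0x2, deasserted = 0x0 */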
2876 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2877 attn_bits, attn_ack, asserted, deasserted);
2879 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2880 BNX2X_ERR("BAD attention state\n");
2882 /* handle bits that were raised */
2884 bnx2x_attn_int_asserted(bp, asserted);
2887 bnx2x_attn_int_deasserted(bp, deasserted);
2890 static void bnx2x_sp_task(struct work_struct *work)
2892 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2896 /* Return here if interrupt is disabled */
2897 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2898 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2902 status = bnx2x_update_dsb_idx(bp);
2903 /* if (status == 0) */
2904 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2906 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2912 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2914 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2916 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2918 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2920 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2925 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2927 struct net_device *dev = dev_instance;
2928 struct bnx2x *bp = netdev_priv(dev);
2930 /* Return here if interrupt is disabled */
2931 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2932 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2936 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2938 #ifdef BNX2X_STOP_ON_ERROR
2939 if (unlikely(bp->panic))
2943 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2948 /* end of slow path */
2952 /****************************************************************************
2954 ****************************************************************************/
2956 /* sum[hi:lo] += add[hi:lo] */
2957 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2960 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2963 /* difference = minuend - subtrahend */
2964 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2966 if (m_lo < s_lo) { \
2968 d_hi = m_hi - s_hi; \
2970 /* we can borrow 1 from the high word */ \
2972 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2974 /* m_hi <= s_hi */ \
2979 /* m_lo >= s_lo */ \
2980 if (m_hi < s_hi) { \
2984 /* m_hi >= s_hi */ \
2985 d_hi = m_hi - s_hi; \
2986 d_lo = m_lo - s_lo; \
2991 #define UPDATE_STAT64(s, t) \
2993 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2994 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2995 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2996 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2997 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2998 pstats->mac_stx[1].t##_lo, diff.lo); \
3001 #define UPDATE_STAT64_NIG(s, t) \
3003 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3004 diff.lo, new->s##_lo, old->s##_lo); \
3005 ADD_64(estats->t##_hi, diff.hi, \
3006 estats->t##_lo, diff.lo); \
3009 /* sum[hi:lo] += add */
3010 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3013 s_hi += (s_lo < a) ? 1 : 0; \
3016 #define UPDATE_EXTEND_STAT(s) \
3018 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3019 pstats->mac_stx[1].s##_lo, \
3023 #define UPDATE_EXTEND_TSTAT(s, t) \
3025 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3026 old_tclient->s = tclient->s; \
3027 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3030 #define UPDATE_EXTEND_USTAT(s, t) \
3032 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3033 old_uclient->s = uclient->s; \
3034 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3037 #define UPDATE_EXTEND_XSTAT(s, t) \
3039 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3040 old_xclient->s = xclient->s; \
3041 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3044 /* minuend -= subtrahend */
3045 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3047 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3050 /* minuend[hi:lo] -= subtrahend */
3051 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3053 SUB_64(m_hi, 0, m_lo, s); \
3056 #define SUB_EXTEND_USTAT(s, t) \
3058 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3059 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3063 * General service functions
3066 static inline long bnx2x_hilo(u32 *hiref)
3068 u32 lo = *(hiref + 1);
3069 #if (BITS_PER_LONG == 64)
3072 return HILO_U64(hi, lo);
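/* bnx2x_hilo() folds the {hi, lo} u32 pair kept for 64-bit counters
   (hi word first, lo word right after it) into a single long; on 32-bit
   kernels only the low 32 bits fit in a long */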
3079 * Init service functions
3082 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3084 if (!bp->stats_pending) {
3085 struct eth_query_ramrod_data ramrod_data = {0};
3088 ramrod_data.drv_counter = bp->stats_counter++;
3089 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3090 for_each_queue(bp, i)
3091 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3093 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3094 ((u32 *)&ramrod_data)[1],
3095 ((u32 *)&ramrod_data)[0], 0);
3097 /* stats ramrod has its own slot on the spq */
3099 bp->stats_pending = 1;
3104 static void bnx2x_stats_init(struct bnx2x *bp)
3106 int port = BP_PORT(bp);
3109 bp->stats_pending = 0;
3110 bp->executer_idx = 0;
3111 bp->stats_counter = 0;
3115 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3117 bp->port.port_stx = 0;
3118 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3120 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3121 bp->port.old_nig_stats.brb_discard =
3122 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3123 bp->port.old_nig_stats.brb_truncate =
3124 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3125 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3126 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3127 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3128 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3130 /* function stats */
3131 for_each_queue(bp, i) {
3132 struct bnx2x_fastpath *fp = &bp->fp[i];
3134 memset(&fp->old_tclient, 0,
3135 sizeof(struct tstorm_per_client_stats));
3136 memset(&fp->old_uclient, 0,
3137 sizeof(struct ustorm_per_client_stats));
3138 memset(&fp->old_xclient, 0,
3139 sizeof(struct xstorm_per_client_stats));
3140 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3143 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3144 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3146 bp->stats_state = STATS_STATE_DISABLED;
3147 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3148 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3151 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3153 struct dmae_command *dmae = &bp->stats_dmae;
3154 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3156 *stats_comp = DMAE_COMP_VAL;
3157 if (CHIP_REV_IS_SLOW(bp))
3161 if (bp->executer_idx) {
3162 int loader_idx = PMF_DMAE_C(bp);
3164 memset(dmae, 0, sizeof(struct dmae_command));
3166 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3167 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3168 DMAE_CMD_DST_RESET |
3170 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3172 DMAE_CMD_ENDIANITY_DW_SWAP |
3174 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3176 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3177 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3178 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3179 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3180 sizeof(struct dmae_command) *
3181 (loader_idx + 1)) >> 2;
3182 dmae->dst_addr_hi = 0;
3183 dmae->len = sizeof(struct dmae_command) >> 2;
3186 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3187 dmae->comp_addr_hi = 0;
3191 bnx2x_post_dmae(bp, dmae, loader_idx);
3193 } else if (bp->func_stx) {
3195 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3199 static int bnx2x_stats_comp(struct bnx2x *bp)
3201 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3205 while (*stats_comp != DMAE_COMP_VAL) {
3207 BNX2X_ERR("timeout waiting for stats finished\n");
3217 * Statistics service functions
3220 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3222 struct dmae_command *dmae;
3224 int loader_idx = PMF_DMAE_C(bp);
3225 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3228 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3229 BNX2X_ERR("BUG!\n");
3233 bp->executer_idx = 0;
3235 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3237 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3239 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3241 DMAE_CMD_ENDIANITY_DW_SWAP |
3243 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3244 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3246 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3247 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3248 dmae->src_addr_lo = bp->port.port_stx >> 2;
3249 dmae->src_addr_hi = 0;
3250 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3251 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3252 dmae->len = DMAE_LEN32_RD_MAX;
3253 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3254 dmae->comp_addr_hi = 0;
3257 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3258 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3259 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3260 dmae->src_addr_hi = 0;
3261 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3262 DMAE_LEN32_RD_MAX * 4);
3263 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3264 DMAE_LEN32_RD_MAX * 4);
3265 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3266 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3267 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3268 dmae->comp_val = DMAE_COMP_VAL;
3271 bnx2x_hw_stats_post(bp);
3272 bnx2x_stats_comp(bp);
3275 static void bnx2x_port_stats_init(struct bnx2x *bp)
3277 struct dmae_command *dmae;
3278 int port = BP_PORT(bp);
3279 int vn = BP_E1HVN(bp);
3281 int loader_idx = PMF_DMAE_C(bp);
3283 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3286 if (!bp->link_vars.link_up || !bp->port.pmf) {
3287 BNX2X_ERR("BUG!\n");
3291 bp->executer_idx = 0;
3294 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3295 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3296 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3298 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3300 DMAE_CMD_ENDIANITY_DW_SWAP |
3302 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3303 (vn << DMAE_CMD_E1HVN_SHIFT));
3305 if (bp->port.port_stx) {
3307 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3308 dmae->opcode = opcode;
3309 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3310 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3311 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3312 dmae->dst_addr_hi = 0;
3313 dmae->len = sizeof(struct host_port_stats) >> 2;
3314 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3315 dmae->comp_addr_hi = 0;
3321 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322 dmae->opcode = opcode;
3323 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3324 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3325 dmae->dst_addr_lo = bp->func_stx >> 2;
3326 dmae->dst_addr_hi = 0;
3327 dmae->len = sizeof(struct host_func_stats) >> 2;
3328 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329 dmae->comp_addr_hi = 0;
3334 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3335 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3336 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3338 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3340 DMAE_CMD_ENDIANITY_DW_SWAP |
3342 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3343 (vn << DMAE_CMD_E1HVN_SHIFT));
3345 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3347 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3348 NIG_REG_INGRESS_BMAC0_MEM);
3350 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3351 BIGMAC_REGISTER_TX_STAT_GTBYT */
3352 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353 dmae->opcode = opcode;
3354 dmae->src_addr_lo = (mac_addr +
3355 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3356 dmae->src_addr_hi = 0;
3357 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3358 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3359 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3360 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3361 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3362 dmae->comp_addr_hi = 0;
3365 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3366 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3367 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368 dmae->opcode = opcode;
3369 dmae->src_addr_lo = (mac_addr +
3370 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3371 dmae->src_addr_hi = 0;
3372 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3373 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3374 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3375 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3376 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3377 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3378 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3379 dmae->comp_addr_hi = 0;
3382 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3384 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3386 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3387 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3388 dmae->opcode = opcode;
3389 dmae->src_addr_lo = (mac_addr +
3390 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3391 dmae->src_addr_hi = 0;
3392 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3393 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3394 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3395 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3396 dmae->comp_addr_hi = 0;
3399 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3400 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3401 dmae->opcode = opcode;
3402 dmae->src_addr_lo = (mac_addr +
3403 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3404 dmae->src_addr_hi = 0;
3405 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3406 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3407 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3408 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3410 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3411 dmae->comp_addr_hi = 0;
3414 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3415 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3416 dmae->opcode = opcode;
3417 dmae->src_addr_lo = (mac_addr +
3418 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3419 dmae->src_addr_hi = 0;
3420 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3421 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3422 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3423 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3424 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3425 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3426 dmae->comp_addr_hi = 0;
3431 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3432 dmae->opcode = opcode;
3433 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3434 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3435 dmae->src_addr_hi = 0;
3436 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3437 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3438 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3439 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3440 dmae->comp_addr_hi = 0;
3443 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3444 dmae->opcode = opcode;
3445 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3446 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3447 dmae->src_addr_hi = 0;
3448 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3449 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3450 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3451 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3452 dmae->len = (2*sizeof(u32)) >> 2;
3453 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3454 dmae->comp_addr_hi = 0;
3457 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3458 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3459 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3460 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3462 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3464 DMAE_CMD_ENDIANITY_DW_SWAP |
3466 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3467 (vn << DMAE_CMD_E1HVN_SHIFT));
3468 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3469 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3470 dmae->src_addr_hi = 0;
3471 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3472 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3473 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3474 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3475 dmae->len = (2*sizeof(u32)) >> 2;
3476 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3477 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3478 dmae->comp_val = DMAE_COMP_VAL;
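/* Completion chaining (as set up above): each intermediate command
   completes by writing to a dmae_reg_go_c register, which presumably
   kicks off the next prepared command, while only this last command
   writes DMAE_COMP_VAL to stats_comp, the word bnx2x_stats_comp()
   polls on */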
3483 static void bnx2x_func_stats_init(struct bnx2x *bp)
3485 struct dmae_command *dmae = &bp->stats_dmae;
3486 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3489 if (!bp->func_stx) {
3490 BNX2X_ERR("BUG!\n");
3494 bp->executer_idx = 0;
3495 memset(dmae, 0, sizeof(struct dmae_command));
3497 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3498 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3499 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3501 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3503 DMAE_CMD_ENDIANITY_DW_SWAP |
3505 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3506 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3507 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3508 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3509 dmae->dst_addr_lo = bp->func_stx >> 2;
3510 dmae->dst_addr_hi = 0;
3511 dmae->len = sizeof(struct host_func_stats) >> 2;
3512 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3513 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3514 dmae->comp_val = DMAE_COMP_VAL;
3519 static void bnx2x_stats_start(struct bnx2x *bp)
3522 bnx2x_port_stats_init(bp);
3524 else if (bp->func_stx)
3525 bnx2x_func_stats_init(bp);
3527 bnx2x_hw_stats_post(bp);
3528 bnx2x_storm_stats_post(bp);
3531 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3533 bnx2x_stats_comp(bp);
3534 bnx2x_stats_pmf_update(bp);
3535 bnx2x_stats_start(bp);
3538 static void bnx2x_stats_restart(struct bnx2x *bp)
3540 bnx2x_stats_comp(bp);
3541 bnx2x_stats_start(bp);
3544 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3546 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3547 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3548 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3554 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3555 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3556 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3557 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3558 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3559 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3560 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3561 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3562 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3563 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3564 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3565 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3566 UPDATE_STAT64(tx_stat_gt127,
3567 tx_stat_etherstatspkts65octetsto127octets);
3568 UPDATE_STAT64(tx_stat_gt255,
3569 tx_stat_etherstatspkts128octetsto255octets);
3570 UPDATE_STAT64(tx_stat_gt511,
3571 tx_stat_etherstatspkts256octetsto511octets);
3572 UPDATE_STAT64(tx_stat_gt1023,
3573 tx_stat_etherstatspkts512octetsto1023octets);
3574 UPDATE_STAT64(tx_stat_gt1518,
3575 tx_stat_etherstatspkts1024octetsto1522octets);
3576 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3577 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3578 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3579 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3580 UPDATE_STAT64(tx_stat_gterr,
3581 tx_stat_dot3statsinternalmactransmiterrors);
3582 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3584 estats->pause_frames_received_hi =
3585 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3586 estats->pause_frames_received_lo =
3587 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3589 estats->pause_frames_sent_hi =
3590 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3591 estats->pause_frames_sent_lo =
3592 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3595 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3597 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3598 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3599 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3601 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3602 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3603 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3604 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3605 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3606 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3607 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3608 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3609 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3610 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3611 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3612 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3613 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3614 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3615 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3616 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3617 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3618 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3619 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3620 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3621 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3622 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3623 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3624 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3626 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3627 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3628 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3629 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3630 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3631 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3633 estats->pause_frames_received_hi =
3634 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3635 estats->pause_frames_received_lo =
3636 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3637 ADD_64(estats->pause_frames_received_hi,
3638 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3639 estats->pause_frames_received_lo,
3640 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3642 estats->pause_frames_sent_hi =
3643 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3644 estats->pause_frames_sent_lo =
3645 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3646 ADD_64(estats->pause_frames_sent_hi,
3647 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3648 estats->pause_frames_sent_lo,
3649 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3652 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3654 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3655 struct nig_stats *old = &(bp->port.old_nig_stats);
3656 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3657 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3664 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3665 bnx2x_bmac_stats_update(bp);
3667 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3668 bnx2x_emac_stats_update(bp);
3670 else { /* unreached */
3671 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3675 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3676 new->brb_discard - old->brb_discard);
3677 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3678 new->brb_truncate - old->brb_truncate);
3680 UPDATE_STAT64_NIG(egress_mac_pkt0,
3681 etherstatspkts1024octetsto1522octets);
3682 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3684 memcpy(old, new, sizeof(struct nig_stats));
3686 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3687 sizeof(struct mac_stx));
3688 estats->brb_drop_hi = pstats->brb_drop_hi;
3689 estats->brb_drop_lo = pstats->brb_drop_lo;
3691 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3693 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3694 if (nig_timer_max != estats->nig_timer_max) {
3695 estats->nig_timer_max = nig_timer_max;
3696 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3702 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3704 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3705 struct tstorm_per_port_stats *tport =
3706 &stats->tstorm_common.port_statistics;
3707 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3708 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3711 memset(&(fstats->total_bytes_received_hi), 0,
3712 sizeof(struct host_func_stats) - 2*sizeof(u32));
3713 estats->error_bytes_received_hi = 0;
3714 estats->error_bytes_received_lo = 0;
3715 estats->etherstatsoverrsizepkts_hi = 0;
3716 estats->etherstatsoverrsizepkts_lo = 0;
3717 estats->no_buff_discard_hi = 0;
3718 estats->no_buff_discard_lo = 0;
3720 for_each_queue(bp, i) {
3721 struct bnx2x_fastpath *fp = &bp->fp[i];
3722 int cl_id = fp->cl_id;
3723 struct tstorm_per_client_stats *tclient =
3724 &stats->tstorm_common.client_statistics[cl_id];
3725 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3726 struct ustorm_per_client_stats *uclient =
3727 &stats->ustorm_common.client_statistics[cl_id];
3728 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3729 struct xstorm_per_client_stats *xclient =
3730 &stats->xstorm_common.client_statistics[cl_id];
3731 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3732 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3735 /* are storm stats valid? */
3736 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3737 bp->stats_counter) {
3738 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3739 " xstorm counter (%d) != stats_counter (%d)\n",
3740 i, xclient->stats_counter, bp->stats_counter);
3743 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3744 bp->stats_counter) {
3745 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3746 " tstorm counter (%d) != stats_counter (%d)\n",
3747 i, tclient->stats_counter, bp->stats_counter);
3750 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3751 bp->stats_counter) {
3752 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3753 " ustorm counter (%d) != stats_counter (%d)\n",
3754 i, uclient->stats_counter, bp->stats_counter);
3758 qstats->total_bytes_received_hi =
3759 qstats->valid_bytes_received_hi =
3760 le32_to_cpu(tclient->total_rcv_bytes.hi);
3761 qstats->total_bytes_received_lo =
3762 qstats->valid_bytes_received_lo =
3763 le32_to_cpu(tclient->total_rcv_bytes.lo);
3765 qstats->error_bytes_received_hi =
3766 le32_to_cpu(tclient->rcv_error_bytes.hi);
3767 qstats->error_bytes_received_lo =
3768 le32_to_cpu(tclient->rcv_error_bytes.lo);
3770 ADD_64(qstats->total_bytes_received_hi,
3771 qstats->error_bytes_received_hi,
3772 qstats->total_bytes_received_lo,
3773 qstats->error_bytes_received_lo);
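/* As set up above, total_bytes_received ends up as the tstorm
   total_rcv_bytes (treated as the valid byte count) plus the error
   bytes, i.e. everything seen on the wire for this queue */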
3775 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3776 total_unicast_packets_received);
3777 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3778 total_multicast_packets_received);
3779 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3780 total_broadcast_packets_received);
3781 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3782 etherstatsoverrsizepkts);
3783 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3785 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3786 total_unicast_packets_received);
3787 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3788 total_multicast_packets_received);
3789 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3790 total_broadcast_packets_received);
3791 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3792 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3793 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3795 qstats->total_bytes_transmitted_hi =
3796 le32_to_cpu(xclient->total_sent_bytes.hi);
3797 qstats->total_bytes_transmitted_lo =
3798 le32_to_cpu(xclient->total_sent_bytes.lo);
3800 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3801 total_unicast_packets_transmitted);
3802 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3803 total_multicast_packets_transmitted);
3804 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3805 total_broadcast_packets_transmitted);
3807 old_tclient->checksum_discard = tclient->checksum_discard;
3808 old_tclient->ttl0_discard = tclient->ttl0_discard;
3810 ADD_64(fstats->total_bytes_received_hi,
3811 qstats->total_bytes_received_hi,
3812 fstats->total_bytes_received_lo,
3813 qstats->total_bytes_received_lo);
3814 ADD_64(fstats->total_bytes_transmitted_hi,
3815 qstats->total_bytes_transmitted_hi,
3816 fstats->total_bytes_transmitted_lo,
3817 qstats->total_bytes_transmitted_lo);
3818 ADD_64(fstats->total_unicast_packets_received_hi,
3819 qstats->total_unicast_packets_received_hi,
3820 fstats->total_unicast_packets_received_lo,
3821 qstats->total_unicast_packets_received_lo);
3822 ADD_64(fstats->total_multicast_packets_received_hi,
3823 qstats->total_multicast_packets_received_hi,
3824 fstats->total_multicast_packets_received_lo,
3825 qstats->total_multicast_packets_received_lo);
3826 ADD_64(fstats->total_broadcast_packets_received_hi,
3827 qstats->total_broadcast_packets_received_hi,
3828 fstats->total_broadcast_packets_received_lo,
3829 qstats->total_broadcast_packets_received_lo);
3830 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3831 qstats->total_unicast_packets_transmitted_hi,
3832 fstats->total_unicast_packets_transmitted_lo,
3833 qstats->total_unicast_packets_transmitted_lo);
3834 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3835 qstats->total_multicast_packets_transmitted_hi,
3836 fstats->total_multicast_packets_transmitted_lo,
3837 qstats->total_multicast_packets_transmitted_lo);
3838 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3839 qstats->total_broadcast_packets_transmitted_hi,
3840 fstats->total_broadcast_packets_transmitted_lo,
3841 qstats->total_broadcast_packets_transmitted_lo);
3842 ADD_64(fstats->valid_bytes_received_hi,
3843 qstats->valid_bytes_received_hi,
3844 fstats->valid_bytes_received_lo,
3845 qstats->valid_bytes_received_lo);
3847 ADD_64(estats->error_bytes_received_hi,
3848 qstats->error_bytes_received_hi,
3849 estats->error_bytes_received_lo,
3850 qstats->error_bytes_received_lo);
3851 ADD_64(estats->etherstatsoverrsizepkts_hi,
3852 qstats->etherstatsoverrsizepkts_hi,
3853 estats->etherstatsoverrsizepkts_lo,
3854 qstats->etherstatsoverrsizepkts_lo);
3855 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3856 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3859 ADD_64(fstats->total_bytes_received_hi,
3860 estats->rx_stat_ifhcinbadoctets_hi,
3861 fstats->total_bytes_received_lo,
3862 estats->rx_stat_ifhcinbadoctets_lo);
3864 memcpy(estats, &(fstats->total_bytes_received_hi),
3865 sizeof(struct host_func_stats) - 2*sizeof(u32));
3867 ADD_64(estats->etherstatsoverrsizepkts_hi,
3868 estats->rx_stat_dot3statsframestoolong_hi,
3869 estats->etherstatsoverrsizepkts_lo,
3870 estats->rx_stat_dot3statsframestoolong_lo);
3871 ADD_64(estats->error_bytes_received_hi,
3872 estats->rx_stat_ifhcinbadoctets_hi,
3873 estats->error_bytes_received_lo,
3874 estats->rx_stat_ifhcinbadoctets_lo);
3877 estats->mac_filter_discard =
3878 le32_to_cpu(tport->mac_filter_discard);
3879 estats->xxoverflow_discard =
3880 le32_to_cpu(tport->xxoverflow_discard);
3881 estats->brb_truncate_discard =
3882 le32_to_cpu(tport->brb_truncate_discard);
3883 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3886 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3888 bp->stats_pending = 0;
3893 static void bnx2x_net_stats_update(struct bnx2x *bp)
3895 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3896 struct net_device_stats *nstats = &bp->dev->stats;
3899 nstats->rx_packets =
3900 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3901 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3902 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3904 nstats->tx_packets =
3905 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3906 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3907 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3909 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3911 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3913 nstats->rx_dropped = estats->mac_discard;
3914 for_each_queue(bp, i)
3915 nstats->rx_dropped +=
3916 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3918 nstats->tx_dropped = 0;
3921 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3923 nstats->collisions =
3924 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3926 nstats->rx_length_errors =
3927 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3928 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3929 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3930 bnx2x_hilo(&estats->brb_truncate_hi);
3931 nstats->rx_crc_errors =
3932 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3933 nstats->rx_frame_errors =
3934 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3935 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3936 nstats->rx_missed_errors = estats->xxoverflow_discard;
3938 nstats->rx_errors = nstats->rx_length_errors +
3939 nstats->rx_over_errors +
3940 nstats->rx_crc_errors +
3941 nstats->rx_frame_errors +
3942 nstats->rx_fifo_errors +
3943 nstats->rx_missed_errors;
3945 nstats->tx_aborted_errors =
3946 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3947 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3948 nstats->tx_carrier_errors =
3949 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3950 nstats->tx_fifo_errors = 0;
3951 nstats->tx_heartbeat_errors = 0;
3952 nstats->tx_window_errors = 0;
3954 nstats->tx_errors = nstats->tx_aborted_errors +
3955 nstats->tx_carrier_errors +
3956 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3959 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3961 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3964 estats->driver_xoff = 0;
3965 estats->rx_err_discard_pkt = 0;
3966 estats->rx_skb_alloc_failed = 0;
3967 estats->hw_csum_err = 0;
3968 for_each_queue(bp, i) {
3969 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3971 estats->driver_xoff += qstats->driver_xoff;
3972 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3973 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3974 estats->hw_csum_err += qstats->hw_csum_err;
3978 static void bnx2x_stats_update(struct bnx2x *bp)
3980 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3982 if (*stats_comp != DMAE_COMP_VAL)
3986 bnx2x_hw_stats_update(bp);
3988 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3989 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3994 bnx2x_net_stats_update(bp);
3995 bnx2x_drv_stats_update(bp);
3997 if (bp->msglevel & NETIF_MSG_TIMER) {
3998 struct tstorm_per_client_stats *old_tclient =
3999 &bp->fp->old_tclient;
4000 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4001 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4002 struct net_device_stats *nstats = &bp->dev->stats;
4005 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4006 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4008 bnx2x_tx_avail(bp->fp),
4009 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4010 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4012 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4013 bp->fp->rx_comp_cons),
4014 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4015 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4016 "brb truncate %u\n",
4017 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4018 qstats->driver_xoff,
4019 estats->brb_drop_lo, estats->brb_truncate_lo);
4020 printk(KERN_DEBUG "tstats: checksum_discard %u "
4021 "packets_too_big_discard %lu no_buff_discard %lu "
4022 "mac_discard %u mac_filter_discard %u "
4023 "xxovrflow_discard %u brb_truncate_discard %u "
4024 "ttl0_discard %u\n",
4025 le32_to_cpu(old_tclient->checksum_discard),
4026 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4027 bnx2x_hilo(&qstats->no_buff_discard_hi),
4028 estats->mac_discard, estats->mac_filter_discard,
4029 estats->xxoverflow_discard, estats->brb_truncate_discard,
4030 le32_to_cpu(old_tclient->ttl0_discard));
4032 for_each_queue(bp, i) {
4033 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4034 bnx2x_fp(bp, i, tx_pkt),
4035 bnx2x_fp(bp, i, rx_pkt),
4036 bnx2x_fp(bp, i, rx_calls));
4040 bnx2x_hw_stats_post(bp);
4041 bnx2x_storm_stats_post(bp);
4044 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4046 struct dmae_command *dmae;
4048 int loader_idx = PMF_DMAE_C(bp);
4049 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4051 bp->executer_idx = 0;
4053 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4055 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4057 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4059 DMAE_CMD_ENDIANITY_DW_SWAP |
4061 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4062 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4064 if (bp->port.port_stx) {
4066 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4068 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4070 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4071 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4072 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4073 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4074 dmae->dst_addr_hi = 0;
4075 dmae->len = sizeof(struct host_port_stats) >> 2;
4077 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4078 dmae->comp_addr_hi = 0;
4081 dmae->comp_addr_lo =
4082 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4083 dmae->comp_addr_hi =
4084 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4085 dmae->comp_val = DMAE_COMP_VAL;
4093 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4095 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4096 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4097 dmae->dst_addr_lo = bp->func_stx >> 2;
4098 dmae->dst_addr_hi = 0;
4099 dmae->len = sizeof(struct host_func_stats) >> 2;
4100 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4101 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4102 dmae->comp_val = DMAE_COMP_VAL;
4108 static void bnx2x_stats_stop(struct bnx2x *bp)
4112 bnx2x_stats_comp(bp);
4115 update = (bnx2x_hw_stats_update(bp) == 0);
4117 update |= (bnx2x_storm_stats_update(bp) == 0);
4120 bnx2x_net_stats_update(bp);
4123 bnx2x_port_stats_stop(bp);
4125 bnx2x_hw_stats_post(bp);
4126 bnx2x_stats_comp(bp);
4130 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4134 static const struct {
4135 void (*action)(struct bnx2x *bp);
4136 enum bnx2x_stats_state next_state;
4137 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4140 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4141 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4142 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4143 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4146 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4147 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4148 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4149 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4153 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4155 enum bnx2x_stats_state state = bp->stats_state;
4157 bnx2x_stats_stm[state][event].action(bp);
4158 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4160 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4161 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4162 state, event, bp->stats_state);
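/* Periodic driver timer: services Rx on queue 0 in poll mode, maintains the
 * driver/MCP heartbeat pulse, triggers a statistics update while the device
 * is up and re-arms itself. */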
4165 static void bnx2x_timer(unsigned long data)
4167 struct bnx2x *bp = (struct bnx2x *) data;
4169 if (!netif_running(bp->dev))
4172 if (atomic_read(&bp->intr_sem) != 0)
4176 struct bnx2x_fastpath *fp = &bp->fp[0];
4180 rc = bnx2x_rx_int(fp, 1000);
4183 if (!BP_NOMCP(bp)) {
4184 int func = BP_FUNC(bp);
4188 ++bp->fw_drv_pulse_wr_seq;
4189 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4190 /* TBD - add SYSTEM_TIME */
4191 drv_pulse = bp->fw_drv_pulse_wr_seq;
4192 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4194 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4195 MCP_PULSE_SEQ_MASK);
4196 /* The delta between driver pulse and mcp response
4197  * should be 1 (before mcp response) or 0 (after mcp response) */
4199 if ((drv_pulse != mcp_pulse) &&
4200 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4201 /* someone lost a heartbeat... */
4202 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4203 drv_pulse, mcp_pulse);
4207 if ((bp->state == BNX2X_STATE_OPEN) ||
4208 (bp->state == BNX2X_STATE_DISABLED))
4209 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4212 mod_timer(&bp->timer, jiffies + bp->current_interval);
4215 /* end of Statistics */
4220 /* nic init service functions */
4223 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4225 int port = BP_PORT(bp);
4227 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4228 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4229 sizeof(struct ustorm_status_block)/4);
4230 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4231 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4232 sizeof(struct cstorm_status_block)/4);
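/* Set up a fastpath status block: write its host address and owning function
 * to the USTORM/CSTORM internal memories, initially disable host coalescing
 * for all indices (configured later in bnx2x_update_coalesce()) and ACK the
 * block to the IGU. */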
4235 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4236 dma_addr_t mapping, int sb_id)
4238 int port = BP_PORT(bp);
4239 int func = BP_FUNC(bp);
4244 section = ((u64)mapping) + offsetof(struct host_status_block,
4246 sb->u_status_block.status_block_id = sb_id;
4248 REG_WR(bp, BAR_USTRORM_INTMEM +
4249 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4250 REG_WR(bp, BAR_USTRORM_INTMEM +
4251 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4253 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4254 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4256 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4257 REG_WR16(bp, BAR_USTRORM_INTMEM +
4258 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4261 section = ((u64)mapping) + offsetof(struct host_status_block,
4263 sb->c_status_block.status_block_id = sb_id;
4265 REG_WR(bp, BAR_CSTRORM_INTMEM +
4266 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4267 REG_WR(bp, BAR_CSTRORM_INTMEM +
4268 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4270 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4271 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4273 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4274 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4275 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4277 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4280 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4282 int func = BP_FUNC(bp);
4284 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4285 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4286 sizeof(struct tstorm_def_status_block)/4);
4287 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4288 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4289 sizeof(struct ustorm_def_status_block)/4);
4290 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4291 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4292 sizeof(struct cstorm_def_status_block)/4);
4293 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4294 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4295 sizeof(struct xstorm_def_status_block)/4);
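/* Set up the default status block: latch the AEU enable masks of the dynamic
 * attention groups, program the attention message address and number, write
 * the per-storm default SB addresses and ACK the block to the IGU. */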
4298 static void bnx2x_init_def_sb(struct bnx2x *bp,
4299 struct host_def_status_block *def_sb,
4300 dma_addr_t mapping, int sb_id)
4302 int port = BP_PORT(bp);
4303 int func = BP_FUNC(bp);
4304 int index, val, reg_offset;
4308 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4309 atten_status_block);
4310 def_sb->atten_status_block.status_block_id = sb_id;
4314 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4315 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4317 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4318 bp->attn_group[index].sig[0] = REG_RD(bp,
4319 reg_offset + 0x10*index);
4320 bp->attn_group[index].sig[1] = REG_RD(bp,
4321 reg_offset + 0x4 + 0x10*index);
4322 bp->attn_group[index].sig[2] = REG_RD(bp,
4323 reg_offset + 0x8 + 0x10*index);
4324 bp->attn_group[index].sig[3] = REG_RD(bp,
4325 reg_offset + 0xc + 0x10*index);
4328 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4329 HC_REG_ATTN_MSG0_ADDR_L);
4331 REG_WR(bp, reg_offset, U64_LO(section));
4332 REG_WR(bp, reg_offset + 4, U64_HI(section));
4334 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4336 val = REG_RD(bp, reg_offset);
4338 REG_WR(bp, reg_offset, val);
4341 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4342 u_def_status_block);
4343 def_sb->u_def_status_block.status_block_id = sb_id;
4345 REG_WR(bp, BAR_USTRORM_INTMEM +
4346 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4347 REG_WR(bp, BAR_USTRORM_INTMEM +
4348 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4350 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4351 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4353 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4354 REG_WR16(bp, BAR_USTRORM_INTMEM +
4355 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4358 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4359 c_def_status_block);
4360 def_sb->c_def_status_block.status_block_id = sb_id;
4362 REG_WR(bp, BAR_CSTRORM_INTMEM +
4363 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4364 REG_WR(bp, BAR_CSTRORM_INTMEM +
4365 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4367 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4368 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4370 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4371 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4372 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4375 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4376 t_def_status_block);
4377 def_sb->t_def_status_block.status_block_id = sb_id;
4379 REG_WR(bp, BAR_TSTRORM_INTMEM +
4380 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4381 REG_WR(bp, BAR_TSTRORM_INTMEM +
4382 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4384 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4385 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4387 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4388 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4389 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4392 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4393 x_def_status_block);
4394 def_sb->x_def_status_block.status_block_id = sb_id;
4396 REG_WR(bp, BAR_XSTRORM_INTMEM +
4397 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4398 REG_WR(bp, BAR_XSTRORM_INTMEM +
4399 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4401 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4402 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4404 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4405 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4406 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4408 bp->stats_pending = 0;
4409 bp->set_mac_pending = 0;
4411 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
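/* Program the host coalescing timeout for the Rx and Tx CQ indices of every
 * queue; a tick value of 0 leaves the corresponding index disabled. */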
4414 static void bnx2x_update_coalesce(struct bnx2x *bp)
4416 int port = BP_PORT(bp);
4419 for_each_queue(bp, i) {
4420 int sb_id = bp->fp[i].sb_id;
4422 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4423 REG_WR8(bp, BAR_USTRORM_INTMEM +
4424 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4425 U_SB_ETH_RX_CQ_INDEX),
4427 REG_WR16(bp, BAR_USTRORM_INTMEM +
4428 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4429 U_SB_ETH_RX_CQ_INDEX),
4430 bp->rx_ticks ? 0 : 1);
4432 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4433 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4434 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4435 C_SB_ETH_TX_CQ_INDEX),
4437 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4438 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4439 C_SB_ETH_TX_CQ_INDEX),
4440 bp->tx_ticks ? 0 : 1);
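/* Free TPA pool entries 0..last-1, unmapping any bin still in the START
 * state. */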
4444 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4445 struct bnx2x_fastpath *fp, int last)
4449 for (i = 0; i < last; i++) {
4450 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4451 struct sk_buff *skb = rx_buf->skb;
4454 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4458 if (fp->tpa_state[i] == BNX2X_TPA_START)
4459 pci_unmap_single(bp->pdev,
4460 pci_unmap_addr(rx_buf, mapping),
4461 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
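/* Set up the Rx rings of every queue: allocate the TPA skb pool (when TPA is
 * enabled), link the "next page" entries of the BD, SGE and CQE rings,
 * allocate the initial buffers and publish the producers to the chip. */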
4468 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4470 int func = BP_FUNC(bp);
4471 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4472 ETH_MAX_AGGREGATION_QUEUES_E1H;
4473 u16 ring_prod, cqe_ring_prod;
4476 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4478 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4480 if (bp->flags & TPA_ENABLE_FLAG) {
4482 for_each_rx_queue(bp, j) {
4483 struct bnx2x_fastpath *fp = &bp->fp[j];
4485 for (i = 0; i < max_agg_queues; i++) {
4486 fp->tpa_pool[i].skb =
4487 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4488 if (!fp->tpa_pool[i].skb) {
4489 BNX2X_ERR("Failed to allocate TPA "
4490 "skb pool for queue[%d] - "
4491 "disabling TPA on this "
4493 bnx2x_free_tpa_pool(bp, fp, i);
4494 fp->disable_tpa = 1;
4497 pci_unmap_addr_set((struct sw_rx_bd *)
4498 &bp->fp->tpa_pool[i],
4500 fp->tpa_state[i] = BNX2X_TPA_STOP;
4505 for_each_rx_queue(bp, j) {
4506 struct bnx2x_fastpath *fp = &bp->fp[j];
4509 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4510 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4512 /* "next page" elements initialization */
4514 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4515 struct eth_rx_sge *sge;
4517 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4519 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4520 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4522 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4523 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4526 bnx2x_init_sge_ring_bit_mask(fp);
4529 for (i = 1; i <= NUM_RX_RINGS; i++) {
4530 struct eth_rx_bd *rx_bd;
4532 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4534 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4535 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4537 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4538 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4542 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4543 struct eth_rx_cqe_next_page *nextpg;
4545 nextpg = (struct eth_rx_cqe_next_page *)
4546 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4548 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4549 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4551 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4552 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4555 /* Allocate SGEs and initialize the ring elements */
4556 for (i = 0, ring_prod = 0;
4557 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4559 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4560 BNX2X_ERR("was only able to allocate "
4562 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4563 /* Cleanup already allocated elements */
4564 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4565 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4566 fp->disable_tpa = 1;
4570 ring_prod = NEXT_SGE_IDX(ring_prod);
4572 fp->rx_sge_prod = ring_prod;
4574 /* Allocate BDs and initialize BD ring */
4575 fp->rx_comp_cons = 0;
4576 cqe_ring_prod = ring_prod = 0;
4577 for (i = 0; i < bp->rx_ring_size; i++) {
4578 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4579 BNX2X_ERR("was only able to allocate "
4580 "%d rx skbs on queue[%d]\n", i, j);
4581 fp->eth_q_stats.rx_skb_alloc_failed++;
4584 ring_prod = NEXT_RX_IDX(ring_prod);
4585 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4586 WARN_ON(ring_prod <= i);
4589 fp->rx_bd_prod = ring_prod;
4590 /* must not have more available CQEs than BDs */
4591 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4593 fp->rx_pkt = fp->rx_calls = 0;
4596 /* this will generate an interrupt (to the TSTORM);
4597    must only be done after chip is initialized */
4599 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4604 REG_WR(bp, BAR_USTRORM_INTMEM +
4605 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4606 U64_LO(fp->rx_comp_mapping));
4607 REG_WR(bp, BAR_USTRORM_INTMEM +
4608 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4609 U64_HI(fp->rx_comp_mapping));
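/* Link the "next page" entries of every Tx BD ring and reset the Tx
 * producers and consumers. */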
4613 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4617 for_each_tx_queue(bp, j) {
4618 struct bnx2x_fastpath *fp = &bp->fp[j];
4620 for (i = 1; i <= NUM_TX_RINGS; i++) {
4621 struct eth_tx_bd *tx_bd =
4622 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4625 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4626 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4628 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4629 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4632 fp->tx_pkt_prod = 0;
4633 fp->tx_pkt_cons = 0;
4636 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
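/* Initialize the slow path queue (SPQ): reset the producer state and tell
 * the XSTORM where the SPQ page and producer index live. */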
4641 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4643 int func = BP_FUNC(bp);
4645 spin_lock_init(&bp->spq_lock);
4647 bp->spq_left = MAX_SPQ_PENDING;
4648 bp->spq_prod_idx = 0;
4649 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4650 bp->spq_prod_bd = bp->spq;
4651 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4653 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4654 U64_LO(bp->spq_mapping));
4656 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4657 U64_HI(bp->spq_mapping));
4659 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
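/* Fill the ETH connection context of every queue: Rx/Tx ring base addresses,
 * client and status block ids, statistics counter and the TPA/SGE settings
 * used by the USTORM/XSTORM/CSTORM parts. */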
4663 static void bnx2x_init_context(struct bnx2x *bp)
4667 for_each_queue(bp, i) {
4668 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4669 struct bnx2x_fastpath *fp = &bp->fp[i];
4670 u8 cl_id = fp->cl_id;
4671 u8 sb_id = fp->sb_id;
4673 context->ustorm_st_context.common.sb_index_numbers =
4674 BNX2X_RX_SB_INDEX_NUM;
4675 context->ustorm_st_context.common.clientId = cl_id;
4676 context->ustorm_st_context.common.status_block_id = sb_id;
4677 context->ustorm_st_context.common.flags =
4678 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4679 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4680 context->ustorm_st_context.common.statistics_counter_id =
4682 context->ustorm_st_context.common.mc_alignment_log_size =
4683 BNX2X_RX_ALIGN_SHIFT;
4684 context->ustorm_st_context.common.bd_buff_size =
4686 context->ustorm_st_context.common.bd_page_base_hi =
4687 U64_HI(fp->rx_desc_mapping);
4688 context->ustorm_st_context.common.bd_page_base_lo =
4689 U64_LO(fp->rx_desc_mapping);
4690 if (!fp->disable_tpa) {
4691 context->ustorm_st_context.common.flags |=
4692 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4693 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4694 context->ustorm_st_context.common.sge_buff_size =
4695 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4697 context->ustorm_st_context.common.sge_page_base_hi =
4698 U64_HI(fp->rx_sge_mapping);
4699 context->ustorm_st_context.common.sge_page_base_lo =
4700 U64_LO(fp->rx_sge_mapping);
4703 context->ustorm_ag_context.cdu_usage =
4704 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4705 CDU_REGION_NUMBER_UCM_AG,
4706 ETH_CONNECTION_TYPE);
4708 context->xstorm_st_context.tx_bd_page_base_hi =
4709 U64_HI(fp->tx_desc_mapping);
4710 context->xstorm_st_context.tx_bd_page_base_lo =
4711 U64_LO(fp->tx_desc_mapping);
4712 context->xstorm_st_context.db_data_addr_hi =
4713 U64_HI(fp->tx_prods_mapping);
4714 context->xstorm_st_context.db_data_addr_lo =
4715 U64_LO(fp->tx_prods_mapping);
4716 context->xstorm_st_context.statistics_data = (cl_id |
4717 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4718 context->cstorm_st_context.sb_index_number =
4719 C_SB_ETH_TX_CQ_INDEX;
4720 context->cstorm_st_context.status_block_id = sb_id;
4722 context->xstorm_ag_context.cdu_reserved =
4723 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4724 CDU_REGION_NUMBER_XCM_AG,
4725 ETH_CONNECTION_TYPE);
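/* Program the TSTORM RSS indirection table, spreading the entries
 * round-robin across the Rx queues; nothing to do when RSS is disabled. */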
4729 static void bnx2x_init_ind_table(struct bnx2x *bp)
4731 int func = BP_FUNC(bp);
4734 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4738 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4739 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4740 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4741 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4742 bp->fp->cl_id + (i % bp->num_rx_queues));
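/* Build the per-client TSTORM configuration (MTU, VLAN removal, SGE/TPA
 * limits) and write it for the client id of every queue. */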
4745 static void bnx2x_set_client_config(struct bnx2x *bp)
4747 struct tstorm_eth_client_config tstorm_client = {0};
4748 int port = BP_PORT(bp);
4751 tstorm_client.mtu = bp->dev->mtu;
4752 tstorm_client.config_flags =
4753 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4754 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4756 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4757 tstorm_client.config_flags |=
4758 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4759 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4763 if (bp->flags & TPA_ENABLE_FLAG) {
4764 tstorm_client.max_sges_for_packet =
4765 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4766 tstorm_client.max_sges_for_packet =
4767 ((tstorm_client.max_sges_for_packet +
4768 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4769 PAGES_PER_SGE_SHIFT;
4771 tstorm_client.config_flags |=
4772 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4775 for_each_queue(bp, i) {
4776 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4778 REG_WR(bp, BAR_TSTRORM_INTMEM +
4779 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4780 ((u32 *)&tstorm_client)[0]);
4781 REG_WR(bp, BAR_TSTRORM_INTMEM +
4782 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4783 ((u32 *)&tstorm_client)[1]);
4786 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4787 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
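/* Translate the requested Rx mode into TSTORM MAC filter accept/drop masks,
 * write them to internal memory and, unless Rx is fully disabled, refresh the
 * client configuration. */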
4790 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4792 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4793 int mode = bp->rx_mode;
4794 int mask = (1 << BP_L_ID(bp));
4795 int func = BP_FUNC(bp);
4798 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4801 case BNX2X_RX_MODE_NONE: /* no Rx */
4802 tstorm_mac_filter.ucast_drop_all = mask;
4803 tstorm_mac_filter.mcast_drop_all = mask;
4804 tstorm_mac_filter.bcast_drop_all = mask;
4807 case BNX2X_RX_MODE_NORMAL:
4808 tstorm_mac_filter.bcast_accept_all = mask;
4811 case BNX2X_RX_MODE_ALLMULTI:
4812 tstorm_mac_filter.mcast_accept_all = mask;
4813 tstorm_mac_filter.bcast_accept_all = mask;
4816 case BNX2X_RX_MODE_PROMISC:
4817 tstorm_mac_filter.ucast_accept_all = mask;
4818 tstorm_mac_filter.mcast_accept_all = mask;
4819 tstorm_mac_filter.bcast_accept_all = mask;
4823 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4827 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4828 REG_WR(bp, BAR_TSTRORM_INTMEM +
4829 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4830 ((u32 *)&tstorm_mac_filter)[i]);
4832 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4833 ((u32 *)&tstorm_mac_filter)[i]); */
4836 if (mode != BNX2X_RX_MODE_NONE)
4837 bnx2x_set_client_config(bp);
4840 static void bnx2x_init_internal_common(struct bnx2x *bp)
4844 if (bp->flags & TPA_ENABLE_FLAG) {
4845 struct tstorm_eth_tpa_exist tpa = {0};
4849 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4851 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4855 /* Zero this manually as its initialization is
4856 currently missing in the initTool */
4857 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4858 REG_WR(bp, BAR_USTRORM_INTMEM +
4859 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4862 static void bnx2x_init_internal_port(struct bnx2x *bp)
4864 int port = BP_PORT(bp);
4866 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4867 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4868 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4869 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4872 /* Calculates the sum of vn_min_rates.
4873    It's needed for further normalizing of the min_rates.
4875    Returns the sum of vn_min_rates, or
4877    0 - if all the min_rates are 0.
4878    In the latter case the fairness algorithm should be deactivated.
4879    If not all min_rates are zero then those that are zeroes will be set to 1. */
4881 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4884 int port = BP_PORT(bp);
4887 bp->vn_weight_sum = 0;
4888 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4889 int func = 2*vn + port;
4891 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4892 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4893 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4895 /* Skip hidden vns */
4896 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4899 /* If min rate is zero - set it to 1 */
4901 vn_min_rate = DEF_MIN_RATE;
4905 bp->vn_weight_sum += vn_min_rate;
4908 /* ... only if all min rates are zeros - disable fairness */
4910 bp->vn_weight_sum = 0;
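/* Per-function internal memory init: RSS configuration, Rx mode, per-client
 * statistics reset, statistics query addresses, E1H function mode, CQE page
 * bases and aggregation limits, dropless flow control thresholds and the
 * rate-shaping/fairness (cmng) context. */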
4913 static void bnx2x_init_internal_func(struct bnx2x *bp)
4915 struct tstorm_eth_function_common_config tstorm_config = {0};
4916 struct stats_indication_flags stats_flags = {0};
4917 int port = BP_PORT(bp);
4918 int func = BP_FUNC(bp);
4924 tstorm_config.config_flags = MULTI_FLAGS(bp);
4925 tstorm_config.rss_result_mask = MULTI_MASK;
4928 tstorm_config.config_flags |=
4929 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4931 tstorm_config.leading_client_id = BP_L_ID(bp);
4933 REG_WR(bp, BAR_TSTRORM_INTMEM +
4934 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4935 (*(u32 *)&tstorm_config));
4937 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4938 bnx2x_set_storm_rx_mode(bp);
4940 for_each_queue(bp, i) {
4941 u8 cl_id = bp->fp[i].cl_id;
4943 /* reset xstorm per client statistics */
4944 offset = BAR_XSTRORM_INTMEM +
4945 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4947 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4948 REG_WR(bp, offset + j*4, 0);
4950 /* reset tstorm per client statistics */
4951 offset = BAR_TSTRORM_INTMEM +
4952 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4954 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4955 REG_WR(bp, offset + j*4, 0);
4957 /* reset ustorm per client statistics */
4958 offset = BAR_USTRORM_INTMEM +
4959 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4961 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4962 REG_WR(bp, offset + j*4, 0);
4965 /* Init statistics related context */
4966 stats_flags.collect_eth = 1;
4968 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4969 ((u32 *)&stats_flags)[0]);
4970 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4971 ((u32 *)&stats_flags)[1]);
4973 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4974 ((u32 *)&stats_flags)[0]);
4975 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4976 ((u32 *)&stats_flags)[1]);
4978 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4979 ((u32 *)&stats_flags)[0]);
4980 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4981 ((u32 *)&stats_flags)[1]);
4983 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4984 ((u32 *)&stats_flags)[0]);
4985 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4986 ((u32 *)&stats_flags)[1]);
4988 REG_WR(bp, BAR_XSTRORM_INTMEM +
4989 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4990 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4991 REG_WR(bp, BAR_XSTRORM_INTMEM +
4992 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4993 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4995 REG_WR(bp, BAR_TSTRORM_INTMEM +
4996 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998 REG_WR(bp, BAR_TSTRORM_INTMEM +
4999 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5002 REG_WR(bp, BAR_USTRORM_INTMEM +
5003 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5004 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5005 REG_WR(bp, BAR_USTRORM_INTMEM +
5006 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5007 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5009 if (CHIP_IS_E1H(bp)) {
5010 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5012 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5014 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5016 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5019 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5023 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5025 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5026 SGE_PAGE_SIZE * PAGES_PER_SGE),
5028 for_each_rx_queue(bp, i) {
5029 struct bnx2x_fastpath *fp = &bp->fp[i];
5031 REG_WR(bp, BAR_USTRORM_INTMEM +
5032 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5033 U64_LO(fp->rx_comp_mapping));
5034 REG_WR(bp, BAR_USTRORM_INTMEM +
5035 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5036 U64_HI(fp->rx_comp_mapping));
5038 REG_WR16(bp, BAR_USTRORM_INTMEM +
5039 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5043 /* dropless flow control */
5044 if (CHIP_IS_E1H(bp)) {
5045 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5047 rx_pause.bd_thr_low = 250;
5048 rx_pause.cqe_thr_low = 250;
5050 rx_pause.sge_thr_low = 0;
5051 rx_pause.bd_thr_high = 350;
5052 rx_pause.cqe_thr_high = 350;
5053 rx_pause.sge_thr_high = 0;
5055 for_each_rx_queue(bp, i) {
5056 struct bnx2x_fastpath *fp = &bp->fp[i];
5058 if (!fp->disable_tpa) {
5059 rx_pause.sge_thr_low = 150;
5060 rx_pause.sge_thr_high = 250;
5064 offset = BAR_USTRORM_INTMEM +
5065 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5068 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5070 REG_WR(bp, offset + j*4,
5071 ((u32 *)&rx_pause)[j]);
5075 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5077 /* Init rate shaping and fairness contexts */
5081 /* During init there is no active link;
5082    until link is up, set the link rate to 10Gbps */
5083 bp->link_vars.line_speed = SPEED_10000;
5084 bnx2x_init_port_minmax(bp);
5086 bnx2x_calc_vn_weight_sum(bp);
5088 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5089 bnx2x_init_vn_minmax(bp, 2*vn + port);
5091 /* Enable rate shaping and fairness */
5092 bp->cmng.flags.cmng_enables =
5093 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5094 if (bp->vn_weight_sum)
5095 bp->cmng.flags.cmng_enables |=
5096 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5098 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5099 " fairness will be disabled\n");
5101 /* rate shaping and fairness are disabled */
5103 "single function mode minmax will be disabled\n");
5107 /* Store it to internal memory */
5109 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5110 REG_WR(bp, BAR_XSTRORM_INTMEM +
5111 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5112 ((u32 *)(&bp->cmng))[i]);
5115 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5117 switch (load_code) {
5118 case FW_MSG_CODE_DRV_LOAD_COMMON:
5119 bnx2x_init_internal_common(bp);
5122 case FW_MSG_CODE_DRV_LOAD_PORT:
5123 bnx2x_init_internal_port(bp);
5126 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5127 bnx2x_init_internal_func(bp);
5131 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5136 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5140 for_each_queue(bp, i) {
5141 struct bnx2x_fastpath *fp = &bp->fp[i];
5144 fp->state = BNX2X_FP_STATE_CLOSED;
5146 fp->cl_id = BP_L_ID(bp) + i;
5147 fp->sb_id = fp->cl_id;
5149 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5150 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5151 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5153 bnx2x_update_fpsb_idx(fp);
5156 /* ensure status block indices were read */
5160 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5162 bnx2x_update_dsb_idx(bp);
5163 bnx2x_update_coalesce(bp);
5164 bnx2x_init_rx_rings(bp);
5165 bnx2x_init_tx_ring(bp);
5166 bnx2x_init_sp_ring(bp);
5167 bnx2x_init_context(bp);
5168 bnx2x_init_internal(bp, load_code);
5169 bnx2x_init_ind_table(bp);
5170 bnx2x_stats_init(bp);
5172 /* At this point, we are ready for interrupts */
5173 atomic_set(&bp->intr_sem, 0);
5175 /* flush all before enabling interrupts */
5179 bnx2x_int_enable(bp);
5182 /* end of nic init */
5185 /* gzip service functions */
5188 static int bnx2x_gunzip_init(struct bnx2x *bp)
5190 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5191 &bp->gunzip_mapping);
5192 if (bp->gunzip_buf == NULL)
5195 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5196 if (bp->strm == NULL)
5199 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5201 if (bp->strm->workspace == NULL)
5211 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5212 bp->gunzip_mapping);
5213 bp->gunzip_buf = NULL;
5216 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5217 " un-compression\n", bp->dev->name);
5221 static void bnx2x_gunzip_end(struct bnx2x *bp)
5223 kfree(bp->strm->workspace);
5228 if (bp->gunzip_buf) {
5229 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5230 bp->gunzip_mapping);
5231 bp->gunzip_buf = NULL;
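/* Inflate a gzip'ed firmware image into bp->gunzip_buf; on success
 * bp->gunzip_outlen holds the output length in dwords. */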
5235 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5239 /* check gzip header */
5240 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5247 if (zbuf[3] & FNAME)
5248 while ((zbuf[n++] != 0) && (n < len));
5250 bp->strm->next_in = zbuf + n;
5251 bp->strm->avail_in = len - n;
5252 bp->strm->next_out = bp->gunzip_buf;
5253 bp->strm->avail_out = FW_BUF_SIZE;
5255 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5259 rc = zlib_inflate(bp->strm, Z_FINISH);
5260 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5261 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5262 bp->dev->name, bp->strm->msg);
5264 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5265 if (bp->gunzip_outlen & 0x3)
5266 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5267 " gunzip_outlen (%d) not aligned\n",
5268 bp->dev->name, bp->gunzip_outlen);
5269 bp->gunzip_outlen >>= 2;
5271 zlib_inflateEnd(bp->strm);
5273 if (rc == Z_STREAM_END)
5279 /* nic load/unload */
5282 /* General service functions */
5285 /* send a NIG loopback debug packet */
5286 static void bnx2x_lb_pckt(struct bnx2x *bp)
5290 /* Ethernet source and destination addresses */
5291 wb_write[0] = 0x55555555;
5292 wb_write[1] = 0x55555555;
5293 wb_write[2] = 0x20; /* SOP */
5294 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5296 /* NON-IP protocol */
5297 wb_write[0] = 0x09000000;
5298 wb_write[1] = 0x55555555;
5299 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5300 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5303 /* some of the internal memories
5304 * are not directly readable from the driver
5305 * to test them we send debug packets */
5307 static int bnx2x_int_mem_test(struct bnx2x *bp)
5313 if (CHIP_REV_IS_FPGA(bp))
5315 else if (CHIP_REV_IS_EMUL(bp))
5320 DP(NETIF_MSG_HW, "start part1\n");
5322 /* Disable inputs of parser neighbor blocks */
5323 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5324 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5325 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5326 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5328 /* Write 0 to parser credits for CFC search request */
5329 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5331 /* send Ethernet packet */
5334 /* TODO: should we reset the NIG statistics here? */
5335 /* Wait until NIG register shows 1 packet of size 0x10 */
5336 count = 1000 * factor;
5339 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5340 val = *bnx2x_sp(bp, wb_data[0]);
5348 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5352 /* Wait until PRS register shows 1 packet */
5353 count = 1000 * factor;
5355 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5363 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5367 /* Reset and init BRB, PRS */
5368 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5370 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5372 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5373 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5375 DP(NETIF_MSG_HW, "part2\n");
5377 /* Disable inputs of parser neighbor blocks */
5378 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5379 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5380 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5381 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5383 /* Write 0 to parser credits for CFC search request */
5384 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5386 /* send 10 Ethernet packets */
5387 for (i = 0; i < 10; i++)
5390 /* Wait until NIG register shows 10 + 1
5391 packets of size 11*0x10 = 0xb0 */
5392 count = 1000 * factor;
5395 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5396 val = *bnx2x_sp(bp, wb_data[0]);
5404 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5408 /* Wait until PRS register shows 2 packets */
5409 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5411 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5413 /* Write 1 to parser credits for CFC search request */
5414 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5416 /* Wait until PRS register shows 3 packets */
5417 msleep(10 * factor);
5418 /* Wait until NIG register shows 1 packet of size 0x10 */
5419 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5421 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5423 /* clear NIG EOP FIFO */
5424 for (i = 0; i < 11; i++)
5425 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5426 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5428 BNX2X_ERR("clear of NIG failed\n");
5432 /* Reset and init BRB, PRS, NIG */
5433 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5435 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5437 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5438 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5441 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5444 /* Enable inputs of parser neighbor blocks */
5445 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5446 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5447 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5448 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5450 DP(NETIF_MSG_HW, "done\n");
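/* Unmask the attention interrupts of the HW blocks (a few known noisy
 * sources, e.g. PBF bits 3-4, remain masked). */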
5455 static void enable_blocks_attention(struct bnx2x *bp)
5457 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5458 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5459 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5460 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5461 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5462 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5463 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5464 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5465 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5466 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5467 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5468 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5469 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5470 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5471 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5472 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5473 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5474 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5475 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5476 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5477 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5478 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5479 if (CHIP_REV_IS_FPGA(bp))
5480 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5482 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5483 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5484 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5485 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5486 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5487 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5488 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5489 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5490 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5491 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5495 static void bnx2x_reset_common(struct bnx2x *bp)
5498 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5500 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5503 static int bnx2x_init_common(struct bnx2x *bp)
5507 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5509 bnx2x_reset_common(bp);
5510 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5511 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5513 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5514 if (CHIP_IS_E1H(bp))
5515 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5517 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5519 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5521 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5522 if (CHIP_IS_E1(bp)) {
5523 /* enable HW interrupt from PXP on USDM overflow
5524 bit 16 on INT_MASK_0 */
5525 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5528 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5532 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5533 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5534 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5535 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5536 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5537 /* make sure this value is 0 */
5538 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5540 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5541 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5542 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5543 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5544 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5547 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5549 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5550 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5551 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5554 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5555 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5557 /* let the HW do its magic ... */
5559 /* finish PXP init */
5560 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5562 BNX2X_ERR("PXP2 CFG failed\n");
5565 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5567 BNX2X_ERR("PXP2 RD_INIT failed\n");
5571 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5572 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5574 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5576 /* clean the DMAE memory */
5578 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5580 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5581 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5582 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5583 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5585 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5586 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5587 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5588 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5590 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5591 /* soft reset pulse */
5592 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5593 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5596 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5599 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5600 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5601 if (!CHIP_REV_IS_SLOW(bp)) {
5602 /* enable hw interrupt from doorbell Q */
5603 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5606 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5607 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5608 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5610 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5611 if (CHIP_IS_E1H(bp))
5612 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5614 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5615 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5616 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5617 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5619 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5620 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5621 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5622 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5624 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5625 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5626 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5627 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5630 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5632 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5635 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5636 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5637 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5639 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5640 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5641 REG_WR(bp, i, 0xc0cac01a);
5642 /* TODO: replace with something meaningful */
5644 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5645 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5647 if (sizeof(union cdu_context) != 1024)
5648 /* we currently assume that a context is 1024 bytes */
5649 printk(KERN_ALERT PFX "please adjust the size of"
5650 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5652 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5653 val = (4 << 24) + (0 << 12) + 1024;
5654 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5655 if (CHIP_IS_E1(bp)) {
5656 /* !!! fix pxp client credit until excel update */
5657 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5658 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5661 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5662 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5663 /* enable context validation interrupt from CFC */
5664 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5666 /* set the thresholds to prevent CFC/CDU race */
5667 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5669 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5670 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5672 /* PXPCS COMMON comes here */
5673 /* Reset PCIE errors for debug */
5674 REG_WR(bp, 0x2814, 0xffffffff);
5675 REG_WR(bp, 0x3820, 0xffffffff);
5677 /* EMAC0 COMMON comes here */
5678 /* EMAC1 COMMON comes here */
5679 /* DBU COMMON comes here */
5680 /* DBG COMMON comes here */
5682 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5683 if (CHIP_IS_E1H(bp)) {
5684 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5685 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5688 if (CHIP_REV_IS_SLOW(bp))
5691 /* finish CFC init */
5692 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5694 BNX2X_ERR("CFC LL_INIT failed\n");
5697 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5699 BNX2X_ERR("CFC AC_INIT failed\n");
5702 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5704 BNX2X_ERR("CFC CAM_INIT failed\n");
5707 REG_WR(bp, CFC_REG_DEBUG0, 0);
5709 /* read the NIG statistic
5710    to see if this is our first bring-up since power-up */
5711 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5712 val = *bnx2x_sp(bp, wb_data[0]);
5714 /* do internal memory self test */
5715 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5716 BNX2X_ERR("internal mem self test failed\n");
5720 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5721 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5723 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5724 bp->port.need_hw_lock = 1;
5727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5728 /* Fan failure is indicated by SPIO 5 */
5729 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5730 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5732 /* set to active low mode */
5733 val = REG_RD(bp, MISC_REG_SPIO_INT);
5734 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5735 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5736 REG_WR(bp, MISC_REG_SPIO_INT, val);
5738 /* enable interrupt to signal the IGU */
5739 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5740 val |= (1 << MISC_REGISTERS_SPIO_5);
5741 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5748 /* clear PXP2 attentions */
5749 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5751 enable_blocks_attention(bp);
5753 if (!BP_NOMCP(bp)) {
5754 bnx2x_acquire_phy_lock(bp);
5755 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5756 bnx2x_release_phy_lock(bp);
5758 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5763 static int bnx2x_init_port(struct bnx2x *bp)
5765 int port = BP_PORT(bp);
5769 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5771 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5773 /* Port PXP comes here */
5774 /* Port PXP2 comes here */
5779 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5780 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5781 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5782 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5787 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5788 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5789 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5790 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5795 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5796 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5797 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5798 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5800 /* Port CMs come here */
5801 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5802 (port ? XCM_PORT1_END : XCM_PORT0_END));
5804 /* Port QM comes here */
5806 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5807 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5809 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5810 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5812 /* Port DQ comes here */
5814 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5815 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5816 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5817 /* no pause for emulation and FPGA */
5822 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5823 else if (bp->dev->mtu > 4096) {
5824 if (bp->flags & ONE_PORT_FLAG)
5828 /* (24*1024 + val*4)/256 */
5829 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5832 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5833 high = low + 56; /* 14*1024/256 */
5835 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5836 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5839 /* Port PRS comes here */
5840 /* Port TSDM comes here */
5841 /* Port CSDM comes here */
5842 /* Port USDM comes here */
5843 /* Port XSDM comes here */
5845 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5846 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5847 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5848 port ? USEM_PORT1_END : USEM_PORT0_END);
5849 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5850 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5851 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5852 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5854 /* Port UPB comes here */
5855 /* Port XPB comes here */
5857 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5858 port ? PBF_PORT1_END : PBF_PORT0_END);
5860 /* configure PBF to work without PAUSE (mtu 9000) */
5861 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5863 /* update threshold */
5864 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5865 /* update init credit */
5866 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5869 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5871 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5874 /* tell the searcher where the T2 table is */
5875 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5877 wb_write[0] = U64_LO(bp->t2_mapping);
5878 wb_write[1] = U64_HI(bp->t2_mapping);
5879 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5880 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5881 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5882 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5884 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5885 /* Port SRCH comes here */
5887 /* Port CDU comes here */
5888 /* Port CFC comes here */
5890 if (CHIP_IS_E1(bp)) {
5891 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5892 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5894 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5895 port ? HC_PORT1_END : HC_PORT0_END);
5897 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5898 MISC_AEU_PORT0_START,
5899 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5900 /* init aeu_mask_attn_func_0/1:
5901 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5902 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5903 * bits 4-7 are used for "per vn group attention" */
5904 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5905 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5907 /* Port PXPCS comes here */
5908 /* Port EMAC0 comes here */
5909 /* Port EMAC1 comes here */
5910 /* Port DBU comes here */
5911 /* Port DBG comes here */
5913 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5914 port ? NIG_PORT1_END : NIG_PORT0_END);
5916 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5918 if (CHIP_IS_E1H(bp)) {
5919 /* 0x2 disable e1hov, 0x1 enable */
5920 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5921 (IS_E1HMF(bp) ? 0x1 : 0x2));
5923 /* support pause requests from USDM, TSDM and BRB */
5924 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5927 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5928 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5929 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5933 /* Port MCP comes here */
5934 /* Port DMAE comes here */
5936 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5937 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5939 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5941 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5942 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5944 /* The GPIO should be swapped if the swap register is set and active */
5946 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5947 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5949 /* Select function upon port-swap configuration */
5951 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5952 aeu_gpio_mask = (swap_val && swap_override) ?
5953 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5954 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5956 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5957 aeu_gpio_mask = (swap_val && swap_override) ?
5958 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5959 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5961 val = REG_RD(bp, offset);
5962 /* add GPIO3 to group */
5963 val |= aeu_gpio_mask;
5964 REG_WR(bp, offset, val);
5968 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5969 /* add SPIO 5 to group 0 */
5970 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5971 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5972 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5979 bnx2x__link_reset(bp);
5984 #define ILT_PER_FUNC (768/2)
5985 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5986 /* the phys address is shifted right 12 bits and has a
5987    valid (1) bit added at the 53rd bit;
5988    since this is a wide register (TM)
5989    we split it into two 32-bit writes */
5991 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5992 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5993 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5994 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
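/* For example (illustrative values only): a DMA address of 0x12345678000
 * yields ONCHIP_ADDR1(x) = 0x12345678 and ONCHIP_ADDR2(x) = 0x00100000,
 * i.e. only the valid bit is set in the high half. */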
5996 #define CNIC_ILT_LINES 0
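/* Write one ILT entry (as two 32-bit halves) for the given index. */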
5998 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6002 if (CHIP_IS_E1H(bp))
6003 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6005 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6007 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6010 static int bnx2x_init_func(struct bnx2x *bp)
6012 int port = BP_PORT(bp);
6013 int func = BP_FUNC(bp);
6017 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6019 /* set MSI reconfigure capability */
6020 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6021 val = REG_RD(bp, addr);
6022 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6023 REG_WR(bp, addr, val);
6025 i = FUNC_ILT_BASE(func);
6027 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6028 if (CHIP_IS_E1H(bp)) {
6029 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6030 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6032 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6033 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6036 if (CHIP_IS_E1H(bp)) {
6037 for (i = 0; i < 9; i++)
6038 bnx2x_init_block(bp,
6039 cm_start[func][i], cm_end[func][i]);
6041 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6042 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6045 /* HC init per function */
6046 if (CHIP_IS_E1H(bp)) {
6047 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6049 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6050 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6052 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6054 /* Reset PCIE errors for debug */
6055 REG_WR(bp, 0x2114, 0xffffffff);
6056 REG_WR(bp, 0x2120, 0xffffffff);
6061 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6065 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6066 BP_FUNC(bp), load_code);
6069 mutex_init(&bp->dmae_mutex);
6070 bnx2x_gunzip_init(bp);
6072 switch (load_code) {
6073 case FW_MSG_CODE_DRV_LOAD_COMMON:
6074 rc = bnx2x_init_common(bp);
6079 case FW_MSG_CODE_DRV_LOAD_PORT:
6081 rc = bnx2x_init_port(bp);
6086 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6088 rc = bnx2x_init_func(bp);
6094 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6098 if (!BP_NOMCP(bp)) {
6099 int func = BP_FUNC(bp);
6101 bp->fw_drv_pulse_wr_seq =
6102 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6103 DRV_PULSE_SEQ_MASK);
6104 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6105 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6106 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6110 /* this needs to be done before gunzip end */
6111 bnx2x_zero_def_sb(bp);
6112 for_each_queue(bp, i)
6113 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6116 bnx2x_gunzip_end(bp);
6121 /* send the MCP a request, block until there is a reply */
6122 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6124 int func = BP_FUNC(bp);
6125 u32 seq = ++bp->fw_seq;
6128 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6130 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6131 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6134 /* let the FW do its magic ... */
6137 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6139 /* Give the FW up to 2 seconds (200*10ms) */
6140 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6142 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6143 cnt*delay, rc, seq);
6145 /* is this a reply to our command? */
6146 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6147 rc &= FW_MSG_CODE_MASK;
6151 BNX2X_ERR("FW failed to respond!\n");
6159 static void bnx2x_free_mem(struct bnx2x *bp)
6162 #define BNX2X_PCI_FREE(x, y, size) \
6165 pci_free_consistent(bp->pdev, size, x, y); \
6171 #define BNX2X_FREE(x) \
6183 for_each_queue(bp, i) {
6186 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6187 bnx2x_fp(bp, i, status_blk_mapping),
6188 sizeof(struct host_status_block) +
6189 sizeof(struct eth_tx_db_data));
6192 for_each_rx_queue(bp, i) {
6194 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6195 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6196 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6197 bnx2x_fp(bp, i, rx_desc_mapping),
6198 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6200 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6201 bnx2x_fp(bp, i, rx_comp_mapping),
6202 sizeof(struct eth_fast_path_rx_cqe) *
6206 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6207 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6208 bnx2x_fp(bp, i, rx_sge_mapping),
6209 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6212 for_each_tx_queue(bp, i) {
6214 /* fastpath tx rings: tx_buf tx_desc */
6215 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6216 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6217 bnx2x_fp(bp, i, tx_desc_mapping),
6218 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6220 /* end of fastpath */
6222 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6223 sizeof(struct host_def_status_block));
6225 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6226 sizeof(struct bnx2x_slowpath));
6229 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6230 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6231 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6232 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6234 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6236 #undef BNX2X_PCI_FREE
6240 static int bnx2x_alloc_mem(struct bnx2x *bp)
6243 #define BNX2X_PCI_ALLOC(x, y, size) \
6245 x = pci_alloc_consistent(bp->pdev, size, y); \
6247 goto alloc_mem_err; \
6248 memset(x, 0, size); \
6251 #define BNX2X_ALLOC(x, size) \
6253 x = vmalloc(size); \
6255 goto alloc_mem_err; \
6256 memset(x, 0, size); \
6263 for_each_queue(bp, i) {
6264 bnx2x_fp(bp, i, bp) = bp;
6267 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6268 &bnx2x_fp(bp, i, status_blk_mapping),
6269 sizeof(struct host_status_block) +
6270 sizeof(struct eth_tx_db_data));
6273 for_each_rx_queue(bp, i) {
6275 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6276 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6277 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6278 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6279 &bnx2x_fp(bp, i, rx_desc_mapping),
6280 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6282 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6283 &bnx2x_fp(bp, i, rx_comp_mapping),
6284 sizeof(struct eth_fast_path_rx_cqe) *
6288 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6289 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6290 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6291 &bnx2x_fp(bp, i, rx_sge_mapping),
6292 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6295 for_each_tx_queue(bp, i) {
6297 bnx2x_fp(bp, i, hw_tx_prods) =
6298 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6300 bnx2x_fp(bp, i, tx_prods_mapping) =
6301 bnx2x_fp(bp, i, status_blk_mapping) +
6302 sizeof(struct host_status_block);
6304 /* fastpath tx rings: tx_buf tx_desc */
6305 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6306 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6307 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6308 &bnx2x_fp(bp, i, tx_desc_mapping),
6309 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6311 /* end of fastpath */
6313 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6314 sizeof(struct host_def_status_block));
6316 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6317 sizeof(struct bnx2x_slowpath));
6320 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6323 for (i = 0; i < 64*1024; i += 64) {
6324 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6325 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6328 /* allocate searcher T2 table
6329 we allocate 1/4 of alloc num for T2
6330 (which is not entered into the ILT) */
6331 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6334 for (i = 0; i < 16*1024; i += 64)
6335 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6337 /* now fixup the last line in the block to point to the next block */
6338 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6340 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6341 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6343 /* QM queues (128*MAX_CONN) */
6344 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6347 /* Slow path ring */
6348 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6356 #undef BNX2X_PCI_ALLOC
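/* free any skbs still queued on the tx rings (sw_cons != sw_prod) */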
6360 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6364 for_each_tx_queue(bp, i) {
6365 struct bnx2x_fastpath *fp = &bp->fp[i];
6367 u16 bd_cons = fp->tx_bd_cons;
6368 u16 sw_prod = fp->tx_pkt_prod;
6369 u16 sw_cons = fp->tx_pkt_cons;
6371 while (sw_cons != sw_prod) {
6372 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
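/* unmap and free every skb posted on the rx rings; the TPA pool is
   released as well unless TPA is disabled for the queue */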
6378 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6382 for_each_rx_queue(bp, j) {
6383 struct bnx2x_fastpath *fp = &bp->fp[j];
6385 for (i = 0; i < NUM_RX_BD; i++) {
6386 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6387 struct sk_buff *skb = rx_buf->skb;
6392 pci_unmap_single(bp->pdev,
6393 pci_unmap_addr(rx_buf, mapping),
6394 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6399 if (!fp->disable_tpa)
6400 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6401 ETH_MAX_AGGREGATION_QUEUES_E1 :
6402 ETH_MAX_AGGREGATION_QUEUES_E1H);
6406 static void bnx2x_free_skbs(struct bnx2x *bp)
6408 bnx2x_free_tx_skbs(bp);
6409 bnx2x_free_rx_skbs(bp);
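/* release the slowpath MSI-X vector and one vector per fastpath queue */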
6412 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6416 free_irq(bp->msix_table[0].vector, bp->dev);
6417 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6418 bp->msix_table[0].vector);
6420 for_each_queue(bp, i) {
6421 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6422 "state %x\n", i, bp->msix_table[i + offset].vector,
6423 bnx2x_fp(bp, i, state));
6425 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6429 static void bnx2x_free_irq(struct bnx2x *bp)
6431 if (bp->flags & USING_MSIX_FLAG) {
6432 bnx2x_free_msix_irqs(bp);
6433 pci_disable_msix(bp->pdev);
6434 bp->flags &= ~USING_MSIX_FLAG;
6436 } else if (bp->flags & USING_MSI_FLAG) {
6437 free_irq(bp->pdev->irq, bp->dev);
6438 pci_disable_msi(bp->pdev);
6439 bp->flags &= ~USING_MSI_FLAG;
6442 free_irq(bp->pdev->irq, bp->dev);
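/* build the MSI-X table (entry 0 - slowpath, then one entry per fastpath
   queue) and ask the PCI layer to enable MSI-X */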
6445 static int bnx2x_enable_msix(struct bnx2x *bp)
6447 int i, rc, offset = 1;
6450 bp->msix_table[0].entry = igu_vec;
6451 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6453 for_each_queue(bp, i) {
6454 igu_vec = BP_L_ID(bp) + offset + i;
6455 bp->msix_table[i + offset].entry = igu_vec;
6456 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6457 "(fastpath #%u)\n", i + offset, igu_vec, i);
6460 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6461 BNX2X_NUM_QUEUES(bp) + offset);
6463 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6467 bp->flags |= USING_MSIX_FLAG;
6472 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6474 int i, rc, offset = 1;
6476 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6477 bp->dev->name, bp->dev);
6479 BNX2X_ERR("request sp irq failed\n");
6483 for_each_queue(bp, i) {
6484 struct bnx2x_fastpath *fp = &bp->fp[i];
6486 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6487 rc = request_irq(bp->msix_table[i + offset].vector,
6488 bnx2x_msix_fp_int, 0, fp->name, fp);
6490 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6491 bnx2x_free_msix_irqs(bp);
6495 fp->state = BNX2X_FP_STATE_IRQ;
6498 i = BNX2X_NUM_QUEUES(bp);
6500 printk(KERN_INFO PFX
6501 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6502 bp->dev->name, bp->msix_table[0].vector,
6503 bp->msix_table[offset].vector,
6504 bp->msix_table[offset + i - 1].vector);
6506 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6507 bp->dev->name, bp->msix_table[0].vector,
6508 bp->msix_table[offset + i - 1].vector);
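/* try to enable MSI; if this fails the caller stays with legacy INTx */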
6513 static int bnx2x_enable_msi(struct bnx2x *bp)
6517 rc = pci_enable_msi(bp->pdev);
6519 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6522 bp->flags |= USING_MSI_FLAG;
6527 static int bnx2x_req_irq(struct bnx2x *bp)
6529 unsigned long flags;
6532 if (bp->flags & USING_MSI_FLAG)
6535 flags = IRQF_SHARED;
6537 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6538 bp->dev->name, bp->dev);
6540 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6545 static void bnx2x_napi_enable(struct bnx2x *bp)
6549 for_each_rx_queue(bp, i)
6550 napi_enable(&bnx2x_fp(bp, i, napi));
6553 static void bnx2x_napi_disable(struct bnx2x *bp)
6557 for_each_rx_queue(bp, i)
6558 napi_disable(&bnx2x_fp(bp, i, napi));
6561 static void bnx2x_netif_start(struct bnx2x *bp)
6563 if (atomic_dec_and_test(&bp->intr_sem)) {
6564 if (netif_running(bp->dev)) {
6565 bnx2x_napi_enable(bp);
6566 bnx2x_int_enable(bp);
6567 if (bp->state == BNX2X_STATE_OPEN)
6568 netif_tx_wake_all_queues(bp->dev);
6573 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6575 bnx2x_int_disable_sync(bp, disable_hw);
6576 bnx2x_napi_disable(bp);
6577 netif_tx_disable(bp->dev);
6578 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6582 * Init service functions
6585 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6587 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6588 int port = BP_PORT(bp);
6591 * unicasts 0-31:port0 32-63:port1
6592 * multicast 64-127:port0 128-191:port1
6594 config->hdr.length = 2;
6595 config->hdr.offset = port ? 32 : 0;
6596 config->hdr.client_id = bp->fp->cl_id;
6597 config->hdr.reserved1 = 0;
6600 config->config_table[0].cam_entry.msb_mac_addr =
6601 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6602 config->config_table[0].cam_entry.middle_mac_addr =
6603 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6604 config->config_table[0].cam_entry.lsb_mac_addr =
6605 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6606 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6608 config->config_table[0].target_table_entry.flags = 0;
6610 CAM_INVALIDATE(config->config_table[0]);
6611 config->config_table[0].target_table_entry.client_id = 0;
6612 config->config_table[0].target_table_entry.vlan_id = 0;
6614 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6615 (set ? "setting" : "clearing"),
6616 config->config_table[0].cam_entry.msb_mac_addr,
6617 config->config_table[0].cam_entry.middle_mac_addr,
6618 config->config_table[0].cam_entry.lsb_mac_addr);
6621 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6622 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6623 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6624 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6626 config->config_table[1].target_table_entry.flags =
6627 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6629 CAM_INVALIDATE(config->config_table[1]);
6630 config->config_table[1].target_table_entry.client_id = 0;
6631 config->config_table[1].target_table_entry.vlan_id = 0;
6633 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6634 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6635 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6638 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6640 struct mac_configuration_cmd_e1h *config =
6641 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6643 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6644 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6648 /* CAM allocation for E1H
6649 * unicasts: by func number
6650 * multicast: 20+FUNC*20, 20 each
6652 config->hdr.length = 1;
6653 config->hdr.offset = BP_FUNC(bp);
6654 config->hdr.client_id = bp->fp->cl_id;
6655 config->hdr.reserved1 = 0;
6658 config->config_table[0].msb_mac_addr =
6659 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6660 config->config_table[0].middle_mac_addr =
6661 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6662 config->config_table[0].lsb_mac_addr =
6663 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6664 config->config_table[0].client_id = BP_L_ID(bp);
6665 config->config_table[0].vlan_id = 0;
6666 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6668 config->config_table[0].flags = BP_PORT(bp);
6670 config->config_table[0].flags =
6671 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6673 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6674 (set ? "setting" : "clearing"),
6675 config->config_table[0].msb_mac_addr,
6676 config->config_table[0].middle_mac_addr,
6677 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6679 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6680 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6681 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
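/* poll (or sleep) until *state_p reaches the requested state, i.e. until
   the corresponding ramrod completion has been processed */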
6684 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6685 int *state_p, int poll)
6687 /* can take a while if any port is running */
6690 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6691 poll ? "polling" : "waiting", state, idx);
6696 bnx2x_rx_int(bp->fp, 10);
6697 /* if index is different from 0
6698 * the reply for some commands will
6699 * be on the non default queue
6702 bnx2x_rx_int(&bp->fp[idx], 10);
6705 mb(); /* state is changed by bnx2x_sp_event() */
6706 if (*state_p == state) {
6707 #ifdef BNX2X_STOP_ON_ERROR
6708 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6717 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6718 poll ? "polling" : "waiting", state, idx);
6719 #ifdef BNX2X_STOP_ON_ERROR
6726 static int bnx2x_setup_leading(struct bnx2x *bp)
6730 /* reset IGU state */
6731 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6734 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6736 /* Wait for completion */
6737 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
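/* bring up a non-default queue: ack the IGU, send CLIENT_SETUP and wait
   for the queue to reach the OPEN state */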
6742 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6744 struct bnx2x_fastpath *fp = &bp->fp[index];
6746 /* reset IGU state */
6747 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6750 fp->state = BNX2X_FP_STATE_OPENING;
6751 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6754 /* Wait for completion */
6755 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6759 static int bnx2x_poll(struct napi_struct *napi, int budget);
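/* choose the number of rx/tx queues and the interrupt mode: MSI-X with
   one queue per online CPU when RSS is enabled, otherwise a single queue */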
6761 static void bnx2x_set_int_mode(struct bnx2x *bp)
6769 bp->num_rx_queues = num_queues;
6770 bp->num_tx_queues = num_queues;
6772 "set number of queues to %d\n", num_queues);
6777 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6778 num_queues = min_t(u32, num_online_cpus(),
6779 BNX2X_MAX_QUEUES(bp));
6782 bp->num_rx_queues = num_queues;
6783 bp->num_tx_queues = num_queues;
6784 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6785 " number of tx queues to %d\n",
6786 bp->num_rx_queues, bp->num_tx_queues);
6787 /* if we can't use MSI-X we only need one fp,
6788 * so try to enable MSI-X with the requested number of fp's
6789 * and fall back to MSI or legacy INTx with one fp
6791 if (bnx2x_enable_msix(bp)) {
6792 /* failed to enable MSI-X */
6794 bp->num_rx_queues = num_queues;
6795 bp->num_tx_queues = num_queues;
6797 BNX2X_ERR("Multi requested but failed to "
6798 "enable MSI-X set number of "
6799 "queues to %d\n", num_queues);
6803 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6806 static void bnx2x_set_rx_mode(struct net_device *dev);
6808 /* must be called with rtnl_lock */
6809 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6813 #ifdef BNX2X_STOP_ON_ERROR
6814 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6815 if (unlikely(bp->panic))
6819 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6821 bnx2x_set_int_mode(bp);
6823 if (bnx2x_alloc_mem(bp))
6826 for_each_rx_queue(bp, i)
6827 bnx2x_fp(bp, i, disable_tpa) =
6828 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6830 for_each_rx_queue(bp, i)
6831 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6834 #ifdef BNX2X_STOP_ON_ERROR
6835 for_each_rx_queue(bp, i) {
6836 struct bnx2x_fastpath *fp = &bp->fp[i];
6838 fp->poll_no_work = 0;
6840 fp->poll_max_calls = 0;
6841 fp->poll_complete = 0;
6845 bnx2x_napi_enable(bp);
6847 if (bp->flags & USING_MSIX_FLAG) {
6848 rc = bnx2x_req_msix_irqs(bp);
6850 pci_disable_msix(bp->pdev);
6854 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6855 bnx2x_enable_msi(bp);
6857 rc = bnx2x_req_irq(bp);
6859 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6860 if (bp->flags & USING_MSI_FLAG)
6861 pci_disable_msi(bp->pdev);
6864 if (bp->flags & USING_MSI_FLAG) {
6865 bp->dev->irq = bp->pdev->irq;
6866 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6867 bp->dev->name, bp->pdev->irq);
6871 /* Send LOAD_REQUEST command to MCP
6872 Returns the type of LOAD command:
6873 if it is the first port to be initialized, the
6874 common blocks should be initialized as well; otherwise they are not
6876 if (!BP_NOMCP(bp)) {
6877 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6879 BNX2X_ERR("MCP response failure, aborting\n");
6883 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6884 rc = -EBUSY; /* other port in diagnostic mode */
6889 int port = BP_PORT(bp);
6891 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
6892 load_count[0], load_count[1], load_count[2]);
6894 load_count[1 + port]++;
6895 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
6896 load_count[0], load_count[1], load_count[2]);
6897 if (load_count[0] == 1)
6898 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6899 else if (load_count[1 + port] == 1)
6900 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6902 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6905 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6906 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6910 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6913 rc = bnx2x_init_hw(bp, load_code);
6915 BNX2X_ERR("HW init failed, aborting\n");
6919 /* Setup NIC internals and enable interrupts */
6920 bnx2x_nic_init(bp, load_code);
6922 /* Send LOAD_DONE command to MCP */
6923 if (!BP_NOMCP(bp)) {
6924 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6926 BNX2X_ERR("MCP response failure, aborting\n");
6932 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6934 rc = bnx2x_setup_leading(bp);
6936 BNX2X_ERR("Setup leading failed!\n");
6940 if (CHIP_IS_E1H(bp))
6941 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6942 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6943 bp->state = BNX2X_STATE_DISABLED;
6946 if (bp->state == BNX2X_STATE_OPEN)
6947 for_each_nondefault_queue(bp, i) {
6948 rc = bnx2x_setup_multi(bp, i);
6954 bnx2x_set_mac_addr_e1(bp, 1);
6956 bnx2x_set_mac_addr_e1h(bp, 1);
6959 bnx2x_initial_phy_init(bp, load_mode);
6961 /* Start fast path */
6962 switch (load_mode) {
6964 /* Tx queues should only be re-enabled */
6965 netif_tx_wake_all_queues(bp->dev);
6966 /* Initialize the receive filter. */
6967 bnx2x_set_rx_mode(bp->dev);
6971 netif_tx_start_all_queues(bp->dev);
6972 /* Initialize the receive filter. */
6973 bnx2x_set_rx_mode(bp->dev);
6977 /* Initialize the receive filter. */
6978 bnx2x_set_rx_mode(bp->dev);
6979 bp->state = BNX2X_STATE_DIAG;
6987 bnx2x__link_status_update(bp);
6989 /* start the timer */
6990 mod_timer(&bp->timer, jiffies + bp->current_interval);
6996 bnx2x_int_disable_sync(bp, 1);
6997 if (!BP_NOMCP(bp)) {
6998 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6999 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7002 /* Free SKBs, SGEs, TPA pool and driver internals */
7003 bnx2x_free_skbs(bp);
7004 for_each_rx_queue(bp, i)
7005 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7010 bnx2x_napi_disable(bp);
7011 for_each_rx_queue(bp, i)
7012 netif_napi_del(&bnx2x_fp(bp, i, napi));
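/* tear down a non-default queue: HALT the connection, then delete its
   CFC entry, waiting for each ramrod to complete */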
7018 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7020 struct bnx2x_fastpath *fp = &bp->fp[index];
7023 /* halt the connection */
7024 fp->state = BNX2X_FP_STATE_HALTING;
7025 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7027 /* Wait for completion */
7028 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7030 if (rc) /* timeout */
7033 /* delete cfc entry */
7034 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7036 /* Wait for completion */
7037 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
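/* tear down the leading connection: HALT it, then send PORT_DELETE and
   track its completion on the default status block */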
7042 static int bnx2x_stop_leading(struct bnx2x *bp)
7044 __le16 dsb_sp_prod_idx;
7045 /* if the other port is handling traffic,
7046 this can take a lot of time */
7052 /* Send HALT ramrod */
7053 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7054 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7056 /* Wait for completion */
7057 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7058 &(bp->fp[0].state), 1);
7059 if (rc) /* timeout */
7062 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7064 /* Send PORT_DELETE ramrod */
7065 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7067 /* Wait for completion to arrive on default status block
7068 we are going to reset the chip anyway
7069 so there is not much to do if this times out
7071 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7073 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7074 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7075 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7076 #ifdef BNX2X_STOP_ON_ERROR
7084 rmb(); /* Refresh the dsb_sp_prod */
7086 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7087 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
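/* per-function reset: mask the HC leading/trailing edges and clear this
   function's ILT entries */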
7092 static void bnx2x_reset_func(struct bnx2x *bp)
7094 int port = BP_PORT(bp);
7095 int func = BP_FUNC(bp);
7099 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7100 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7103 base = FUNC_ILT_BASE(func);
7104 for (i = base; i < base + ILT_PER_FUNC; i++)
7105 bnx2x_ilt_wr(bp, i, 0);
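/* per-port reset: mask NIG/AEU attentions, block rx traffic into the BRB
   and warn if the BRB still holds blocks for this port */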
7108 static void bnx2x_reset_port(struct bnx2x *bp)
7110 int port = BP_PORT(bp);
7113 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7115 /* Do not rcv packets to BRB */
7116 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7117 /* Do not direct rcv packets that are not for MCP to the BRB */
7118 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7119 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7122 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7125 /* Check for BRB port occupancy */
7126 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7128 DP(NETIF_MSG_IFDOWN,
7129 "BRB1 is not empty %d blocks are occupied\n", val);
7131 /* TODO: Close Doorbell port? */
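/* reset function/port/common blocks according to the unload response
   received from the MCP */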
7134 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7136 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7137 BP_FUNC(bp), reset_code);
7139 switch (reset_code) {
7140 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7141 bnx2x_reset_port(bp);
7142 bnx2x_reset_func(bp);
7143 bnx2x_reset_common(bp);
7146 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7147 bnx2x_reset_port(bp);
7148 bnx2x_reset_func(bp);
7151 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7152 bnx2x_reset_func(bp);
7156 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7161 /* must be called with rtnl_lock */
7162 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7164 int port = BP_PORT(bp);
7168 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7170 bp->rx_mode = BNX2X_RX_MODE_NONE;
7171 bnx2x_set_storm_rx_mode(bp);
7173 bnx2x_netif_stop(bp, 1);
7175 del_timer_sync(&bp->timer);
7176 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7177 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7178 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7183 /* Wait until tx fastpath tasks complete */
7184 for_each_tx_queue(bp, i) {
7185 struct bnx2x_fastpath *fp = &bp->fp[i];
7188 while (bnx2x_has_tx_work_unload(fp)) {
7192 BNX2X_ERR("timeout waiting for queue[%d]\n",
7194 #ifdef BNX2X_STOP_ON_ERROR
7205 /* Give HW time to discard old tx messages */
7208 if (CHIP_IS_E1(bp)) {
7209 struct mac_configuration_cmd *config =
7210 bnx2x_sp(bp, mcast_config);
7212 bnx2x_set_mac_addr_e1(bp, 0);
7214 for (i = 0; i < config->hdr.length; i++)
7215 CAM_INVALIDATE(config->config_table[i]);
7217 config->hdr.length = i;
7218 if (CHIP_REV_IS_SLOW(bp))
7219 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7221 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7222 config->hdr.client_id = bp->fp->cl_id;
7223 config->hdr.reserved1 = 0;
7225 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7226 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7227 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7230 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7232 bnx2x_set_mac_addr_e1h(bp, 0);
7234 for (i = 0; i < MC_HASH_SIZE; i++)
7235 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7238 if (unload_mode == UNLOAD_NORMAL)
7239 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7241 else if (bp->flags & NO_WOL_FLAG) {
7242 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7243 if (CHIP_IS_E1H(bp))
7244 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7246 } else if (bp->wol) {
7247 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7248 u8 *mac_addr = bp->dev->dev_addr;
7250 /* The mac address is written to entries 1-4 to
7251 preserve entry 0 which is used by the PMF */
7252 u8 entry = (BP_E1HVN(bp) + 1)*8;
7254 val = (mac_addr[0] << 8) | mac_addr[1];
7255 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7257 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7258 (mac_addr[4] << 8) | mac_addr[5];
7259 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7261 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7264 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7266 /* Close multi and leading connections
7267 Completions for ramrods are collected in a synchronous way */
7268 for_each_nondefault_queue(bp, i)
7269 if (bnx2x_stop_multi(bp, i))
7272 rc = bnx2x_stop_leading(bp);
7274 BNX2X_ERR("Stop leading failed!\n");
7275 #ifdef BNX2X_STOP_ON_ERROR
7284 reset_code = bnx2x_fw_command(bp, reset_code);
7286 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7287 load_count[0], load_count[1], load_count[2]);
7289 load_count[1 + port]--;
7290 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7291 load_count[0], load_count[1], load_count[2]);
7292 if (load_count[0] == 0)
7293 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7294 else if (load_count[1 + port] == 0)
7295 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7297 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7300 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7301 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7302 bnx2x__link_reset(bp);
7304 /* Reset the chip */
7305 bnx2x_reset_chip(bp, reset_code);
7307 /* Report UNLOAD_DONE to MCP */
7309 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7313 /* Free SKBs, SGEs, TPA pool and driver internals */
7314 bnx2x_free_skbs(bp);
7315 for_each_rx_queue(bp, i)
7316 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7317 for_each_rx_queue(bp, i)
7318 netif_napi_del(&bnx2x_fp(bp, i, napi));
7321 bp->state = BNX2X_STATE_CLOSED;
7323 netif_carrier_off(bp->dev);
7328 static void bnx2x_reset_task(struct work_struct *work)
7330 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7332 #ifdef BNX2X_STOP_ON_ERROR
7333 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7334 " so reset not done to allow debug dump,\n"
7335 KERN_ERR " you will need to reboot when done\n");
7341 if (!netif_running(bp->dev))
7342 goto reset_task_exit;
7344 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7345 bnx2x_nic_load(bp, LOAD_NORMAL);
7351 /* end of nic load/unload */
7356 * Init service functions
7359 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7362 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7363 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7364 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7365 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7366 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7367 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7368 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7369 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7371 BNX2X_ERR("Unsupported function index: %d\n", func);
7376 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7378 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7380 /* Flush all outstanding writes */
7383 /* Pretend to be function 0 */
7385 /* Flush the GRC transaction (in the chip) */
7386 new_val = REG_RD(bp, reg);
7388 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7393 /* From now we are in the "like-E1" mode */
7394 bnx2x_int_disable(bp);
7396 /* Flush all outstanding writes */
7399 /* Restore the original function settings */
7400 REG_WR(bp, reg, orig_func);
7401 new_val = REG_RD(bp, reg);
7402 if (new_val != orig_func) {
7403 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7404 orig_func, new_val);
7409 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7411 if (CHIP_IS_E1H(bp))
7412 bnx2x_undi_int_disable_e1h(bp, func);
7414 bnx2x_int_disable(bp);
7417 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7421 /* Check if there is any driver already loaded */
7422 val = REG_RD(bp, MISC_REG_UNPREPARED);
7424 /* Check if it is the UNDI driver
7425 * UNDI driver initializes CID offset for normal bell to 0x7
7427 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7428 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7430 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7432 int func = BP_FUNC(bp);
7436 /* clear the UNDI indication */
7437 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7439 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7441 /* try unload UNDI on port 0 */
7444 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7445 DRV_MSG_SEQ_NUMBER_MASK);
7446 reset_code = bnx2x_fw_command(bp, reset_code);
7448 /* if UNDI is loaded on the other port */
7449 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7451 /* send "DONE" for previous unload */
7452 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7454 /* unload UNDI on port 1 */
7457 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7458 DRV_MSG_SEQ_NUMBER_MASK);
7459 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7461 bnx2x_fw_command(bp, reset_code);
7464 /* now it's safe to release the lock */
7465 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7467 bnx2x_undi_int_disable(bp, func);
7469 /* close input traffic and wait for it */
7470 /* Do not rcv packets to BRB */
7472 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7473 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7474 /* Do not direct rcv packets that are not for MCP to the BRB */
7477 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7478 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7481 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7482 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7485 /* save NIG port swap info */
7486 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7487 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7490 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7493 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7495 /* take the NIG out of reset and restore swap values */
7497 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7498 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7499 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7500 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7502 /* send unload done to the MCP */
7503 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7505 /* restore our func and fw_seq */
7508 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7509 DRV_MSG_SEQ_NUMBER_MASK);
7512 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
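/* read the HW info that is common to the whole device: chip id, flash
   size, shmem base, shared HW/feature config and bootcode version */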
7516 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7518 u32 val, val2, val3, val4, id;
7521 /* Get the chip revision id and number. */
7522 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7523 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7524 id = ((val & 0xffff) << 16);
7525 val = REG_RD(bp, MISC_REG_CHIP_REV);
7526 id |= ((val & 0xf) << 12);
7527 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7528 id |= ((val & 0xff) << 4);
7529 val = REG_RD(bp, MISC_REG_BOND_ID);
7531 bp->common.chip_id = id;
7532 bp->link_params.chip_id = bp->common.chip_id;
7533 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7535 val = (REG_RD(bp, 0x2874) & 0x55);
7536 if ((bp->common.chip_id & 0x1) ||
7537 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7538 bp->flags |= ONE_PORT_FLAG;
7539 BNX2X_DEV_INFO("single port device\n");
7542 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7543 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7544 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7545 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7546 bp->common.flash_size, bp->common.flash_size);
7548 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7549 bp->link_params.shmem_base = bp->common.shmem_base;
7550 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7552 if (!bp->common.shmem_base ||
7553 (bp->common.shmem_base < 0xA0000) ||
7554 (bp->common.shmem_base >= 0xC0000)) {
7555 BNX2X_DEV_INFO("MCP not active\n");
7556 bp->flags |= NO_MCP_FLAG;
7560 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7561 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7562 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7563 BNX2X_ERR("BAD MCP validity signature\n");
7565 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7566 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7568 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7569 SHARED_HW_CFG_LED_MODE_MASK) >>
7570 SHARED_HW_CFG_LED_MODE_SHIFT);
7572 bp->link_params.feature_config_flags = 0;
7573 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7574 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7575 bp->link_params.feature_config_flags |=
7576 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7578 bp->link_params.feature_config_flags &=
7579 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7581 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7582 bp->common.bc_ver = val;
7583 BNX2X_DEV_INFO("bc_ver %X\n", val);
7584 if (val < BNX2X_BC_VER) {
7585 /* for now only warn
7586 * later we might need to enforce this */
7587 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7588 " please upgrade BC\n", BNX2X_BC_VER, val);
7591 if (BP_E1HVN(bp) == 0) {
7592 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7593 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7595 /* no WOL capability for E1HVN != 0 */
7596 bp->flags |= NO_WOL_FLAG;
7598 BNX2X_DEV_INFO("%sWoL capable\n",
7599 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7601 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7602 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7603 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7604 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7606 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7607 val, val2, val3, val4);
7610 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7613 int port = BP_PORT(bp);
7616 switch (switch_cfg) {
7618 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7621 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7622 switch (ext_phy_type) {
7623 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7624 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7627 bp->port.supported |= (SUPPORTED_10baseT_Half |
7628 SUPPORTED_10baseT_Full |
7629 SUPPORTED_100baseT_Half |
7630 SUPPORTED_100baseT_Full |
7631 SUPPORTED_1000baseT_Full |
7632 SUPPORTED_2500baseX_Full |
7637 SUPPORTED_Asym_Pause);
7640 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7641 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7644 bp->port.supported |= (SUPPORTED_10baseT_Half |
7645 SUPPORTED_10baseT_Full |
7646 SUPPORTED_100baseT_Half |
7647 SUPPORTED_100baseT_Full |
7648 SUPPORTED_1000baseT_Full |
7653 SUPPORTED_Asym_Pause);
7657 BNX2X_ERR("NVRAM config error. "
7658 "BAD SerDes ext_phy_config 0x%x\n",
7659 bp->link_params.ext_phy_config);
7663 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7665 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7668 case SWITCH_CFG_10G:
7669 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7672 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7673 switch (ext_phy_type) {
7674 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7675 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7678 bp->port.supported |= (SUPPORTED_10baseT_Half |
7679 SUPPORTED_10baseT_Full |
7680 SUPPORTED_100baseT_Half |
7681 SUPPORTED_100baseT_Full |
7682 SUPPORTED_1000baseT_Full |
7683 SUPPORTED_2500baseX_Full |
7684 SUPPORTED_10000baseT_Full |
7689 SUPPORTED_Asym_Pause);
7692 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7693 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7696 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7697 SUPPORTED_1000baseT_Full |
7701 SUPPORTED_Asym_Pause);
7704 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7705 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7708 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7709 SUPPORTED_2500baseX_Full |
7710 SUPPORTED_1000baseT_Full |
7714 SUPPORTED_Asym_Pause);
7717 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7718 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7721 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7724 SUPPORTED_Asym_Pause);
7727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7728 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7731 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7732 SUPPORTED_1000baseT_Full |
7735 SUPPORTED_Asym_Pause);
7738 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7739 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7742 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7743 SUPPORTED_1000baseT_Full |
7747 SUPPORTED_Asym_Pause);
7750 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7751 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7754 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7758 SUPPORTED_Asym_Pause);
7761 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7762 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7765 bp->port.supported |= (SUPPORTED_10baseT_Half |
7766 SUPPORTED_10baseT_Full |
7767 SUPPORTED_100baseT_Half |
7768 SUPPORTED_100baseT_Full |
7769 SUPPORTED_1000baseT_Full |
7770 SUPPORTED_10000baseT_Full |
7774 SUPPORTED_Asym_Pause);
7777 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7778 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7779 bp->link_params.ext_phy_config);
7783 BNX2X_ERR("NVRAM config error. "
7784 "BAD XGXS ext_phy_config 0x%x\n",
7785 bp->link_params.ext_phy_config);
7789 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7791 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7796 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7797 bp->port.link_config);
7800 bp->link_params.phy_addr = bp->port.phy_addr;
7802 /* mask what we support according to speed_cap_mask */
7803 if (!(bp->link_params.speed_cap_mask &
7804 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7805 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7807 if (!(bp->link_params.speed_cap_mask &
7808 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7809 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7811 if (!(bp->link_params.speed_cap_mask &
7812 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7813 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7815 if (!(bp->link_params.speed_cap_mask &
7816 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7817 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7819 if (!(bp->link_params.speed_cap_mask &
7820 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7821 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7822 SUPPORTED_1000baseT_Full);
7824 if (!(bp->link_params.speed_cap_mask &
7825 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7826 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7828 if (!(bp->link_params.speed_cap_mask &
7829 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7830 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7832 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7835 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7837 bp->link_params.req_duplex = DUPLEX_FULL;
7839 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7840 case PORT_FEATURE_LINK_SPEED_AUTO:
7841 if (bp->port.supported & SUPPORTED_Autoneg) {
7842 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7843 bp->port.advertising = bp->port.supported;
7846 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7848 if ((ext_phy_type ==
7849 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7851 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7852 /* force 10G, no AN */
7853 bp->link_params.req_line_speed = SPEED_10000;
7854 bp->port.advertising =
7855 (ADVERTISED_10000baseT_Full |
7859 BNX2X_ERR("NVRAM config error. "
7860 "Invalid link_config 0x%x"
7861 " Autoneg not supported\n",
7862 bp->port.link_config);
7867 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7868 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7869 bp->link_params.req_line_speed = SPEED_10;
7870 bp->port.advertising = (ADVERTISED_10baseT_Full |
7873 BNX2X_ERR("NVRAM config error. "
7874 "Invalid link_config 0x%x"
7875 " speed_cap_mask 0x%x\n",
7876 bp->port.link_config,
7877 bp->link_params.speed_cap_mask);
7882 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7883 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7884 bp->link_params.req_line_speed = SPEED_10;
7885 bp->link_params.req_duplex = DUPLEX_HALF;
7886 bp->port.advertising = (ADVERTISED_10baseT_Half |
7889 BNX2X_ERR("NVRAM config error. "
7890 "Invalid link_config 0x%x"
7891 " speed_cap_mask 0x%x\n",
7892 bp->port.link_config,
7893 bp->link_params.speed_cap_mask);
7898 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7899 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7900 bp->link_params.req_line_speed = SPEED_100;
7901 bp->port.advertising = (ADVERTISED_100baseT_Full |
7904 BNX2X_ERR("NVRAM config error. "
7905 "Invalid link_config 0x%x"
7906 " speed_cap_mask 0x%x\n",
7907 bp->port.link_config,
7908 bp->link_params.speed_cap_mask);
7913 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7914 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7915 bp->link_params.req_line_speed = SPEED_100;
7916 bp->link_params.req_duplex = DUPLEX_HALF;
7917 bp->port.advertising = (ADVERTISED_100baseT_Half |
7920 BNX2X_ERR("NVRAM config error. "
7921 "Invalid link_config 0x%x"
7922 " speed_cap_mask 0x%x\n",
7923 bp->port.link_config,
7924 bp->link_params.speed_cap_mask);
7929 case PORT_FEATURE_LINK_SPEED_1G:
7930 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7931 bp->link_params.req_line_speed = SPEED_1000;
7932 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7935 BNX2X_ERR("NVRAM config error. "
7936 "Invalid link_config 0x%x"
7937 " speed_cap_mask 0x%x\n",
7938 bp->port.link_config,
7939 bp->link_params.speed_cap_mask);
7944 case PORT_FEATURE_LINK_SPEED_2_5G:
7945 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7946 bp->link_params.req_line_speed = SPEED_2500;
7947 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7950 BNX2X_ERR("NVRAM config error. "
7951 "Invalid link_config 0x%x"
7952 " speed_cap_mask 0x%x\n",
7953 bp->port.link_config,
7954 bp->link_params.speed_cap_mask);
7959 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7960 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7961 case PORT_FEATURE_LINK_SPEED_10G_KR:
7962 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7963 bp->link_params.req_line_speed = SPEED_10000;
7964 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7967 BNX2X_ERR("NVRAM config error. "
7968 "Invalid link_config 0x%x"
7969 " speed_cap_mask 0x%x\n",
7970 bp->port.link_config,
7971 bp->link_params.speed_cap_mask);
7977 BNX2X_ERR("NVRAM config error. "
7978 "BAD link speed link_config 0x%x\n",
7979 bp->port.link_config);
7980 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7981 bp->port.advertising = bp->port.supported;
7985 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7986 PORT_FEATURE_FLOW_CONTROL_MASK);
7987 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7988 !(bp->port.supported & SUPPORTED_Autoneg))
7989 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7991 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7992 " advertising 0x%x\n",
7993 bp->link_params.req_line_speed,
7994 bp->link_params.req_duplex,
7995 bp->link_params.req_flow_ctrl, bp->port.advertising);
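/* read the per-port HW configuration from shmem: lane/external PHY config,
   speed capabilities, link config and the port MAC address */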
7998 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8000 int port = BP_PORT(bp);
8005 bp->link_params.bp = bp;
8006 bp->link_params.port = port;
8008 bp->link_params.lane_config =
8009 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8010 bp->link_params.ext_phy_config =
8012 dev_info.port_hw_config[port].external_phy_config);
8013 bp->link_params.speed_cap_mask =
8015 dev_info.port_hw_config[port].speed_capability_mask);
8017 bp->port.link_config =
8018 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8020 /* Get the 4 lanes xgxs config rx and tx */
8021 for (i = 0; i < 2; i++) {
8023 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8024 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8025 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8028 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8029 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8030 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8033 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8034 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8035 bp->link_params.feature_config_flags |=
8036 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8038 bp->link_params.feature_config_flags &=
8039 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8041 /* If the device is capable of WoL, set the default state according to the HW */
8044 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8045 (config & PORT_FEATURE_WOL_ENABLED));
8047 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8048 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8049 bp->link_params.lane_config,
8050 bp->link_params.ext_phy_config,
8051 bp->link_params.speed_cap_mask, bp->port.link_config);
8053 bp->link_params.switch_cfg = (bp->port.link_config &
8054 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8055 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8057 bnx2x_link_settings_requested(bp);
8059 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8060 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8061 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8062 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8063 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8064 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8065 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8066 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8067 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8068 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
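/* gather all HW info: common part, E1H multi-function (E1HOV) settings,
   per-port configuration and the MAC address */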
8071 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8073 int func = BP_FUNC(bp);
8077 bnx2x_get_common_hwinfo(bp);
8081 if (CHIP_IS_E1H(bp)) {
8083 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8085 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8086 FUNC_MF_CFG_E1HOV_TAG_MASK);
8087 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8091 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8093 func, bp->e1hov, bp->e1hov);
8095 BNX2X_DEV_INFO("single function mode\n");
8097 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8098 " aborting\n", func);
8104 if (!BP_NOMCP(bp)) {
8105 bnx2x_get_port_hwinfo(bp);
8107 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8108 DRV_MSG_SEQ_NUMBER_MASK);
8109 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8113 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8114 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8115 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8116 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8117 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8118 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8119 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8120 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8121 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8122 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8123 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8125 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8133 /* only supposed to happen on emulation/FPGA */
8134 BNX2X_ERR("warning random MAC workaround active\n");
8135 random_ether_addr(bp->dev->dev_addr);
8136 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
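/* one-time initialization of the driver private structure at probe time:
   locks, work items, HW info, module parameters and the periodic timer */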
8142 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8144 int func = BP_FUNC(bp);
8148 /* Disable interrupt handling until HW is initialized */
8149 atomic_set(&bp->intr_sem, 1);
8151 mutex_init(&bp->port.phy_mutex);
8153 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8154 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8156 rc = bnx2x_get_hwinfo(bp);
8158 /* need to reset chip if undi was active */
8160 bnx2x_undi_unload(bp);
8162 if (CHIP_REV_IS_FPGA(bp))
8163 printk(KERN_ERR PFX "FPGA detected\n");
8165 if (BP_NOMCP(bp) && (func == 0))
8167 "MCP disabled, must load devices in order!\n");
8169 /* Set multi queue mode */
8170 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8171 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8173 "Multi disabled since int_mode requested is not MSI-X\n");
8174 multi_mode = ETH_RSS_MODE_DISABLED;
8176 bp->multi_mode = multi_mode;
8181 bp->flags &= ~TPA_ENABLE_FLAG;
8182 bp->dev->features &= ~NETIF_F_LRO;
8184 bp->flags |= TPA_ENABLE_FLAG;
8185 bp->dev->features |= NETIF_F_LRO;
8190 bp->tx_ring_size = MAX_TX_AVAIL;
8191 bp->rx_ring_size = MAX_RX_AVAIL;
8198 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8199 bp->current_interval = (poll ? poll : timer_interval);
8201 init_timer(&bp->timer);
8202 bp->timer.expires = jiffies + bp->current_interval;
8203 bp->timer.data = (unsigned long) bp;
8204 bp->timer.function = bnx2x_timer;
8210 * ethtool service functions
8213 /* All ethtool functions called with rtnl_lock */
8215 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8217 struct bnx2x *bp = netdev_priv(dev);
8219 cmd->supported = bp->port.supported;
8220 cmd->advertising = bp->port.advertising;
8222 if (netif_carrier_ok(dev)) {
8223 cmd->speed = bp->link_vars.line_speed;
8224 cmd->duplex = bp->link_vars.duplex;
8226 cmd->speed = bp->link_params.req_line_speed;
8227 cmd->duplex = bp->link_params.req_duplex;
8232 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8233 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8234 if (vn_max_rate < cmd->speed)
8235 cmd->speed = vn_max_rate;
8238 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8240 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8242 switch (ext_phy_type) {
8243 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8244 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8245 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8246 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8247 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8248 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8249 cmd->port = PORT_FIBRE;
8252 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8253 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8254 cmd->port = PORT_TP;
8257 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8258 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8259 bp->link_params.ext_phy_config);
8263 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8264 bp->link_params.ext_phy_config);
8268 cmd->port = PORT_TP;
8270 cmd->phy_address = bp->port.phy_addr;
8271 cmd->transceiver = XCVR_INTERNAL;
8273 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8274 cmd->autoneg = AUTONEG_ENABLE;
8276 cmd->autoneg = AUTONEG_DISABLE;
8281 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8282 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8283 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8284 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8285 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8286 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8287 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
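/* ethtool set_settings: validate the requested autoneg/speed/duplex against
   what the port supports and store the new link parameters */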
8292 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8294 struct bnx2x *bp = netdev_priv(dev);
8300 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8301 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8302 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8303 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8304 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8305 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8306 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8308 if (cmd->autoneg == AUTONEG_ENABLE) {
8309 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8310 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8314 /* advertise the requested speed and duplex if supported */
8315 cmd->advertising &= bp->port.supported;
8317 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8318 bp->link_params.req_duplex = DUPLEX_FULL;
8319 bp->port.advertising |= (ADVERTISED_Autoneg |
8322 } else { /* forced speed */
8323 /* advertise the requested speed and duplex if supported */
8324 switch (cmd->speed) {
8326 if (cmd->duplex == DUPLEX_FULL) {
8327 if (!(bp->port.supported &
8328 SUPPORTED_10baseT_Full)) {
8330 "10M full not supported\n");
8334 advertising = (ADVERTISED_10baseT_Full |
8337 if (!(bp->port.supported &
8338 SUPPORTED_10baseT_Half)) {
8340 "10M half not supported\n");
8344 advertising = (ADVERTISED_10baseT_Half |
8350 if (cmd->duplex == DUPLEX_FULL) {
8351 if (!(bp->port.supported &
8352 SUPPORTED_100baseT_Full)) {
8354 "100M full not supported\n");
8358 advertising = (ADVERTISED_100baseT_Full |
8361 if (!(bp->port.supported &
8362 SUPPORTED_100baseT_Half)) {
8364 "100M half not supported\n");
8368 advertising = (ADVERTISED_100baseT_Half |
8374 if (cmd->duplex != DUPLEX_FULL) {
8375 DP(NETIF_MSG_LINK, "1G half not supported\n");
8379 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8380 DP(NETIF_MSG_LINK, "1G full not supported\n");
8384 advertising = (ADVERTISED_1000baseT_Full |
8389 if (cmd->duplex != DUPLEX_FULL) {
8391 "2.5G half not supported\n");
8395 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8397 "2.5G full not supported\n");
8401 advertising = (ADVERTISED_2500baseX_Full |
8406 if (cmd->duplex != DUPLEX_FULL) {
8407 DP(NETIF_MSG_LINK, "10G half not supported\n");
8411 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8412 DP(NETIF_MSG_LINK, "10G full not supported\n");
8416 advertising = (ADVERTISED_10000baseT_Full |
8421 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8425 bp->link_params.req_line_speed = cmd->speed;
8426 bp->link_params.req_duplex = cmd->duplex;
8427 bp->port.advertising = advertising;
8430 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8431 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8432 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8433 bp->port.advertising);
8435 if (netif_running(dev)) {
8436 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8443 #define PHY_FW_VER_LEN 10
8445 static void bnx2x_get_drvinfo(struct net_device *dev,
8446 struct ethtool_drvinfo *info)
8448 struct bnx2x *bp = netdev_priv(dev);
8449 u8 phy_fw_ver[PHY_FW_VER_LEN];
8451 strcpy(info->driver, DRV_MODULE_NAME);
8452 strcpy(info->version, DRV_MODULE_VERSION);
8454 phy_fw_ver[0] = '\0';
8456 bnx2x_acquire_phy_lock(bp);
8457 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8458 (bp->state != BNX2X_STATE_CLOSED),
8459 phy_fw_ver, PHY_FW_VER_LEN);
8460 bnx2x_release_phy_lock(bp);
8463 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8464 (bp->common.bc_ver & 0xff0000) >> 16,
8465 (bp->common.bc_ver & 0xff00) >> 8,
8466 (bp->common.bc_ver & 0xff),
8467 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8468 strcpy(info->bus_info, pci_name(bp->pdev));
8469 info->n_stats = BNX2X_NUM_STATS;
8470 info->testinfo_len = BNX2X_NUM_TESTS;
8471 info->eedump_len = bp->common.flash_size;
8472 info->regdump_len = 0;
8475 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8476 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
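/* compute the register dump size for the running chip (E1 vs E1H) */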
8478 static int bnx2x_get_regs_len(struct net_device *dev)
8480 static u32 regdump_len;
8481 struct bnx2x *bp = netdev_priv(dev);
8487 if (CHIP_IS_E1(bp)) {
8488 for (i = 0; i < REGS_COUNT; i++)
8489 if (IS_E1_ONLINE(reg_addrs[i].info))
8490 regdump_len += reg_addrs[i].size;
8492 for (i = 0; i < WREGS_COUNT_E1; i++)
8493 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8494 regdump_len += wreg_addrs_e1[i].size *
8495 (1 + wreg_addrs_e1[i].read_regs_count);
8498 for (i = 0; i < REGS_COUNT; i++)
8499 if (IS_E1H_ONLINE(reg_addrs[i].info))
8500 regdump_len += reg_addrs[i].size;
8502 for (i = 0; i < WREGS_COUNT_E1H; i++)
8503 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8504 regdump_len += wreg_addrs_e1h[i].size *
8505 (1 + wreg_addrs_e1h[i].read_regs_count);
8508 regdump_len += sizeof(struct dump_hdr);
8513 static void bnx2x_get_regs(struct net_device *dev,
8514 struct ethtool_regs *regs, void *_p)
8517 struct bnx2x *bp = netdev_priv(dev);
8518 struct dump_hdr dump_hdr = {0};
8521 memset(p, 0, regs->len);
8523 if (!netif_running(bp->dev))
8526 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8527 dump_hdr.dump_sign = dump_sign_all;
8528 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8529 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8530 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8531 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8532 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8534 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8535 p += dump_hdr.hdr_size + 1;
8537 if (CHIP_IS_E1(bp)) {
8538 for (i = 0; i < REGS_COUNT; i++)
8539 if (IS_E1_ONLINE(reg_addrs[i].info))
8540 for (j = 0; j < reg_addrs[i].size; j++)
8542 reg_addrs[i].addr + j*4);
8545 for (i = 0; i < REGS_COUNT; i++)
8546 if (IS_E1H_ONLINE(reg_addrs[i].info))
8547 for (j = 0; j < reg_addrs[i].size; j++)
8549 reg_addrs[i].addr + j*4);
8553 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8555 struct bnx2x *bp = netdev_priv(dev);
8557 if (bp->flags & NO_WOL_FLAG) {
8561 wol->supported = WAKE_MAGIC;
8563 wol->wolopts = WAKE_MAGIC;
8567 memset(&wol->sopass, 0, sizeof(wol->sopass));
8570 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8572 struct bnx2x *bp = netdev_priv(dev);
8574 if (wol->wolopts & ~WAKE_MAGIC)
8577 if (wol->wolopts & WAKE_MAGIC) {
8578 if (bp->flags & NO_WOL_FLAG)
8588 static u32 bnx2x_get_msglevel(struct net_device *dev)
8590 struct bnx2x *bp = netdev_priv(dev);
8592 return bp->msglevel;
8595 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8597 struct bnx2x *bp = netdev_priv(dev);
8599 if (capable(CAP_NET_ADMIN))
8600 bp->msglevel = level;
8603 static int bnx2x_nway_reset(struct net_device *dev)
8605 struct bnx2x *bp = netdev_priv(dev);
8610 if (netif_running(dev)) {
8611 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8618 static int bnx2x_get_eeprom_len(struct net_device *dev)
8620 struct bnx2x *bp = netdev_priv(dev);
8622 return bp->common.flash_size;
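/* request the per-port NVRAM arbitration lock and poll until it is granted */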
8625 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8627 int port = BP_PORT(bp);
8631 /* adjust timeout for emulation/FPGA */
8632 count = NVRAM_TIMEOUT_COUNT;
8633 if (CHIP_REV_IS_SLOW(bp))
8636 /* request access to nvram interface */
8637 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8638 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8640 for (i = 0; i < count*10; i++) {
8641 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8642 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8648 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8649 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
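/* relinquish the per-port NVRAM arbitration lock and wait for the HW to
   confirm the release */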
8656 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8658 int port = BP_PORT(bp);
8662 /* adjust timeout for emulation/FPGA */
8663 count = NVRAM_TIMEOUT_COUNT;
8664 if (CHIP_REV_IS_SLOW(bp))
8667 /* relinquish nvram interface */
8668 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8669 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8671 for (i = 0; i < count*10; i++) {
8672 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8673 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8679 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8680 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8687 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8691 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8693 /* enable both bits, even on read */
8694 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8695 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8696 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8699 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8703 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8705 /* disable both bits, even after read */
8706 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8707 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8708 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8711 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8717 /* build the command word */
8718 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8720 /* need to clear DONE bit separately */
8721 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8723 /* address of the NVRAM to read from */
8724 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8725 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8727 /* issue a read command */
8728 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8730 /* adjust timeout for emulation/FPGA */
8731 count = NVRAM_TIMEOUT_COUNT;
8732 if (CHIP_REV_IS_SLOW(bp))
8735 /* wait for completion */
8738 for (i = 0; i < count; i++) {
8740 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8742 if (val & MCPR_NVM_COMMAND_DONE) {
8743 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8744 /* we read nvram data in cpu order,
8745 * but ethtool sees it as an array of bytes;
8746 * converting to big-endian will do the work */
8747 *ret_val = cpu_to_be32(val);
8756 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8763 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8765 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8770 if (offset + buf_size > bp->common.flash_size) {
8771 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8772 " buf_size (0x%x) > flash_size (0x%x)\n",
8773 offset, buf_size, bp->common.flash_size);
8777 /* request access to nvram interface */
8778 rc = bnx2x_acquire_nvram_lock(bp);
8782 /* enable access to nvram interface */
8783 bnx2x_enable_nvram_access(bp);
8785 /* read the first word(s) */
8786 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8787 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8788 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8789 memcpy(ret_buf, &val, 4);
8791 /* advance to the next dword */
8792 offset += sizeof(u32);
8793 ret_buf += sizeof(u32);
8794 buf_size -= sizeof(u32);
8799 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8800 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8801 memcpy(ret_buf, &val, 4);
8804 /* disable access to nvram interface */
8805 bnx2x_disable_nvram_access(bp);
8806 bnx2x_release_nvram_lock(bp);
8811 static int bnx2x_get_eeprom(struct net_device *dev,
8812 struct ethtool_eeprom *eeprom, u8 *eebuf)
8814 struct bnx2x *bp = netdev_priv(dev);
8817 if (!netif_running(dev))
8820 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8821 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8822 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8823 eeprom->len, eeprom->len);
8825 /* parameters already validated in ethtool_get_eeprom */
8827 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8832 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8837 /* build the command word */
8838 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8840 /* need to clear DONE bit separately */
8841 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8843 /* write the data */
8844 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8846 /* address of the NVRAM to write to */
8847 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8848 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8850 /* issue the write command */
8851 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8853 /* adjust timeout for emulation/FPGA */
8854 count = NVRAM_TIMEOUT_COUNT;
8855 if (CHIP_REV_IS_SLOW(bp))
8858 /* wait for completion */
8860 for (i = 0; i < count; i++) {
8862 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8863 if (val & MCPR_NVM_COMMAND_DONE) {
8872 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
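/* Write a single byte to NVRAM by reading the containing dword,
 * patching the requested byte and writing the dword back.
 */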
8874 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8882 if (offset + buf_size > bp->common.flash_size) {
8883 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8884 " buf_size (0x%x) > flash_size (0x%x)\n",
8885 offset, buf_size, bp->common.flash_size);
8889 /* request access to nvram interface */
8890 rc = bnx2x_acquire_nvram_lock(bp);
8894 /* enable access to nvram interface */
8895 bnx2x_enable_nvram_access(bp);
8897 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8898 align_offset = (offset & ~0x03);
8899 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8902 val &= ~(0xff << BYTE_OFFSET(offset));
8903 val |= (*data_buf << BYTE_OFFSET(offset));
8905 /* nvram data is returned as an array of bytes;
8906 * convert it back to cpu order */
8907 val = be32_to_cpu(val);
8909 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8913 /* disable access to nvram interface */
8914 bnx2x_disable_nvram_access(bp);
8915 bnx2x_release_nvram_lock(bp);
8920 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8928 if (buf_size == 1) /* ethtool */
8929 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8931 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8933 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8938 if (offset + buf_size > bp->common.flash_size) {
8939 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8940 " buf_size (0x%x) > flash_size (0x%x)\n",
8941 offset, buf_size, bp->common.flash_size);
8945 /* request access to nvram interface */
8946 rc = bnx2x_acquire_nvram_lock(bp);
8950 /* enable access to nvram interface */
8951 bnx2x_enable_nvram_access(bp);
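/* write dword by dword: FIRST is set on the first dword of each
 * NVRAM page, LAST on the final dword of the buffer or of a page */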
8954 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8955 while ((written_so_far < buf_size) && (rc == 0)) {
8956 if (written_so_far == (buf_size - sizeof(u32)))
8957 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8958 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8959 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8960 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8961 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8963 memcpy(&val, data_buf, 4);
8965 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8967 /* advance to the next dword */
8968 offset += sizeof(u32);
8969 data_buf += sizeof(u32);
8970 written_so_far += sizeof(u32);
8974 /* disable access to nvram interface */
8975 bnx2x_disable_nvram_access(bp);
8976 bnx2x_release_nvram_lock(bp);
8981 static int bnx2x_set_eeprom(struct net_device *dev,
8982 struct ethtool_eeprom *eeprom, u8 *eebuf)
8984 struct bnx2x *bp = netdev_priv(dev);
8987 if (!netif_running(dev))
8990 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8991 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8992 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8993 eeprom->len, eeprom->len);
8995 /* parameters already validated in ethtool_set_eeprom */
8997 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8998 if (eeprom->magic == 0x00504859)
9001 bnx2x_acquire_phy_lock(bp);
9002 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9003 bp->link_params.ext_phy_config,
9004 (bp->state != BNX2X_STATE_CLOSED),
9005 eebuf, eeprom->len);
9006 if ((bp->state == BNX2X_STATE_OPEN) ||
9007 (bp->state == BNX2X_STATE_DISABLED)) {
9008 rc |= bnx2x_link_reset(&bp->link_params,
9010 rc |= bnx2x_phy_init(&bp->link_params,
9013 bnx2x_release_phy_lock(bp);
9015 } else /* Only the PMF can access the PHY */
9018 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9023 static int bnx2x_get_coalesce(struct net_device *dev,
9024 struct ethtool_coalesce *coal)
9026 struct bnx2x *bp = netdev_priv(dev);
9028 memset(coal, 0, sizeof(struct ethtool_coalesce));
9030 coal->rx_coalesce_usecs = bp->rx_ticks;
9031 coal->tx_coalesce_usecs = bp->tx_ticks;
9036 static int bnx2x_set_coalesce(struct net_device *dev,
9037 struct ethtool_coalesce *coal)
9039 struct bnx2x *bp = netdev_priv(dev);
9041 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9042 if (bp->rx_ticks > 3000)
9043 bp->rx_ticks = 3000;
9045 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9046 if (bp->tx_ticks > 0x3000)
9047 bp->tx_ticks = 0x3000;
9049 if (netif_running(dev))
9050 bnx2x_update_coalesce(bp);
9055 static void bnx2x_get_ringparam(struct net_device *dev,
9056 struct ethtool_ringparam *ering)
9058 struct bnx2x *bp = netdev_priv(dev);
9060 ering->rx_max_pending = MAX_RX_AVAIL;
9061 ering->rx_mini_max_pending = 0;
9062 ering->rx_jumbo_max_pending = 0;
9064 ering->rx_pending = bp->rx_ring_size;
9065 ering->rx_mini_pending = 0;
9066 ering->rx_jumbo_pending = 0;
9068 ering->tx_max_pending = MAX_TX_AVAIL;
9069 ering->tx_pending = bp->tx_ring_size;
9072 static int bnx2x_set_ringparam(struct net_device *dev,
9073 struct ethtool_ringparam *ering)
9075 struct bnx2x *bp = netdev_priv(dev);
9078 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9079 (ering->tx_pending > MAX_TX_AVAIL) ||
9080 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9083 bp->rx_ring_size = ering->rx_pending;
9084 bp->tx_ring_size = ering->tx_pending;
9086 if (netif_running(dev)) {
9087 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9088 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9094 static void bnx2x_get_pauseparam(struct net_device *dev,
9095 struct ethtool_pauseparam *epause)
9097 struct bnx2x *bp = netdev_priv(dev);
9099 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9100 BNX2X_FLOW_CTRL_AUTO) &&
9101 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9103 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9104 BNX2X_FLOW_CTRL_RX);
9105 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9106 BNX2X_FLOW_CTRL_TX);
9108 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9109 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9110 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9113 static int bnx2x_set_pauseparam(struct net_device *dev,
9114 struct ethtool_pauseparam *epause)
9116 struct bnx2x *bp = netdev_priv(dev);
9121 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9122 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9123 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9125 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9127 if (epause->rx_pause)
9128 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9130 if (epause->tx_pause)
9131 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9133 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9134 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9136 if (epause->autoneg) {
9137 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9138 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9142 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9143 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9147 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9149 if (netif_running(dev)) {
9150 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9157 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9159 struct bnx2x *bp = netdev_priv(dev);
9163 /* TPA requires Rx CSUM offloading */
9164 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9165 if (!(dev->features & NETIF_F_LRO)) {
9166 dev->features |= NETIF_F_LRO;
9167 bp->flags |= TPA_ENABLE_FLAG;
9171 } else if (dev->features & NETIF_F_LRO) {
9172 dev->features &= ~NETIF_F_LRO;
9173 bp->flags &= ~TPA_ENABLE_FLAG;
9177 if (changed && netif_running(dev)) {
9178 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9179 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9185 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9187 struct bnx2x *bp = netdev_priv(dev);
9192 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9194 struct bnx2x *bp = netdev_priv(dev);
9199 /* Disable TPA when Rx CSUM is disabled; otherwise all
9200 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9202 u32 flags = ethtool_op_get_flags(dev);
9204 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9210 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9213 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9214 dev->features |= NETIF_F_TSO6;
9216 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9217 dev->features &= ~NETIF_F_TSO6;
9223 static const struct {
9224 char string[ETH_GSTRING_LEN];
9225 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9226 { "register_test (offline)" },
9227 { "memory_test (offline)" },
9228 { "loopback_test (offline)" },
9229 { "nvram_test (online)" },
9230 { "interrupt_test (online)" },
9231 { "link_test (online)" },
9232 { "idle check (online)" }
9235 static int bnx2x_self_test_count(struct net_device *dev)
9237 return BNX2X_NUM_TESTS;
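/* Register self-test: for each entry in reg_tbl write 0 and then
 * 0xffffffff, and verify that the bits covered by the mask read
 * back as written (the original value is restored afterwards).
 */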
9240 static int bnx2x_test_registers(struct bnx2x *bp)
9242 int idx, i, rc = -ENODEV;
9244 int port = BP_PORT(bp);
9245 static const struct {
9250 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9251 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9252 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9253 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9254 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9255 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9256 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9257 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9258 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9259 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9260 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9261 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9262 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9263 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9264 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9265 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9266 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9267 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9268 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9269 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9270 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9271 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9272 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9273 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9274 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9275 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9276 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9277 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9278 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9279 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9280 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9281 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9282 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9283 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9284 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9285 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9286 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9287 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9289 { 0xffffffff, 0, 0x00000000 }
9292 if (!netif_running(bp->dev))
9295 /* Run the test twice:
9296 first by writing 0x00000000, then by writing 0xffffffff */
9297 for (idx = 0; idx < 2; idx++) {
9304 wr_val = 0xffffffff;
9308 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9309 u32 offset, mask, save_val, val;
9311 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9312 mask = reg_tbl[i].mask;
9314 save_val = REG_RD(bp, offset);
9316 REG_WR(bp, offset, wr_val);
9317 val = REG_RD(bp, offset);
9319 /* Restore the original register's value */
9320 REG_WR(bp, offset, save_val);
9322 /* verify that the value read back is as expected */
9323 if ((val & mask) != (wr_val & mask))
9334 static int bnx2x_test_memory(struct bnx2x *bp)
9336 int i, j, rc = -ENODEV;
9338 static const struct {
9342 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9343 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9344 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9345 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9346 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9347 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9348 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9352 static const struct {
9358 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9359 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9360 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9361 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9362 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9363 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9365 { NULL, 0xffffffff, 0, 0 }
9368 if (!netif_running(bp->dev))
9371 /* Go through all the memories */
9372 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9373 for (j = 0; j < mem_tbl[i].size; j++)
9374 REG_RD(bp, mem_tbl[i].offset + j*4);
9376 /* Check the parity status */
9377 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9378 val = REG_RD(bp, prty_tbl[i].offset);
9379 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9380 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9382 "%s is 0x%x\n", prty_tbl[i].name, val);
9393 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9398 while (bnx2x_link_test(bp) && cnt--)
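/* Loopback self-test: transmit one self-addressed packet on queue 0
 * through the selected (PHY or MAC) loopback and verify it is
 * received back with the expected length and payload.
 */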
9402 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9404 unsigned int pkt_size, num_pkts, i;
9405 struct sk_buff *skb;
9406 unsigned char *packet;
9407 struct bnx2x_fastpath *fp = &bp->fp[0];
9408 u16 tx_start_idx, tx_idx;
9409 u16 rx_start_idx, rx_idx;
9411 struct sw_tx_bd *tx_buf;
9412 struct eth_tx_bd *tx_bd;
9414 union eth_rx_cqe *cqe;
9416 struct sw_rx_bd *rx_buf;
9420 /* check the loopback mode */
9421 switch (loopback_mode) {
9422 case BNX2X_PHY_LOOPBACK:
9423 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9426 case BNX2X_MAC_LOOPBACK:
9427 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9428 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9434 /* prepare the loopback packet */
9435 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9436 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9437 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9440 goto test_loopback_exit;
9442 packet = skb_put(skb, pkt_size);
9443 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9444 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9445 for (i = ETH_HLEN; i < pkt_size; i++)
9446 packet[i] = (unsigned char) (i & 0xff);
9448 /* send the loopback packet */
9450 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9451 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9453 pkt_prod = fp->tx_pkt_prod++;
9454 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9455 tx_buf->first_bd = fp->tx_bd_prod;
9458 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9459 mapping = pci_map_single(bp->pdev, skb->data,
9460 skb_headlen(skb), PCI_DMA_TODEVICE);
9461 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9462 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9463 tx_bd->nbd = cpu_to_le16(1);
9464 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9465 tx_bd->vlan = cpu_to_le16(pkt_prod);
9466 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9467 ETH_TX_BD_FLAGS_END_BD);
9468 tx_bd->general_data = ((UNICAST_ADDRESS <<
9469 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9473 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9474 mb(); /* FW restriction: must not reorder writing nbd and packets */
9475 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9476 DOORBELL(bp, fp->index, 0);
9482 bp->dev->trans_start = jiffies;
9486 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9487 if (tx_idx != tx_start_idx + num_pkts)
9488 goto test_loopback_exit;
9490 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9491 if (rx_idx != rx_start_idx + num_pkts)
9492 goto test_loopback_exit;
9494 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9495 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9496 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9497 goto test_loopback_rx_exit;
9499 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9500 if (len != pkt_size)
9501 goto test_loopback_rx_exit;
9503 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9505 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9506 for (i = ETH_HLEN; i < pkt_size; i++)
9507 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9508 goto test_loopback_rx_exit;
9512 test_loopback_rx_exit:
9514 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9515 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9516 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9517 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9519 /* Update producers */
9520 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9524 bp->link_params.loopback_mode = LOOPBACK_NONE;
9529 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9533 if (!netif_running(bp->dev))
9534 return BNX2X_LOOPBACK_FAILED;
9536 bnx2x_netif_stop(bp, 1);
9537 bnx2x_acquire_phy_lock(bp);
9539 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9541 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9542 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9545 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9547 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9548 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9551 bnx2x_release_phy_lock(bp);
9552 bnx2x_netif_start(bp);
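/* Expected CRC32 value over an NVRAM region that includes its stored
 * CRC; used below to validate region integrity.
 */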
9557 #define CRC32_RESIDUAL 0xdebb20e3
9559 static int bnx2x_test_nvram(struct bnx2x *bp)
9561 static const struct {
9565 { 0, 0x14 }, /* bootstrap */
9566 { 0x14, 0xec }, /* dir */
9567 { 0x100, 0x350 }, /* manuf_info */
9568 { 0x450, 0xf0 }, /* feature_info */
9569 { 0x640, 0x64 }, /* upgrade_key_info */
9571 { 0x708, 0x70 }, /* manuf_key_info */
9575 __be32 buf[0x350 / 4];
9576 u8 *data = (u8 *)buf;
9580 rc = bnx2x_nvram_read(bp, 0, data, 4);
9582 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9583 goto test_nvram_exit;
9586 magic = be32_to_cpu(buf[0]);
9587 if (magic != 0x669955aa) {
9588 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9590 goto test_nvram_exit;
9593 for (i = 0; nvram_tbl[i].size; i++) {
9595 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9599 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9600 goto test_nvram_exit;
9603 csum = ether_crc_le(nvram_tbl[i].size, data);
9604 if (csum != CRC32_RESIDUAL) {
9606 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9608 goto test_nvram_exit;
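/* Interrupt self-test: post an empty SET_MAC ramrod on the slowpath
 * and poll for its completion to verify that slowpath interrupts
 * are delivered.
 */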
9616 static int bnx2x_test_intr(struct bnx2x *bp)
9618 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9621 if (!netif_running(bp->dev))
9624 config->hdr.length = 0;
9626 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9628 config->hdr.offset = BP_FUNC(bp);
9629 config->hdr.client_id = bp->fp->cl_id;
9630 config->hdr.reserved1 = 0;
9632 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9633 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9634 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9636 bp->set_mac_pending++;
9637 for (i = 0; i < 10; i++) {
9638 if (!bp->set_mac_pending)
9640 msleep_interruptible(10);
9649 static void bnx2x_self_test(struct net_device *dev,
9650 struct ethtool_test *etest, u64 *buf)
9652 struct bnx2x *bp = netdev_priv(dev);
9654 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9656 if (!netif_running(dev))
9659 /* offline tests are not supported in MF mode */
9661 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9663 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9666 link_up = bp->link_vars.link_up;
9667 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9668 bnx2x_nic_load(bp, LOAD_DIAG);
9669 /* wait until link state is restored */
9670 bnx2x_wait_for_link(bp, link_up);
9672 if (bnx2x_test_registers(bp) != 0) {
9674 etest->flags |= ETH_TEST_FL_FAILED;
9676 if (bnx2x_test_memory(bp) != 0) {
9678 etest->flags |= ETH_TEST_FL_FAILED;
9680 buf[2] = bnx2x_test_loopback(bp, link_up);
9682 etest->flags |= ETH_TEST_FL_FAILED;
9684 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9685 bnx2x_nic_load(bp, LOAD_NORMAL);
9686 /* wait until link state is restored */
9687 bnx2x_wait_for_link(bp, link_up);
9689 if (bnx2x_test_nvram(bp) != 0) {
9691 etest->flags |= ETH_TEST_FL_FAILED;
9693 if (bnx2x_test_intr(bp) != 0) {
9695 etest->flags |= ETH_TEST_FL_FAILED;
9698 if (bnx2x_link_test(bp) != 0) {
9700 etest->flags |= ETH_TEST_FL_FAILED;
9703 #ifdef BNX2X_EXTRA_DEBUG
9704 bnx2x_panic_dump(bp);
9708 static const struct {
9711 u8 string[ETH_GSTRING_LEN];
9712 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9713 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9714 { Q_STATS_OFFSET32(error_bytes_received_hi),
9715 8, "[%d]: rx_error_bytes" },
9716 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9717 8, "[%d]: rx_ucast_packets" },
9718 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9719 8, "[%d]: rx_mcast_packets" },
9720 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9721 8, "[%d]: rx_bcast_packets" },
9722 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9723 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9724 4, "[%d]: rx_phy_ip_err_discards"},
9725 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9726 4, "[%d]: rx_skb_alloc_discard" },
9727 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9729 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9730 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9731 8, "[%d]: tx_packets" }
9734 static const struct {
9738 #define STATS_FLAGS_PORT 1
9739 #define STATS_FLAGS_FUNC 2
9740 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9741 u8 string[ETH_GSTRING_LEN];
9742 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9743 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9744 8, STATS_FLAGS_BOTH, "rx_bytes" },
9745 { STATS_OFFSET32(error_bytes_received_hi),
9746 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9747 { STATS_OFFSET32(total_unicast_packets_received_hi),
9748 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9749 { STATS_OFFSET32(total_multicast_packets_received_hi),
9750 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9751 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9752 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9753 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9754 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9755 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9756 8, STATS_FLAGS_PORT, "rx_align_errors" },
9757 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9758 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9759 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9760 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9761 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9762 8, STATS_FLAGS_PORT, "rx_fragments" },
9763 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9764 8, STATS_FLAGS_PORT, "rx_jabbers" },
9765 { STATS_OFFSET32(no_buff_discard_hi),
9766 8, STATS_FLAGS_BOTH, "rx_discards" },
9767 { STATS_OFFSET32(mac_filter_discard),
9768 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9769 { STATS_OFFSET32(xxoverflow_discard),
9770 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9771 { STATS_OFFSET32(brb_drop_hi),
9772 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9773 { STATS_OFFSET32(brb_truncate_hi),
9774 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9775 { STATS_OFFSET32(pause_frames_received_hi),
9776 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9777 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9778 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9779 { STATS_OFFSET32(nig_timer_max),
9780 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9781 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9782 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9783 { STATS_OFFSET32(rx_skb_alloc_failed),
9784 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9785 { STATS_OFFSET32(hw_csum_err),
9786 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9788 { STATS_OFFSET32(total_bytes_transmitted_hi),
9789 8, STATS_FLAGS_BOTH, "tx_bytes" },
9790 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9791 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9792 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9793 8, STATS_FLAGS_BOTH, "tx_packets" },
9794 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9795 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9796 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9797 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9798 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9799 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9800 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9801 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9802 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9803 8, STATS_FLAGS_PORT, "tx_deferred" },
9804 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9805 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9806 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9807 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9808 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9809 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9810 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9811 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9812 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9813 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9814 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9815 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9816 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9817 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9818 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9819 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9820 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9821 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9822 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9823 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9824 { STATS_OFFSET32(pause_frames_sent_hi),
9825 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9828 #define IS_PORT_STAT(i) \
9829 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9830 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9831 #define IS_E1HMF_MODE_STAT(bp) \
9832 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9834 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9836 struct bnx2x *bp = netdev_priv(dev);
9839 switch (stringset) {
9843 for_each_queue(bp, i) {
9844 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9845 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9846 bnx2x_q_stats_arr[j].string, i);
9847 k += BNX2X_NUM_Q_STATS;
9849 if (IS_E1HMF_MODE_STAT(bp))
9851 for (j = 0; j < BNX2X_NUM_STATS; j++)
9852 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9853 bnx2x_stats_arr[j].string);
9855 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9856 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9858 strcpy(buf + j*ETH_GSTRING_LEN,
9859 bnx2x_stats_arr[i].string);
9866 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9871 static int bnx2x_get_stats_count(struct net_device *dev)
9873 struct bnx2x *bp = netdev_priv(dev);
9877 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9878 if (!IS_E1HMF_MODE_STAT(bp))
9879 num_stats += BNX2X_NUM_STATS;
9881 if (IS_E1HMF_MODE_STAT(bp)) {
9883 for (i = 0; i < BNX2X_NUM_STATS; i++)
9884 if (IS_FUNC_STAT(i))
9887 num_stats = BNX2X_NUM_STATS;
9893 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9894 struct ethtool_stats *stats, u64 *buf)
9896 struct bnx2x *bp = netdev_priv(dev);
9897 u32 *hw_stats, *offset;
9902 for_each_queue(bp, i) {
9903 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9904 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9905 if (bnx2x_q_stats_arr[j].size == 0) {
9906 /* skip this counter */
9910 offset = (hw_stats +
9911 bnx2x_q_stats_arr[j].offset);
9912 if (bnx2x_q_stats_arr[j].size == 4) {
9913 /* 4-byte counter */
9914 buf[k + j] = (u64) *offset;
9917 /* 8-byte counter */
9918 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9920 k += BNX2X_NUM_Q_STATS;
9922 if (IS_E1HMF_MODE_STAT(bp))
9924 hw_stats = (u32 *)&bp->eth_stats;
9925 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9926 if (bnx2x_stats_arr[j].size == 0) {
9927 /* skip this counter */
9931 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9932 if (bnx2x_stats_arr[j].size == 4) {
9933 /* 4-byte counter */
9934 buf[k + j] = (u64) *offset;
9937 /* 8-byte counter */
9938 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9941 hw_stats = (u32 *)&bp->eth_stats;
9942 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9943 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9945 if (bnx2x_stats_arr[i].size == 0) {
9946 /* skip this counter */
9951 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9952 if (bnx2x_stats_arr[i].size == 4) {
9953 /* 4-byte counter */
9954 buf[j] = (u64) *offset;
9958 /* 8-byte counter */
9959 buf[j] = HILO_U64(*offset, *(offset + 1));
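/* ethtool phys_id handler: blink the port LED for roughly 'data'
 * seconds (toggling every 500ms), then restore the operational LED
 * state if the link is up.
 */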
9965 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9967 struct bnx2x *bp = netdev_priv(dev);
9968 int port = BP_PORT(bp);
9971 if (!netif_running(dev))
9980 for (i = 0; i < (data * 2); i++) {
9982 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9983 bp->link_params.hw_led_mode,
9984 bp->link_params.chip_id);
9986 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9987 bp->link_params.hw_led_mode,
9988 bp->link_params.chip_id);
9990 msleep_interruptible(500);
9991 if (signal_pending(current))
9995 if (bp->link_vars.link_up)
9996 bnx2x_set_led(bp, port, LED_MODE_OPER,
9997 bp->link_vars.line_speed,
9998 bp->link_params.hw_led_mode,
9999 bp->link_params.chip_id);
10004 static struct ethtool_ops bnx2x_ethtool_ops = {
10005 .get_settings = bnx2x_get_settings,
10006 .set_settings = bnx2x_set_settings,
10007 .get_drvinfo = bnx2x_get_drvinfo,
10008 .get_regs_len = bnx2x_get_regs_len,
10009 .get_regs = bnx2x_get_regs,
10010 .get_wol = bnx2x_get_wol,
10011 .set_wol = bnx2x_set_wol,
10012 .get_msglevel = bnx2x_get_msglevel,
10013 .set_msglevel = bnx2x_set_msglevel,
10014 .nway_reset = bnx2x_nway_reset,
10015 .get_link = ethtool_op_get_link,
10016 .get_eeprom_len = bnx2x_get_eeprom_len,
10017 .get_eeprom = bnx2x_get_eeprom,
10018 .set_eeprom = bnx2x_set_eeprom,
10019 .get_coalesce = bnx2x_get_coalesce,
10020 .set_coalesce = bnx2x_set_coalesce,
10021 .get_ringparam = bnx2x_get_ringparam,
10022 .set_ringparam = bnx2x_set_ringparam,
10023 .get_pauseparam = bnx2x_get_pauseparam,
10024 .set_pauseparam = bnx2x_set_pauseparam,
10025 .get_rx_csum = bnx2x_get_rx_csum,
10026 .set_rx_csum = bnx2x_set_rx_csum,
10027 .get_tx_csum = ethtool_op_get_tx_csum,
10028 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10029 .set_flags = bnx2x_set_flags,
10030 .get_flags = ethtool_op_get_flags,
10031 .get_sg = ethtool_op_get_sg,
10032 .set_sg = ethtool_op_set_sg,
10033 .get_tso = ethtool_op_get_tso,
10034 .set_tso = bnx2x_set_tso,
10035 .self_test_count = bnx2x_self_test_count,
10036 .self_test = bnx2x_self_test,
10037 .get_strings = bnx2x_get_strings,
10038 .phys_id = bnx2x_phys_id,
10039 .get_stats_count = bnx2x_get_stats_count,
10040 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10043 /* end of ethtool_ops */
10045 /****************************************************************************
10046 * General service functions
10047 ****************************************************************************/
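/* Move the device between D0 and D3hot by rewriting the PCI PM
 * control register; a delay is needed when coming out of D3hot, and
 * PME may be enabled before powering down (e.g. for WoL).
 */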
10049 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10053 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10057 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10058 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10059 PCI_PM_CTRL_PME_STATUS));
10061 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10062 /* delay required during transition out of D3hot */
10067 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10071 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10073 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10076 /* No more memory access after this point until
10077 * device is brought back to D0.
10087 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10091 /* Tell compiler that status block fields can change */
10093 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10094 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10096 return (fp->rx_comp_cons != rx_cons_sb);
10100 * net_device service functions
10103 static int bnx2x_poll(struct napi_struct *napi, int budget)
10105 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10107 struct bnx2x *bp = fp->bp;
10110 #ifdef BNX2X_STOP_ON_ERROR
10111 if (unlikely(bp->panic))
10115 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10116 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10117 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10119 bnx2x_update_fpsb_idx(fp);
10121 if (bnx2x_has_tx_work(fp))
10124 if (bnx2x_has_rx_work(fp)) {
10125 work_done = bnx2x_rx_int(fp, budget);
10127 /* must not complete if we consumed full budget */
10128 if (work_done >= budget)
10132 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10133 * ensure that status block indices have been actually read
10134 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10135 * so that we won't write the "newer" value of the status block to IGU
10136 * (if there was a DMA right after BNX2X_HAS_WORK and
10137 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10138 * may be postponed to right before bnx2x_ack_sb). In this case
10139 * there will never be another interrupt until there is another update
10140 * of the status block, while there is still unhandled work.
10144 if (!BNX2X_HAS_WORK(fp)) {
10145 #ifdef BNX2X_STOP_ON_ERROR
10148 napi_complete(napi);
10150 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10151 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10152 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10153 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10161 /* we split the first BD into headers and data BDs
10162 * to ease the pain of our fellow microcode engineers;
10163 * we use one mapping for both BDs.
10164 * So far this has only been observed to happen
10165 * in Other Operating Systems(TM)
10167 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10168 struct bnx2x_fastpath *fp,
10169 struct eth_tx_bd **tx_bd, u16 hlen,
10170 u16 bd_prod, int nbd)
10172 struct eth_tx_bd *h_tx_bd = *tx_bd;
10173 struct eth_tx_bd *d_tx_bd;
10174 dma_addr_t mapping;
10175 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10177 /* first fix first BD */
10178 h_tx_bd->nbd = cpu_to_le16(nbd);
10179 h_tx_bd->nbytes = cpu_to_le16(hlen);
10181 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10182 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10183 h_tx_bd->addr_lo, h_tx_bd->nbd);
10185 /* now get a new data BD
10186 * (after the pbd) and fill it */
10187 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10188 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10190 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10191 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10193 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10194 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10195 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10197 /* this marks the BD as one that has no individual mapping
10198 * the FW ignores this flag in a BD not marked start
10200 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10201 DP(NETIF_MSG_TX_QUEUED,
10202 "TSO split data size is %d (%x:%x)\n",
10203 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10205 /* update tx_bd for marking the last BD flag */
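/* Adjust a TCP/UDP pseudo checksum when the HW checksum start differs
 * from the transport header by 'fix' bytes: subtract (or add back) the
 * checksum of the skipped bytes and return the result byte-swapped for
 * the parsing BD.
 */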
10211 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10214 csum = (u16) ~csum_fold(csum_sub(csum,
10215 csum_partial(t_header - fix, fix, 0)));
10218 csum = (u16) ~csum_fold(csum_add(csum,
10219 csum_partial(t_header, -fix, 0)));
10221 return swab16(csum);
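/* Classify an skb for transmission: returns a bitmask of XMIT_CSUM_*
 * and XMIT_GSO_* flags telling which offloads the BDs must describe.
 */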
10224 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10228 if (skb->ip_summed != CHECKSUM_PARTIAL)
10232 if (skb->protocol == htons(ETH_P_IPV6)) {
10234 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10235 rc |= XMIT_CSUM_TCP;
10239 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10240 rc |= XMIT_CSUM_TCP;
10244 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10247 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10253 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10254 /* check if the packet requires linearization (packet is too fragmented);
10255 no need to check fragmentation if page size > 8K (there will be no
10256 violation of FW restrictions) */
10257 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10262 int first_bd_sz = 0;
10264 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10265 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10267 if (xmit_type & XMIT_GSO) {
10268 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10269 /* Check if LSO packet needs to be copied:
10270 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10271 int wnd_size = MAX_FETCH_BD - 3;
10272 /* Number of windows to check */
10273 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10278 /* Headers length */
10279 hlen = (int)(skb_transport_header(skb) - skb->data) +
10282 /* Amount of data (w/o headers) on linear part of SKB*/
10283 first_bd_sz = skb_headlen(skb) - hlen;
10285 wnd_sum = first_bd_sz;
10287 /* Calculate the first sum - it's special */
10288 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10290 skb_shinfo(skb)->frags[frag_idx].size;
10292 /* If there was data on linear skb data - check it */
10293 if (first_bd_sz > 0) {
10294 if (unlikely(wnd_sum < lso_mss)) {
10299 wnd_sum -= first_bd_sz;
10302 /* Others are easier: run through the frag list and
10303 check all windows */
10304 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10306 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10308 if (unlikely(wnd_sum < lso_mss)) {
10313 skb_shinfo(skb)->frags[wnd_idx].size;
10316 /* in non-LSO, a too fragmented packet should always be linearized */
10323 if (unlikely(to_copy))
10324 DP(NETIF_MSG_TX_QUEUED,
10325 "Linearization IS REQUIRED for %s packet. "
10326 "num_frags %d hlen %d first_bd_sz %d\n",
10327 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10328 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10334 /* called with netif_tx_lock
10335 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10336 * netif_wake_queue()
10338 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10340 struct bnx2x *bp = netdev_priv(dev);
10341 struct bnx2x_fastpath *fp;
10342 struct netdev_queue *txq;
10343 struct sw_tx_bd *tx_buf;
10344 struct eth_tx_bd *tx_bd;
10345 struct eth_tx_parse_bd *pbd = NULL;
10346 u16 pkt_prod, bd_prod;
10348 dma_addr_t mapping;
10349 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10350 int vlan_off = (bp->e1hov ? 4 : 0);
10354 #ifdef BNX2X_STOP_ON_ERROR
10355 if (unlikely(bp->panic))
10356 return NETDEV_TX_BUSY;
10359 fp_index = skb_get_queue_mapping(skb);
10360 txq = netdev_get_tx_queue(dev, fp_index);
10362 fp = &bp->fp[fp_index];
10364 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10365 fp->eth_q_stats.driver_xoff++;
10366 netif_tx_stop_queue(txq);
10367 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10368 return NETDEV_TX_BUSY;
10371 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10372 " gso type %x xmit_type %x\n",
10373 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10374 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10376 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10377 /* First, check if we need to linearize the skb (due to FW
10378 restrictions). No need to check fragmentation if page size > 8K
10379 (there will be no violation of FW restrictions) */
10380 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10381 /* Statistics of linearization */
10383 if (skb_linearize(skb) != 0) {
10384 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10385 "silently dropping this SKB\n");
10386 dev_kfree_skb_any(skb);
10387 return NETDEV_TX_OK;
10393 Please read carefully. First we use one BD which we mark as start,
10394 then for TSO or xsum we have a parsing info BD,
10395 and only then we have the rest of the TSO BDs.
10396 (don't forget to mark the last one as last,
10397 and to unmap only AFTER you write to the BD ...)
10398 And above all, all pbd sizes are in words - NOT DWORDS!
10401 pkt_prod = fp->tx_pkt_prod++;
10402 bd_prod = TX_BD(fp->tx_bd_prod);
10404 /* get a tx_buf and first BD */
10405 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10406 tx_bd = &fp->tx_desc_ring[bd_prod];
10408 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10409 tx_bd->general_data = (UNICAST_ADDRESS <<
10410 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10412 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10414 /* remember the first BD of the packet */
10415 tx_buf->first_bd = fp->tx_bd_prod;
10418 DP(NETIF_MSG_TX_QUEUED,
10419 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10420 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10423 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10424 (bp->flags & HW_VLAN_TX_FLAG)) {
10425 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10426 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10430 tx_bd->vlan = cpu_to_le16(pkt_prod);
10433 /* turn on parsing and get a BD */
10434 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10435 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10437 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10440 if (xmit_type & XMIT_CSUM) {
10441 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10443 /* for now NS flag is not used in Linux */
10445 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10446 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10448 pbd->ip_hlen = (skb_transport_header(skb) -
10449 skb_network_header(skb)) / 2;
10451 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10453 pbd->total_hlen = cpu_to_le16(hlen);
10454 hlen = hlen*2 - vlan_off;
10456 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10458 if (xmit_type & XMIT_CSUM_V4)
10459 tx_bd->bd_flags.as_bitfield |=
10460 ETH_TX_BD_FLAGS_IP_CSUM;
10462 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10464 if (xmit_type & XMIT_CSUM_TCP) {
10465 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10468 s8 fix = SKB_CS_OFF(skb); /* signed! */
10470 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10471 pbd->cs_offset = fix / 2;
10473 DP(NETIF_MSG_TX_QUEUED,
10474 "hlen %d offset %d fix %d csum before fix %x\n",
10475 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10478 /* HW bug: fixup the CSUM */
10479 pbd->tcp_pseudo_csum =
10480 bnx2x_csum_fix(skb_transport_header(skb),
10483 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10484 pbd->tcp_pseudo_csum);
10488 mapping = pci_map_single(bp->pdev, skb->data,
10489 skb_headlen(skb), PCI_DMA_TODEVICE);
10491 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10492 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10493 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10494 tx_bd->nbd = cpu_to_le16(nbd);
10495 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10497 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10498 " nbytes %d flags %x vlan %x\n",
10499 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10500 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10501 le16_to_cpu(tx_bd->vlan));
10503 if (xmit_type & XMIT_GSO) {
10505 DP(NETIF_MSG_TX_QUEUED,
10506 "TSO packet len %d hlen %d total len %d tso size %d\n",
10507 skb->len, hlen, skb_headlen(skb),
10508 skb_shinfo(skb)->gso_size);
10510 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10512 if (unlikely(skb_headlen(skb) > hlen))
10513 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10516 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10517 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10518 pbd->tcp_flags = pbd_tcp_flags(skb);
10520 if (xmit_type & XMIT_GSO_V4) {
10521 pbd->ip_id = swab16(ip_hdr(skb)->id);
10522 pbd->tcp_pseudo_csum =
10523 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10524 ip_hdr(skb)->daddr,
10525 0, IPPROTO_TCP, 0));
10528 pbd->tcp_pseudo_csum =
10529 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10530 &ipv6_hdr(skb)->daddr,
10531 0, IPPROTO_TCP, 0));
10533 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10536 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10537 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10539 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10540 tx_bd = &fp->tx_desc_ring[bd_prod];
10542 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10543 frag->size, PCI_DMA_TODEVICE);
10545 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10546 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10547 tx_bd->nbytes = cpu_to_le16(frag->size);
10548 tx_bd->vlan = cpu_to_le16(pkt_prod);
10549 tx_bd->bd_flags.as_bitfield = 0;
10551 DP(NETIF_MSG_TX_QUEUED,
10552 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10553 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10554 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10557 /* now at last mark the BD as the last BD */
10558 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10560 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10561 tx_bd, tx_bd->bd_flags.as_bitfield);
10563 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10565 /* now send a tx doorbell, counting the next BD
10566 * if the packet contains or ends with it
10568 if (TX_BD_POFF(bd_prod) < nbd)
10572 DP(NETIF_MSG_TX_QUEUED,
10573 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10574 " tcp_flags %x xsum %x seq %u hlen %u\n",
10575 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10576 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10577 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10579 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10582 * Make sure that the BD data is updated before updating the producer
10583 * since FW might read the BD right after the producer is updated.
10584 * This is only applicable for weak-ordered memory model archs such
10585 * as IA-64. The following barrier is also mandatory since FW
10586 * assumes packets must have BDs.
10590 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10591 mb(); /* FW restriction: must not reorder writing nbd and packets */
10592 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10593 DOORBELL(bp, fp->index, 0);
10597 fp->tx_bd_prod += nbd;
10598 dev->trans_start = jiffies;
10600 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10601 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10602 if we put Tx into XOFF state. */
10604 netif_tx_stop_queue(txq);
10605 fp->eth_q_stats.driver_xoff++;
10606 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10607 netif_tx_wake_queue(txq);
10611 return NETDEV_TX_OK;
10614 /* called with rtnl_lock */
10615 static int bnx2x_open(struct net_device *dev)
10617 struct bnx2x *bp = netdev_priv(dev);
10619 netif_carrier_off(dev);
10621 bnx2x_set_power_state(bp, PCI_D0);
10623 return bnx2x_nic_load(bp, LOAD_OPEN);
10626 /* called with rtnl_lock */
10627 static int bnx2x_close(struct net_device *dev)
10629 struct bnx2x *bp = netdev_priv(dev);
10631 /* Unload the driver, release IRQs */
10632 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10633 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10634 if (!CHIP_REV_IS_SLOW(bp))
10635 bnx2x_set_power_state(bp, PCI_D3hot);
10640 /* called with netif_tx_lock from dev_mcast.c */
10641 static void bnx2x_set_rx_mode(struct net_device *dev)
10643 struct bnx2x *bp = netdev_priv(dev);
10644 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10645 int port = BP_PORT(bp);
10647 if (bp->state != BNX2X_STATE_OPEN) {
10648 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10652 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10654 if (dev->flags & IFF_PROMISC)
10655 rx_mode = BNX2X_RX_MODE_PROMISC;
10657 else if ((dev->flags & IFF_ALLMULTI) ||
10658 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10659 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10661 else { /* some multicasts */
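/* E1 programs each multicast address into the CAM configuration
 * table via a ramrod; E1H hashes the addresses into the
 * MC_HASH registers instead.
 */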
10662 if (CHIP_IS_E1(bp)) {
10663 int i, old, offset;
10664 struct dev_mc_list *mclist;
10665 struct mac_configuration_cmd *config =
10666 bnx2x_sp(bp, mcast_config);
10668 for (i = 0, mclist = dev->mc_list;
10669 mclist && (i < dev->mc_count);
10670 i++, mclist = mclist->next) {
10672 config->config_table[i].
10673 cam_entry.msb_mac_addr =
10674 swab16(*(u16 *)&mclist->dmi_addr[0]);
10675 config->config_table[i].
10676 cam_entry.middle_mac_addr =
10677 swab16(*(u16 *)&mclist->dmi_addr[2]);
10678 config->config_table[i].
10679 cam_entry.lsb_mac_addr =
10680 swab16(*(u16 *)&mclist->dmi_addr[4]);
10681 config->config_table[i].cam_entry.flags =
10683 config->config_table[i].
10684 target_table_entry.flags = 0;
10685 config->config_table[i].
10686 target_table_entry.client_id = 0;
10687 config->config_table[i].
10688 target_table_entry.vlan_id = 0;
10691 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10692 config->config_table[i].
10693 cam_entry.msb_mac_addr,
10694 config->config_table[i].
10695 cam_entry.middle_mac_addr,
10696 config->config_table[i].
10697 cam_entry.lsb_mac_addr);
10699 old = config->hdr.length;
10701 for (; i < old; i++) {
10702 if (CAM_IS_INVALID(config->
10703 config_table[i])) {
10704 /* already invalidated */
10708 CAM_INVALIDATE(config->
10713 if (CHIP_REV_IS_SLOW(bp))
10714 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10716 offset = BNX2X_MAX_MULTICAST*(1 + port);
10718 config->hdr.length = i;
10719 config->hdr.offset = offset;
10720 config->hdr.client_id = bp->fp->cl_id;
10721 config->hdr.reserved1 = 0;
10723 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10724 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10725 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10728 /* Accept one or more multicasts */
10729 struct dev_mc_list *mclist;
10730 u32 mc_filter[MC_HASH_SIZE];
10731 u32 crc, bit, regidx;
10734 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10736 for (i = 0, mclist = dev->mc_list;
10737 mclist && (i < dev->mc_count);
10738 i++, mclist = mclist->next) {
10740 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10743 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10744 bit = (crc >> 24) & 0xff;
10747 mc_filter[regidx] |= (1 << bit);
10750 for (i = 0; i < MC_HASH_SIZE; i++)
10751 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10756 bp->rx_mode = rx_mode;
10757 bnx2x_set_storm_rx_mode(bp);
10760 /* called with rtnl_lock */
10761 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10763 struct sockaddr *addr = p;
10764 struct bnx2x *bp = netdev_priv(dev);
10766 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10769 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10770 if (netif_running(dev)) {
10771 if (CHIP_IS_E1(bp))
10772 bnx2x_set_mac_addr_e1(bp, 1);
10774 bnx2x_set_mac_addr_e1h(bp, 1);
10780 /* called with rtnl_lock */
10781 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10783 struct mii_ioctl_data *data = if_mii(ifr);
10784 struct bnx2x *bp = netdev_priv(dev);
10785 int port = BP_PORT(bp);
10790 data->phy_id = bp->port.phy_addr;
10794 case SIOCGMIIREG: {
10797 if (!netif_running(dev))
10800 mutex_lock(&bp->port.phy_mutex);
10801 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10802 DEFAULT_PHY_DEV_ADDR,
10803 (data->reg_num & 0x1f), &mii_regval);
10804 data->val_out = mii_regval;
10805 mutex_unlock(&bp->port.phy_mutex);
10810 if (!capable(CAP_NET_ADMIN))
10813 if (!netif_running(dev))
10816 mutex_lock(&bp->port.phy_mutex);
10817 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10818 DEFAULT_PHY_DEV_ADDR,
10819 (data->reg_num & 0x1f), data->val_in);
10820 mutex_unlock(&bp->port.phy_mutex);
10828 return -EOPNOTSUPP;
10831 /* called with rtnl_lock */
10832 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10834 struct bnx2x *bp = netdev_priv(dev);
10837 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10838 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10841 /* This does not race with packet allocation
10842 * because the actual alloc size is
10843 * only updated as part of load
10845 dev->mtu = new_mtu;
10847 if (netif_running(dev)) {
10848 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10849 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10855 static void bnx2x_tx_timeout(struct net_device *dev)
10857 struct bnx2x *bp = netdev_priv(dev);
10859 #ifdef BNX2X_STOP_ON_ERROR
10863 /* This allows the netif to be shut down gracefully before resetting */
10864 schedule_work(&bp->reset_task);
10868 /* called with rtnl_lock */
10869 static void bnx2x_vlan_rx_register(struct net_device *dev,
10870 struct vlan_group *vlgrp)
10872 struct bnx2x *bp = netdev_priv(dev);
10876 /* Set flags according to the required capabilities */
10877 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10879 if (dev->features & NETIF_F_HW_VLAN_TX)
10880 bp->flags |= HW_VLAN_TX_FLAG;
10882 if (dev->features & NETIF_F_HW_VLAN_RX)
10883 bp->flags |= HW_VLAN_RX_FLAG;
10885 if (netif_running(dev))
10886 bnx2x_set_client_config(bp);
10891 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10892 static void poll_bnx2x(struct net_device *dev)
10894 struct bnx2x *bp = netdev_priv(dev);
10896 disable_irq(bp->pdev->irq);
10897 bnx2x_interrupt(bp->pdev->irq, dev);
10898 enable_irq(bp->pdev->irq);
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
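
/* One-time PCI/netdev setup for a single device: enable the device, map
 * BAR0 (registers) and BAR2 (doorbells), set the DMA mask and advertise
 * the netdev features.  Everything is undone via the err_out_* labels on
 * failure.
 */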
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
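
/* Helpers that read the negotiated PCIe link width and speed from the
 * PCICFG_LINK_CONTROL register for the probe banner below.
 */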
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
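
/* PCI probe entry point: allocate the multi-queue netdev, initialize the
 * device and driver state, and register with the network stack.
 */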
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
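
/* PCI remove entry point: unregister the netdev and release all resources
 * acquired in bnx2x_init_dev().
 */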
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
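
/* Legacy PCI power management hooks: suspend unloads the NIC and drops to
 * the requested PCI power state; resume restores D0 and reloads the NIC.
 */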
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
	rtnl_unlock();

	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	rc = bnx2x_nic_load(bp, LOAD_OPEN);
	rtnl_unlock();

	return rc;
}
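
/* PCI error recovery (EEH) support: a lightweight unload used while the
 * device may be inaccessible, followed by shmem re-discovery once the slot
 * has been reset.
 */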
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
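
/* Module init creates the single-threaded "bnx2x" workqueue used for
 * slowpath work before registering the PCI driver.
 */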
static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);