1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
55 #include "bnx2x_init.h"
57 #define DRV_MODULE_VERSION "1.48.102"
58 #define DRV_MODULE_RELDATE "2009/02/12"
59 #define BNX2X_BC_VER 0x040200
61 /* Time in jiffies before concluding the transmitter is hung */
62 #define TX_TIMEOUT (5*HZ)
64 static char version[] __devinitdata =
65 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
66 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68 MODULE_AUTHOR("Eliezer Tamir");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
73 static int multi_mode = 1;
74 module_param(multi_mode, int, 0);
76 static int disable_tpa;
77 module_param(disable_tpa, int, 0);
78 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
81 module_param(int_mode, int, 0);
82 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
85 module_param(poll, int, 0);
86 MODULE_PARM_DESC(poll, " Use polling (for debug)");
89 module_param(mrrs, int, 0);
90 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
93 module_param(debug, int, 0);
94 MODULE_PARM_DESC(debug, " Default debug msglevel");
96 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
98 static struct workqueue_struct *bnx2x_wq;
100 enum bnx2x_board_type {
106 /* indexed by board_type, above */
109 } board_info[] __devinitdata = {
110 { "Broadcom NetXtreme II BCM57710 XGb" },
111 { "Broadcom NetXtreme II BCM57711 XGb" },
112 { "Broadcom NetXtreme II BCM57711E XGb" }
116 static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
126 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
128 /****************************************************************************
129 * General service functions
130 ****************************************************************************/
133 * locking is done by mcp
135 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
139 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
140 PCICFG_VENDOR_ID_OFFSET);
143 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
148 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
149 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
150 PCICFG_VENDOR_ID_OFFSET);
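/* A sketch of the indirect access flow used above: the target GRC address is
 * programmed through the PCICFG_GRC_ADDRESS config-space window, the data is
 * then moved through PCICFG_GRC_DATA, and the window is parked back at
 * PCICFG_VENDOR_ID_OFFSET, presumably so that unrelated config cycles do not
 * land on an arbitrary GRC register. */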
155 static const u32 dmae_reg_go_c[] = {
156 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
157 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
158 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
159 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
162 /* copy command into DMAE command memory and set DMAE command go */
163 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
169 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
170 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
171 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
173 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
176 REG_WR(bp, dmae_reg_go_c[idx], 1);
179 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
182 struct dmae_command *dmae = &bp->init_dmae;
183 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
186 if (!bp->dmae_ready) {
187 u32 *data = bnx2x_sp(bp, wb_data[0]);
189 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
190 " using indirect\n", dst_addr, len32);
191 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
195 mutex_lock(&bp->dmae_mutex);
197 memset(dmae, 0, sizeof(struct dmae_command));
199 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
200 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
201 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
203 DMAE_CMD_ENDIANITY_B_DW_SWAP |
205 DMAE_CMD_ENDIANITY_DW_SWAP |
207 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
208 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
209 dmae->src_addr_lo = U64_LO(dma_addr);
210 dmae->src_addr_hi = U64_HI(dma_addr);
211 dmae->dst_addr_lo = dst_addr >> 2;
212 dmae->dst_addr_hi = 0;
214 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
215 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
216 dmae->comp_val = DMAE_COMP_VAL;
218 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
219 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
220 "dst_addr [%x:%08x (%08x)]\n"
221 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
222 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
225 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
226 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
231 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
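/* The DMAE block signals completion by writing DMAE_COMP_VAL to the
 * completion address programmed above (wb_comp), so the loop below simply
 * polls that word and reports a timeout if it never appears; the poll delay
 * is stretched for emulation/FPGA where the chip runs slower. */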
235 while (*wb_comp != DMAE_COMP_VAL) {
236 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
239 BNX2X_ERR("dmae timeout!\n");
243 /* adjust delay for emulation/FPGA */
244 if (CHIP_REV_IS_SLOW(bp))
250 mutex_unlock(&bp->dmae_mutex);
253 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
255 struct dmae_command *dmae = &bp->init_dmae;
256 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
259 if (!bp->dmae_ready) {
260 u32 *data = bnx2x_sp(bp, wb_data[0]);
263 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
264 " using indirect\n", src_addr, len32);
265 for (i = 0; i < len32; i++)
266 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
270 mutex_lock(&bp->dmae_mutex);
272 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
273 memset(dmae, 0, sizeof(struct dmae_command));
275 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
276 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
277 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
279 DMAE_CMD_ENDIANITY_B_DW_SWAP |
281 DMAE_CMD_ENDIANITY_DW_SWAP |
283 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
284 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
285 dmae->src_addr_lo = src_addr >> 2;
286 dmae->src_addr_hi = 0;
287 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
288 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
290 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
291 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
292 dmae->comp_val = DMAE_COMP_VAL;
294 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
295 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
296 "dst_addr [%x:%08x (%08x)]\n"
297 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
298 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
299 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
300 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
304 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
308 while (*wb_comp != DMAE_COMP_VAL) {
311 BNX2X_ERR("dmae timeout!\n");
315 /* adjust delay for emulation/FPGA */
316 if (CHIP_REV_IS_SLOW(bp))
321 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
322 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
323 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
325 mutex_unlock(&bp->dmae_mutex);
328 /* used only for slowpath so not inlined */
329 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
333 wb_write[0] = val_hi;
334 wb_write[1] = val_lo;
335 REG_WR_DMAE(bp, reg, wb_write, 2);
339 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
343 REG_RD_DMAE(bp, reg, wb_data, 2);
345 return HILO_U64(wb_data[0], wb_data[1]);
349 static int bnx2x_mc_assert(struct bnx2x *bp)
353 u32 row0, row1, row2, row3;
356 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
357 XSTORM_ASSERT_LIST_INDEX_OFFSET);
359 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
361 /* print the asserts */
362 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
364 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i));
366 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
368 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
370 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
373 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
374 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
375 " 0x%08x 0x%08x 0x%08x\n",
376 i, row3, row2, row1, row0);
384 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
385 TSTORM_ASSERT_LIST_INDEX_OFFSET);
387 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
389 /* print the asserts */
390 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
392 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i));
394 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
396 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
398 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
401 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
402 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
403 " 0x%08x 0x%08x 0x%08x\n",
404 i, row3, row2, row1, row0);
412 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
413 CSTORM_ASSERT_LIST_INDEX_OFFSET);
415 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
417 /* print the asserts */
418 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
420 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i));
422 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
424 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
426 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
429 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
430 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
431 " 0x%08x 0x%08x 0x%08x\n",
432 i, row3, row2, row1, row0);
440 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
441 USTORM_ASSERT_LIST_INDEX_OFFSET);
443 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
445 /* print the asserts */
446 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
448 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i));
450 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 4);
452 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 8);
454 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
455 USTORM_ASSERT_LIST_OFFSET(i) + 12);
457 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
458 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
459 " 0x%08x 0x%08x 0x%08x\n",
460 i, row3, row2, row1, row0);
470 static void bnx2x_fw_dump(struct bnx2x *bp)
476 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
477 mark = ((mark + 0x3) & ~0x3);
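/* As used below, mark appears to be a pointer into the MCP scratch trace
 * buffer carrying a 0x08000000 base: the dump first prints from mark to the
 * end of the buffer (0xF900) and then wraps from 0xF108 back up to mark,
 * i.e. oldest data first. */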
478 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
480 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
481 for (word = 0; word < 8; word++)
482 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
485 printk(KERN_CONT "%s", (char *)data);
487 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
488 for (word = 0; word < 8; word++)
489 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
492 printk(KERN_CONT "%s", (char *)data);
494 printk("\n" KERN_ERR PFX "end of fw dump\n");
497 static void bnx2x_panic_dump(struct bnx2x *bp)
502 bp->stats_state = STATS_STATE_DISABLED;
503 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
505 BNX2X_ERR("begin crash dump -----------------\n");
509 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
510 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
511 " spq_prod_idx(%u)\n",
512 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
513 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
516 for_each_rx_queue(bp, i) {
517 struct bnx2x_fastpath *fp = &bp->fp[i];
519 BNX2X_ERR("queue[%d]: rx_bd_prod(%x) rx_bd_cons(%x)"
520 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
521 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
522 i, fp->rx_bd_prod, fp->rx_bd_cons,
523 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
524 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
525 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
526 " fp_u_idx(%x) *sb_u_idx(%x)\n",
527 fp->rx_sge_prod, fp->last_max_sge,
528 le16_to_cpu(fp->fp_u_idx),
529 fp->status_blk->u_status_block.status_block_index);
533 for_each_tx_queue(bp, i) {
534 struct bnx2x_fastpath *fp = &bp->fp[i];
535 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
537 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
538 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
539 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
540 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
541 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
542 " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
543 fp->status_blk->c_status_block.status_block_index,
544 hw_prods->packets_prod, hw_prods->bds_prod);
549 for_each_rx_queue(bp, i) {
550 struct bnx2x_fastpath *fp = &bp->fp[i];
552 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
553 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
554 for (j = start; j != end; j = RX_BD(j + 1)) {
555 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
556 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
558 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
559 j, rx_bd[1], rx_bd[0], sw_bd->skb);
562 start = RX_SGE(fp->rx_sge_prod);
563 end = RX_SGE(fp->last_max_sge);
564 for (j = start; j != end; j = RX_SGE(j + 1)) {
565 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
566 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
568 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
569 j, rx_sge[1], rx_sge[0], sw_page->page);
572 start = RCQ_BD(fp->rx_comp_cons - 10);
573 end = RCQ_BD(fp->rx_comp_cons + 503);
574 for (j = start; j != end; j = RCQ_BD(j + 1)) {
575 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
577 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
578 j, cqe[0], cqe[1], cqe[2], cqe[3]);
583 for_each_tx_queue(bp, i) {
584 struct bnx2x_fastpath *fp = &bp->fp[i];
586 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
587 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
588 for (j = start; j != end; j = TX_BD(j + 1)) {
589 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
591 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
592 sw_bd->skb, sw_bd->first_bd);
595 start = TX_BD(fp->tx_bd_cons - 10);
596 end = TX_BD(fp->tx_bd_cons + 254);
597 for (j = start; j != end; j = TX_BD(j + 1)) {
598 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
600 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
601 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
607 BNX2X_ERR("end crash dump -----------------\n");
610 static void bnx2x_int_enable(struct bnx2x *bp)
612 int port = BP_PORT(bp);
613 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
614 u32 val = REG_RD(bp, addr);
615 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
616 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
619 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
620 HC_CONFIG_0_REG_INT_LINE_EN_0);
621 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
622 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
624 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
625 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
626 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
627 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
629 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
630 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
631 HC_CONFIG_0_REG_INT_LINE_EN_0 |
632 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
634 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
637 REG_WR(bp, addr, val);
639 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
642 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
643 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
645 REG_WR(bp, addr, val);
647 if (CHIP_IS_E1H(bp)) {
648 /* init leading/trailing edge */
650 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
652 /* enable nig and gpio3 attention */
657 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
658 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
662 static void bnx2x_int_disable(struct bnx2x *bp)
664 int port = BP_PORT(bp);
665 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
666 u32 val = REG_RD(bp, addr);
668 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
669 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
670 HC_CONFIG_0_REG_INT_LINE_EN_0 |
671 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
673 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
676 /* flush all outstanding writes */
679 REG_WR(bp, addr, val);
680 if (REG_RD(bp, addr) != val)
681 BNX2X_ERR("BUG! proper val not read from IGU!\n");
685 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
687 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
690 /* disable interrupt handling */
691 atomic_inc(&bp->intr_sem);
693 /* prevent the HW from sending interrupts */
694 bnx2x_int_disable(bp);
696 /* make sure all ISRs are done */
698 synchronize_irq(bp->msix_table[0].vector);
700 for_each_queue(bp, i)
701 synchronize_irq(bp->msix_table[i + offset].vector);
703 synchronize_irq(bp->pdev->irq);
705 /* make sure sp_task is not running */
706 cancel_delayed_work(&bp->sp_task);
707 flush_workqueue(bnx2x_wq);
713 * General service functions
716 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
717 u8 storm, u16 index, u8 op, u8 update)
719 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
720 COMMAND_REG_INT_ACK);
721 struct igu_ack_register igu_ack;
723 igu_ack.status_block_index = index;
724 igu_ack.sb_id_and_flags =
725 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
726 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
727 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
728 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
730 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
731 (*(u32 *)&igu_ack), hc_addr);
732 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
735 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
737 struct host_status_block *fpsb = fp->status_blk;
740 barrier(); /* status block is written to by the chip */
741 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
742 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
745 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
746 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
752 static u16 bnx2x_ack_int(struct bnx2x *bp)
754 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
755 COMMAND_REG_SIMD_MASK);
756 u32 result = REG_RD(bp, hc_addr);
758 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
766 * fast path service functions
769 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
773 /* Tell compiler that status block fields can change */
775 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
776 return (fp->tx_pkt_cons != tx_cons_sb);
779 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
781 /* Tell compiler that consumer and producer can change */
783 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
786 /* free skb in the packet ring at pos idx
787 * return idx of last bd freed
789 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
792 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
793 struct eth_tx_bd *tx_bd;
794 struct sk_buff *skb = tx_buf->skb;
795 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
798 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
802 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
803 tx_bd = &fp->tx_desc_ring[bd_idx];
804 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
805 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
807 nbd = le16_to_cpu(tx_bd->nbd) - 1;
808 new_cons = nbd + tx_buf->first_bd;
809 #ifdef BNX2X_STOP_ON_ERROR
810 if (nbd > (MAX_SKB_FRAGS + 2)) {
811 BNX2X_ERR("BAD nbd!\n");
816 /* Skip a parse bd and the TSO split header bd
817 since they have no mapping */
819 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
821 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
822 ETH_TX_BD_FLAGS_TCP_CSUM |
823 ETH_TX_BD_FLAGS_SW_LSO)) {
825 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
826 tx_bd = &fp->tx_desc_ring[bd_idx];
827 /* is this a TSO split header bd? */
828 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
830 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
837 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
838 tx_bd = &fp->tx_desc_ring[bd_idx];
839 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
840 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
842 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
848 tx_buf->first_bd = 0;
854 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
860 barrier(); /* Tell compiler that prod and cons can change */
861 prod = fp->tx_bd_prod;
862 cons = fp->tx_bd_cons;
864 /* NUM_TX_RINGS = number of "next-page" entries;
865 it will be used as a threshold */
866 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
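/* In other words (assuming tx_ring_size counts all BDs, including the
 * "next-page" ones): with prod == cons the ring is idle and this reports
 * tx_ring_size - NUM_TX_RINGS usable descriptors, one "next-page" BD being
 * reserved per ring page. */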
868 #ifdef BNX2X_STOP_ON_ERROR
870 WARN_ON(used > fp->bp->tx_ring_size);
871 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
874 return (s16)(fp->bp->tx_ring_size) - used;
877 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
879 struct bnx2x *bp = fp->bp;
880 struct netdev_queue *txq;
881 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
884 #ifdef BNX2X_STOP_ON_ERROR
885 if (unlikely(bp->panic))
889 txq = netdev_get_tx_queue(bp->dev, fp->index);
890 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
891 sw_cons = fp->tx_pkt_cons;
893 while (sw_cons != hw_cons) {
896 pkt_cons = TX_BD(sw_cons);
898 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
900 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
901 hw_cons, sw_cons, pkt_cons);
903 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
905 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
908 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
916 fp->tx_pkt_cons = sw_cons;
917 fp->tx_bd_cons = bd_cons;
919 /* Need to make the tx_bd_cons update visible to start_xmit()
920 * before checking for netif_tx_queue_stopped(). Without the
921 * memory barrier, there is a small possibility that start_xmit()
922 * will miss it and cause the queue to be stopped forever.
926 /* TBD need a thresh? */
927 if (unlikely(netif_tx_queue_stopped(txq))) {
929 __netif_tx_lock(txq, smp_processor_id());
931 if ((netif_tx_queue_stopped(txq)) &&
932 (bp->state == BNX2X_STATE_OPEN) &&
933 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
934 netif_tx_wake_queue(txq);
936 __netif_tx_unlock(txq);
941 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
942 union eth_rx_cqe *rr_cqe)
944 struct bnx2x *bp = fp->bp;
945 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
946 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
949 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
950 fp->index, cid, command, bp->state,
951 rr_cqe->ramrod_cqe.ramrod_type);
956 switch (command | fp->state) {
957 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
958 BNX2X_FP_STATE_OPENING):
959 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
961 fp->state = BNX2X_FP_STATE_OPEN;
964 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
965 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
967 fp->state = BNX2X_FP_STATE_HALTED;
971 BNX2X_ERR("unexpected MC reply (%d) "
972 "fp->state is %x\n", command, fp->state);
975 mb(); /* force bnx2x_wait_ramrod() to see the change */
979 switch (command | bp->state) {
980 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
981 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
982 bp->state = BNX2X_STATE_OPEN;
985 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
986 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
987 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
988 fp->state = BNX2X_FP_STATE_HALTED;
991 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
992 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
993 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
997 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
998 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
999 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1000 bp->set_mac_pending = 0;
1003 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1004 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1008 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1009 command, bp->state);
1012 mb(); /* force bnx2x_wait_ramrod() to see the change */
1015 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1016 struct bnx2x_fastpath *fp, u16 index)
1018 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1019 struct page *page = sw_buf->page;
1020 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1022 /* Skip "next page" elements */
1026 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1027 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1028 __free_pages(page, PAGES_PER_SGE_SHIFT);
1030 sw_buf->page = NULL;
1035 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1036 struct bnx2x_fastpath *fp, int last)
1040 for (i = 0; i < last; i++)
1041 bnx2x_free_rx_sge(bp, fp, i);
1044 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1045 struct bnx2x_fastpath *fp, u16 index)
1047 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1048 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1049 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1052 if (unlikely(page == NULL))
1055 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1056 PCI_DMA_FROMDEVICE);
1057 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1058 __free_pages(page, PAGES_PER_SGE_SHIFT);
1062 sw_buf->page = page;
1063 pci_unmap_addr_set(sw_buf, mapping, mapping);
1065 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1066 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1071 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1072 struct bnx2x_fastpath *fp, u16 index)
1074 struct sk_buff *skb;
1075 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1076 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1079 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1080 if (unlikely(skb == NULL))
1083 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1084 PCI_DMA_FROMDEVICE);
1085 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1091 pci_unmap_addr_set(rx_buf, mapping, mapping);
1093 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1094 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1099 /* note that we are not allocating a new skb,
1100 * we are just moving one from cons to prod;
1101 * we are not creating a new mapping,
1102 * so there is no need to check for dma_mapping_error().
1104 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1105 struct sk_buff *skb, u16 cons, u16 prod)
1107 struct bnx2x *bp = fp->bp;
1108 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1109 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1110 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1111 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1113 pci_dma_sync_single_for_device(bp->pdev,
1114 pci_unmap_addr(cons_rx_buf, mapping),
1115 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1117 prod_rx_buf->skb = cons_rx_buf->skb;
1118 pci_unmap_addr_set(prod_rx_buf, mapping,
1119 pci_unmap_addr(cons_rx_buf, mapping));
1120 *prod_bd = *cons_bd;
1123 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1126 u16 last_max = fp->last_max_sge;
1128 if (SUB_S16(idx, last_max) > 0)
1129 fp->last_max_sge = idx;
1132 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1136 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1137 int idx = RX_SGE_CNT * i - 1;
1139 for (j = 0; j < 2; j++) {
1140 SGE_MASK_CLEAR_BIT(fp, idx);
1146 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1147 struct eth_fast_path_rx_cqe *fp_cqe)
1149 struct bnx2x *bp = fp->bp;
1150 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1151 le16_to_cpu(fp_cqe->len_on_bd)) >>
1153 u16 last_max, last_elem, first_elem;
1160 /* First mark all used pages */
1161 for (i = 0; i < sge_len; i++)
1162 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1164 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1165 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1167 /* Here we assume that the last SGE index is the biggest */
1168 prefetch((void *)(fp->sge_mask));
1169 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1171 last_max = RX_SGE(fp->last_max_sge);
1172 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1173 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
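/* The sge_mask is consumed in 64-bit chunks: the producer below is only
 * advanced across mask elements that have gone completely to zero (every
 * SGE in them consumed); each such element is refilled with all ones and
 * contributes RX_SGE_MASK_ELEM_SZ to delta. */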
1175 /* If ring is not full */
1176 if (last_elem + 1 != first_elem)
1179 /* Now update the prod */
1180 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1181 if (likely(fp->sge_mask[i]))
1184 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1185 delta += RX_SGE_MASK_ELEM_SZ;
1189 fp->rx_sge_prod += delta;
1190 /* clear page-end entries */
1191 bnx2x_clear_sge_mask_next_elems(fp);
1194 DP(NETIF_MSG_RX_STATUS,
1195 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1196 fp->last_max_sge, fp->rx_sge_prod);
1199 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1201 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1202 memset(fp->sge_mask, 0xff,
1203 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1205 /* Clear the last two indices in the page:
1206 these are the indices that correspond to the "next" element,
1207 hence they will never be indicated and should be removed from
1208 the calculations. */
1209 bnx2x_clear_sge_mask_next_elems(fp);
1212 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1213 struct sk_buff *skb, u16 cons, u16 prod)
1215 struct bnx2x *bp = fp->bp;
1216 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1217 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1218 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1221 /* move empty skb from pool to prod and map it */
1222 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1223 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1224 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1225 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1227 /* move partial skb from cons to pool (don't unmap yet) */
1228 fp->tpa_pool[queue] = *cons_rx_buf;
1230 /* mark bin state as start - print error if current state != stop */
1231 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1232 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1234 fp->tpa_state[queue] = BNX2X_TPA_START;
1236 /* point prod_bd to new skb */
1237 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1238 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1240 #ifdef BNX2X_STOP_ON_ERROR
1241 fp->tpa_queue_used |= (1 << queue);
1242 #ifdef __powerpc64__
1243 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1245 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1247 fp->tpa_queue_used);
1251 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1252 struct sk_buff *skb,
1253 struct eth_fast_path_rx_cqe *fp_cqe,
1256 struct sw_rx_page *rx_pg, old_rx_pg;
1257 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1258 u32 i, frag_len, frag_size, pages;
1262 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1263 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1265 /* This is needed in order to enable forwarding support */
1267 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1268 max(frag_size, (u32)len_on_bd));
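/* Setting gso_size lets the stack re-segment the aggregated frame when it is
 * forwarded; clamping it between len_on_bd and SGE_PAGE_SIZE presumably
 * serves as an approximation of the original MSS. */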
1270 #ifdef BNX2X_STOP_ON_ERROR
1272 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1273 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1275 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1276 fp_cqe->pkt_len, len_on_bd);
1282 /* Run through the SGL and compose the fragmented skb */
1283 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1284 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1286 /* FW gives the indices of the SGE as if the ring is an array
1287 (meaning that "next" element will consume 2 indices) */
1288 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1289 rx_pg = &fp->rx_page_ring[sge_idx];
1292 /* If we fail to allocate a substitute page, we simply stop
1293 where we are and drop the whole packet */
1294 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1295 if (unlikely(err)) {
1296 fp->eth_q_stats.rx_skb_alloc_failed++;
1300 /* Unmap the page as we are going to pass it to the stack */
1301 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1302 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1304 /* Add one frag and update the appropriate fields in the skb */
1305 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1307 skb->data_len += frag_len;
1308 skb->truesize += frag_len;
1309 skb->len += frag_len;
1311 frag_size -= frag_len;
1317 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1318 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1321 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1322 struct sk_buff *skb = rx_buf->skb;
1324 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1326 /* Unmap skb in the pool anyway, as we are going to change
1327 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1329 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1330 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1332 if (likely(new_skb)) {
1333 /* fix ip xsum and give it to the stack */
1334 /* (no need to map the new skb) */
1337 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1338 PARSING_FLAGS_VLAN);
1339 int is_not_hwaccel_vlan_cqe =
1340 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1344 prefetch(((char *)(skb)) + 128);
1346 #ifdef BNX2X_STOP_ON_ERROR
1347 if (pad + len > bp->rx_buf_size) {
1348 BNX2X_ERR("skb_put is about to fail... "
1349 "pad %d len %d rx_buf_size %d\n",
1350 pad, len, bp->rx_buf_size);
1356 skb_reserve(skb, pad);
1359 skb->protocol = eth_type_trans(skb, bp->dev);
1360 skb->ip_summed = CHECKSUM_UNNECESSARY;
1365 iph = (struct iphdr *)skb->data;
1367 /* If there is no Rx VLAN offloading -
1368 take the VLAN tag into account */
1369 if (unlikely(is_not_hwaccel_vlan_cqe))
1370 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1373 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1376 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1377 &cqe->fast_path_cqe, cqe_idx)) {
1379 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1380 (!is_not_hwaccel_vlan_cqe))
1381 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1382 le16_to_cpu(cqe->fast_path_cqe.
1386 netif_receive_skb(skb);
1388 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1389 " - dropping packet!\n");
1394 /* put new skb in bin */
1395 fp->tpa_pool[queue].skb = new_skb;
1398 /* else drop the packet and keep the buffer in the bin */
1399 DP(NETIF_MSG_RX_STATUS,
1400 "Failed to allocate new skb - dropping packet!\n");
1401 fp->eth_q_stats.rx_skb_alloc_failed++;
1404 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1407 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1408 struct bnx2x_fastpath *fp,
1409 u16 bd_prod, u16 rx_comp_prod,
1412 struct ustorm_eth_rx_producers rx_prods = {0};
1415 /* Update producers */
1416 rx_prods.bd_prod = bd_prod;
1417 rx_prods.cqe_prod = rx_comp_prod;
1418 rx_prods.sge_prod = rx_sge_prod;
1421 * Make sure that the BD and SGE data is updated before updating the
1422 * producers since FW might read the BD/SGE right after the producer
1424 * This is only applicable for weak-ordered memory model archs such
1425 * as IA-64. The following barrier is also mandatory since FW will
1426 * assume that BDs have buffers.
1430 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1431 REG_WR(bp, BAR_USTRORM_INTMEM +
1432 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1433 ((u32 *)&rx_prods)[i]);
1435 mmiowb(); /* keep prod updates ordered */
1437 DP(NETIF_MSG_RX_STATUS,
1438 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1439 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1442 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1444 struct bnx2x *bp = fp->bp;
1445 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1446 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1449 #ifdef BNX2X_STOP_ON_ERROR
1450 if (unlikely(bp->panic))
1454 /* CQ "next element" is of the size of the regular element,
1455 that's why it's ok here */
1456 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1457 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1460 bd_cons = fp->rx_bd_cons;
1461 bd_prod = fp->rx_bd_prod;
1462 bd_prod_fw = bd_prod;
1463 sw_comp_cons = fp->rx_comp_cons;
1464 sw_comp_prod = fp->rx_comp_prod;
1466 /* Memory barrier necessary as speculative reads of the rx
1467 * buffer can be ahead of the index in the status block
1471 DP(NETIF_MSG_RX_STATUS,
1472 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1473 fp->index, hw_comp_cons, sw_comp_cons);
1475 while (sw_comp_cons != hw_comp_cons) {
1476 struct sw_rx_bd *rx_buf = NULL;
1477 struct sk_buff *skb;
1478 union eth_rx_cqe *cqe;
1482 comp_ring_cons = RCQ_BD(sw_comp_cons);
1483 bd_prod = RX_BD(bd_prod);
1484 bd_cons = RX_BD(bd_cons);
1486 cqe = &fp->rx_comp_ring[comp_ring_cons];
1487 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1489 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1490 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1491 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1492 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1493 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1494 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1496 /* is this a slowpath msg? */
1497 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1498 bnx2x_sp_event(fp, cqe);
1501 /* this is an rx packet */
1503 rx_buf = &fp->rx_buf_ring[bd_cons];
1505 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1506 pad = cqe->fast_path_cqe.placement_offset;
1508 /* If CQE is marked both TPA_START and TPA_END
1509 it is a non-TPA CQE */
1510 if ((!fp->disable_tpa) &&
1511 (TPA_TYPE(cqe_fp_flags) !=
1512 (TPA_TYPE_START | TPA_TYPE_END))) {
1513 u16 queue = cqe->fast_path_cqe.queue_index;
1515 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1516 DP(NETIF_MSG_RX_STATUS,
1517 "calling tpa_start on queue %d\n",
1520 bnx2x_tpa_start(fp, queue, skb,
1525 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1526 DP(NETIF_MSG_RX_STATUS,
1527 "calling tpa_stop on queue %d\n",
1530 if (!BNX2X_RX_SUM_FIX(cqe))
1531 BNX2X_ERR("STOP on none TCP "
1534 /* This is the size of the linear data
1536 len = le16_to_cpu(cqe->fast_path_cqe.
1538 bnx2x_tpa_stop(bp, fp, queue, pad,
1539 len, cqe, comp_ring_cons);
1540 #ifdef BNX2X_STOP_ON_ERROR
1545 bnx2x_update_sge_prod(fp,
1546 &cqe->fast_path_cqe);
1551 pci_dma_sync_single_for_device(bp->pdev,
1552 pci_unmap_addr(rx_buf, mapping),
1553 pad + RX_COPY_THRESH,
1554 PCI_DMA_FROMDEVICE);
1556 prefetch(((char *)(skb)) + 128);
1558 /* is this an error packet? */
1559 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1560 DP(NETIF_MSG_RX_ERR,
1561 "ERROR flags %x rx packet %u\n",
1562 cqe_fp_flags, sw_comp_cons);
1563 fp->eth_q_stats.rx_err_discard_pkt++;
1567 /* Since we don't have a jumbo ring,
1568 * copy small packets if mtu > 1500
1570 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1571 (len <= RX_COPY_THRESH)) {
1572 struct sk_buff *new_skb;
1574 new_skb = netdev_alloc_skb(bp->dev,
1576 if (new_skb == NULL) {
1577 DP(NETIF_MSG_RX_ERR,
1578 "ERROR packet dropped "
1579 "because of alloc failure\n");
1580 fp->eth_q_stats.rx_skb_alloc_failed++;
1585 skb_copy_from_linear_data_offset(skb, pad,
1586 new_skb->data + pad, len);
1587 skb_reserve(new_skb, pad);
1588 skb_put(new_skb, len);
1590 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1594 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1595 pci_unmap_single(bp->pdev,
1596 pci_unmap_addr(rx_buf, mapping),
1598 PCI_DMA_FROMDEVICE);
1599 skb_reserve(skb, pad);
1603 DP(NETIF_MSG_RX_ERR,
1604 "ERROR packet dropped because "
1605 "of alloc failure\n");
1606 fp->eth_q_stats.rx_skb_alloc_failed++;
1608 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1612 skb->protocol = eth_type_trans(skb, bp->dev);
1614 skb->ip_summed = CHECKSUM_NONE;
1616 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1617 skb->ip_summed = CHECKSUM_UNNECESSARY;
1619 fp->eth_q_stats.hw_csum_err++;
1623 skb_record_rx_queue(skb, fp->index);
1625 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1626 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1627 PARSING_FLAGS_VLAN))
1628 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1629 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1632 netif_receive_skb(skb);
1638 bd_cons = NEXT_RX_IDX(bd_cons);
1639 bd_prod = NEXT_RX_IDX(bd_prod);
1640 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1643 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1644 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1646 if (rx_pkt == budget)
1650 fp->rx_bd_cons = bd_cons;
1651 fp->rx_bd_prod = bd_prod_fw;
1652 fp->rx_comp_cons = sw_comp_cons;
1653 fp->rx_comp_prod = sw_comp_prod;
1655 /* Update producers */
1656 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1659 fp->rx_pkt += rx_pkt;
1665 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1667 struct bnx2x_fastpath *fp = fp_cookie;
1668 struct bnx2x *bp = fp->bp;
1669 int index = fp->index;
1671 /* Return here if interrupt is disabled */
1672 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1673 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1677 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1679 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1681 #ifdef BNX2X_STOP_ON_ERROR
1682 if (unlikely(bp->panic))
1686 prefetch(fp->rx_cons_sb);
1687 prefetch(fp->tx_cons_sb);
1688 prefetch(&fp->status_blk->c_status_block.status_block_index);
1689 prefetch(&fp->status_blk->u_status_block.status_block_index);
1691 napi_schedule(&bnx2x_fp(bp, index, napi));
1696 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1698 struct bnx2x *bp = netdev_priv(dev_instance);
1699 u16 status = bnx2x_ack_int(bp);
1702 /* Return here if interrupt is shared and it's not for us */
1703 if (unlikely(status == 0)) {
1704 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1707 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1709 /* Return here if interrupt is disabled */
1710 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1715 #ifdef BNX2X_STOP_ON_ERROR
1716 if (unlikely(bp->panic))
1720 mask = 0x2 << bp->fp[0].sb_id;
1721 if (status & mask) {
1722 struct bnx2x_fastpath *fp = &bp->fp[0];
1724 prefetch(fp->rx_cons_sb);
1725 prefetch(fp->tx_cons_sb);
1726 prefetch(&fp->status_blk->c_status_block.status_block_index);
1727 prefetch(&fp->status_blk->u_status_block.status_block_index);
1729 napi_schedule(&bnx2x_fp(bp, 0, napi));
1735 if (unlikely(status & 0x1)) {
1736 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1744 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1750 /* end of fast path */
1752 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1757 * General service functions
1760 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1763 u32 resource_bit = (1 << resource);
1764 int func = BP_FUNC(bp);
1765 u32 hw_lock_control_reg;
1768 /* Validating that the resource is within range */
1769 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1771 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1772 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1777 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1779 hw_lock_control_reg =
1780 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1783 /* Validating that the resource is not already taken */
1784 lock_status = REG_RD(bp, hw_lock_control_reg);
1785 if (lock_status & resource_bit) {
1786 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1787 lock_status, resource_bit);
1791 /* Try for 5 seconds, every 5ms */
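/* Acquire protocol as the loop below uses it: writing the resource bit to
 * hw_lock_control_reg + 4 is the "try to take" operation, and reading the
 * control register back shows whether the bit (and hence the lock) was
 * actually granted. */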
1792 for (cnt = 0; cnt < 1000; cnt++) {
1793 /* Try to acquire the lock */
1794 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1795 lock_status = REG_RD(bp, hw_lock_control_reg);
1796 if (lock_status & resource_bit)
1801 DP(NETIF_MSG_HW, "Timeout\n");
1805 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1808 u32 resource_bit = (1 << resource);
1809 int func = BP_FUNC(bp);
1810 u32 hw_lock_control_reg;
1812 /* Validating that the resource is within range */
1813 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1815 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1816 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1821 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1823 hw_lock_control_reg =
1824 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1827 /* Validating that the resource is currently taken */
1828 lock_status = REG_RD(bp, hw_lock_control_reg);
1829 if (!(lock_status & resource_bit)) {
1830 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1831 lock_status, resource_bit);
1835 REG_WR(bp, hw_lock_control_reg, resource_bit);
1839 /* HW Lock for shared dual port PHYs */
1840 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1842 mutex_lock(&bp->port.phy_mutex);
1844 if (bp->port.need_hw_lock)
1845 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1848 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1850 if (bp->port.need_hw_lock)
1851 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1853 mutex_unlock(&bp->port.phy_mutex);
1856 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1858 /* The GPIO should be swapped if swap register is set and active */
1859 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1860 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
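/* The XOR with port means that when both the port-swap strap and its
 * override are set, port 0 uses port 1's GPIO bank and vice versa. */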
1861 int gpio_shift = gpio_num +
1862 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1863 u32 gpio_mask = (1 << gpio_shift);
1867 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1868 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1872 /* read GPIO value */
1873 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1875 /* get the requested pin value */
1876 if ((gpio_reg & gpio_mask) == gpio_mask)
1881 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1886 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1888 /* The GPIO should be swapped if swap register is set and active */
1889 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1890 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1891 int gpio_shift = gpio_num +
1892 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1893 u32 gpio_mask = (1 << gpio_shift);
1896 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1897 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1901 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1902 /* read GPIO and mask except the float bits */
1903 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1906 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1907 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1908 gpio_num, gpio_shift);
1909 /* clear FLOAT and set CLR */
1910 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1911 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1914 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1915 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1916 gpio_num, gpio_shift);
1917 /* clear FLOAT and set SET */
1918 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1919 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1922 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1923 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1924 gpio_num, gpio_shift);
1926 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1933 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1934 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1939 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1941 /* The GPIO should be swapped if swap register is set and active */
1942 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1943 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1944 int gpio_shift = gpio_num +
1945 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1946 u32 gpio_mask = (1 << gpio_shift);
1949 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1950 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1954 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1956 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1959 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1960 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1961 "output low\n", gpio_num, gpio_shift);
1962 /* clear SET and set CLR */
1963 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1964 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1967 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1968 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1969 "output high\n", gpio_num, gpio_shift);
1970 /* clear CLR and set SET */
1971 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1972 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1979 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1980 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1985 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1987 u32 spio_mask = (1 << spio_num);
1990 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1991 (spio_num > MISC_REGISTERS_SPIO_7)) {
1992 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1996 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1997 /* read SPIO and mask except the float bits */
1998 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2001 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2002 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2003 /* clear FLOAT and set CLR */
2004 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2005 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2008 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2009 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2010 /* clear FLOAT and set SET */
2011 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2012 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2015 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2016 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2018 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2025 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2026 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2031 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2033 switch (bp->link_vars.ieee_fc &
2034 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2035 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2036 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2040 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2041 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2045 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2046 bp->port.advertising |= ADVERTISED_Asym_Pause;
2050 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2056 static void bnx2x_link_report(struct bnx2x *bp)
2058 if (bp->link_vars.link_up) {
2059 if (bp->state == BNX2X_STATE_OPEN)
2060 netif_carrier_on(bp->dev);
2061 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2063 printk("%d Mbps ", bp->link_vars.line_speed);
2065 if (bp->link_vars.duplex == DUPLEX_FULL)
2066 printk("full duplex");
2068 printk("half duplex");
2070 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2071 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2072 printk(", receive ");
2073 if (bp->link_vars.flow_ctrl &
2075 printk("& transmit ");
2077 printk(", transmit ");
2079 printk("flow control ON");
2083 } else { /* link_down */
2084 netif_carrier_off(bp->dev);
2085 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2089 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2091 if (!BP_NOMCP(bp)) {
2094 /* Initialize link parameters structure variables */
2095 /* It is recommended to turn off RX FC for jumbo frames
2096 for better performance */
2098 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2099 else if (bp->dev->mtu > 5000)
2100 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2102 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2104 bnx2x_acquire_phy_lock(bp);
2106 if (load_mode == LOAD_DIAG)
2107 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2109 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2111 bnx2x_release_phy_lock(bp);
2113 bnx2x_calc_fc_adv(bp);
2115 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2116 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2117 bnx2x_link_report(bp);
2122 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2126 static void bnx2x_link_set(struct bnx2x *bp)
2128 if (!BP_NOMCP(bp)) {
2129 bnx2x_acquire_phy_lock(bp);
2130 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2131 bnx2x_release_phy_lock(bp);
2133 bnx2x_calc_fc_adv(bp);
2135 BNX2X_ERR("Bootcode is missing - can not set link\n");
2138 static void bnx2x__link_reset(struct bnx2x *bp)
2140 if (!BP_NOMCP(bp)) {
2141 bnx2x_acquire_phy_lock(bp);
2142 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2143 bnx2x_release_phy_lock(bp);
2145 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2148 static u8 bnx2x_link_test(struct bnx2x *bp)
2152 bnx2x_acquire_phy_lock(bp);
2153 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2154 bnx2x_release_phy_lock(bp);
2159 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2161 u32 r_param = bp->link_vars.line_speed / 8;
2162 u32 fair_periodic_timeout_usec;
2165 memset(&(bp->cmng.rs_vars), 0,
2166 sizeof(struct rate_shaping_vars_per_port));
2167 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2169 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2170 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2172 /* this is the threshold below which no timer arming will occur;
2173 the 1.25 coefficient makes the threshold a little bigger
2174 than the real time, to compensate for timer inaccuracy */
2175 bp->cmng.rs_vars.rs_threshold =
2176 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2178 /* resolution of fairness timer */
2179 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2180 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2181 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
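/* Worked example, assuming line_speed is in Mbps and T_FAIR_COEF works out
 * to 10^7 usec*Mbps as the comment above implies: 10000 Mbps gives
 * t_fair = 1000 usec and 1000 Mbps gives 10000 usec. */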
2183 /* this is the threshold below which we won't arm the timer anymore */
2184 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2186 /* we multiply by 1e3/8 to get bytes/msec.
2187 We don't want the credits to exceed
2188 t_fair*FAIR_MEM (the algorithm resolution) */
2189 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2190 /* since each tick is 4 usec */
2191 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2194 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2196 struct rate_shaping_vars_per_vn m_rs_vn;
2197 struct fairness_vars_per_vn m_fair_vn;
2198 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2199 u16 vn_min_rate, vn_max_rate;
2202 /* If function is hidden - set min and max to zeroes */
2203 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2208 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2209 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2210 /* If fairness is enabled (not all min rates are zeroes) and
2211 if current min rate is zero - set it to 1.
2212 This is a requirement of the algorithm. */
2213 if (bp->vn_weight_sum && (vn_min_rate == 0))
2214 vn_min_rate = DEF_MIN_RATE;
2215 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2216 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
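/* The MF config min/max BW fields are apparently expressed in units of
 * 100 Mbps, hence the multiplication by 100 to get vn_min_rate and
 * vn_max_rate in Mbps. */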
2220 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2221 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2223 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2224 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2226 /* global vn counter - maximal Mbps for this vn */
2227 m_rs_vn.vn_counter.rate = vn_max_rate;
2229 /* quota - number of bytes transmitted in this period */
2230 m_rs_vn.vn_counter.quota =
2231 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2233 if (bp->vn_weight_sum) {
2234 /* credit for each period of the fairness algorithm:
2235       number of bytes in T_FAIR (the vns share the port rate).
2236 vn_weight_sum should not be larger than 10000, thus
2237 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2239 m_fair_vn.vn_credit_delta =
2240 max((u32)(vn_min_rate * (T_FAIR_COEF /
2241 (8 * bp->vn_weight_sum))),
2242 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2243 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2244 m_fair_vn.vn_credit_delta);
2247 /* Store it to internal memory */
2248 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2249 REG_WR(bp, BAR_XSTRORM_INTMEM +
2250 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2251 ((u32 *)(&m_rs_vn))[i]);
2253 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2254 REG_WR(bp, BAR_XSTRORM_INTMEM +
2255 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2256 ((u32 *)(&m_fair_vn))[i]);
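/* Illustrative worked example (added for clarity, not in the original
 * source): vn_max_rate is in Mbps, i.e. bits per usec, so the quota
 * programmed above is vn_max_rate * RS_PERIODIC_TIMEOUT_USEC / 8 bytes
 * per rate-shaping period.  With a 100 usec period and
 * vn_max_rate = 10000 Mbps this gives 10000 * 100 / 8 = 125000 bytes.
 */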
2260 /* This function is called upon link interrupt */
2261 static void bnx2x_link_attn(struct bnx2x *bp)
2263 /* Make sure that we are synced with the current statistics */
2264 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2266 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2268 if (bp->link_vars.link_up) {
2270 /* dropless flow control */
2271 if (CHIP_IS_E1H(bp)) {
2272 int port = BP_PORT(bp);
2273 u32 pause_enabled = 0;
2275 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2278 REG_WR(bp, BAR_USTRORM_INTMEM +
2279 USTORM_PAUSE_ENABLED_OFFSET(port),
2283 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2284 struct host_port_stats *pstats;
2286 pstats = bnx2x_sp(bp, port_stats);
2287 /* reset old bmac stats */
2288 memset(&(pstats->mac_stx[0]), 0,
2289 sizeof(struct mac_stx));
2291 if ((bp->state == BNX2X_STATE_OPEN) ||
2292 (bp->state == BNX2X_STATE_DISABLED))
2293 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2296 /* indicate link status */
2297 bnx2x_link_report(bp);
2300 int port = BP_PORT(bp);
2304 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2305 if (vn == BP_E1HVN(bp))
2308 func = ((vn << 1) | port);
2310 /* Set the attention towards other drivers
2312 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2313 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2316 if (bp->link_vars.link_up) {
2319 /* Init rate shaping and fairness contexts */
2320 bnx2x_init_port_minmax(bp);
2322 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2323 bnx2x_init_vn_minmax(bp, 2*vn + port);
2325 /* Store it to internal memory */
2327 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2328 REG_WR(bp, BAR_XSTRORM_INTMEM +
2329 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2330 ((u32 *)(&bp->cmng))[i]);
2335 static void bnx2x__link_status_update(struct bnx2x *bp)
2337 if (bp->state != BNX2X_STATE_OPEN)
2340 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2342 if (bp->link_vars.link_up)
2343 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2345 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2347 /* indicate link status */
2348 bnx2x_link_report(bp);
2351 static void bnx2x_pmf_update(struct bnx2x *bp)
2353 int port = BP_PORT(bp);
2357 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2359 /* enable nig attention */
2360 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2361 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2362 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2364 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2372 * General service functions
2375 /* the slow path queue is odd since completions arrive on the fastpath ring */
2376 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2377 u32 data_hi, u32 data_lo, int common)
2379 int func = BP_FUNC(bp);
2381 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2382 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2383 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2384 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2385 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2387 #ifdef BNX2X_STOP_ON_ERROR
2388 if (unlikely(bp->panic))
2392 spin_lock_bh(&bp->spq_lock);
2394 if (!bp->spq_left) {
2395 BNX2X_ERR("BUG! SPQ ring full!\n");
2396 spin_unlock_bh(&bp->spq_lock);
2401    /* CID needs the port number to be encoded in it */
2402 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2403 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2405 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2407 bp->spq_prod_bd->hdr.type |=
2408 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2410 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2411 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2415 if (bp->spq_prod_bd == bp->spq_last_bd) {
2416 bp->spq_prod_bd = bp->spq;
2417 bp->spq_prod_idx = 0;
2418 DP(NETIF_MSG_TIMER, "end of spq\n");
2425 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2428 spin_unlock_bh(&bp->spq_lock);
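/* Usage sketch (illustration only, taken from the statistics code further
 * down): a caller posts a single ramrod per call, e.g.
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *		      ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);
 *
 * The producer BD wraps back to bp->spq once it reaches bp->spq_last_bd,
 * as handled above.
 */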
2432 /* acquire split MCP access lock register */
2433 static int bnx2x_acquire_alr(struct bnx2x *bp)
2440 for (j = 0; j < i*10; j++) {
2442 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2443 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2444 if (val & (1L << 31))
2449 if (!(val & (1L << 31))) {
2450 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2457 /* release split MCP access lock register */
2458 static void bnx2x_release_alr(struct bnx2x *bp)
2462 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2465 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2467 struct host_def_status_block *def_sb = bp->def_status_blk;
2470 barrier(); /* status block is written to by the chip */
2471 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2472 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2475 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2476 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2479 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2480 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2483 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2484 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2487 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2488 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2495 * slow path service functions
2498 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2500 int port = BP_PORT(bp);
2501 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2502 COMMAND_REG_ATTN_BITS_SET);
2503 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2504 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2505 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2506 NIG_REG_MASK_INTERRUPT_PORT0;
2510 if (bp->attn_state & asserted)
2511 BNX2X_ERR("IGU ERROR\n");
2513 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2514 aeu_mask = REG_RD(bp, aeu_addr);
2516 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2517 aeu_mask, asserted);
2518 aeu_mask &= ~(asserted & 0xff);
2519 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2521 REG_WR(bp, aeu_addr, aeu_mask);
2522 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2524 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2525 bp->attn_state |= asserted;
2526 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2528 if (asserted & ATTN_HARD_WIRED_MASK) {
2529 if (asserted & ATTN_NIG_FOR_FUNC) {
2531 bnx2x_acquire_phy_lock(bp);
2533 /* save nig interrupt mask */
2534 nig_mask = REG_RD(bp, nig_int_mask_addr);
2535 REG_WR(bp, nig_int_mask_addr, 0);
2537 bnx2x_link_attn(bp);
2539 /* handle unicore attn? */
2541 if (asserted & ATTN_SW_TIMER_4_FUNC)
2542 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2544 if (asserted & GPIO_2_FUNC)
2545 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2547 if (asserted & GPIO_3_FUNC)
2548 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2550 if (asserted & GPIO_4_FUNC)
2551 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2554 if (asserted & ATTN_GENERAL_ATTN_1) {
2555 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2556 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2558 if (asserted & ATTN_GENERAL_ATTN_2) {
2559 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2560 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2562 if (asserted & ATTN_GENERAL_ATTN_3) {
2563 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2564 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2567 if (asserted & ATTN_GENERAL_ATTN_4) {
2568 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2569 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2571 if (asserted & ATTN_GENERAL_ATTN_5) {
2572 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2573 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2575 if (asserted & ATTN_GENERAL_ATTN_6) {
2576 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2577 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2581 } /* if hardwired */
2583 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2585 REG_WR(bp, hc_addr, asserted);
2587 /* now set back the mask */
2588 if (asserted & ATTN_NIG_FOR_FUNC) {
2589 REG_WR(bp, nig_int_mask_addr, nig_mask);
2590 bnx2x_release_phy_lock(bp);
2594 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2596 int port = BP_PORT(bp);
2600 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2601 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2603 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2605 val = REG_RD(bp, reg_offset);
2606 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2607 REG_WR(bp, reg_offset, val);
2609 BNX2X_ERR("SPIO5 hw attention\n");
2611 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2612 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2613 /* Fan failure attention */
2615 /* The PHY reset is controlled by GPIO 1 */
2616 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2617 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2618 /* Low power mode is controlled by GPIO 2 */
2619 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2620 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2621 /* mark the failure */
2622 bp->link_params.ext_phy_config &=
2623 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2624 bp->link_params.ext_phy_config |=
2625 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2627 dev_info.port_hw_config[port].
2628 external_phy_config,
2629 bp->link_params.ext_phy_config);
2630 /* log the failure */
2631 printk(KERN_ERR PFX "Fan Failure on Network"
2632 " Controller %s has caused the driver to"
2633 " shutdown the card to prevent permanent"
2634 " damage. Please contact Dell Support for"
2635 " assistance\n", bp->dev->name);
2643 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2644 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2645 bnx2x_acquire_phy_lock(bp);
2646 bnx2x_handle_module_detect_int(&bp->link_params);
2647 bnx2x_release_phy_lock(bp);
2650 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2652 val = REG_RD(bp, reg_offset);
2653 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2654 REG_WR(bp, reg_offset, val);
2656 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2657 (attn & HW_INTERRUT_ASSERT_SET_0));
2662 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2666 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2668 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2669 BNX2X_ERR("DB hw attention 0x%x\n", val);
2670 /* DORQ discard attention */
2672 BNX2X_ERR("FATAL error from DORQ\n");
2675 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2677 int port = BP_PORT(bp);
2680 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2681 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2683 val = REG_RD(bp, reg_offset);
2684 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2685 REG_WR(bp, reg_offset, val);
2687 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2688 (attn & HW_INTERRUT_ASSERT_SET_1));
2693 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2697 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2699 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2700 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2701 /* CFC error attention */
2703 BNX2X_ERR("FATAL error from CFC\n");
2706 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2708 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2709 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2710 /* RQ_USDMDP_FIFO_OVERFLOW */
2712 BNX2X_ERR("FATAL error from PXP\n");
2715 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2717 int port = BP_PORT(bp);
2720 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2721 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2723 val = REG_RD(bp, reg_offset);
2724 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2725 REG_WR(bp, reg_offset, val);
2727 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2728 (attn & HW_INTERRUT_ASSERT_SET_2));
2733 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2737 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2739 if (attn & BNX2X_PMF_LINK_ASSERT) {
2740 int func = BP_FUNC(bp);
2742 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2743 bnx2x__link_status_update(bp);
2744 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2746 bnx2x_pmf_update(bp);
2748 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2750 BNX2X_ERR("MC assert!\n");
2751 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2752 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2753 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2754 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2757 } else if (attn & BNX2X_MCP_ASSERT) {
2759 BNX2X_ERR("MCP assert!\n");
2760 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2764 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2767 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2768 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2769 if (attn & BNX2X_GRC_TIMEOUT) {
2770 val = CHIP_IS_E1H(bp) ?
2771 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2772 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2774 if (attn & BNX2X_GRC_RSV) {
2775 val = CHIP_IS_E1H(bp) ?
2776 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2777 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2779 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2783 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2785 struct attn_route attn;
2786 struct attn_route group_mask;
2787 int port = BP_PORT(bp);
2793    /* need to take the HW lock because the MCP or the other port might
2794       also try to handle this event */
2795 bnx2x_acquire_alr(bp);
2797 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2798 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2799 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2800 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2801 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2802 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2804 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2805 if (deasserted & (1 << index)) {
2806 group_mask = bp->attn_group[index];
2808 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2809 index, group_mask.sig[0], group_mask.sig[1],
2810 group_mask.sig[2], group_mask.sig[3]);
2812 bnx2x_attn_int_deasserted3(bp,
2813 attn.sig[3] & group_mask.sig[3]);
2814 bnx2x_attn_int_deasserted1(bp,
2815 attn.sig[1] & group_mask.sig[1]);
2816 bnx2x_attn_int_deasserted2(bp,
2817 attn.sig[2] & group_mask.sig[2]);
2818 bnx2x_attn_int_deasserted0(bp,
2819 attn.sig[0] & group_mask.sig[0]);
2821 if ((attn.sig[0] & group_mask.sig[0] &
2822 HW_PRTY_ASSERT_SET_0) ||
2823 (attn.sig[1] & group_mask.sig[1] &
2824 HW_PRTY_ASSERT_SET_1) ||
2825 (attn.sig[2] & group_mask.sig[2] &
2826 HW_PRTY_ASSERT_SET_2))
2827 BNX2X_ERR("FATAL HW block parity attention\n");
2831 bnx2x_release_alr(bp);
2833 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2836 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2838 REG_WR(bp, reg_addr, val);
2840 if (~bp->attn_state & deasserted)
2841 BNX2X_ERR("IGU ERROR\n");
2843 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2844 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2846 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2847 aeu_mask = REG_RD(bp, reg_addr);
2849 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2850 aeu_mask, deasserted);
2851 aeu_mask |= (deasserted & 0xff);
2852 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2854 REG_WR(bp, reg_addr, aeu_mask);
2855 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2857 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2858 bp->attn_state &= ~deasserted;
2859 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2862 static void bnx2x_attn_int(struct bnx2x *bp)
2864 /* read local copy of bits */
2865 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2867 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2869 u32 attn_state = bp->attn_state;
2871 /* look for changed bits */
2872 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2873 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2876 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2877 attn_bits, attn_ack, asserted, deasserted);
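/* Illustrative example (added for clarity, not in the original source):
 * with attn_bits = 0x5, attn_ack = 0x1 and attn_state = 0x1, bit 2 was
 * newly raised, so asserted = 0x5 & ~0x1 & ~0x1 = 0x4 and deasserted = 0.
 * Conversely attn_bits = 0x1, attn_ack = 0x5, attn_state = 0x5 gives
 * asserted = 0 and deasserted = 0x4.
 */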
2879 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2880 BNX2X_ERR("BAD attention state\n");
2882 /* handle bits that were raised */
2884 bnx2x_attn_int_asserted(bp, asserted);
2887 bnx2x_attn_int_deasserted(bp, deasserted);
2890 static void bnx2x_sp_task(struct work_struct *work)
2892 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2896 /* Return here if interrupt is disabled */
2897 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2898 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2902 status = bnx2x_update_dsb_idx(bp);
2903 /* if (status == 0) */
2904 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2906 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2912 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2914 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2916 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2918 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2920 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2925 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2927 struct net_device *dev = dev_instance;
2928 struct bnx2x *bp = netdev_priv(dev);
2930 /* Return here if interrupt is disabled */
2931 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2932 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2936 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2938 #ifdef BNX2X_STOP_ON_ERROR
2939 if (unlikely(bp->panic))
2943 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2948 /* end of slow path */
2952 /****************************************************************************
2954 ****************************************************************************/
2956 /* sum[hi:lo] += add[hi:lo] */
2957 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2960 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2963 /* difference = minuend - subtrahend */
2964 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2966 if (m_lo < s_lo) { \
2968 d_hi = m_hi - s_hi; \
2970    /* we can borrow 1 from the high half */ \
2972 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2974 /* m_hi <= s_hi */ \
2979 /* m_lo >= s_lo */ \
2980 if (m_hi < s_hi) { \
2984 /* m_hi >= s_hi */ \
2985 d_hi = m_hi - s_hi; \
2986 d_lo = m_lo - s_lo; \
2991 #define UPDATE_STAT64(s, t) \
2993 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2994 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2995 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2996 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2997 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2998 pstats->mac_stx[1].t##_lo, diff.lo); \
3001 #define UPDATE_STAT64_NIG(s, t) \
3003 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3004 diff.lo, new->s##_lo, old->s##_lo); \
3005 ADD_64(estats->t##_hi, diff.hi, \
3006 estats->t##_lo, diff.lo); \
3009 /* sum[hi:lo] += add */
3010 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3013 s_hi += (s_lo < a) ? 1 : 0; \
3016 #define UPDATE_EXTEND_STAT(s) \
3018 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3019 pstats->mac_stx[1].s##_lo, \
3023 #define UPDATE_EXTEND_TSTAT(s, t) \
3025 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3026 old_tclient->s = tclient->s; \
3027 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3030 #define UPDATE_EXTEND_USTAT(s, t) \
3032 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3033 old_uclient->s = uclient->s; \
3034 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3037 #define UPDATE_EXTEND_XSTAT(s, t) \
3039 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3040 old_xclient->s = xclient->s; \
3041 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3044 /* minuend -= subtrahend */
3045 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3047 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3050 /* minuend[hi:lo] -= subtrahend */
3051 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3053 SUB_64(m_hi, 0, m_lo, s); \
3056 #define SUB_EXTEND_USTAT(s, t) \
3058 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3059 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
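/* Worked example for the hi/lo helpers above (illustration only, not in
 * the original source): ADD_64 with sum = 0x1:0xFFFFFFF0 and
 * add = 0x0:0x00000020 wraps the low half to 0x10 and, because
 * s_lo < a_lo, carries into the high half, giving 0x2:0x00000010.
 * DIFF_64 borrows the other way round: 0x2:0x00000010 - 0x1:0x00000020
 * yields 0x0:0xFFFFFFF0.
 */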
3063 * General service functions
3066 static inline long bnx2x_hilo(u32 *hiref)
3068 u32 lo = *(hiref + 1);
3069 #if (BITS_PER_LONG == 64)
3072 return HILO_U64(hi, lo);
3079 * Init service functions
3082 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3084 if (!bp->stats_pending) {
3085 struct eth_query_ramrod_data ramrod_data = {0};
3088 ramrod_data.drv_counter = bp->stats_counter++;
3089 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3090 for_each_queue(bp, i)
3091 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3093 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3094 ((u32 *)&ramrod_data)[1],
3095 ((u32 *)&ramrod_data)[0], 0);
3097    /* the stats ramrod has its own slot on the spq */
3099 bp->stats_pending = 1;
3104 static void bnx2x_stats_init(struct bnx2x *bp)
3106 int port = BP_PORT(bp);
3109 bp->stats_pending = 0;
3110 bp->executer_idx = 0;
3111 bp->stats_counter = 0;
3115 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3117 bp->port.port_stx = 0;
3118 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3120 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3121 bp->port.old_nig_stats.brb_discard =
3122 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3123 bp->port.old_nig_stats.brb_truncate =
3124 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3125 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3126 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3127 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3128 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3130 /* function stats */
3131 for_each_queue(bp, i) {
3132 struct bnx2x_fastpath *fp = &bp->fp[i];
3134 memset(&fp->old_tclient, 0,
3135 sizeof(struct tstorm_per_client_stats));
3136 memset(&fp->old_uclient, 0,
3137 sizeof(struct ustorm_per_client_stats));
3138 memset(&fp->old_xclient, 0,
3139 sizeof(struct xstorm_per_client_stats));
3140 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3143 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3144 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3146 bp->stats_state = STATS_STATE_DISABLED;
3147 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3148 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3151 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3153 struct dmae_command *dmae = &bp->stats_dmae;
3154 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3156 *stats_comp = DMAE_COMP_VAL;
3157 if (CHIP_REV_IS_SLOW(bp))
3161 if (bp->executer_idx) {
3162 int loader_idx = PMF_DMAE_C(bp);
3164 memset(dmae, 0, sizeof(struct dmae_command));
3166 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3167 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3168 DMAE_CMD_DST_RESET |
3170 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3172 DMAE_CMD_ENDIANITY_DW_SWAP |
3174 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3176 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3177 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3178 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3179 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3180 sizeof(struct dmae_command) *
3181 (loader_idx + 1)) >> 2;
3182 dmae->dst_addr_hi = 0;
3183 dmae->len = sizeof(struct dmae_command) >> 2;
3186 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3187 dmae->comp_addr_hi = 0;
3191 bnx2x_post_dmae(bp, dmae, loader_idx);
3193 } else if (bp->func_stx) {
3195 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3199 static int bnx2x_stats_comp(struct bnx2x *bp)
3201 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3205 while (*stats_comp != DMAE_COMP_VAL) {
3207 BNX2X_ERR("timeout waiting for stats finished\n");
3217 * Statistics service functions
3220 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3222 struct dmae_command *dmae;
3224 int loader_idx = PMF_DMAE_C(bp);
3225 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3228 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3229 BNX2X_ERR("BUG!\n");
3233 bp->executer_idx = 0;
3235 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3237 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3239 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3241 DMAE_CMD_ENDIANITY_DW_SWAP |
3243 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3244 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3246 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3247 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3248 dmae->src_addr_lo = bp->port.port_stx >> 2;
3249 dmae->src_addr_hi = 0;
3250 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3251 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3252 dmae->len = DMAE_LEN32_RD_MAX;
3253 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3254 dmae->comp_addr_hi = 0;
3257 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3258 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3259 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3260 dmae->src_addr_hi = 0;
3261 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3262 DMAE_LEN32_RD_MAX * 4);
3263 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3264 DMAE_LEN32_RD_MAX * 4);
3265 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3266 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3267 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3268 dmae->comp_val = DMAE_COMP_VAL;
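/* The port_stats block is larger than a single DMAE read allows, so the
 * transfer is split: the first command above copies DMAE_LEN32_RD_MAX
 * dwords and chains to the loader, and only the second, which completes
 * to stats_comp, copies the remaining
 * (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX dwords.
 */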
3271 bnx2x_hw_stats_post(bp);
3272 bnx2x_stats_comp(bp);
3275 static void bnx2x_port_stats_init(struct bnx2x *bp)
3277 struct dmae_command *dmae;
3278 int port = BP_PORT(bp);
3279 int vn = BP_E1HVN(bp);
3281 int loader_idx = PMF_DMAE_C(bp);
3283 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3286 if (!bp->link_vars.link_up || !bp->port.pmf) {
3287 BNX2X_ERR("BUG!\n");
3291 bp->executer_idx = 0;
3294 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3295 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3296 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3298 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3300 DMAE_CMD_ENDIANITY_DW_SWAP |
3302 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3303 (vn << DMAE_CMD_E1HVN_SHIFT));
3305 if (bp->port.port_stx) {
3307 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3308 dmae->opcode = opcode;
3309 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3310 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3311 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3312 dmae->dst_addr_hi = 0;
3313 dmae->len = sizeof(struct host_port_stats) >> 2;
3314 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3315 dmae->comp_addr_hi = 0;
3321 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322 dmae->opcode = opcode;
3323 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3324 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3325 dmae->dst_addr_lo = bp->func_stx >> 2;
3326 dmae->dst_addr_hi = 0;
3327 dmae->len = sizeof(struct host_func_stats) >> 2;
3328 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329 dmae->comp_addr_hi = 0;
3334 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3335 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3336 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3338 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3340 DMAE_CMD_ENDIANITY_DW_SWAP |
3342 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3343 (vn << DMAE_CMD_E1HVN_SHIFT));
3345 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3347 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3348 NIG_REG_INGRESS_BMAC0_MEM);
3350 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3351 BIGMAC_REGISTER_TX_STAT_GTBYT */
3352 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353 dmae->opcode = opcode;
3354 dmae->src_addr_lo = (mac_addr +
3355 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3356 dmae->src_addr_hi = 0;
3357 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3358 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3359 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3360 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3361 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3362 dmae->comp_addr_hi = 0;
3365 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3366 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3367 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368 dmae->opcode = opcode;
3369 dmae->src_addr_lo = (mac_addr +
3370 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3371 dmae->src_addr_hi = 0;
3372 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3373 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3374 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3375 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3376 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3377 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3378 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3379 dmae->comp_addr_hi = 0;
3382 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3384 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3386 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3387 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3388 dmae->opcode = opcode;
3389 dmae->src_addr_lo = (mac_addr +
3390 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3391 dmae->src_addr_hi = 0;
3392 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3393 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3394 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3395 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3396 dmae->comp_addr_hi = 0;
3399 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3400 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3401 dmae->opcode = opcode;
3402 dmae->src_addr_lo = (mac_addr +
3403 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3404 dmae->src_addr_hi = 0;
3405 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3406 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3407 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3408 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3410 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3411 dmae->comp_addr_hi = 0;
3414 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3415 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3416 dmae->opcode = opcode;
3417 dmae->src_addr_lo = (mac_addr +
3418 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3419 dmae->src_addr_hi = 0;
3420 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3421 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3422 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3423 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3424 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3425 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3426 dmae->comp_addr_hi = 0;
3431 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3432 dmae->opcode = opcode;
3433 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3434 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3435 dmae->src_addr_hi = 0;
3436 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3437 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3438 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3439 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3440 dmae->comp_addr_hi = 0;
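/* The NIG statistics are fetched in three pieces: the command above skips
 * the last 4 dwords of struct nig_stats, and the two commands that follow
 * copy egress_mac_pkt0 and egress_mac_pkt1 (2 dwords each), with only the
 * final one completing to stats_comp.
 */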
3443 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3444 dmae->opcode = opcode;
3445 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3446 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3447 dmae->src_addr_hi = 0;
3448 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3449 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3450 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3451 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3452 dmae->len = (2*sizeof(u32)) >> 2;
3453 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3454 dmae->comp_addr_hi = 0;
3457 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3458 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3459 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3460 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3462 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3464 DMAE_CMD_ENDIANITY_DW_SWAP |
3466 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3467 (vn << DMAE_CMD_E1HVN_SHIFT));
3468 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3469 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3470 dmae->src_addr_hi = 0;
3471 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3472 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3473 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3474 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3475 dmae->len = (2*sizeof(u32)) >> 2;
3476 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3477 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3478 dmae->comp_val = DMAE_COMP_VAL;
3483 static void bnx2x_func_stats_init(struct bnx2x *bp)
3485 struct dmae_command *dmae = &bp->stats_dmae;
3486 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3489 if (!bp->func_stx) {
3490 BNX2X_ERR("BUG!\n");
3494 bp->executer_idx = 0;
3495 memset(dmae, 0, sizeof(struct dmae_command));
3497 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3498 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3499 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3501 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3503 DMAE_CMD_ENDIANITY_DW_SWAP |
3505 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3506 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3507 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3508 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3509 dmae->dst_addr_lo = bp->func_stx >> 2;
3510 dmae->dst_addr_hi = 0;
3511 dmae->len = sizeof(struct host_func_stats) >> 2;
3512 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3513 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3514 dmae->comp_val = DMAE_COMP_VAL;
3519 static void bnx2x_stats_start(struct bnx2x *bp)
3522 bnx2x_port_stats_init(bp);
3524 else if (bp->func_stx)
3525 bnx2x_func_stats_init(bp);
3527 bnx2x_hw_stats_post(bp);
3528 bnx2x_storm_stats_post(bp);
3531 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3533 bnx2x_stats_comp(bp);
3534 bnx2x_stats_pmf_update(bp);
3535 bnx2x_stats_start(bp);
3538 static void bnx2x_stats_restart(struct bnx2x *bp)
3540 bnx2x_stats_comp(bp);
3541 bnx2x_stats_start(bp);
3544 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3546 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3547 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3548 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3554 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3555 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3556 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3557 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3558 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3559 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3560 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3561 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3562 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3563 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3564 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3565 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3566 UPDATE_STAT64(tx_stat_gt127,
3567 tx_stat_etherstatspkts65octetsto127octets);
3568 UPDATE_STAT64(tx_stat_gt255,
3569 tx_stat_etherstatspkts128octetsto255octets);
3570 UPDATE_STAT64(tx_stat_gt511,
3571 tx_stat_etherstatspkts256octetsto511octets);
3572 UPDATE_STAT64(tx_stat_gt1023,
3573 tx_stat_etherstatspkts512octetsto1023octets);
3574 UPDATE_STAT64(tx_stat_gt1518,
3575 tx_stat_etherstatspkts1024octetsto1522octets);
3576 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3577 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3578 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3579 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3580 UPDATE_STAT64(tx_stat_gterr,
3581 tx_stat_dot3statsinternalmactransmiterrors);
3582 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3584 estats->pause_frames_received_hi =
3585 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3586 estats->pause_frames_received_lo =
3587 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3589 estats->pause_frames_sent_hi =
3590 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3591 estats->pause_frames_sent_lo =
3592 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3595 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3597 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3598 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3599 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3601 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3602 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3603 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3604 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3605 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3606 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3607 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3608 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3609 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3610 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3611 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3612 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3613 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3614 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3615 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3616 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3617 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3618 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3619 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3620 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3621 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3622 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3623 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3624 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3626 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3627 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3628 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3629 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3630 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3631 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3633 estats->pause_frames_received_hi =
3634 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3635 estats->pause_frames_received_lo =
3636 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3637 ADD_64(estats->pause_frames_received_hi,
3638 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3639 estats->pause_frames_received_lo,
3640 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3642 estats->pause_frames_sent_hi =
3643 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3644 estats->pause_frames_sent_lo =
3645 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3646 ADD_64(estats->pause_frames_sent_hi,
3647 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3648 estats->pause_frames_sent_lo,
3649 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3652 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3654 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3655 struct nig_stats *old = &(bp->port.old_nig_stats);
3656 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3657 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3664 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3665 bnx2x_bmac_stats_update(bp);
3667 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3668 bnx2x_emac_stats_update(bp);
3670 else { /* unreached */
3671 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3675 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3676 new->brb_discard - old->brb_discard);
3677 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3678 new->brb_truncate - old->brb_truncate);
3680 UPDATE_STAT64_NIG(egress_mac_pkt0,
3681 etherstatspkts1024octetsto1522octets);
3682 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3684 memcpy(old, new, sizeof(struct nig_stats));
3686 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3687 sizeof(struct mac_stx));
3688 estats->brb_drop_hi = pstats->brb_drop_hi;
3689 estats->brb_drop_lo = pstats->brb_drop_lo;
3691 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3693 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3694 if (nig_timer_max != estats->nig_timer_max) {
3695 estats->nig_timer_max = nig_timer_max;
3696 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3702 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3704 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3705 struct tstorm_per_port_stats *tport =
3706 &stats->tstorm_common.port_statistics;
3707 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3708 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3711 memset(&(fstats->total_bytes_received_hi), 0,
3712 sizeof(struct host_func_stats) - 2*sizeof(u32));
3713 estats->error_bytes_received_hi = 0;
3714 estats->error_bytes_received_lo = 0;
3715 estats->etherstatsoverrsizepkts_hi = 0;
3716 estats->etherstatsoverrsizepkts_lo = 0;
3717 estats->no_buff_discard_hi = 0;
3718 estats->no_buff_discard_lo = 0;
3720 for_each_queue(bp, i) {
3721 struct bnx2x_fastpath *fp = &bp->fp[i];
3722 int cl_id = fp->cl_id;
3723 struct tstorm_per_client_stats *tclient =
3724 &stats->tstorm_common.client_statistics[cl_id];
3725 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3726 struct ustorm_per_client_stats *uclient =
3727 &stats->ustorm_common.client_statistics[cl_id];
3728 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3729 struct xstorm_per_client_stats *xclient =
3730 &stats->xstorm_common.client_statistics[cl_id];
3731 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3732 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3735 /* are storm stats valid? */
3736 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3737 bp->stats_counter) {
3738 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3739 " xstorm counter (%d) != stats_counter (%d)\n",
3740 i, xclient->stats_counter, bp->stats_counter);
3743 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3744 bp->stats_counter) {
3745 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3746 " tstorm counter (%d) != stats_counter (%d)\n",
3747 i, tclient->stats_counter, bp->stats_counter);
3750 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3751 bp->stats_counter) {
3752 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3753 " ustorm counter (%d) != stats_counter (%d)\n",
3754 i, uclient->stats_counter, bp->stats_counter);
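/* Example (added for clarity): bnx2x_storm_stats_post() stamped the last
 * query with drv_counter = bp->stats_counter and then incremented it, so
 * a storm block answering that query carries a stats_counter one below
 * the driver's value and passes the (counter + 1) checks above; older
 * blocks are treated as stale.
 */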
3758 qstats->total_bytes_received_hi =
3759 qstats->valid_bytes_received_hi =
3760 le32_to_cpu(tclient->total_rcv_bytes.hi);
3761 qstats->total_bytes_received_lo =
3762 qstats->valid_bytes_received_lo =
3763 le32_to_cpu(tclient->total_rcv_bytes.lo);
3765 qstats->error_bytes_received_hi =
3766 le32_to_cpu(tclient->rcv_error_bytes.hi);
3767 qstats->error_bytes_received_lo =
3768 le32_to_cpu(tclient->rcv_error_bytes.lo);
3770 ADD_64(qstats->total_bytes_received_hi,
3771 qstats->error_bytes_received_hi,
3772 qstats->total_bytes_received_lo,
3773 qstats->error_bytes_received_lo);
3775 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3776 total_unicast_packets_received);
3777 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3778 total_multicast_packets_received);
3779 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3780 total_broadcast_packets_received);
3781 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3782 etherstatsoverrsizepkts);
3783 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3785 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3786 total_unicast_packets_received);
3787 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3788 total_multicast_packets_received);
3789 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3790 total_broadcast_packets_received);
3791 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3792 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3793 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3795 qstats->total_bytes_transmitted_hi =
3796 le32_to_cpu(xclient->total_sent_bytes.hi);
3797 qstats->total_bytes_transmitted_lo =
3798 le32_to_cpu(xclient->total_sent_bytes.lo);
3800 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3801 total_unicast_packets_transmitted);
3802 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3803 total_multicast_packets_transmitted);
3804 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3805 total_broadcast_packets_transmitted);
3807 old_tclient->checksum_discard = tclient->checksum_discard;
3808 old_tclient->ttl0_discard = tclient->ttl0_discard;
3810 ADD_64(fstats->total_bytes_received_hi,
3811 qstats->total_bytes_received_hi,
3812 fstats->total_bytes_received_lo,
3813 qstats->total_bytes_received_lo);
3814 ADD_64(fstats->total_bytes_transmitted_hi,
3815 qstats->total_bytes_transmitted_hi,
3816 fstats->total_bytes_transmitted_lo,
3817 qstats->total_bytes_transmitted_lo);
3818 ADD_64(fstats->total_unicast_packets_received_hi,
3819 qstats->total_unicast_packets_received_hi,
3820 fstats->total_unicast_packets_received_lo,
3821 qstats->total_unicast_packets_received_lo);
3822 ADD_64(fstats->total_multicast_packets_received_hi,
3823 qstats->total_multicast_packets_received_hi,
3824 fstats->total_multicast_packets_received_lo,
3825 qstats->total_multicast_packets_received_lo);
3826 ADD_64(fstats->total_broadcast_packets_received_hi,
3827 qstats->total_broadcast_packets_received_hi,
3828 fstats->total_broadcast_packets_received_lo,
3829 qstats->total_broadcast_packets_received_lo);
3830 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3831 qstats->total_unicast_packets_transmitted_hi,
3832 fstats->total_unicast_packets_transmitted_lo,
3833 qstats->total_unicast_packets_transmitted_lo);
3834 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3835 qstats->total_multicast_packets_transmitted_hi,
3836 fstats->total_multicast_packets_transmitted_lo,
3837 qstats->total_multicast_packets_transmitted_lo);
3838 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3839 qstats->total_broadcast_packets_transmitted_hi,
3840 fstats->total_broadcast_packets_transmitted_lo,
3841 qstats->total_broadcast_packets_transmitted_lo);
3842 ADD_64(fstats->valid_bytes_received_hi,
3843 qstats->valid_bytes_received_hi,
3844 fstats->valid_bytes_received_lo,
3845 qstats->valid_bytes_received_lo);
3847 ADD_64(estats->error_bytes_received_hi,
3848 qstats->error_bytes_received_hi,
3849 estats->error_bytes_received_lo,
3850 qstats->error_bytes_received_lo);
3851 ADD_64(estats->etherstatsoverrsizepkts_hi,
3852 qstats->etherstatsoverrsizepkts_hi,
3853 estats->etherstatsoverrsizepkts_lo,
3854 qstats->etherstatsoverrsizepkts_lo);
3855 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3856 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3859 ADD_64(fstats->total_bytes_received_hi,
3860 estats->rx_stat_ifhcinbadoctets_hi,
3861 fstats->total_bytes_received_lo,
3862 estats->rx_stat_ifhcinbadoctets_lo);
3864 memcpy(estats, &(fstats->total_bytes_received_hi),
3865 sizeof(struct host_func_stats) - 2*sizeof(u32));
3867 ADD_64(estats->etherstatsoverrsizepkts_hi,
3868 estats->rx_stat_dot3statsframestoolong_hi,
3869 estats->etherstatsoverrsizepkts_lo,
3870 estats->rx_stat_dot3statsframestoolong_lo);
3871 ADD_64(estats->error_bytes_received_hi,
3872 estats->rx_stat_ifhcinbadoctets_hi,
3873 estats->error_bytes_received_lo,
3874 estats->rx_stat_ifhcinbadoctets_lo);
3877 estats->mac_filter_discard =
3878 le32_to_cpu(tport->mac_filter_discard);
3879 estats->xxoverflow_discard =
3880 le32_to_cpu(tport->xxoverflow_discard);
3881 estats->brb_truncate_discard =
3882 le32_to_cpu(tport->brb_truncate_discard);
3883 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3886 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3888 bp->stats_pending = 0;
3893 static void bnx2x_net_stats_update(struct bnx2x *bp)
3895 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3896 struct net_device_stats *nstats = &bp->dev->stats;
3899 nstats->rx_packets =
3900 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3901 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3902 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3904 nstats->tx_packets =
3905 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3906 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3907 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3909 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3911 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3913 nstats->rx_dropped = estats->mac_discard;
3914 for_each_queue(bp, i)
3915 nstats->rx_dropped +=
3916 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3918 nstats->tx_dropped = 0;
3921 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3923 nstats->collisions =
3924 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3926 nstats->rx_length_errors =
3927 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3928 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3929 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3930 bnx2x_hilo(&estats->brb_truncate_hi);
3931 nstats->rx_crc_errors =
3932 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3933 nstats->rx_frame_errors =
3934 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3935 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3936 nstats->rx_missed_errors = estats->xxoverflow_discard;
3938 nstats->rx_errors = nstats->rx_length_errors +
3939 nstats->rx_over_errors +
3940 nstats->rx_crc_errors +
3941 nstats->rx_frame_errors +
3942 nstats->rx_fifo_errors +
3943 nstats->rx_missed_errors;
3945 nstats->tx_aborted_errors =
3946 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3947 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3948 nstats->tx_carrier_errors =
3949 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3950 nstats->tx_fifo_errors = 0;
3951 nstats->tx_heartbeat_errors = 0;
3952 nstats->tx_window_errors = 0;
3954 nstats->tx_errors = nstats->tx_aborted_errors +
3955 nstats->tx_carrier_errors +
3956 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3959 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3961 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3964 estats->driver_xoff = 0;
3965 estats->rx_err_discard_pkt = 0;
3966 estats->rx_skb_alloc_failed = 0;
3967 estats->hw_csum_err = 0;
3968 for_each_queue(bp, i) {
3969 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3971 estats->driver_xoff += qstats->driver_xoff;
3972 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3973 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3974 estats->hw_csum_err += qstats->hw_csum_err;
3978 static void bnx2x_stats_update(struct bnx2x *bp)
3980 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3982 if (*stats_comp != DMAE_COMP_VAL)
3986 bnx2x_hw_stats_update(bp);
3988 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3989    BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3994 bnx2x_net_stats_update(bp);
3995 bnx2x_drv_stats_update(bp);
3997 if (bp->msglevel & NETIF_MSG_TIMER) {
3998 struct tstorm_per_client_stats *old_tclient =
3999 &bp->fp->old_tclient;
4000 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4001 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4002 struct net_device_stats *nstats = &bp->dev->stats;
4005 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4006 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4008 bnx2x_tx_avail(bp->fp),
4009 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4010 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4012 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4013 bp->fp->rx_comp_cons),
4014 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4015 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4016 "brb truncate %u\n",
4017 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4018 qstats->driver_xoff,
4019 estats->brb_drop_lo, estats->brb_truncate_lo);
4020 printk(KERN_DEBUG "tstats: checksum_discard %u "
4021 "packets_too_big_discard %lu no_buff_discard %lu "
4022 "mac_discard %u mac_filter_discard %u "
4023 "xxovrflow_discard %u brb_truncate_discard %u "
4024 "ttl0_discard %u\n",
4025 le32_to_cpu(old_tclient->checksum_discard),
4026 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4027 bnx2x_hilo(&qstats->no_buff_discard_hi),
4028 estats->mac_discard, estats->mac_filter_discard,
4029 estats->xxoverflow_discard, estats->brb_truncate_discard,
4030 le32_to_cpu(old_tclient->ttl0_discard));
4032 for_each_queue(bp, i) {
4033 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4034 bnx2x_fp(bp, i, tx_pkt),
4035 bnx2x_fp(bp, i, rx_pkt),
4036 bnx2x_fp(bp, i, rx_calls));
4040 bnx2x_hw_stats_post(bp);
4041 bnx2x_storm_stats_post(bp);
4044 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4046 struct dmae_command *dmae;
4048 int loader_idx = PMF_DMAE_C(bp);
4049 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4051 bp->executer_idx = 0;
4053 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4055 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4057 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4059 DMAE_CMD_ENDIANITY_DW_SWAP |
4061 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4062 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4064 if (bp->port.port_stx) {
4066 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4068 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4070 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4071 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4072 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4073 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4074 dmae->dst_addr_hi = 0;
4075 dmae->len = sizeof(struct host_port_stats) >> 2;
4077 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4078 dmae->comp_addr_hi = 0;
4081 dmae->comp_addr_lo =
4082 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4083 dmae->comp_addr_hi =
4084 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4085 dmae->comp_val = DMAE_COMP_VAL;
4093 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4095 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4096 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4097 dmae->dst_addr_lo = bp->func_stx >> 2;
4098 dmae->dst_addr_hi = 0;
4099 dmae->len = sizeof(struct host_func_stats) >> 2;
4100 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4101 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4102 dmae->comp_val = DMAE_COMP_VAL;
4108 static void bnx2x_stats_stop(struct bnx2x *bp)
4112 bnx2x_stats_comp(bp);
4115 update = (bnx2x_hw_stats_update(bp) == 0);
4117 update |= (bnx2x_storm_stats_update(bp) == 0);
4120 bnx2x_net_stats_update(bp);
4123 bnx2x_port_stats_stop(bp);
4125 bnx2x_hw_stats_post(bp);
4126 bnx2x_stats_comp(bp);
4130 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4134 static const struct {
4135 void (*action)(struct bnx2x *bp);
4136 enum bnx2x_stats_state next_state;
4137 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4140 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4141 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4142 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4143 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4146 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4147 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4148 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4149 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4153 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4155 enum bnx2x_stats_state state = bp->stats_state;
4157 bnx2x_stats_stm[state][event].action(bp);
4158 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4160 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4161 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4162 state, event, bp->stats_state);
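/* Example walk through the table above: in STATS_STATE_ENABLED a
 * STATS_EVENT_UPDATE runs bnx2x_stats_update() and stays in ENABLED,
 * while a STATS_EVENT_STOP runs bnx2x_stats_stop() and moves the machine
 * to STATS_STATE_DISABLED.
 */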
4165 static void bnx2x_timer(unsigned long data)
4167 struct bnx2x *bp = (struct bnx2x *) data;
4169 if (!netif_running(bp->dev))
4172 if (atomic_read(&bp->intr_sem) != 0)
4176 struct bnx2x_fastpath *fp = &bp->fp[0];
4179 bnx2x_tx_int(fp, 1000);
4180 rc = bnx2x_rx_int(fp, 1000);
4183 if (!BP_NOMCP(bp)) {
4184 int func = BP_FUNC(bp);
4188 ++bp->fw_drv_pulse_wr_seq;
4189 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4190 /* TBD - add SYSTEM_TIME */
4191 drv_pulse = bp->fw_drv_pulse_wr_seq;
4192 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4194 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4195 MCP_PULSE_SEQ_MASK);
4196 /* The delta between driver pulse and mcp response
4197 * should be 1 (before mcp response) or 0 (after mcp response)
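* e.g. (illustration only): with drv_pulse 0x0010, an mcp_pulse of
* 0x0010 (delta 0) or 0x000f (delta 1) is healthy; any other value
* means a heartbeat was missed and the error below fires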
4199 if ((drv_pulse != mcp_pulse) &&
4200 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4201 /* someone lost a heartbeat... */
4202 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4203 drv_pulse, mcp_pulse);
4207 if ((bp->state == BNX2X_STATE_OPEN) ||
4208 (bp->state == BNX2X_STATE_DISABLED))
4209 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4212 mod_timer(&bp->timer, jiffies + bp->current_interval);
4215 /* end of Statistics */
4220 * nic init service functions
4223 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4225 int port = BP_PORT(bp);
4227 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4228 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4229 sizeof(struct ustorm_status_block)/4);
4230 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4231 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4232 sizeof(struct cstorm_status_block)/4);
4235 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4236 dma_addr_t mapping, int sb_id)
4238 int port = BP_PORT(bp);
4239 int func = BP_FUNC(bp);
4244 section = ((u64)mapping) + offsetof(struct host_status_block,
4246 sb->u_status_block.status_block_id = sb_id;
4248 REG_WR(bp, BAR_USTRORM_INTMEM +
4249 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4250 REG_WR(bp, BAR_USTRORM_INTMEM +
4251 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4253 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4254 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4256 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4257 REG_WR16(bp, BAR_USTRORM_INTMEM +
4258 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4261 section = ((u64)mapping) + offsetof(struct host_status_block,
4263 sb->c_status_block.status_block_id = sb_id;
4265 REG_WR(bp, BAR_CSTRORM_INTMEM +
4266 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4267 REG_WR(bp, BAR_CSTRORM_INTMEM +
4268 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4270 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4271 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4273 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4274 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4275 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4277 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4280 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4282 int func = BP_FUNC(bp);
4284 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4285 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4286 sizeof(struct ustorm_def_status_block)/4);
4287 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4288 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4289 sizeof(struct cstorm_def_status_block)/4);
4290 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4291 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4292 sizeof(struct xstorm_def_status_block)/4);
4293 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4294 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4295 sizeof(struct tstorm_def_status_block)/4);
4298 static void bnx2x_init_def_sb(struct bnx2x *bp,
4299 struct host_def_status_block *def_sb,
4300 dma_addr_t mapping, int sb_id)
4302 int port = BP_PORT(bp);
4303 int func = BP_FUNC(bp);
4304 int index, val, reg_offset;
4308 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4309 atten_status_block);
4310 def_sb->atten_status_block.status_block_id = sb_id;
4314 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4315 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4317 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4318 bp->attn_group[index].sig[0] = REG_RD(bp,
4319 reg_offset + 0x10*index);
4320 bp->attn_group[index].sig[1] = REG_RD(bp,
4321 reg_offset + 0x4 + 0x10*index);
4322 bp->attn_group[index].sig[2] = REG_RD(bp,
4323 reg_offset + 0x8 + 0x10*index);
4324 bp->attn_group[index].sig[3] = REG_RD(bp,
4325 reg_offset + 0xc + 0x10*index);
4328 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4329 HC_REG_ATTN_MSG0_ADDR_L);
4331 REG_WR(bp, reg_offset, U64_LO(section));
4332 REG_WR(bp, reg_offset + 4, U64_HI(section));
4334 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4336 val = REG_RD(bp, reg_offset);
4338 REG_WR(bp, reg_offset, val);
4341 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4342 u_def_status_block);
4343 def_sb->u_def_status_block.status_block_id = sb_id;
4345 REG_WR(bp, BAR_USTRORM_INTMEM +
4346 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4347 REG_WR(bp, BAR_USTRORM_INTMEM +
4348 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4350 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4351 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4353 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4354 REG_WR16(bp, BAR_USTRORM_INTMEM +
4355 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4358 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4359 c_def_status_block);
4360 def_sb->c_def_status_block.status_block_id = sb_id;
4362 REG_WR(bp, BAR_CSTRORM_INTMEM +
4363 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4364 REG_WR(bp, BAR_CSTRORM_INTMEM +
4365 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4367 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4368 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4370 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4371 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4372 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4375 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4376 t_def_status_block);
4377 def_sb->t_def_status_block.status_block_id = sb_id;
4379 REG_WR(bp, BAR_TSTRORM_INTMEM +
4380 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4381 REG_WR(bp, BAR_TSTRORM_INTMEM +
4382 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4384 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4385 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4387 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4388 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4389 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4392 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4393 x_def_status_block);
4394 def_sb->x_def_status_block.status_block_id = sb_id;
4396 REG_WR(bp, BAR_XSTRORM_INTMEM +
4397 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4398 REG_WR(bp, BAR_XSTRORM_INTMEM +
4399 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4401 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4402 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4404 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4405 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4406 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4408 bp->stats_pending = 0;
4409 bp->set_mac_pending = 0;
4411 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4414 static void bnx2x_update_coalesce(struct bnx2x *bp)
4416 int port = BP_PORT(bp);
4419 for_each_queue(bp, i) {
4420 int sb_id = bp->fp[i].sb_id;
4422 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4423 REG_WR8(bp, BAR_USTRORM_INTMEM +
4424 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4425 U_SB_ETH_RX_CQ_INDEX),
4427 REG_WR16(bp, BAR_USTRORM_INTMEM +
4428 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4429 U_SB_ETH_RX_CQ_INDEX),
4430 bp->rx_ticks ? 0 : 1);
4432 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4433 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4434 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4435 C_SB_ETH_TX_CQ_INDEX),
4437 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4438 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4439 C_SB_ETH_TX_CQ_INDEX),
4440 bp->tx_ticks ? 0 : 1);
4444 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4445 struct bnx2x_fastpath *fp, int last)
4449 for (i = 0; i < last; i++) {
4450 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4451 struct sk_buff *skb = rx_buf->skb;
4454 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4458 if (fp->tpa_state[i] == BNX2X_TPA_START)
4459 pci_unmap_single(bp->pdev,
4460 pci_unmap_addr(rx_buf, mapping),
4461 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4468 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4470 int func = BP_FUNC(bp);
4471 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4472 ETH_MAX_AGGREGATION_QUEUES_E1H;
4473 u16 ring_prod, cqe_ring_prod;
4476 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4478 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4480 if (bp->flags & TPA_ENABLE_FLAG) {
4482 for_each_rx_queue(bp, j) {
4483 struct bnx2x_fastpath *fp = &bp->fp[j];
4485 for (i = 0; i < max_agg_queues; i++) {
4486 fp->tpa_pool[i].skb =
4487 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4488 if (!fp->tpa_pool[i].skb) {
4489 BNX2X_ERR("Failed to allocate TPA "
4490 "skb pool for queue[%d] - "
4491 "disabling TPA on this "
4493 bnx2x_free_tpa_pool(bp, fp, i);
4494 fp->disable_tpa = 1;
4497 pci_unmap_addr_set((struct sw_rx_bd *)
4498 &fp->tpa_pool[i],
4500 fp->tpa_state[i] = BNX2X_TPA_STOP;
4505 for_each_rx_queue(bp, j) {
4506 struct bnx2x_fastpath *fp = &bp->fp[j];
4509 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4510 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4512 /* "next page" elements initialization */
4514 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4515 struct eth_rx_sge *sge;
4517 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4519 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4520 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4522 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4523 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4526 bnx2x_init_sge_ring_bit_mask(fp);
4529 for (i = 1; i <= NUM_RX_RINGS; i++) {
4530 struct eth_rx_bd *rx_bd;
4532 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4534 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4535 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4537 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4538 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4542 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4543 struct eth_rx_cqe_next_page *nextpg;
4545 nextpg = (struct eth_rx_cqe_next_page *)
4546 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4548 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4549 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4551 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4552 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
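/* note: the "next page" loops above chain the SGE, RX BD and RCQ ring
* pages into circles - the last element(s) of page i-1 point to page
* (i % num_pages), so the final page wraps back to the first one */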
4555 /* Allocate SGEs and initialize the ring elements */
4556 for (i = 0, ring_prod = 0;
4557 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4559 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4560 BNX2X_ERR("was only able to allocate "
4562 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4563 /* Cleanup already allocated elements */
4564 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4565 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4566 fp->disable_tpa = 1;
4570 ring_prod = NEXT_SGE_IDX(ring_prod);
4572 fp->rx_sge_prod = ring_prod;
4574 /* Allocate BDs and initialize BD ring */
4575 fp->rx_comp_cons = 0;
4576 cqe_ring_prod = ring_prod = 0;
4577 for (i = 0; i < bp->rx_ring_size; i++) {
4578 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4579 BNX2X_ERR("was only able to allocate "
4580 "%d rx skbs on queue[%d]\n", i, j);
4581 fp->eth_q_stats.rx_skb_alloc_failed++;
4584 ring_prod = NEXT_RX_IDX(ring_prod);
4585 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4586 WARN_ON(ring_prod <= i);
4589 fp->rx_bd_prod = ring_prod;
4590 /* must not have more available CQEs than BDs */
4591 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4593 fp->rx_pkt = fp->rx_calls = 0;
4596 * this will generate an interrupt (to the TSTORM)
4597 * must only be done after chip is initialized
4599 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4604 REG_WR(bp, BAR_USTRORM_INTMEM +
4605 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4606 U64_LO(fp->rx_comp_mapping));
4607 REG_WR(bp, BAR_USTRORM_INTMEM +
4608 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4609 U64_HI(fp->rx_comp_mapping));
4613 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4617 for_each_tx_queue(bp, j) {
4618 struct bnx2x_fastpath *fp = &bp->fp[j];
4620 for (i = 1; i <= NUM_TX_RINGS; i++) {
4621 struct eth_tx_bd *tx_bd =
4622 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4625 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4626 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4628 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4629 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4632 fp->tx_pkt_prod = 0;
4633 fp->tx_pkt_cons = 0;
4636 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4641 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4643 int func = BP_FUNC(bp);
4645 spin_lock_init(&bp->spq_lock);
4647 bp->spq_left = MAX_SPQ_PENDING;
4648 bp->spq_prod_idx = 0;
4649 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4650 bp->spq_prod_bd = bp->spq;
4651 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4653 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4654 U64_LO(bp->spq_mapping));
4656 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4657 U64_HI(bp->spq_mapping));
4659 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4663 static void bnx2x_init_context(struct bnx2x *bp)
4667 for_each_queue(bp, i) {
4668 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4669 struct bnx2x_fastpath *fp = &bp->fp[i];
4670 u8 cl_id = fp->cl_id;
4671 u8 sb_id = fp->sb_id;
4673 context->ustorm_st_context.common.sb_index_numbers =
4674 BNX2X_RX_SB_INDEX_NUM;
4675 context->ustorm_st_context.common.clientId = cl_id;
4676 context->ustorm_st_context.common.status_block_id = sb_id;
4677 context->ustorm_st_context.common.flags =
4678 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4679 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4680 context->ustorm_st_context.common.statistics_counter_id =
4682 context->ustorm_st_context.common.mc_alignment_log_size =
4683 BNX2X_RX_ALIGN_SHIFT;
4684 context->ustorm_st_context.common.bd_buff_size =
4686 context->ustorm_st_context.common.bd_page_base_hi =
4687 U64_HI(fp->rx_desc_mapping);
4688 context->ustorm_st_context.common.bd_page_base_lo =
4689 U64_LO(fp->rx_desc_mapping);
4690 if (!fp->disable_tpa) {
4691 context->ustorm_st_context.common.flags |=
4692 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4693 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4694 context->ustorm_st_context.common.sge_buff_size =
4695 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4697 context->ustorm_st_context.common.sge_page_base_hi =
4698 U64_HI(fp->rx_sge_mapping);
4699 context->ustorm_st_context.common.sge_page_base_lo =
4700 U64_LO(fp->rx_sge_mapping);
4703 context->ustorm_ag_context.cdu_usage =
4704 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4705 CDU_REGION_NUMBER_UCM_AG,
4706 ETH_CONNECTION_TYPE);
4708 context->xstorm_st_context.tx_bd_page_base_hi =
4709 U64_HI(fp->tx_desc_mapping);
4710 context->xstorm_st_context.tx_bd_page_base_lo =
4711 U64_LO(fp->tx_desc_mapping);
4712 context->xstorm_st_context.db_data_addr_hi =
4713 U64_HI(fp->tx_prods_mapping);
4714 context->xstorm_st_context.db_data_addr_lo =
4715 U64_LO(fp->tx_prods_mapping);
4716 context->xstorm_st_context.statistics_data = (cl_id |
4717 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4718 context->cstorm_st_context.sb_index_number =
4719 C_SB_ETH_TX_CQ_INDEX;
4720 context->cstorm_st_context.status_block_id = sb_id;
4722 context->xstorm_ag_context.cdu_reserved =
4723 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4724 CDU_REGION_NUMBER_XCM_AG,
4725 ETH_CONNECTION_TYPE);
4729 static void bnx2x_init_ind_table(struct bnx2x *bp)
4731 int func = BP_FUNC(bp);
4734 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4738 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4739 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4740 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4741 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4742 bp->fp->cl_id + (i % bp->num_rx_queues));
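/* illustration only: with e.g. 4 RX queues and a leading cl_id of 0,
* the table entries cycle 0,1,2,3,0,1,... so the RSS hash spreads
* flows evenly over the RX queues' client ids */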
4745 static void bnx2x_set_client_config(struct bnx2x *bp)
4747 struct tstorm_eth_client_config tstorm_client = {0};
4748 int port = BP_PORT(bp);
4751 tstorm_client.mtu = bp->dev->mtu;
4752 tstorm_client.config_flags =
4753 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4754 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4756 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4757 tstorm_client.config_flags |=
4758 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4759 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4763 if (bp->flags & TPA_ENABLE_FLAG) {
4764 tstorm_client.max_sges_for_packet =
4765 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4766 tstorm_client.max_sges_for_packet =
4767 ((tstorm_client.max_sges_for_packet +
4768 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4769 PAGES_PER_SGE_SHIFT;
4771 tstorm_client.config_flags |=
4772 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4775 for_each_queue(bp, i) {
4776 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4778 REG_WR(bp, BAR_TSTRORM_INTMEM +
4779 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4780 ((u32 *)&tstorm_client)[0]);
4781 REG_WR(bp, BAR_TSTRORM_INTMEM +
4782 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4783 ((u32 *)&tstorm_client)[1]);
4786 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4787 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4790 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4792 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4793 int mode = bp->rx_mode;
4794 int mask = (1 << BP_L_ID(bp));
4795 int func = BP_FUNC(bp);
4798 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
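/* note: the mask built above is a single bit - (1 << BP_L_ID(bp)) -
* so each drop_all/accept_all field set below only affects this
* function's client */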
4801 case BNX2X_RX_MODE_NONE: /* no Rx */
4802 tstorm_mac_filter.ucast_drop_all = mask;
4803 tstorm_mac_filter.mcast_drop_all = mask;
4804 tstorm_mac_filter.bcast_drop_all = mask;
4807 case BNX2X_RX_MODE_NORMAL:
4808 tstorm_mac_filter.bcast_accept_all = mask;
4811 case BNX2X_RX_MODE_ALLMULTI:
4812 tstorm_mac_filter.mcast_accept_all = mask;
4813 tstorm_mac_filter.bcast_accept_all = mask;
4816 case BNX2X_RX_MODE_PROMISC:
4817 tstorm_mac_filter.ucast_accept_all = mask;
4818 tstorm_mac_filter.mcast_accept_all = mask;
4819 tstorm_mac_filter.bcast_accept_all = mask;
4823 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4827 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4828 REG_WR(bp, BAR_TSTRORM_INTMEM +
4829 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4830 ((u32 *)&tstorm_mac_filter)[i]);
4832 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4833 ((u32 *)&tstorm_mac_filter)[i]); */
4836 if (mode != BNX2X_RX_MODE_NONE)
4837 bnx2x_set_client_config(bp);
4840 static void bnx2x_init_internal_common(struct bnx2x *bp)
4844 if (bp->flags & TPA_ENABLE_FLAG) {
4845 struct tstorm_eth_tpa_exist tpa = {0};
4849 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4851 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4855 /* Zero this manually as its initialization is
4856 currently missing in the initTool */
4857 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4858 REG_WR(bp, BAR_USTRORM_INTMEM +
4859 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4862 static void bnx2x_init_internal_port(struct bnx2x *bp)
4864 int port = BP_PORT(bp);
4866 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4867 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4868 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4869 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4872 /* Calculates the sum of vn_min_rates.
4873 It's needed for further normalizing of the min_rates.
4875 sum of vn_min_rates.
4877 0 - if all the min_rates are 0.
4878 In the latter case the fairness algorithm should be deactivated.
4879 If not all min_rates are zero then those that are zeroes will be set to 1.
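Example (illustration only): configured minimums of 0, 25, 0 and 75
become 0, 2500, 0 and 7500 after the *100 scaling; the zero entries are
then bumped to DEF_MIN_RATE, so the sum is non-zero and fairness stays
enabled. Only when every minimum is zero does vn_weight_sum remain 0
and fairness get disabled.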
4881 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4884 int port = BP_PORT(bp);
4887 bp->vn_weight_sum = 0;
4888 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4889 int func = 2*vn + port;
4891 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4892 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4893 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4895 /* Skip hidden vns */
4896 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4899 /* If min rate is zero - set it to 1 */
4901 vn_min_rate = DEF_MIN_RATE;
4905 bp->vn_weight_sum += vn_min_rate;
4908 /* ... only if all min rates are zeros - disable fairness */
4910 bp->vn_weight_sum = 0;
4913 static void bnx2x_init_internal_func(struct bnx2x *bp)
4915 struct tstorm_eth_function_common_config tstorm_config = {0};
4916 struct stats_indication_flags stats_flags = {0};
4917 int port = BP_PORT(bp);
4918 int func = BP_FUNC(bp);
4924 tstorm_config.config_flags = MULTI_FLAGS(bp);
4925 tstorm_config.rss_result_mask = MULTI_MASK;
4928 tstorm_config.config_flags |=
4929 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4931 tstorm_config.leading_client_id = BP_L_ID(bp);
4933 REG_WR(bp, BAR_TSTRORM_INTMEM +
4934 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4935 (*(u32 *)&tstorm_config));
4937 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4938 bnx2x_set_storm_rx_mode(bp);
4940 for_each_queue(bp, i) {
4941 u8 cl_id = bp->fp[i].cl_id;
4943 /* reset xstorm per client statistics */
4944 offset = BAR_XSTRORM_INTMEM +
4945 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4947 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4948 REG_WR(bp, offset + j*4, 0);
4950 /* reset tstorm per client statistics */
4951 offset = BAR_TSTRORM_INTMEM +
4952 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4954 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4955 REG_WR(bp, offset + j*4, 0);
4957 /* reset ustorm per client statistics */
4958 offset = BAR_USTRORM_INTMEM +
4959 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4961 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4962 REG_WR(bp, offset + j*4, 0);
4965 /* Init statistics related context */
4966 stats_flags.collect_eth = 1;
4968 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4969 ((u32 *)&stats_flags)[0]);
4970 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4971 ((u32 *)&stats_flags)[1]);
4973 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4974 ((u32 *)&stats_flags)[0]);
4975 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4976 ((u32 *)&stats_flags)[1]);
4978 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4979 ((u32 *)&stats_flags)[0]);
4980 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4981 ((u32 *)&stats_flags)[1]);
4983 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4984 ((u32 *)&stats_flags)[0]);
4985 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4986 ((u32 *)&stats_flags)[1]);
4988 REG_WR(bp, BAR_XSTRORM_INTMEM +
4989 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4990 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4991 REG_WR(bp, BAR_XSTRORM_INTMEM +
4992 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4993 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4995 REG_WR(bp, BAR_TSTRORM_INTMEM +
4996 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998 REG_WR(bp, BAR_TSTRORM_INTMEM +
4999 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5002 REG_WR(bp, BAR_USTRORM_INTMEM +
5003 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5004 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5005 REG_WR(bp, BAR_USTRORM_INTMEM +
5006 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5007 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5009 if (CHIP_IS_E1H(bp)) {
5010 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5012 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5014 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5016 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5019 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5023 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5025 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5026 SGE_PAGE_SIZE * PAGES_PER_SGE),
5028 for_each_rx_queue(bp, i) {
5029 struct bnx2x_fastpath *fp = &bp->fp[i];
5031 REG_WR(bp, BAR_USTRORM_INTMEM +
5032 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5033 U64_LO(fp->rx_comp_mapping));
5034 REG_WR(bp, BAR_USTRORM_INTMEM +
5035 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5036 U64_HI(fp->rx_comp_mapping));
5038 REG_WR16(bp, BAR_USTRORM_INTMEM +
5039 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5043 /* dropless flow control */
5044 if (CHIP_IS_E1H(bp)) {
5045 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5047 rx_pause.bd_thr_low = 250;
5048 rx_pause.cqe_thr_low = 250;
5050 rx_pause.sge_thr_low = 0;
5051 rx_pause.bd_thr_high = 350;
5052 rx_pause.cqe_thr_high = 350;
5053 rx_pause.sge_thr_high = 0;
5055 for_each_rx_queue(bp, i) {
5056 struct bnx2x_fastpath *fp = &bp->fp[i];
5058 if (!fp->disable_tpa) {
5059 rx_pause.sge_thr_low = 150;
5060 rx_pause.sge_thr_high = 250;
5064 offset = BAR_USTRORM_INTMEM +
5065 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5068 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5070 REG_WR(bp, offset + j*4,
5071 ((u32 *)&rx_pause)[j]);
5075 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5077 /* Init rate shaping and fairness contexts */
5081 /* During init there is no active link.
5082 Until link is up, set link rate to 10Gbps */
5083 bp->link_vars.line_speed = SPEED_10000;
5084 bnx2x_init_port_minmax(bp);
5086 bnx2x_calc_vn_weight_sum(bp);
5088 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5089 bnx2x_init_vn_minmax(bp, 2*vn + port);
5091 /* Enable rate shaping and fairness */
5092 bp->cmng.flags.cmng_enables =
5093 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5094 if (bp->vn_weight_sum)
5095 bp->cmng.flags.cmng_enables |=
5096 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5098 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5099 " fairness will be disabled\n");
5101 /* rate shaping and fairness are disabled */
5103 "single function mode minmax will be disabled\n");
5107 /* Store it to internal memory */
5109 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5110 REG_WR(bp, BAR_XSTRORM_INTMEM +
5111 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5112 ((u32 *)(&bp->cmng))[i]);
5115 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5117 switch (load_code) {
5118 case FW_MSG_CODE_DRV_LOAD_COMMON:
5119 bnx2x_init_internal_common(bp);
5122 case FW_MSG_CODE_DRV_LOAD_PORT:
5123 bnx2x_init_internal_port(bp);
5126 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5127 bnx2x_init_internal_func(bp);
5131 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5136 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5140 for_each_queue(bp, i) {
5141 struct bnx2x_fastpath *fp = &bp->fp[i];
5144 fp->state = BNX2X_FP_STATE_CLOSED;
5146 fp->cl_id = BP_L_ID(bp) + i;
5147 fp->sb_id = fp->cl_id;
5149 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5150 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5151 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5153 bnx2x_update_fpsb_idx(fp);
5156 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5158 bnx2x_update_dsb_idx(bp);
5159 bnx2x_update_coalesce(bp);
5160 bnx2x_init_rx_rings(bp);
5161 bnx2x_init_tx_ring(bp);
5162 bnx2x_init_sp_ring(bp);
5163 bnx2x_init_context(bp);
5164 bnx2x_init_internal(bp, load_code);
5165 bnx2x_init_ind_table(bp);
5166 bnx2x_stats_init(bp);
5168 /* At this point, we are ready for interrupts */
5169 atomic_set(&bp->intr_sem, 0);
5171 /* flush all before enabling interrupts */
5175 bnx2x_int_enable(bp);
5178 /* end of nic init */
5181 * gzip service functions
5184 static int bnx2x_gunzip_init(struct bnx2x *bp)
5186 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5187 &bp->gunzip_mapping);
5188 if (bp->gunzip_buf == NULL)
5191 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5192 if (bp->strm == NULL)
5195 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5197 if (bp->strm->workspace == NULL)
5207 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5208 bp->gunzip_mapping);
5209 bp->gunzip_buf = NULL;
5212 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5213 " un-compression\n", bp->dev->name);
5217 static void bnx2x_gunzip_end(struct bnx2x *bp)
5219 kfree(bp->strm->workspace);
5224 if (bp->gunzip_buf) {
5225 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5226 bp->gunzip_mapping);
5227 bp->gunzip_buf = NULL;
5231 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5235 /* check gzip header */
5236 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5243 if (zbuf[3] & FNAME)
5244 while ((zbuf[n++] != 0) && (n < len));
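/* note: a gzip header is 10 fixed bytes (magic 0x1f 0x8b, compression
* method, flags, mtime, xfl, os); if the FNAME flag is set, the
* original file name follows as a NUL-terminated string, which the
* loop above skips before handing the raw deflate data to zlib */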
5246 bp->strm->next_in = zbuf + n;
5247 bp->strm->avail_in = len - n;
5248 bp->strm->next_out = bp->gunzip_buf;
5249 bp->strm->avail_out = FW_BUF_SIZE;
5251 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5255 rc = zlib_inflate(bp->strm, Z_FINISH);
5256 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5257 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5258 bp->dev->name, bp->strm->msg);
5260 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5261 if (bp->gunzip_outlen & 0x3)
5262 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5263 " gunzip_outlen (%d) not aligned\n",
5264 bp->dev->name, bp->gunzip_outlen);
5265 bp->gunzip_outlen >>= 2;
5267 zlib_inflateEnd(bp->strm);
5269 if (rc == Z_STREAM_END)
5275 /* nic load/unload */
5278 * General service functions
5281 /* send a NIG loopback debug packet */
5282 static void bnx2x_lb_pckt(struct bnx2x *bp)
5286 /* Ethernet source and destination addresses */
5287 wb_write[0] = 0x55555555;
5288 wb_write[1] = 0x55555555;
5289 wb_write[2] = 0x20; /* SOP */
5290 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5292 /* NON-IP protocol */
5293 wb_write[0] = 0x09000000;
5294 wb_write[1] = 0x55555555;
5295 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5296 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5299 /* some of the internal memories
5300 * are not directly readable from the driver;
5301 * to test them we send debug packets
5303 static int bnx2x_int_mem_test(struct bnx2x *bp)
5309 if (CHIP_REV_IS_FPGA(bp))
5311 else if (CHIP_REV_IS_EMUL(bp))
5316 DP(NETIF_MSG_HW, "start part1\n");
5318 /* Disable inputs of parser neighbor blocks */
5319 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5320 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5321 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5322 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5324 /* Write 0 to parser credits for CFC search request */
5325 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5327 /* send Ethernet packet */
5330 /* TODO: do I reset NIG statistic? */
5331 /* Wait until NIG register shows 1 packet of size 0x10 */
5332 count = 1000 * factor;
5335 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5336 val = *bnx2x_sp(bp, wb_data[0]);
5344 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5348 /* Wait until PRS register shows 1 packet */
5349 count = 1000 * factor;
5351 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5359 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5363 /* Reset and init BRB, PRS */
5364 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5366 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5368 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5369 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5371 DP(NETIF_MSG_HW, "part2\n");
5373 /* Disable inputs of parser neighbor blocks */
5374 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5375 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5376 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5377 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5379 /* Write 0 to parser credits for CFC search request */
5380 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5382 /* send 10 Ethernet packets */
5383 for (i = 0; i < 10; i++)
5386 /* Wait until NIG register shows 10 + 1
5387 packets of size 11*0x10 = 0xb0 */
5388 count = 1000 * factor;
5391 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5392 val = *bnx2x_sp(bp, wb_data[0]);
5400 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5404 /* Wait until PRS register shows 2 packets */
5405 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5407 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5409 /* Write 1 to parser credits for CFC search request */
5410 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5412 /* Wait until PRS register shows 3 packets */
5413 msleep(10 * factor);
5414 /* Wait until NIG register shows 1 packet of size 0x10 */
5415 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5417 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5419 /* clear NIG EOP FIFO */
5420 for (i = 0; i < 11; i++)
5421 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5422 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5424 BNX2X_ERR("clear of NIG failed\n");
5428 /* Reset and init BRB, PRS, NIG */
5429 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5431 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5433 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5434 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5437 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5440 /* Enable inputs of parser neighbor blocks */
5441 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5442 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5443 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5444 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5446 DP(NETIF_MSG_HW, "done\n");
5451 static void enable_blocks_attention(struct bnx2x *bp)
5453 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5454 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5455 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5456 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5457 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5458 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5459 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5460 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5461 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5462 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5463 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5464 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5465 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5466 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5467 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5468 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5469 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5470 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5471 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5472 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5473 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5474 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5475 if (CHIP_REV_IS_FPGA(bp))
5476 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5478 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5479 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5480 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5481 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5482 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5483 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5484 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5485 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5486 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5487 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5491 static void bnx2x_reset_common(struct bnx2x *bp)
5494 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5496 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5499 static int bnx2x_init_common(struct bnx2x *bp)
5503 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5505 bnx2x_reset_common(bp);
5506 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5507 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5509 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5510 if (CHIP_IS_E1H(bp))
5511 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5513 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5515 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5517 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5518 if (CHIP_IS_E1(bp)) {
5519 /* enable HW interrupt from PXP on USDM overflow
5520 bit 16 on INT_MASK_0 */
5521 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5524 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5528 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5529 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5530 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5531 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5532 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5533 /* make sure this value is 0 */
5534 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5536 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5537 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5538 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5539 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5540 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5543 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5545 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5546 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5547 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5550 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5551 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5553 /* let the HW do its magic ... */
5555 /* finish PXP init */
5556 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5558 BNX2X_ERR("PXP2 CFG failed\n");
5561 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5563 BNX2X_ERR("PXP2 RD_INIT failed\n");
5567 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5568 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5570 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5572 /* clean the DMAE memory */
5574 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5576 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5577 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5578 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5579 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5581 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5582 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5583 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5584 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5586 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5587 /* soft reset pulse */
5588 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5589 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5592 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5595 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5596 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5597 if (!CHIP_REV_IS_SLOW(bp)) {
5598 /* enable hw interrupt from doorbell Q */
5599 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5602 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5603 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5604 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5606 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5607 if (CHIP_IS_E1H(bp))
5608 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5610 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5611 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5612 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5613 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5615 if (CHIP_IS_E1H(bp)) {
5616 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5617 STORM_INTMEM_SIZE_E1H/2);
5619 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5620 0, STORM_INTMEM_SIZE_E1H/2);
5621 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5622 STORM_INTMEM_SIZE_E1H/2);
5624 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5625 0, STORM_INTMEM_SIZE_E1H/2);
5626 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5627 STORM_INTMEM_SIZE_E1H/2);
5629 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5630 0, STORM_INTMEM_SIZE_E1H/2);
5631 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5632 STORM_INTMEM_SIZE_E1H/2);
5634 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5635 0, STORM_INTMEM_SIZE_E1H/2);
5637 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5638 STORM_INTMEM_SIZE_E1);
5639 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5640 STORM_INTMEM_SIZE_E1);
5641 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5642 STORM_INTMEM_SIZE_E1);
5643 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5644 STORM_INTMEM_SIZE_E1);
5647 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5648 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5649 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5650 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5653 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5655 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5658 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5659 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5660 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5662 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5663 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5664 REG_WR(bp, i, 0xc0cac01a);
5665 /* TODO: replace with something meaningful */
5667 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5668 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5670 if (sizeof(union cdu_context) != 1024)
5671 /* we currently assume that a context is 1024 bytes */
5672 printk(KERN_ALERT PFX "please adjust the size of"
5673 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5675 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5676 val = (4 << 24) + (0 << 12) + 1024;
5677 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5678 if (CHIP_IS_E1(bp)) {
5679 /* !!! fix pxp client credit until excel update */
5680 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5681 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5684 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5685 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5686 /* enable context validation interrupt from CFC */
5687 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5689 /* set the thresholds to prevent CFC/CDU race */
5690 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5692 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5693 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5695 /* PXPCS COMMON comes here */
5696 /* Reset PCIE errors for debug */
5697 REG_WR(bp, 0x2814, 0xffffffff);
5698 REG_WR(bp, 0x3820, 0xffffffff);
5700 /* EMAC0 COMMON comes here */
5701 /* EMAC1 COMMON comes here */
5702 /* DBU COMMON comes here */
5703 /* DBG COMMON comes here */
5705 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5706 if (CHIP_IS_E1H(bp)) {
5707 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5708 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5711 if (CHIP_REV_IS_SLOW(bp))
5714 /* finish CFC init */
5715 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5717 BNX2X_ERR("CFC LL_INIT failed\n");
5720 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5722 BNX2X_ERR("CFC AC_INIT failed\n");
5725 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5727 BNX2X_ERR("CFC CAM_INIT failed\n");
5730 REG_WR(bp, CFC_REG_DEBUG0, 0);
5732 /* read NIG statistic
5733 to see if this is our first bring-up since powerup */
5734 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5735 val = *bnx2x_sp(bp, wb_data[0]);
5737 /* do internal memory self test */
5738 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5739 BNX2X_ERR("internal mem self test failed\n");
5743 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5744 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5745 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5746 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5747 bp->port.need_hw_lock = 1;
5750 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5751 /* Fan failure is indicated by SPIO 5 */
5752 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5753 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5755 /* set to active low mode */
5756 val = REG_RD(bp, MISC_REG_SPIO_INT);
5757 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5758 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5759 REG_WR(bp, MISC_REG_SPIO_INT, val);
5761 /* enable interrupt to signal the IGU */
5762 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5763 val |= (1 << MISC_REGISTERS_SPIO_5);
5764 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5771 /* clear PXP2 attentions */
5772 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5774 enable_blocks_attention(bp);
5776 if (!BP_NOMCP(bp)) {
5777 bnx2x_acquire_phy_lock(bp);
5778 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5779 bnx2x_release_phy_lock(bp);
5781 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5786 static int bnx2x_init_port(struct bnx2x *bp)
5788 int port = BP_PORT(bp);
5792 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5794 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5796 /* Port PXP comes here */
5797 /* Port PXP2 comes here */
5802 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5803 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5804 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5805 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5810 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5811 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5812 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5813 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5818 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5819 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5820 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5821 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5823 /* Port CMs come here */
5824 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5825 (port ? XCM_PORT1_END : XCM_PORT0_END));
5827 /* Port QM comes here */
5829 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5830 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5832 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5833 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5835 /* Port DQ comes here */
5837 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5838 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5839 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5840 /* no pause for emulation and FPGA */
5845 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5846 else if (bp->dev->mtu > 4096) {
5847 if (bp->flags & ONE_PORT_FLAG)
5851 /* (24*1024 + val*4)/256 */
5852 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5855 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5856 high = low + 56; /* 14*1024/256 */
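/* illustration only (assuming val holds the MTU here): a 9000-byte
* MTU gives low = 96 + 141 = 237 and high = 237 + 56 = 293, both
* apparently in units of 256-byte BRB blocks */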
5858 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5859 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5862 /* Port PRS comes here */
5863 /* Port TSDM comes here */
5864 /* Port CSDM comes here */
5865 /* Port USDM comes here */
5866 /* Port XSDM comes here */
5868 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5869 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5870 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5871 port ? USEM_PORT1_END : USEM_PORT0_END);
5872 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5873 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5874 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5875 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5877 /* Port UPB comes here */
5878 /* Port XPB comes here */
5880 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5881 port ? PBF_PORT1_END : PBF_PORT0_END);
5883 /* configure PBF to work without PAUSE (mtu 9000) */
5884 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5886 /* update threshold */
5887 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5888 /* update init credit */
5889 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5892 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5894 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5897 /* tell the searcher where the T2 table is */
5898 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5900 wb_write[0] = U64_LO(bp->t2_mapping);
5901 wb_write[1] = U64_HI(bp->t2_mapping);
5902 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5903 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5904 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5905 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5907 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5908 /* Port SRCH comes here */
5910 /* Port CDU comes here */
5911 /* Port CFC comes here */
5913 if (CHIP_IS_E1(bp)) {
5914 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5915 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5917 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5918 port ? HC_PORT1_END : HC_PORT0_END);
5920 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5921 MISC_AEU_PORT0_START,
5922 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5923 /* init aeu_mask_attn_func_0/1:
5924 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5925 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5926 * bits 4-7 are used for "per vn group attention" */
5927 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5928 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5930 /* Port PXPCS comes here */
5931 /* Port EMAC0 comes here */
5932 /* Port EMAC1 comes here */
5933 /* Port DBU comes here */
5934 /* Port DBG comes here */
5936 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5937 port ? NIG_PORT1_END : NIG_PORT0_END);
5939 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5941 if (CHIP_IS_E1H(bp)) {
5942 /* 0x2 disable e1hov, 0x1 enable */
5943 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5944 (IS_E1HMF(bp) ? 0x1 : 0x2));
5946 /* support pause requests from USDM, TSDM and BRB */
5947 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5950 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5951 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5952 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5956 /* Port MCP comes here */
5957 /* Port DMAE comes here */
5959 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5962 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5964 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5965 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5967 /* The GPIO should be swapped if the swap register is
5969 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5970 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5972 /* Select function upon port-swap configuration */
5974 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5975 aeu_gpio_mask = (swap_val && swap_override) ?
5976 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5977 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5979 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5980 aeu_gpio_mask = (swap_val && swap_override) ?
5981 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5982 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5984 val = REG_RD(bp, offset);
5985 /* add GPIO3 to group */
5986 val |= aeu_gpio_mask;
5987 REG_WR(bp, offset, val);
5991 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5992 /* add SPIO 5 to group 0 */
5993 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5994 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5995 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6002 bnx2x__link_reset(bp);
6007 #define ILT_PER_FUNC (768/2)
6008 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6009 /* the phys address is shifted right 12 bits and has a
6010 1=valid bit added at the 53rd bit;
6011 since this is a wide register(TM)
6012 we split it into two 32 bit writes
6014 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6015 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6016 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6017 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
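/* worked example (illustration only): for a physical address of
* 0x0000001234567000, ONCHIP_ADDR1() yields 0x01234567 (address >> 12,
* low 32 bits) and ONCHIP_ADDR2() yields 0x00100000 - just the valid
* bit (bit 20 of the high word, the "53rd bit" mentioned above), since
* address >> 44 is 0 for this address */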
6019 #define CNIC_ILT_LINES 0
6021 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6025 if (CHIP_IS_E1H(bp))
6026 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6028 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6030 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6033 static int bnx2x_init_func(struct bnx2x *bp)
6035 int port = BP_PORT(bp);
6036 int func = BP_FUNC(bp);
6040 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6042 /* set MSI reconfigure capability */
6043 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6044 val = REG_RD(bp, addr);
6045 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6046 REG_WR(bp, addr, val);
6048 i = FUNC_ILT_BASE(func);
6050 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6051 if (CHIP_IS_E1H(bp)) {
6052 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6053 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6055 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6056 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6059 if (CHIP_IS_E1H(bp)) {
6060 for (i = 0; i < 9; i++)
6061 bnx2x_init_block(bp,
6062 cm_start[func][i], cm_end[func][i]);
6064 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6065 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6068 /* HC init per function */
6069 if (CHIP_IS_E1H(bp)) {
6070 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6072 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6073 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6075 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6077 /* Reset PCIE errors for debug */
6078 REG_WR(bp, 0x2114, 0xffffffff);
6079 REG_WR(bp, 0x2120, 0xffffffff);
6084 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6088 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6089 BP_FUNC(bp), load_code);
6092 mutex_init(&bp->dmae_mutex);
6093 bnx2x_gunzip_init(bp);
6095 switch (load_code) {
6096 case FW_MSG_CODE_DRV_LOAD_COMMON:
6097 rc = bnx2x_init_common(bp);
6102 case FW_MSG_CODE_DRV_LOAD_PORT:
6104 rc = bnx2x_init_port(bp);
6109 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6111 rc = bnx2x_init_func(bp);
6117 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6121 if (!BP_NOMCP(bp)) {
6122 int func = BP_FUNC(bp);
6124 bp->fw_drv_pulse_wr_seq =
6125 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6126 DRV_PULSE_SEQ_MASK);
6127 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6128 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6129 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6133 /* this needs to be done before gunzip end */
6134 bnx2x_zero_def_sb(bp);
6135 for_each_queue(bp, i)
6136 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6139 bnx2x_gunzip_end(bp);
6144 /* send the MCP a request, block until there is a reply */
6145 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6147 int func = BP_FUNC(bp);
6148 u32 seq = ++bp->fw_seq;
6151 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6153 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6154 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6157 /* let the FW do its magic ... */
6160 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6162 /* Give the FW up to 2 seconds (200*10ms) */
6163 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6165 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6166 cnt*delay, rc, seq);
6168 /* is this a reply to our command? */
6169 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6170 rc &= FW_MSG_CODE_MASK;
6174 BNX2X_ERR("FW failed to respond!\n");
6182 static void bnx2x_free_mem(struct bnx2x *bp)
6185 #define BNX2X_PCI_FREE(x, y, size) \
6188 pci_free_consistent(bp->pdev, size, x, y); \
6194 #define BNX2X_FREE(x) \
6206 for_each_queue(bp, i) {
6209 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6210 bnx2x_fp(bp, i, status_blk_mapping),
6211 sizeof(struct host_status_block) +
6212 sizeof(struct eth_tx_db_data));
6215 for_each_rx_queue(bp, i) {
6217 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6218 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6219 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6220 bnx2x_fp(bp, i, rx_desc_mapping),
6221 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6223 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6224 bnx2x_fp(bp, i, rx_comp_mapping),
6225 sizeof(struct eth_fast_path_rx_cqe) *
6229 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6230 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6231 bnx2x_fp(bp, i, rx_sge_mapping),
6232 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6235 for_each_tx_queue(bp, i) {
6237 /* fastpath tx rings: tx_buf tx_desc */
6238 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6239 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6240 bnx2x_fp(bp, i, tx_desc_mapping),
6241 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6243 /* end of fastpath */
6245 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6246 sizeof(struct host_def_status_block));
6248 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6249 sizeof(struct bnx2x_slowpath));
6252 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6253 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6254 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6255 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6257 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6259 #undef BNX2X_PCI_FREE
6263 static int bnx2x_alloc_mem(struct bnx2x *bp)
6266 #define BNX2X_PCI_ALLOC(x, y, size) \
6268 x = pci_alloc_consistent(bp->pdev, size, y); \
6270 goto alloc_mem_err; \
6271 memset(x, 0, size); \
6274 #define BNX2X_ALLOC(x, size) \
6276 x = vmalloc(size); \
6278 goto alloc_mem_err; \
6279 memset(x, 0, size); \
6286 for_each_queue(bp, i) {
6287 bnx2x_fp(bp, i, bp) = bp;
6290 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6291 &bnx2x_fp(bp, i, status_blk_mapping),
6292 sizeof(struct host_status_block) +
6293 sizeof(struct eth_tx_db_data));
6296 for_each_rx_queue(bp, i) {
6298 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6299 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6300 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6301 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6302 &bnx2x_fp(bp, i, rx_desc_mapping),
6303 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6305 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6306 &bnx2x_fp(bp, i, rx_comp_mapping),
6307 sizeof(struct eth_fast_path_rx_cqe) *
6311 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6312 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6313 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6314 &bnx2x_fp(bp, i, rx_sge_mapping),
6315 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6318 for_each_tx_queue(bp, i) {
6320 bnx2x_fp(bp, i, hw_tx_prods) =
6321 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6323 bnx2x_fp(bp, i, tx_prods_mapping) =
6324 bnx2x_fp(bp, i, status_blk_mapping) +
6325 sizeof(struct host_status_block);
6327 /* fastpath tx rings: tx_buf tx_desc */
6328 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6329 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6330 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6331 &bnx2x_fp(bp, i, tx_desc_mapping),
6332 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6334 /* end of fastpath */
6336 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6337 sizeof(struct host_def_status_block));
6339 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6340 sizeof(struct bnx2x_slowpath));
6343 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6346 for (i = 0; i < 64*1024; i += 64) {
6347 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6348 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6351 /* allocate searcher T2 table
6352 we allocate 1/4 of alloc num for T2
6353 (which is not entered into the ILT) */
6354 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6357 for (i = 0; i < 16*1024; i += 64)
6358 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6360 /* now fixup the last line in the block to point to the next block */
6361 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
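/*
 * The loop above turns T2 into a chain of 64-byte "lines": the last 8 bytes
 * of each line (offset +56) hold the physical address of the following line,
 * and the final line is pointed back at the start of the block, so the
 * searcher hardware can walk the table as a circular singly linked list.
 */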
6363 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6364 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6366 /* QM queues (128*MAX_CONN) */
6367 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6370 /* Slow path ring */
6371 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6379 #undef BNX2X_PCI_ALLOC
6383 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6387 for_each_tx_queue(bp, i) {
6388 struct bnx2x_fastpath *fp = &bp->fp[i];
6390 u16 bd_cons = fp->tx_bd_cons;
6391 u16 sw_prod = fp->tx_pkt_prod;
6392 u16 sw_cons = fp->tx_pkt_cons;
6394 while (sw_cons != sw_prod) {
6395 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6401 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6405 for_each_rx_queue(bp, j) {
6406 struct bnx2x_fastpath *fp = &bp->fp[j];
6408 for (i = 0; i < NUM_RX_BD; i++) {
6409 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6410 struct sk_buff *skb = rx_buf->skb;
6415 pci_unmap_single(bp->pdev,
6416 pci_unmap_addr(rx_buf, mapping),
6417 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6422 if (!fp->disable_tpa)
6423 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6424 ETH_MAX_AGGREGATION_QUEUES_E1 :
6425 ETH_MAX_AGGREGATION_QUEUES_E1H);
6429 static void bnx2x_free_skbs(struct bnx2x *bp)
6431 bnx2x_free_tx_skbs(bp);
6432 bnx2x_free_rx_skbs(bp);
6435 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6439 free_irq(bp->msix_table[0].vector, bp->dev);
6440 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6441 bp->msix_table[0].vector);
6443 for_each_queue(bp, i) {
6444 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6445 "state %x\n", i, bp->msix_table[i + offset].vector,
6446 bnx2x_fp(bp, i, state));
6448 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6452 static void bnx2x_free_irq(struct bnx2x *bp)
6454 if (bp->flags & USING_MSIX_FLAG) {
6455 bnx2x_free_msix_irqs(bp);
6456 pci_disable_msix(bp->pdev);
6457 bp->flags &= ~USING_MSIX_FLAG;
6459 } else if (bp->flags & USING_MSI_FLAG) {
6460 free_irq(bp->pdev->irq, bp->dev);
6461 pci_disable_msi(bp->pdev);
6462 bp->flags &= ~USING_MSI_FLAG;
6465 free_irq(bp->pdev->irq, bp->dev);
6468 static int bnx2x_enable_msix(struct bnx2x *bp)
6470 int i, rc, offset = 1;
6473 bp->msix_table[0].entry = igu_vec;
6474 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6476 for_each_queue(bp, i) {
6477 igu_vec = BP_L_ID(bp) + offset + i;
6478 bp->msix_table[i + offset].entry = igu_vec;
6479 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6480 "(fastpath #%u)\n", i + offset, igu_vec, i);
6483 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6484 BNX2X_NUM_QUEUES(bp) + offset);
6486 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6490 bp->flags |= USING_MSIX_FLAG;
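/*
 * Vector layout requested above: entry 0 carries the slowpath/default status
 * block, entries 1..N carry one fastpath queue each, with IGU vector numbers
 * derived from this function's base logical id (BP_L_ID).
 */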
6495 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6497 int i, rc, offset = 1;
6499 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6500 bp->dev->name, bp->dev);
6502 BNX2X_ERR("request sp irq failed\n");
6506 for_each_queue(bp, i) {
6507 struct bnx2x_fastpath *fp = &bp->fp[i];
6509 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6510 rc = request_irq(bp->msix_table[i + offset].vector,
6511 bnx2x_msix_fp_int, 0, fp->name, fp);
6513 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6514 bnx2x_free_msix_irqs(bp);
6518 fp->state = BNX2X_FP_STATE_IRQ;
6521 i = BNX2X_NUM_QUEUES(bp);
6523 printk(KERN_INFO PFX
6524 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6525 bp->dev->name, bp->msix_table[0].vector,
6526 bp->msix_table[offset].vector,
6527 bp->msix_table[offset + i - 1].vector);
6529 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6530 bp->dev->name, bp->msix_table[0].vector,
6531 bp->msix_table[offset + i - 1].vector);
6536 static int bnx2x_enable_msi(struct bnx2x *bp)
6540 rc = pci_enable_msi(bp->pdev);
6542 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6545 bp->flags |= USING_MSI_FLAG;
6550 static int bnx2x_req_irq(struct bnx2x *bp)
6552 unsigned long flags;
6555 if (bp->flags & USING_MSI_FLAG)
6558 flags = IRQF_SHARED;
6560 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6561 bp->dev->name, bp->dev);
6563 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6568 static void bnx2x_napi_enable(struct bnx2x *bp)
6572 for_each_rx_queue(bp, i)
6573 napi_enable(&bnx2x_fp(bp, i, napi));
6576 static void bnx2x_napi_disable(struct bnx2x *bp)
6580 for_each_rx_queue(bp, i)
6581 napi_disable(&bnx2x_fp(bp, i, napi));
6584 static void bnx2x_netif_start(struct bnx2x *bp)
6586 if (atomic_dec_and_test(&bp->intr_sem)) {
6587 if (netif_running(bp->dev)) {
6588 bnx2x_napi_enable(bp);
6589 bnx2x_int_enable(bp);
6590 if (bp->state == BNX2X_STATE_OPEN)
6591 netif_tx_wake_all_queues(bp->dev);
6596 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6598 bnx2x_int_disable_sync(bp, disable_hw);
6599 bnx2x_napi_disable(bp);
6600 if (netif_running(bp->dev)) {
6601 netif_tx_disable(bp->dev);
6602 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6607 * Init service functions
6610 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6612 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6613 int port = BP_PORT(bp);
6616 * unicasts 0-31:port0 32-63:port1
6617 * multicast 64-127:port0 128-191:port1
6619 config->hdr.length = 2;
6620 config->hdr.offset = port ? 32 : 0;
6621 config->hdr.client_id = bp->fp->cl_id;
6622 config->hdr.reserved1 = 0;
6625 config->config_table[0].cam_entry.msb_mac_addr =
6626 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6627 config->config_table[0].cam_entry.middle_mac_addr =
6628 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6629 config->config_table[0].cam_entry.lsb_mac_addr =
6630 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6631 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6633 config->config_table[0].target_table_entry.flags = 0;
6635 CAM_INVALIDATE(config->config_table[0]);
6636 config->config_table[0].target_table_entry.client_id = 0;
6637 config->config_table[0].target_table_entry.vlan_id = 0;
6639 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6640 (set ? "setting" : "clearing"),
6641 config->config_table[0].cam_entry.msb_mac_addr,
6642 config->config_table[0].cam_entry.middle_mac_addr,
6643 config->config_table[0].cam_entry.lsb_mac_addr);
6646 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6647 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6648 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6649 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6651 config->config_table[1].target_table_entry.flags =
6652 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6654 CAM_INVALIDATE(config->config_table[1]);
6655 config->config_table[1].target_table_entry.client_id = 0;
6656 config->config_table[1].target_table_entry.vlan_id = 0;
6658 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6659 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6660 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
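/*
 * Illustration of the E1 CAM programming above (no extra configuration is
 * added here): for port 1 the header points at offset 32 and two entries are
 * written - entry 0 holds the device MAC packed into the msb/middle/lsb
 * 16-bit fields (hence the swab16() of each byte pair) and entry 1 holds the
 * all-ones broadcast address flagged as a broadcast target-table entry.
 */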
6663 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6665 struct mac_configuration_cmd_e1h *config =
6666 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6668 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6669 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6673 /* CAM allocation for E1H
6674 * unicasts: by func number
6675 * multicast: 20+FUNC*20, 20 each
6677 config->hdr.length = 1;
6678 config->hdr.offset = BP_FUNC(bp);
6679 config->hdr.client_id = bp->fp->cl_id;
6680 config->hdr.reserved1 = 0;
6683 config->config_table[0].msb_mac_addr =
6684 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6685 config->config_table[0].middle_mac_addr =
6686 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6687 config->config_table[0].lsb_mac_addr =
6688 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6689 config->config_table[0].client_id = BP_L_ID(bp);
6690 config->config_table[0].vlan_id = 0;
6691 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6693 config->config_table[0].flags = BP_PORT(bp);
6695 config->config_table[0].flags =
6696 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6698 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6699 (set ? "setting" : "clearing"),
6700 config->config_table[0].msb_mac_addr,
6701 config->config_table[0].middle_mac_addr,
6702 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6704 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6705 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6706 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
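/*
 * In the E1H flavour above a single CAM line is written per function: it is
 * placed at offset BP_FUNC(bp) and tagged with the function's outer VLAN
 * (e1hov) and logical client id, which is how multiple functions (up to
 * eight on E1H) can share one physical port.
 */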
6709 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6710 int *state_p, int poll)
6712 /* can take a while if any port is running */
6715 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6716 poll ? "polling" : "waiting", state, idx);
6721 bnx2x_rx_int(bp->fp, 10);
6722 /* if index is different from 0
6723 * the reply for some commands will
6724 * be on the non-default queue
6727 bnx2x_rx_int(&bp->fp[idx], 10);
6730 mb(); /* state is changed by bnx2x_sp_event() */
6731 if (*state_p == state) {
6732 #ifdef BNX2X_STOP_ON_ERROR
6733 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6742 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6743 poll ? "polling" : "waiting", state, idx);
6744 #ifdef BNX2X_STOP_ON_ERROR
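/*
 * bnx2x_wait_ramrod() relies on bnx2x_sp_event() updating *state_p from the
 * slowpath completion; with 'poll' set it also drives the rx completion ring
 * itself (bnx2x_rx_int) because it may run before interrupts are enabled.
 * The debug print above suggests an upper bound of about 5000 iterations.
 */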
6751 static int bnx2x_setup_leading(struct bnx2x *bp)
6755 /* reset IGU state */
6756 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6759 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6761 /* Wait for completion */
6762 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6767 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6769 struct bnx2x_fastpath *fp = &bp->fp[index];
6771 /* reset IGU state */
6772 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6775 fp->state = BNX2X_FP_STATE_OPENING;
6776 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6779 /* Wait for completion */
6780 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6784 static int bnx2x_poll(struct napi_struct *napi, int budget);
6786 static void bnx2x_set_int_mode(struct bnx2x *bp)
6794 bp->num_rx_queues = num_queues;
6795 bp->num_tx_queues = num_queues;
6797 "set number of queues to %d\n", num_queues);
6802 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6803 num_queues = min_t(u32, num_online_cpus(),
6804 BNX2X_MAX_QUEUES(bp));
6807 bp->num_rx_queues = num_queues;
6808 bp->num_tx_queues = num_queues;
6809 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6810 " number of tx queues to %d\n",
6811 bp->num_rx_queues, bp->num_tx_queues);
6812 /* if we can't use MSI-X we only need one fp,
6813 * so try to enable MSI-X with the requested number of fp's
6814 * and fall back to MSI or legacy INTx with one fp
6816 if (bnx2x_enable_msix(bp)) {
6817 /* failed to enable MSI-X */
6819 bp->num_rx_queues = num_queues;
6820 bp->num_tx_queues = num_queues;
6822 BNX2X_ERR("Multi requested but failed to "
6823 "enable MSI-X set number of "
6824 "queues to %d\n", num_queues);
6828 bp->dev->real_num_tx_queues = bp->num_tx_queues;
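/*
 * Queue count policy implemented above: in regular RSS mode ask for
 * min(num_online_cpus(), BNX2X_MAX_QUEUES(bp)) rx/tx queues and try MSI-X
 * with that many fastpath vectors; if MSI-X cannot be enabled, drop back to
 * a single queue so MSI or legacy INTx can be used instead.
 */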
6831 static void bnx2x_set_rx_mode(struct net_device *dev);
6833 /* must be called with rtnl_lock */
6834 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6838 #ifdef BNX2X_STOP_ON_ERROR
6839 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6840 if (unlikely(bp->panic))
6844 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6846 bnx2x_set_int_mode(bp);
6848 if (bnx2x_alloc_mem(bp))
6851 for_each_rx_queue(bp, i)
6852 bnx2x_fp(bp, i, disable_tpa) =
6853 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6855 for_each_rx_queue(bp, i)
6856 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6859 #ifdef BNX2X_STOP_ON_ERROR
6860 for_each_rx_queue(bp, i) {
6861 struct bnx2x_fastpath *fp = &bp->fp[i];
6863 fp->poll_no_work = 0;
6865 fp->poll_max_calls = 0;
6866 fp->poll_complete = 0;
6870 bnx2x_napi_enable(bp);
6872 if (bp->flags & USING_MSIX_FLAG) {
6873 rc = bnx2x_req_msix_irqs(bp);
6875 pci_disable_msix(bp->pdev);
6879 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6880 bnx2x_enable_msi(bp);
6882 rc = bnx2x_req_irq(bp);
6884 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6885 if (bp->flags & USING_MSI_FLAG)
6886 pci_disable_msi(bp->pdev);
6889 if (bp->flags & USING_MSI_FLAG) {
6890 bp->dev->irq = bp->pdev->irq;
6891 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6892 bp->dev->name, bp->pdev->irq);
6896 /* Send LOAD_REQUEST command to the MCP.
6897 The reply indicates the type of LOAD command:
6898 if this is the first port to be initialized,
6899 the common blocks should be initialized as well, otherwise not
6901 if (!BP_NOMCP(bp)) {
6902 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6904 BNX2X_ERR("MCP response failure, aborting\n");
6908 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6909 rc = -EBUSY; /* other port in diagnostic mode */
6914 int port = BP_PORT(bp);
6916 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
6917 load_count[0], load_count[1], load_count[2]);
6919 load_count[1 + port]++;
6920 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
6921 load_count[0], load_count[1], load_count[2]);
6922 if (load_count[0] == 1)
6923 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6924 else if (load_count[1 + port] == 1)
6925 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6927 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
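/*
 * Without an MCP the driver mimics its arbitration with the load_count[]
 * array (0 - common, 1 - port0, 2 - port1): the first function to load
 * overall initializes the common blocks (LOAD_COMMON), the first one on its
 * port initializes the port (LOAD_PORT) and any later function only
 * initializes itself (LOAD_FUNCTION). bnx2x_nic_unload() decrements the
 * same counters to pick the matching UNLOAD code.
 */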
6930 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6931 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6935 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6938 rc = bnx2x_init_hw(bp, load_code);
6940 BNX2X_ERR("HW init failed, aborting\n");
6944 /* Setup NIC internals and enable interrupts */
6945 bnx2x_nic_init(bp, load_code);
6947 /* Send LOAD_DONE command to MCP */
6948 if (!BP_NOMCP(bp)) {
6949 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6951 BNX2X_ERR("MCP response failure, aborting\n");
6957 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6959 rc = bnx2x_setup_leading(bp);
6961 BNX2X_ERR("Setup leading failed!\n");
6965 if (CHIP_IS_E1H(bp))
6966 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6967 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6968 bp->state = BNX2X_STATE_DISABLED;
6971 if (bp->state == BNX2X_STATE_OPEN)
6972 for_each_nondefault_queue(bp, i) {
6973 rc = bnx2x_setup_multi(bp, i);
6979 bnx2x_set_mac_addr_e1(bp, 1);
6981 bnx2x_set_mac_addr_e1h(bp, 1);
6984 bnx2x_initial_phy_init(bp, load_mode);
6986 /* Start fast path */
6987 switch (load_mode) {
6989 /* Tx queues should only be re-enabled */
6990 netif_tx_wake_all_queues(bp->dev);
6991 /* Initialize the receive filter. */
6992 bnx2x_set_rx_mode(bp->dev);
6996 netif_tx_start_all_queues(bp->dev);
6997 /* Initialize the receive filter. */
6998 bnx2x_set_rx_mode(bp->dev);
7002 /* Initialize the receive filter. */
7003 bnx2x_set_rx_mode(bp->dev);
7004 bp->state = BNX2X_STATE_DIAG;
7012 bnx2x__link_status_update(bp);
7014 /* start the timer */
7015 mod_timer(&bp->timer, jiffies + bp->current_interval);
7021 bnx2x_int_disable_sync(bp, 1);
7022 if (!BP_NOMCP(bp)) {
7023 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7024 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7027 /* Free SKBs, SGEs, TPA pool and driver internals */
7028 bnx2x_free_skbs(bp);
7029 for_each_rx_queue(bp, i)
7030 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7035 bnx2x_napi_disable(bp);
7036 for_each_rx_queue(bp, i)
7037 netif_napi_del(&bnx2x_fp(bp, i, napi));
7043 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7045 struct bnx2x_fastpath *fp = &bp->fp[index];
7048 /* halt the connection */
7049 fp->state = BNX2X_FP_STATE_HALTING;
7050 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7052 /* Wait for completion */
7053 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7055 if (rc) /* timeout */
7058 /* delete cfc entry */
7059 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7061 /* Wait for completion */
7062 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7067 static int bnx2x_stop_leading(struct bnx2x *bp)
7069 __le16 dsb_sp_prod_idx;
7070 /* if the other port is handling traffic,
7071 this can take a lot of time */
7077 /* Send HALT ramrod */
7078 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7079 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7081 /* Wait for completion */
7082 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7083 &(bp->fp[0].state), 1);
7084 if (rc) /* timeout */
7087 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7089 /* Send PORT_DELETE ramrod */
7090 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7092 /* Wait for completion to arrive on the default status block;
7093 we are going to reset the chip anyway,
7094 so there is not much to do if this times out
7096 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7098 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7099 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7100 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7101 #ifdef BNX2X_STOP_ON_ERROR
7109 rmb(); /* Refresh the dsb_sp_prod */
7111 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7112 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
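/*
 * Unlike the client HALT/CFC_DEL ramrods, the PORT_DELETE completion is only
 * visible as a new producer value on the default status block, so the code
 * above snapshots dsb_sp_prod and waits for it to change before declaring
 * the leading connection closed.
 */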
7117 static void bnx2x_reset_func(struct bnx2x *bp)
7119 int port = BP_PORT(bp);
7120 int func = BP_FUNC(bp);
7124 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7125 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7128 base = FUNC_ILT_BASE(func);
7129 for (i = base; i < base + ILT_PER_FUNC; i++)
7130 bnx2x_ilt_wr(bp, i, 0);
7133 static void bnx2x_reset_port(struct bnx2x *bp)
7135 int port = BP_PORT(bp);
7138 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7140 /* Do not rcv packets to BRB */
7141 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7142 /* Do not direct rcv packets that are not for MCP to the BRB */
7143 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7144 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7147 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7150 /* Check for BRB port occupancy */
7151 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7153 DP(NETIF_MSG_IFDOWN,
7154 "BRB1 is not empty %d blocks are occupied\n", val);
7156 /* TODO: Close Doorbell port? */
7159 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7161 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7162 BP_FUNC(bp), reset_code);
7164 switch (reset_code) {
7165 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7166 bnx2x_reset_port(bp);
7167 bnx2x_reset_func(bp);
7168 bnx2x_reset_common(bp);
7171 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7172 bnx2x_reset_port(bp);
7173 bnx2x_reset_func(bp);
7176 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7177 bnx2x_reset_func(bp);
7181 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7186 /* must be called with rtnl_lock */
7187 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7189 int port = BP_PORT(bp);
7193 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7195 bp->rx_mode = BNX2X_RX_MODE_NONE;
7196 bnx2x_set_storm_rx_mode(bp);
7198 bnx2x_netif_stop(bp, 1);
7200 del_timer_sync(&bp->timer);
7201 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7202 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7203 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7208 /* Wait until tx fastpath tasks complete */
7209 for_each_tx_queue(bp, i) {
7210 struct bnx2x_fastpath *fp = &bp->fp[i];
7214 while (bnx2x_has_tx_work_unload(fp)) {
7216 bnx2x_tx_int(fp, 1000);
7218 BNX2X_ERR("timeout waiting for queue[%d]\n",
7220 #ifdef BNX2X_STOP_ON_ERROR
7232 /* Give HW time to discard old tx messages */
7235 if (CHIP_IS_E1(bp)) {
7236 struct mac_configuration_cmd *config =
7237 bnx2x_sp(bp, mcast_config);
7239 bnx2x_set_mac_addr_e1(bp, 0);
7241 for (i = 0; i < config->hdr.length; i++)
7242 CAM_INVALIDATE(config->config_table[i]);
7244 config->hdr.length = i;
7245 if (CHIP_REV_IS_SLOW(bp))
7246 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7248 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7249 config->hdr.client_id = bp->fp->cl_id;
7250 config->hdr.reserved1 = 0;
7252 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7253 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7254 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7257 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7259 bnx2x_set_mac_addr_e1h(bp, 0);
7261 for (i = 0; i < MC_HASH_SIZE; i++)
7262 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7265 if (unload_mode == UNLOAD_NORMAL)
7266 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7268 else if (bp->flags & NO_WOL_FLAG) {
7269 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7270 if (CHIP_IS_E1H(bp))
7271 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7273 } else if (bp->wol) {
7274 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7275 u8 *mac_addr = bp->dev->dev_addr;
7277 /* The mac address is written to entries 1-4 to
7278 preserve entry 0 which is used by the PMF */
7279 u8 entry = (BP_E1HVN(bp) + 1)*8;
7281 val = (mac_addr[0] << 8) | mac_addr[1];
7282 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7284 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7285 (mac_addr[4] << 8) | mac_addr[5];
7286 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
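/* Each EMAC MAC-match entry is 8 bytes of register space, so entries 1..4
 * (one per E1H vnic) start at byte offsets 8, 16, 24 and 32; the upper two
 * MAC bytes go into the first register and the lower four into the second,
 * as written above.
 */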
7288 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7291 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7293 /* Close multi and leading connections
7294 Completions for ramrods are collected in a synchronous way */
7295 for_each_nondefault_queue(bp, i)
7296 if (bnx2x_stop_multi(bp, i))
7299 rc = bnx2x_stop_leading(bp);
7301 BNX2X_ERR("Stop leading failed!\n");
7302 #ifdef BNX2X_STOP_ON_ERROR
7311 reset_code = bnx2x_fw_command(bp, reset_code);
7313 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7314 load_count[0], load_count[1], load_count[2]);
7316 load_count[1 + port]--;
7317 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7318 load_count[0], load_count[1], load_count[2]);
7319 if (load_count[0] == 0)
7320 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7321 else if (load_count[1 + port] == 0)
7322 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7324 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7327 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7328 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7329 bnx2x__link_reset(bp);
7331 /* Reset the chip */
7332 bnx2x_reset_chip(bp, reset_code);
7334 /* Report UNLOAD_DONE to MCP */
7336 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7340 /* Free SKBs, SGEs, TPA pool and driver internals */
7341 bnx2x_free_skbs(bp);
7342 for_each_rx_queue(bp, i)
7343 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7344 for_each_rx_queue(bp, i)
7345 netif_napi_del(&bnx2x_fp(bp, i, napi));
7348 bp->state = BNX2X_STATE_CLOSED;
7350 netif_carrier_off(bp->dev);
7355 static void bnx2x_reset_task(struct work_struct *work)
7357 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7359 #ifdef BNX2X_STOP_ON_ERROR
7360 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7361 " so reset not done to allow debug dump,\n"
7362 KERN_ERR " you will need to reboot when done\n");
7368 if (!netif_running(bp->dev))
7369 goto reset_task_exit;
7371 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7372 bnx2x_nic_load(bp, LOAD_NORMAL);
7378 /* end of nic load/unload */
7383 * Init service functions
7386 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7389 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7390 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7391 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7392 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7393 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7394 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7395 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7396 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7398 BNX2X_ERR("Unsupported function index: %d\n", func);
7403 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7405 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7407 /* Flush all outstanding writes */
7410 /* Pretend to be function 0 */
7412 /* Flush the GRC transaction (in the chip) */
7413 new_val = REG_RD(bp, reg);
7415 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7420 /* From now on we are in the "like-E1" mode */
7421 bnx2x_int_disable(bp);
7423 /* Flush all outstanding writes */
7426 /* Restore the original function settings */
7427 REG_WR(bp, reg, orig_func);
7428 new_val = REG_RD(bp, reg);
7429 if (new_val != orig_func) {
7430 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7431 orig_func, new_val);
7436 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7438 if (CHIP_IS_E1H(bp))
7439 bnx2x_undi_int_disable_e1h(bp, func);
7441 bnx2x_int_disable(bp);
7444 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7448 /* Check if there is any driver already loaded */
7449 val = REG_RD(bp, MISC_REG_UNPREPARED);
7451 /* Check if it is the UNDI driver
7452 * UNDI driver initializes CID offset for normal bell to 0x7
7454 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7455 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7457 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7459 int func = BP_FUNC(bp);
7463 /* clear the UNDI indication */
7464 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7466 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7468 /* try to unload UNDI on port 0 */
7471 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7472 DRV_MSG_SEQ_NUMBER_MASK);
7473 reset_code = bnx2x_fw_command(bp, reset_code);
7475 /* if UNDI is loaded on the other port */
7476 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7478 /* send "DONE" for previous unload */
7479 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7481 /* unload UNDI on port 1 */
7484 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7485 DRV_MSG_SEQ_NUMBER_MASK);
7486 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7488 bnx2x_fw_command(bp, reset_code);
7491 /* now it's safe to release the lock */
7492 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7494 bnx2x_undi_int_disable(bp, func);
7496 /* close input traffic and wait for it */
7497 /* Do not rcv packets to BRB */
7499 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7500 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7501 /* Do not direct rcv packets that are not for MCP to
7504 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7505 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7508 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7509 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7512 /* save NIG port swap info */
7513 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7514 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7517 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7520 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7522 /* take the NIG out of reset and restore swap values */
7524 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7525 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7526 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7527 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7529 /* send unload done to the MCP */
7530 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7532 /* restore our func and fw_seq */
7535 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7536 DRV_MSG_SEQ_NUMBER_MASK);
7539 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
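/*
 * Recovery sequence for an active UNDI driver, as implemented above: send
 * UNLOAD requests to the MCP for every port UNDI may have claimed, mask the
 * AEU attentions and BRB input, pulse the affected blocks (including the
 * NIG) through reset while preserving the port-swap straps, then report
 * UNLOAD_DONE and restore this function's own fw_seq.
 */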
7543 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7545 u32 val, val2, val3, val4, id;
7548 /* Get the chip revision id and number. */
7549 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7550 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7551 id = ((val & 0xffff) << 16);
7552 val = REG_RD(bp, MISC_REG_CHIP_REV);
7553 id |= ((val & 0xf) << 12);
7554 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7555 id |= ((val & 0xff) << 4);
7556 val = REG_RD(bp, MISC_REG_BOND_ID);
7558 bp->common.chip_id = id;
7559 bp->link_params.chip_id = bp->common.chip_id;
7560 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7562 val = (REG_RD(bp, 0x2874) & 0x55);
7563 if ((bp->common.chip_id & 0x1) ||
7564 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7565 bp->flags |= ONE_PORT_FLAG;
7566 BNX2X_DEV_INFO("single port device\n");
7569 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7570 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7571 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7572 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7573 bp->common.flash_size, bp->common.flash_size);
7575 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7576 bp->link_params.shmem_base = bp->common.shmem_base;
7577 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7579 if (!bp->common.shmem_base ||
7580 (bp->common.shmem_base < 0xA0000) ||
7581 (bp->common.shmem_base >= 0xC0000)) {
7582 BNX2X_DEV_INFO("MCP not active\n");
7583 bp->flags |= NO_MCP_FLAG;
7587 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7588 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7589 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7590 BNX2X_ERR("BAD MCP validity signature\n");
7592 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7593 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7595 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7596 SHARED_HW_CFG_LED_MODE_MASK) >>
7597 SHARED_HW_CFG_LED_MODE_SHIFT);
7599 bp->link_params.feature_config_flags = 0;
7600 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7601 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7602 bp->link_params.feature_config_flags |=
7603 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7605 bp->link_params.feature_config_flags &=
7606 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7608 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7609 bp->common.bc_ver = val;
7610 BNX2X_DEV_INFO("bc_ver %X\n", val);
7611 if (val < BNX2X_BC_VER) {
7612 /* for now only warn
7613 * later we might need to enforce this */
7614 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7615 " please upgrade BC\n", BNX2X_BC_VER, val);
7618 if (BP_E1HVN(bp) == 0) {
7619 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7620 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7622 /* no WOL capability for E1HVN != 0 */
7623 bp->flags |= NO_WOL_FLAG;
7625 BNX2X_DEV_INFO("%sWoL capable\n",
7626 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7628 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7629 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7630 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7631 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7633 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7634 val, val2, val3, val4);
7637 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7640 int port = BP_PORT(bp);
7643 switch (switch_cfg) {
7645 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7648 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7649 switch (ext_phy_type) {
7650 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7651 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7654 bp->port.supported |= (SUPPORTED_10baseT_Half |
7655 SUPPORTED_10baseT_Full |
7656 SUPPORTED_100baseT_Half |
7657 SUPPORTED_100baseT_Full |
7658 SUPPORTED_1000baseT_Full |
7659 SUPPORTED_2500baseX_Full |
7664 SUPPORTED_Asym_Pause);
7667 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7668 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7671 bp->port.supported |= (SUPPORTED_10baseT_Half |
7672 SUPPORTED_10baseT_Full |
7673 SUPPORTED_100baseT_Half |
7674 SUPPORTED_100baseT_Full |
7675 SUPPORTED_1000baseT_Full |
7680 SUPPORTED_Asym_Pause);
7684 BNX2X_ERR("NVRAM config error. "
7685 "BAD SerDes ext_phy_config 0x%x\n",
7686 bp->link_params.ext_phy_config);
7690 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7692 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7695 case SWITCH_CFG_10G:
7696 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7699 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7700 switch (ext_phy_type) {
7701 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7702 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7705 bp->port.supported |= (SUPPORTED_10baseT_Half |
7706 SUPPORTED_10baseT_Full |
7707 SUPPORTED_100baseT_Half |
7708 SUPPORTED_100baseT_Full |
7709 SUPPORTED_1000baseT_Full |
7710 SUPPORTED_2500baseX_Full |
7711 SUPPORTED_10000baseT_Full |
7716 SUPPORTED_Asym_Pause);
7719 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7720 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7723 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7724 SUPPORTED_1000baseT_Full |
7728 SUPPORTED_Asym_Pause);
7731 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7732 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7735 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7736 SUPPORTED_2500baseX_Full |
7737 SUPPORTED_1000baseT_Full |
7741 SUPPORTED_Asym_Pause);
7744 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7745 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7748 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7751 SUPPORTED_Asym_Pause);
7754 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7755 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7758 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7759 SUPPORTED_1000baseT_Full |
7762 SUPPORTED_Asym_Pause);
7765 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7766 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7769 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7770 SUPPORTED_1000baseT_Full |
7774 SUPPORTED_Asym_Pause);
7777 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7778 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7781 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7785 SUPPORTED_Asym_Pause);
7788 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7789 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7792 bp->port.supported |= (SUPPORTED_10baseT_Half |
7793 SUPPORTED_10baseT_Full |
7794 SUPPORTED_100baseT_Half |
7795 SUPPORTED_100baseT_Full |
7796 SUPPORTED_1000baseT_Full |
7797 SUPPORTED_10000baseT_Full |
7801 SUPPORTED_Asym_Pause);
7804 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7805 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7806 bp->link_params.ext_phy_config);
7810 BNX2X_ERR("NVRAM config error. "
7811 "BAD XGXS ext_phy_config 0x%x\n",
7812 bp->link_params.ext_phy_config);
7816 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7818 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7823 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7824 bp->port.link_config);
7827 bp->link_params.phy_addr = bp->port.phy_addr;
7829 /* mask what we support according to speed_cap_mask */
7830 if (!(bp->link_params.speed_cap_mask &
7831 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7832 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7834 if (!(bp->link_params.speed_cap_mask &
7835 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7836 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7838 if (!(bp->link_params.speed_cap_mask &
7839 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7840 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7842 if (!(bp->link_params.speed_cap_mask &
7843 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7844 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7846 if (!(bp->link_params.speed_cap_mask &
7847 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7848 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7849 SUPPORTED_1000baseT_Full);
7851 if (!(bp->link_params.speed_cap_mask &
7852 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7853 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7855 if (!(bp->link_params.speed_cap_mask &
7856 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7857 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7859 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7862 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7864 bp->link_params.req_duplex = DUPLEX_FULL;
7866 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7867 case PORT_FEATURE_LINK_SPEED_AUTO:
7868 if (bp->port.supported & SUPPORTED_Autoneg) {
7869 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7870 bp->port.advertising = bp->port.supported;
7873 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7875 if ((ext_phy_type ==
7876 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7878 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7879 /* force 10G, no AN */
7880 bp->link_params.req_line_speed = SPEED_10000;
7881 bp->port.advertising =
7882 (ADVERTISED_10000baseT_Full |
7886 BNX2X_ERR("NVRAM config error. "
7887 "Invalid link_config 0x%x"
7888 " Autoneg not supported\n",
7889 bp->port.link_config);
7894 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7895 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7896 bp->link_params.req_line_speed = SPEED_10;
7897 bp->port.advertising = (ADVERTISED_10baseT_Full |
7900 BNX2X_ERR("NVRAM config error. "
7901 "Invalid link_config 0x%x"
7902 " speed_cap_mask 0x%x\n",
7903 bp->port.link_config,
7904 bp->link_params.speed_cap_mask);
7909 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7910 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7911 bp->link_params.req_line_speed = SPEED_10;
7912 bp->link_params.req_duplex = DUPLEX_HALF;
7913 bp->port.advertising = (ADVERTISED_10baseT_Half |
7916 BNX2X_ERR("NVRAM config error. "
7917 "Invalid link_config 0x%x"
7918 " speed_cap_mask 0x%x\n",
7919 bp->port.link_config,
7920 bp->link_params.speed_cap_mask);
7925 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7926 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7927 bp->link_params.req_line_speed = SPEED_100;
7928 bp->port.advertising = (ADVERTISED_100baseT_Full |
7931 BNX2X_ERR("NVRAM config error. "
7932 "Invalid link_config 0x%x"
7933 " speed_cap_mask 0x%x\n",
7934 bp->port.link_config,
7935 bp->link_params.speed_cap_mask);
7940 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7941 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7942 bp->link_params.req_line_speed = SPEED_100;
7943 bp->link_params.req_duplex = DUPLEX_HALF;
7944 bp->port.advertising = (ADVERTISED_100baseT_Half |
7947 BNX2X_ERR("NVRAM config error. "
7948 "Invalid link_config 0x%x"
7949 " speed_cap_mask 0x%x\n",
7950 bp->port.link_config,
7951 bp->link_params.speed_cap_mask);
7956 case PORT_FEATURE_LINK_SPEED_1G:
7957 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7958 bp->link_params.req_line_speed = SPEED_1000;
7959 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7962 BNX2X_ERR("NVRAM config error. "
7963 "Invalid link_config 0x%x"
7964 " speed_cap_mask 0x%x\n",
7965 bp->port.link_config,
7966 bp->link_params.speed_cap_mask);
7971 case PORT_FEATURE_LINK_SPEED_2_5G:
7972 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7973 bp->link_params.req_line_speed = SPEED_2500;
7974 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7977 BNX2X_ERR("NVRAM config error. "
7978 "Invalid link_config 0x%x"
7979 " speed_cap_mask 0x%x\n",
7980 bp->port.link_config,
7981 bp->link_params.speed_cap_mask);
7986 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7987 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7988 case PORT_FEATURE_LINK_SPEED_10G_KR:
7989 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7990 bp->link_params.req_line_speed = SPEED_10000;
7991 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7994 BNX2X_ERR("NVRAM config error. "
7995 "Invalid link_config 0x%x"
7996 " speed_cap_mask 0x%x\n",
7997 bp->port.link_config,
7998 bp->link_params.speed_cap_mask);
8004 BNX2X_ERR("NVRAM config error. "
8005 "BAD link speed link_config 0x%x\n",
8006 bp->port.link_config);
8007 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8008 bp->port.advertising = bp->port.supported;
8012 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8013 PORT_FEATURE_FLOW_CONTROL_MASK);
8014 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8015 !(bp->port.supported & SUPPORTED_Autoneg))
8016 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8018 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8019 " advertising 0x%x\n",
8020 bp->link_params.req_line_speed,
8021 bp->link_params.req_duplex,
8022 bp->link_params.req_flow_ctrl, bp->port.advertising);
8025 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8027 int port = BP_PORT(bp);
8032 bp->link_params.bp = bp;
8033 bp->link_params.port = port;
8035 bp->link_params.lane_config =
8036 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8037 bp->link_params.ext_phy_config =
8039 dev_info.port_hw_config[port].external_phy_config);
8040 bp->link_params.speed_cap_mask =
8042 dev_info.port_hw_config[port].speed_capability_mask);
8044 bp->port.link_config =
8045 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8047 /* Get the 4 lanes xgxs config rx and tx */
8048 for (i = 0; i < 2; i++) {
8050 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8051 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8052 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8055 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8056 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8057 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8060 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8061 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8062 bp->link_params.feature_config_flags |=
8063 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8065 bp->link_params.feature_config_flags &=
8066 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8068 /* If the device is capable of WoL, set the default state according
8071 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8072 (config & PORT_FEATURE_WOL_ENABLED));
8074 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8075 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8076 bp->link_params.lane_config,
8077 bp->link_params.ext_phy_config,
8078 bp->link_params.speed_cap_mask, bp->port.link_config);
8080 bp->link_params.switch_cfg = (bp->port.link_config &
8081 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8082 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8084 bnx2x_link_settings_requested(bp);
8086 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8087 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8088 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8089 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8090 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8091 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8092 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8093 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8094 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8095 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8098 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8100 int func = BP_FUNC(bp);
8104 bnx2x_get_common_hwinfo(bp);
8108 if (CHIP_IS_E1H(bp)) {
8110 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8112 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8113 FUNC_MF_CFG_E1HOV_TAG_MASK);
8114 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8118 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8120 func, bp->e1hov, bp->e1hov);
8122 BNX2X_DEV_INFO("single function mode\n");
8124 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8125 " aborting\n", func);
8131 if (!BP_NOMCP(bp)) {
8132 bnx2x_get_port_hwinfo(bp);
8134 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8135 DRV_MSG_SEQ_NUMBER_MASK);
8136 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8140 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8141 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8142 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8143 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8144 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8145 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8146 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8147 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8148 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8149 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8150 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8152 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8160 /* only supposed to happen on emulation/FPGA */
8161 BNX2X_ERR("warning random MAC workaround active\n");
8162 random_ether_addr(bp->dev->dev_addr);
8163 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8169 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8171 int func = BP_FUNC(bp);
8175 /* Disable interrupt handling until HW is initialized */
8176 atomic_set(&bp->intr_sem, 1);
8178 mutex_init(&bp->port.phy_mutex);
8180 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8181 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8183 rc = bnx2x_get_hwinfo(bp);
8185 /* need to reset chip if undi was active */
8187 bnx2x_undi_unload(bp);
8189 if (CHIP_REV_IS_FPGA(bp))
8190 printk(KERN_ERR PFX "FPGA detected\n");
8192 if (BP_NOMCP(bp) && (func == 0))
8194 "MCP disabled, must load devices in order!\n");
8196 /* Set multi queue mode */
8197 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8198 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8200 "Multi disabled since int_mode requested is not MSI-X\n");
8201 multi_mode = ETH_RSS_MODE_DISABLED;
8203 bp->multi_mode = multi_mode;
8208 bp->flags &= ~TPA_ENABLE_FLAG;
8209 bp->dev->features &= ~NETIF_F_LRO;
8211 bp->flags |= TPA_ENABLE_FLAG;
8212 bp->dev->features |= NETIF_F_LRO;
8217 bp->tx_ring_size = MAX_TX_AVAIL;
8218 bp->rx_ring_size = MAX_RX_AVAIL;
8225 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8226 bp->current_interval = (poll ? poll : timer_interval);
8228 init_timer(&bp->timer);
8229 bp->timer.expires = jiffies + bp->current_interval;
8230 bp->timer.data = (unsigned long) bp;
8231 bp->timer.function = bnx2x_timer;
8237 * ethtool service functions
8240 /* All ethtool functions called with rtnl_lock */
8242 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8244 struct bnx2x *bp = netdev_priv(dev);
8246 cmd->supported = bp->port.supported;
8247 cmd->advertising = bp->port.advertising;
8249 if (netif_carrier_ok(dev)) {
8250 cmd->speed = bp->link_vars.line_speed;
8251 cmd->duplex = bp->link_vars.duplex;
8253 cmd->speed = bp->link_params.req_line_speed;
8254 cmd->duplex = bp->link_params.req_duplex;
8259 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8260 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8261 if (vn_max_rate < cmd->speed)
8262 cmd->speed = vn_max_rate;
8265 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8267 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8269 switch (ext_phy_type) {
8270 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8271 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8272 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8273 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8274 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8275 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8276 cmd->port = PORT_FIBRE;
8279 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8280 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8281 cmd->port = PORT_TP;
8284 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8285 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8286 bp->link_params.ext_phy_config);
8290 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8291 bp->link_params.ext_phy_config);
8295 cmd->port = PORT_TP;
8297 cmd->phy_address = bp->port.phy_addr;
8298 cmd->transceiver = XCVR_INTERNAL;
8300 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8301 cmd->autoneg = AUTONEG_ENABLE;
8303 cmd->autoneg = AUTONEG_DISABLE;
8308 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8309 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8310 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8311 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8312 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8313 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8314 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8319 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8321 struct bnx2x *bp = netdev_priv(dev);
8327 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8328 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8329 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8330 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8331 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8332 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8333 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8335 if (cmd->autoneg == AUTONEG_ENABLE) {
8336 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8337 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8341 /* advertise the requested speed and duplex if supported */
8342 cmd->advertising &= bp->port.supported;
8344 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8345 bp->link_params.req_duplex = DUPLEX_FULL;
8346 bp->port.advertising |= (ADVERTISED_Autoneg |
8349 } else { /* forced speed */
8350 /* advertise the requested speed and duplex if supported */
8351 switch (cmd->speed) {
8353 if (cmd->duplex == DUPLEX_FULL) {
8354 if (!(bp->port.supported &
8355 SUPPORTED_10baseT_Full)) {
8357 "10M full not supported\n");
8361 advertising = (ADVERTISED_10baseT_Full |
8364 if (!(bp->port.supported &
8365 SUPPORTED_10baseT_Half)) {
8367 "10M half not supported\n");
8371 advertising = (ADVERTISED_10baseT_Half |
8377 if (cmd->duplex == DUPLEX_FULL) {
8378 if (!(bp->port.supported &
8379 SUPPORTED_100baseT_Full)) {
8381 "100M full not supported\n");
8385 advertising = (ADVERTISED_100baseT_Full |
8388 if (!(bp->port.supported &
8389 SUPPORTED_100baseT_Half)) {
8391 "100M half not supported\n");
8395 advertising = (ADVERTISED_100baseT_Half |
8401 if (cmd->duplex != DUPLEX_FULL) {
8402 DP(NETIF_MSG_LINK, "1G half not supported\n");
8406 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8407 DP(NETIF_MSG_LINK, "1G full not supported\n");
8411 advertising = (ADVERTISED_1000baseT_Full |
8416 if (cmd->duplex != DUPLEX_FULL) {
8418 "2.5G half not supported\n");
8422 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8424 "2.5G full not supported\n");
8428 advertising = (ADVERTISED_2500baseX_Full |
8433 if (cmd->duplex != DUPLEX_FULL) {
8434 DP(NETIF_MSG_LINK, "10G half not supported\n");
8438 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8439 DP(NETIF_MSG_LINK, "10G full not supported\n");
8443 advertising = (ADVERTISED_10000baseT_Full |
8448 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8452 bp->link_params.req_line_speed = cmd->speed;
8453 bp->link_params.req_duplex = cmd->duplex;
8454 bp->port.advertising = advertising;
8457 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8458 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8459 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8460 bp->port.advertising);
8462 if (netif_running(dev)) {
8463 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8470 #define PHY_FW_VER_LEN 10
8472 static void bnx2x_get_drvinfo(struct net_device *dev,
8473 struct ethtool_drvinfo *info)
8475 struct bnx2x *bp = netdev_priv(dev);
8476 u8 phy_fw_ver[PHY_FW_VER_LEN];
8478 strcpy(info->driver, DRV_MODULE_NAME);
8479 strcpy(info->version, DRV_MODULE_VERSION);
8481 phy_fw_ver[0] = '\0';
8483 bnx2x_acquire_phy_lock(bp);
8484 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8485 (bp->state != BNX2X_STATE_CLOSED),
8486 phy_fw_ver, PHY_FW_VER_LEN);
8487 bnx2x_release_phy_lock(bp);
8490 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8491 (bp->common.bc_ver & 0xff0000) >> 16,
8492 (bp->common.bc_ver & 0xff00) >> 8,
8493 (bp->common.bc_ver & 0xff),
8494 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8495 strcpy(info->bus_info, pci_name(bp->pdev));
8496 info->n_stats = BNX2X_NUM_STATS;
8497 info->testinfo_len = BNX2X_NUM_TESTS;
8498 info->eedump_len = bp->common.flash_size;
8499 info->regdump_len = 0;
8502 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8504 struct bnx2x *bp = netdev_priv(dev);
8506 if (bp->flags & NO_WOL_FLAG) {
8510 wol->supported = WAKE_MAGIC;
8512 wol->wolopts = WAKE_MAGIC;
8516 memset(&wol->sopass, 0, sizeof(wol->sopass));
8519 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8521 struct bnx2x *bp = netdev_priv(dev);
8523 if (wol->wolopts & ~WAKE_MAGIC)
8526 if (wol->wolopts & WAKE_MAGIC) {
8527 if (bp->flags & NO_WOL_FLAG)
8537 static u32 bnx2x_get_msglevel(struct net_device *dev)
8539 struct bnx2x *bp = netdev_priv(dev);
8541 return bp->msglevel;
8544 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8546 struct bnx2x *bp = netdev_priv(dev);
8548 if (capable(CAP_NET_ADMIN))
8549 bp->msglevel = level;
8552 static int bnx2x_nway_reset(struct net_device *dev)
8554 struct bnx2x *bp = netdev_priv(dev);
8559 if (netif_running(dev)) {
8560 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8567 static int bnx2x_get_eeprom_len(struct net_device *dev)
8569 struct bnx2x *bp = netdev_priv(dev);
8571 return bp->common.flash_size;
8574 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8576 int port = BP_PORT(bp);
8580 /* adjust timeout for emulation/FPGA */
8581 count = NVRAM_TIMEOUT_COUNT;
8582 if (CHIP_REV_IS_SLOW(bp))
8585 /* request access to nvram interface */
8586 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8587 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8589 for (i = 0; i < count*10; i++) {
8590 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8591 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8597 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8598 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8605 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8607 int port = BP_PORT(bp);
8611 /* adjust timeout for emulation/FPGA */
8612 count = NVRAM_TIMEOUT_COUNT;
8613 if (CHIP_REV_IS_SLOW(bp))
8616 /* relinquish nvram interface */
8617 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8618 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8620 for (i = 0; i < count*10; i++) {
8621 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8622 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8628 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8629 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8636 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8640 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8642 /* enable both bits, even on read */
8643 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8644 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8645 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8648 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8652 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8654 /* disable both bits, even after read */
8655 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8656 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8657 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8660 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8666 /* build the command word */
8667 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8669 /* need to clear DONE bit separately */
8670 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8672 /* address of the NVRAM to read from */
8673 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8674 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8676 /* issue a read command */
8677 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8679 /* adjust timeout for emulation/FPGA */
8680 count = NVRAM_TIMEOUT_COUNT;
8681 if (CHIP_REV_IS_SLOW(bp))
8684 /* wait for completion */
8687 for (i = 0; i < count; i++) {
8689 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8691 if (val & MCPR_NVM_COMMAND_DONE) {
8692 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8693 /* we read nvram data in cpu order,
8694 * but ethtool sees it as an array of bytes;
8695 * converting to big-endian does the job */
8696 *ret_val = cpu_to_be32(val);
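/* Read a dword-aligned buffer from NVRAM while holding the NVRAM lock,
 * wrapping the sequence of dword reads with the FIRST/LAST command flags.
 */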
8705 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8712 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8714 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8719 if (offset + buf_size > bp->common.flash_size) {
8720 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8721 " buf_size (0x%x) > flash_size (0x%x)\n",
8722 offset, buf_size, bp->common.flash_size);
8726 /* request access to nvram interface */
8727 rc = bnx2x_acquire_nvram_lock(bp);
8731 /* enable access to nvram interface */
8732 bnx2x_enable_nvram_access(bp);
8734 /* read the first word(s) */
8735 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8736 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8737 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8738 memcpy(ret_buf, &val, 4);
8740 /* advance to the next dword */
8741 offset += sizeof(u32);
8742 ret_buf += sizeof(u32);
8743 buf_size -= sizeof(u32);
8748 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8749 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8750 memcpy(ret_buf, &val, 4);
8753 /* disable access to nvram interface */
8754 bnx2x_disable_nvram_access(bp);
8755 bnx2x_release_nvram_lock(bp);
8760 static int bnx2x_get_eeprom(struct net_device *dev,
8761 struct ethtool_eeprom *eeprom, u8 *eebuf)
8763 struct bnx2x *bp = netdev_priv(dev);
8766 if (!netif_running(dev))
8769 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8770 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8771 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8772 eeprom->len, eeprom->len);
8774 /* parameters already validated in ethtool_get_eeprom */
8776 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8781 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8786 /* build the command word */
8787 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8789 /* need to clear DONE bit separately */
8790 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8792 /* write the data */
8793 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8795 /* address of the NVRAM to write to */
8796 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8797 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8799 /* issue the write command */
8800 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8802 /* adjust timeout for emulation/FPGA */
8803 count = NVRAM_TIMEOUT_COUNT;
8804 if (CHIP_REV_IS_SLOW(bp))
8807 /* wait for completion */
8809 for (i = 0; i < count; i++) {
8811 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8812 if (val & MCPR_NVM_COMMAND_DONE) {
8821 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
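/* Write a single byte to NVRAM via a read-modify-write of the containing
 * dword (used for the one-byte writes ethtool issues).
 */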
8823 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8831 if (offset + buf_size > bp->common.flash_size) {
8832 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8833 " buf_size (0x%x) > flash_size (0x%x)\n",
8834 offset, buf_size, bp->common.flash_size);
8838 /* request access to nvram interface */
8839 rc = bnx2x_acquire_nvram_lock(bp);
8843 /* enable access to nvram interface */
8844 bnx2x_enable_nvram_access(bp);
8846 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8847 align_offset = (offset & ~0x03);
8848 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8851 val &= ~(0xff << BYTE_OFFSET(offset));
8852 val |= (*data_buf << BYTE_OFFSET(offset));
8854 /* nvram data is returned as an array of bytes;
8855 * convert it back to cpu order */
8856 val = be32_to_cpu(val);
8858 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8862 /* disable access to nvram interface */
8863 bnx2x_disable_nvram_access(bp);
8864 bnx2x_release_nvram_lock(bp);
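/* Write a dword-aligned buffer to NVRAM, re-issuing the FIRST/LAST command
 * flags at NVRAM page boundaries; one-byte requests are handed off to
 * bnx2x_nvram_write1().
 */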
8869 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8877 if (buf_size == 1) /* ethtool */
8878 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8880 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8882 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8887 if (offset + buf_size > bp->common.flash_size) {
8888 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8889 " buf_size (0x%x) > flash_size (0x%x)\n",
8890 offset, buf_size, bp->common.flash_size);
8894 /* request access to nvram interface */
8895 rc = bnx2x_acquire_nvram_lock(bp);
8899 /* enable access to nvram interface */
8900 bnx2x_enable_nvram_access(bp);
8903 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8904 while ((written_so_far < buf_size) && (rc == 0)) {
8905 if (written_so_far == (buf_size - sizeof(u32)))
8906 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8907 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8908 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8909 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8910 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8912 memcpy(&val, data_buf, 4);
8914 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8916 /* advance to the next dword */
8917 offset += sizeof(u32);
8918 data_buf += sizeof(u32);
8919 written_so_far += sizeof(u32);
8923 /* disable access to nvram interface */
8924 bnx2x_disable_nvram_access(bp);
8925 bnx2x_release_nvram_lock(bp);
8930 static int bnx2x_set_eeprom(struct net_device *dev,
8931 struct ethtool_eeprom *eeprom, u8 *eebuf)
8933 struct bnx2x *bp = netdev_priv(dev);
8936 if (!netif_running(dev))
8939 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8940 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8941 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8942 eeprom->len, eeprom->len);
8944 /* parameters already validated in ethtool_set_eeprom */
8946 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8947 if (eeprom->magic == 0x00504859)
8950 bnx2x_acquire_phy_lock(bp);
8951 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8952 bp->link_params.ext_phy_config,
8953 (bp->state != BNX2X_STATE_CLOSED),
8954 eebuf, eeprom->len);
8955 if ((bp->state == BNX2X_STATE_OPEN) ||
8956 (bp->state == BNX2X_STATE_DISABLED)) {
8957 rc |= bnx2x_link_reset(&bp->link_params,
8959 rc |= bnx2x_phy_init(&bp->link_params,
8962 bnx2x_release_phy_lock(bp);
8964 } else /* Only the PMF can access the PHY */
8967 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8972 static int bnx2x_get_coalesce(struct net_device *dev,
8973 struct ethtool_coalesce *coal)
8975 struct bnx2x *bp = netdev_priv(dev);
8977 memset(coal, 0, sizeof(struct ethtool_coalesce));
8979 coal->rx_coalesce_usecs = bp->rx_ticks;
8980 coal->tx_coalesce_usecs = bp->tx_ticks;
8985 static int bnx2x_set_coalesce(struct net_device *dev,
8986 struct ethtool_coalesce *coal)
8988 struct bnx2x *bp = netdev_priv(dev);
8990 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8991 if (bp->rx_ticks > 3000)
8992 bp->rx_ticks = 3000;
8994 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8995 if (bp->tx_ticks > 0x3000)
8996 bp->tx_ticks = 0x3000;
8998 if (netif_running(dev))
8999 bnx2x_update_coalesce(bp);
9004 static void bnx2x_get_ringparam(struct net_device *dev,
9005 struct ethtool_ringparam *ering)
9007 struct bnx2x *bp = netdev_priv(dev);
9009 ering->rx_max_pending = MAX_RX_AVAIL;
9010 ering->rx_mini_max_pending = 0;
9011 ering->rx_jumbo_max_pending = 0;
9013 ering->rx_pending = bp->rx_ring_size;
9014 ering->rx_mini_pending = 0;
9015 ering->rx_jumbo_pending = 0;
9017 ering->tx_max_pending = MAX_TX_AVAIL;
9018 ering->tx_pending = bp->tx_ring_size;
9021 static int bnx2x_set_ringparam(struct net_device *dev,
9022 struct ethtool_ringparam *ering)
9024 struct bnx2x *bp = netdev_priv(dev);
9027 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9028 (ering->tx_pending > MAX_TX_AVAIL) ||
9029 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9032 bp->rx_ring_size = ering->rx_pending;
9033 bp->tx_ring_size = ering->tx_pending;
9035 if (netif_running(dev)) {
9036 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9037 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9043 static void bnx2x_get_pauseparam(struct net_device *dev,
9044 struct ethtool_pauseparam *epause)
9046 struct bnx2x *bp = netdev_priv(dev);
9048 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9049 BNX2X_FLOW_CTRL_AUTO) &&
9050 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9052 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9053 BNX2X_FLOW_CTRL_RX);
9054 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9055 BNX2X_FLOW_CTRL_TX);
9057 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9058 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9059 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9062 static int bnx2x_set_pauseparam(struct net_device *dev,
9063 struct ethtool_pauseparam *epause)
9065 struct bnx2x *bp = netdev_priv(dev);
9070 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9071 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9072 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9074 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9076 if (epause->rx_pause)
9077 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9079 if (epause->tx_pause)
9080 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9082 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9083 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9085 if (epause->autoneg) {
9086 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9087 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9091 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9092 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9096 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9098 if (netif_running(dev)) {
9099 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9106 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9108 struct bnx2x *bp = netdev_priv(dev);
9112 /* TPA requires Rx CSUM offloading */
9113 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9114 if (!(dev->features & NETIF_F_LRO)) {
9115 dev->features |= NETIF_F_LRO;
9116 bp->flags |= TPA_ENABLE_FLAG;
9120 } else if (dev->features & NETIF_F_LRO) {
9121 dev->features &= ~NETIF_F_LRO;
9122 bp->flags &= ~TPA_ENABLE_FLAG;
9126 if (changed && netif_running(dev)) {
9127 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9128 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9134 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9136 struct bnx2x *bp = netdev_priv(dev);
9141 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9143 struct bnx2x *bp = netdev_priv(dev);
9148 /* Disable TPA when Rx CSUM is disabled; otherwise all
9149 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9151 u32 flags = ethtool_op_get_flags(dev);
9153 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9159 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9162 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9163 dev->features |= NETIF_F_TSO6;
9165 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9166 dev->features &= ~NETIF_F_TSO6;
9172 static const struct {
9173 char string[ETH_GSTRING_LEN];
9174 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9175 { "register_test (offline)" },
9176 { "memory_test (offline)" },
9177 { "loopback_test (offline)" },
9178 { "nvram_test (online)" },
9179 { "interrupt_test (online)" },
9180 { "link_test (online)" },
9181 { "idle check (online)" }
9184 static int bnx2x_self_test_count(struct net_device *dev)
9186 return BNX2X_NUM_TESTS;
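/* Self-test: write 0x00000000 and then 0xffffffff to a table of per-port
 * registers, read each back through its mask and restore the original
 * value; any mismatch fails the test.
 */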
9189 static int bnx2x_test_registers(struct bnx2x *bp)
9191 int idx, i, rc = -ENODEV;
9193 int port = BP_PORT(bp);
9194 static const struct {
9199 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9200 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9201 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9202 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9203 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9204 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9205 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9206 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9207 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9208 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9209 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9210 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9211 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9212 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9213 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9214 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9215 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9216 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9217 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9218 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9219 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9220 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9221 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9222 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9223 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9224 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9225 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9226 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9227 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9228 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9229 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9230 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9231 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9232 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9233 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9234 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9235 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9236 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9238 { 0xffffffff, 0, 0x00000000 }
9241 if (!netif_running(bp->dev))
9244 /* Repeat the test twice:
9245 first by writing 0x00000000, then by writing 0xffffffff */
9246 for (idx = 0; idx < 2; idx++) {
9253 wr_val = 0xffffffff;
9257 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9258 u32 offset, mask, save_val, val;
9260 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9261 mask = reg_tbl[i].mask;
9263 save_val = REG_RD(bp, offset);
9265 REG_WR(bp, offset, wr_val);
9266 val = REG_RD(bp, offset);
9268 /* Restore the original register's value */
9269 REG_WR(bp, offset, save_val);
9271 /* verify that the value read back is as expected */
9272 if ((val & mask) != (wr_val & mask))
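/* Self-test: read back every entry of several internal memories and then
 * check the parity status registers against the per-chip masks.
 */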
9283 static int bnx2x_test_memory(struct bnx2x *bp)
9285 int i, j, rc = -ENODEV;
9287 static const struct {
9291 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9292 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9293 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9294 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9295 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9296 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9297 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9301 static const struct {
9307 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9308 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9309 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9310 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9311 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9312 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9314 { NULL, 0xffffffff, 0, 0 }
9317 if (!netif_running(bp->dev))
9320 /* Go through all the memories */
9321 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9322 for (j = 0; j < mem_tbl[i].size; j++)
9323 REG_RD(bp, mem_tbl[i].offset + j*4);
9325 /* Check the parity status */
9326 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9327 val = REG_RD(bp, prty_tbl[i].offset);
9328 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9329 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9331 "%s is 0x%x\n", prty_tbl[i].name, val);
9342 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9347 while (bnx2x_link_test(bp) && cnt--)
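/* Send one self-addressed packet on queue 0 in the requested loopback mode
 * (PHY or MAC) and verify that it returns on the Rx ring with the same
 * length and payload.
 */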
9351 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9353 unsigned int pkt_size, num_pkts, i;
9354 struct sk_buff *skb;
9355 unsigned char *packet;
9356 struct bnx2x_fastpath *fp = &bp->fp[0];
9357 u16 tx_start_idx, tx_idx;
9358 u16 rx_start_idx, rx_idx;
9360 struct sw_tx_bd *tx_buf;
9361 struct eth_tx_bd *tx_bd;
9363 union eth_rx_cqe *cqe;
9365 struct sw_rx_bd *rx_buf;
9369 /* check the loopback mode */
9370 switch (loopback_mode) {
9371 case BNX2X_PHY_LOOPBACK:
9372 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9375 case BNX2X_MAC_LOOPBACK:
9376 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9377 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9383 /* prepare the loopback packet */
9384 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9385 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9386 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9389 goto test_loopback_exit;
9391 packet = skb_put(skb, pkt_size);
9392 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9393 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9394 for (i = ETH_HLEN; i < pkt_size; i++)
9395 packet[i] = (unsigned char) (i & 0xff);
9397 /* send the loopback packet */
9399 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9400 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9402 pkt_prod = fp->tx_pkt_prod++;
9403 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9404 tx_buf->first_bd = fp->tx_bd_prod;
9407 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9408 mapping = pci_map_single(bp->pdev, skb->data,
9409 skb_headlen(skb), PCI_DMA_TODEVICE);
9410 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9411 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9412 tx_bd->nbd = cpu_to_le16(1);
9413 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9414 tx_bd->vlan = cpu_to_le16(pkt_prod);
9415 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9416 ETH_TX_BD_FLAGS_END_BD);
9417 tx_bd->general_data = ((UNICAST_ADDRESS <<
9418 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9422 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9423 mb(); /* FW restriction: must not reorder writing nbd and packets */
9424 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9425 DOORBELL(bp, fp->index, 0);
9431 bp->dev->trans_start = jiffies;
9435 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9436 if (tx_idx != tx_start_idx + num_pkts)
9437 goto test_loopback_exit;
9439 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9440 if (rx_idx != rx_start_idx + num_pkts)
9441 goto test_loopback_exit;
9443 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9444 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9445 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9446 goto test_loopback_rx_exit;
9448 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9449 if (len != pkt_size)
9450 goto test_loopback_rx_exit;
9452 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9454 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9455 for (i = ETH_HLEN; i < pkt_size; i++)
9456 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9457 goto test_loopback_rx_exit;
9461 test_loopback_rx_exit:
9463 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9464 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9465 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9466 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9468 /* Update producers */
9469 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9473 bp->link_params.loopback_mode = LOOPBACK_NONE;
9478 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9482 if (!netif_running(bp->dev))
9483 return BNX2X_LOOPBACK_FAILED;
9485 bnx2x_netif_stop(bp, 1);
9486 bnx2x_acquire_phy_lock(bp);
9488 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9490 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9491 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9494 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9496 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9497 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9500 bnx2x_release_phy_lock(bp);
9501 bnx2x_netif_start(bp);
9506 #define CRC32_RESIDUAL 0xdebb20e3
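/* Self-test: check the NVRAM magic word and verify that the CRC32 of each
 * known NVRAM region leaves the expected residual.
 */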
9508 static int bnx2x_test_nvram(struct bnx2x *bp)
9510 static const struct {
9514 { 0, 0x14 }, /* bootstrap */
9515 { 0x14, 0xec }, /* dir */
9516 { 0x100, 0x350 }, /* manuf_info */
9517 { 0x450, 0xf0 }, /* feature_info */
9518 { 0x640, 0x64 }, /* upgrade_key_info */
9520 { 0x708, 0x70 }, /* manuf_key_info */
9524 __be32 buf[0x350 / 4];
9525 u8 *data = (u8 *)buf;
9529 rc = bnx2x_nvram_read(bp, 0, data, 4);
9531 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9532 goto test_nvram_exit;
9535 magic = be32_to_cpu(buf[0]);
9536 if (magic != 0x669955aa) {
9537 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9539 goto test_nvram_exit;
9542 for (i = 0; nvram_tbl[i].size; i++) {
9544 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9548 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9549 goto test_nvram_exit;
9552 csum = ether_crc_le(nvram_tbl[i].size, data);
9553 if (csum != CRC32_RESIDUAL) {
9555 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9557 goto test_nvram_exit;
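/* Self-test: post an empty SET_MAC ramrod on the slowpath and wait for its
 * completion to prove that slowpath interrupts are delivered.
 */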
9565 static int bnx2x_test_intr(struct bnx2x *bp)
9567 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9570 if (!netif_running(bp->dev))
9573 config->hdr.length = 0;
9575 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9577 config->hdr.offset = BP_FUNC(bp);
9578 config->hdr.client_id = bp->fp->cl_id;
9579 config->hdr.reserved1 = 0;
9581 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9582 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9583 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9585 bp->set_mac_pending++;
9586 for (i = 0; i < 10; i++) {
9587 if (!bp->set_mac_pending)
9589 msleep_interruptible(10);
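/* ethtool self-test entry point: the offline pass reloads the NIC in
 * diagnostic mode to run the register, memory and loopback tests and then
 * restores normal operation; the NVRAM, interrupt and link tests run in
 * every pass.
 */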
9598 static void bnx2x_self_test(struct net_device *dev,
9599 struct ethtool_test *etest, u64 *buf)
9601 struct bnx2x *bp = netdev_priv(dev);
9603 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9605 if (!netif_running(dev))
9608 /* offline tests are not supported in MF mode */
9610 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9612 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9615 link_up = bp->link_vars.link_up;
9616 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9617 bnx2x_nic_load(bp, LOAD_DIAG);
9618 /* wait until link state is restored */
9619 bnx2x_wait_for_link(bp, link_up);
9621 if (bnx2x_test_registers(bp) != 0) {
9623 etest->flags |= ETH_TEST_FL_FAILED;
9625 if (bnx2x_test_memory(bp) != 0) {
9627 etest->flags |= ETH_TEST_FL_FAILED;
9629 buf[2] = bnx2x_test_loopback(bp, link_up);
9631 etest->flags |= ETH_TEST_FL_FAILED;
9633 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9634 bnx2x_nic_load(bp, LOAD_NORMAL);
9635 /* wait until link state is restored */
9636 bnx2x_wait_for_link(bp, link_up);
9638 if (bnx2x_test_nvram(bp) != 0) {
9640 etest->flags |= ETH_TEST_FL_FAILED;
9642 if (bnx2x_test_intr(bp) != 0) {
9644 etest->flags |= ETH_TEST_FL_FAILED;
9647 if (bnx2x_link_test(bp) != 0) {
9649 etest->flags |= ETH_TEST_FL_FAILED;
9652 #ifdef BNX2X_EXTRA_DEBUG
9653 bnx2x_panic_dump(bp);
9657 static const struct {
9660 u8 string[ETH_GSTRING_LEN];
9661 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9662 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9663 { Q_STATS_OFFSET32(error_bytes_received_hi),
9664 8, "[%d]: rx_error_bytes" },
9665 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9666 8, "[%d]: rx_ucast_packets" },
9667 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9668 8, "[%d]: rx_mcast_packets" },
9669 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9670 8, "[%d]: rx_bcast_packets" },
9671 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9672 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9673 4, "[%d]: rx_phy_ip_err_discards"},
9674 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9675 4, "[%d]: rx_skb_alloc_discard" },
9676 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9678 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9679 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9680 8, "[%d]: tx_packets" }
9683 static const struct {
9687 #define STATS_FLAGS_PORT 1
9688 #define STATS_FLAGS_FUNC 2
9689 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9690 u8 string[ETH_GSTRING_LEN];
9691 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9692 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9693 8, STATS_FLAGS_BOTH, "rx_bytes" },
9694 { STATS_OFFSET32(error_bytes_received_hi),
9695 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9696 { STATS_OFFSET32(total_unicast_packets_received_hi),
9697 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9698 { STATS_OFFSET32(total_multicast_packets_received_hi),
9699 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9700 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9701 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9702 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9703 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9704 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9705 8, STATS_FLAGS_PORT, "rx_align_errors" },
9706 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9707 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9708 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9709 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9710 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9711 8, STATS_FLAGS_PORT, "rx_fragments" },
9712 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9713 8, STATS_FLAGS_PORT, "rx_jabbers" },
9714 { STATS_OFFSET32(no_buff_discard_hi),
9715 8, STATS_FLAGS_BOTH, "rx_discards" },
9716 { STATS_OFFSET32(mac_filter_discard),
9717 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9718 { STATS_OFFSET32(xxoverflow_discard),
9719 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9720 { STATS_OFFSET32(brb_drop_hi),
9721 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9722 { STATS_OFFSET32(brb_truncate_hi),
9723 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9724 { STATS_OFFSET32(pause_frames_received_hi),
9725 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9726 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9727 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9728 { STATS_OFFSET32(nig_timer_max),
9729 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9730 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9731 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9732 { STATS_OFFSET32(rx_skb_alloc_failed),
9733 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9734 { STATS_OFFSET32(hw_csum_err),
9735 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9737 { STATS_OFFSET32(total_bytes_transmitted_hi),
9738 8, STATS_FLAGS_BOTH, "tx_bytes" },
9739 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9740 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9741 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9742 8, STATS_FLAGS_BOTH, "tx_packets" },
9743 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9744 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9745 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9746 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9747 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9748 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9749 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9750 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9751 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9752 8, STATS_FLAGS_PORT, "tx_deferred" },
9753 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9754 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9755 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9756 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9757 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9758 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9759 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9760 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9761 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9762 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9763 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9764 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9765 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9766 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9767 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9768 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9769 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9770 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9771 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9772 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9773 { STATS_OFFSET32(pause_frames_sent_hi),
9774 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9777 #define IS_PORT_STAT(i) \
9778 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9779 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9780 #define IS_E1HMF_MODE_STAT(bp) \
9781 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
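/* Return the ethtool string tables: per-queue and global statistics names
 * for ETH_SS_STATS, the self-test names for ETH_SS_TEST.
 */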
9783 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9785 struct bnx2x *bp = netdev_priv(dev);
9788 switch (stringset) {
9792 for_each_queue(bp, i) {
9793 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9794 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9795 bnx2x_q_stats_arr[j].string, i);
9796 k += BNX2X_NUM_Q_STATS;
9798 if (IS_E1HMF_MODE_STAT(bp))
9800 for (j = 0; j < BNX2X_NUM_STATS; j++)
9801 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9802 bnx2x_stats_arr[j].string);
9804 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9805 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9807 strcpy(buf + j*ETH_GSTRING_LEN,
9808 bnx2x_stats_arr[i].string);
9815 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9820 static int bnx2x_get_stats_count(struct net_device *dev)
9822 struct bnx2x *bp = netdev_priv(dev);
9826 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9827 if (!IS_E1HMF_MODE_STAT(bp))
9828 num_stats += BNX2X_NUM_STATS;
9830 if (IS_E1HMF_MODE_STAT(bp)) {
9832 for (i = 0; i < BNX2X_NUM_STATS; i++)
9833 if (IS_FUNC_STAT(i))
9836 num_stats = BNX2X_NUM_STATS;
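/* Fill the ethtool statistics buffer from the per-queue and global
 * statistics blocks, widening 4-byte counters and combining hi/lo 32-bit
 * pairs into 64-bit values; port statistics are skipped in E1H
 * multi-function mode.
 */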
9842 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9843 struct ethtool_stats *stats, u64 *buf)
9845 struct bnx2x *bp = netdev_priv(dev);
9846 u32 *hw_stats, *offset;
9851 for_each_queue(bp, i) {
9852 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9853 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9854 if (bnx2x_q_stats_arr[j].size == 0) {
9855 /* skip this counter */
9859 offset = (hw_stats +
9860 bnx2x_q_stats_arr[j].offset);
9861 if (bnx2x_q_stats_arr[j].size == 4) {
9862 /* 4-byte counter */
9863 buf[k + j] = (u64) *offset;
9866 /* 8-byte counter */
9867 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9869 k += BNX2X_NUM_Q_STATS;
9871 if (IS_E1HMF_MODE_STAT(bp))
9873 hw_stats = (u32 *)&bp->eth_stats;
9874 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9875 if (bnx2x_stats_arr[j].size == 0) {
9876 /* skip this counter */
9880 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9881 if (bnx2x_stats_arr[j].size == 4) {
9882 /* 4-byte counter */
9883 buf[k + j] = (u64) *offset;
9886 /* 8-byte counter */
9887 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9890 hw_stats = (u32 *)&bp->eth_stats;
9891 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9892 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9894 if (bnx2x_stats_arr[i].size == 0) {
9895 /* skip this counter */
9900 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9901 if (bnx2x_stats_arr[i].size == 4) {
9902 /* 4-byte counter */
9903 buf[j] = (u64) *offset;
9907 /* 8-byte counter */
9908 buf[j] = HILO_U64(*offset, *(offset + 1));
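/* ethtool PHYS_ID: blink the port LED once per second for the requested
 * number of seconds, then restore it to match the current link state.
 */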
9914 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9916 struct bnx2x *bp = netdev_priv(dev);
9917 int port = BP_PORT(bp);
9920 if (!netif_running(dev))
9929 for (i = 0; i < (data * 2); i++) {
9931 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9932 bp->link_params.hw_led_mode,
9933 bp->link_params.chip_id);
9935 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9936 bp->link_params.hw_led_mode,
9937 bp->link_params.chip_id);
9939 msleep_interruptible(500);
9940 if (signal_pending(current))
9944 if (bp->link_vars.link_up)
9945 bnx2x_set_led(bp, port, LED_MODE_OPER,
9946 bp->link_vars.line_speed,
9947 bp->link_params.hw_led_mode,
9948 bp->link_params.chip_id);
9953 static struct ethtool_ops bnx2x_ethtool_ops = {
9954 .get_settings = bnx2x_get_settings,
9955 .set_settings = bnx2x_set_settings,
9956 .get_drvinfo = bnx2x_get_drvinfo,
9957 .get_wol = bnx2x_get_wol,
9958 .set_wol = bnx2x_set_wol,
9959 .get_msglevel = bnx2x_get_msglevel,
9960 .set_msglevel = bnx2x_set_msglevel,
9961 .nway_reset = bnx2x_nway_reset,
9962 .get_link = ethtool_op_get_link,
9963 .get_eeprom_len = bnx2x_get_eeprom_len,
9964 .get_eeprom = bnx2x_get_eeprom,
9965 .set_eeprom = bnx2x_set_eeprom,
9966 .get_coalesce = bnx2x_get_coalesce,
9967 .set_coalesce = bnx2x_set_coalesce,
9968 .get_ringparam = bnx2x_get_ringparam,
9969 .set_ringparam = bnx2x_set_ringparam,
9970 .get_pauseparam = bnx2x_get_pauseparam,
9971 .set_pauseparam = bnx2x_set_pauseparam,
9972 .get_rx_csum = bnx2x_get_rx_csum,
9973 .set_rx_csum = bnx2x_set_rx_csum,
9974 .get_tx_csum = ethtool_op_get_tx_csum,
9975 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9976 .set_flags = bnx2x_set_flags,
9977 .get_flags = ethtool_op_get_flags,
9978 .get_sg = ethtool_op_get_sg,
9979 .set_sg = ethtool_op_set_sg,
9980 .get_tso = ethtool_op_get_tso,
9981 .set_tso = bnx2x_set_tso,
9982 .self_test_count = bnx2x_self_test_count,
9983 .self_test = bnx2x_self_test,
9984 .get_strings = bnx2x_get_strings,
9985 .phys_id = bnx2x_phys_id,
9986 .get_stats_count = bnx2x_get_stats_count,
9987 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9990 /* end of ethtool_ops */
9992 /****************************************************************************
9993 * General service functions
9994 ****************************************************************************/
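/* Move the device between D0 and D3hot through the PCI PM control register,
 * enabling PME when wake-on-LAN is configured and observing the mandatory
 * delay when leaving D3hot.
 */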
9996 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10000 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10004 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10005 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10006 PCI_PM_CTRL_PME_STATUS));
10008 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10009 /* delay required during transition out of D3hot */
10014 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10018 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10020 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10023 /* No more memory access after this point until the
10024 * device is brought back to D0.
10034 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10038 /* Tell compiler that status block fields can change */
10040 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10041 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10043 return (fp->rx_comp_cons != rx_cons_sb);
10047 * net_device service functions
10050 static int bnx2x_poll(struct napi_struct *napi, int budget)
10052 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10054 struct bnx2x *bp = fp->bp;
10057 #ifdef BNX2X_STOP_ON_ERROR
10058 if (unlikely(bp->panic))
10062 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10063 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10064 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10066 bnx2x_update_fpsb_idx(fp);
10068 if (bnx2x_has_tx_work(fp))
10069 bnx2x_tx_int(fp, budget);
10071 if (bnx2x_has_rx_work(fp))
10072 work_done = bnx2x_rx_int(fp, budget);
10074 rmb(); /* BNX2X_HAS_WORK() reads the status block */
10076 /* must not complete if we consumed full budget */
10077 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
10079 #ifdef BNX2X_STOP_ON_ERROR
10082 napi_complete(napi);
10084 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10085 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10086 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10087 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10094 /* we split the first BD into headers and data BDs
10095 * to ease the pain of our fellow microcode engineers;
10096 * we use one mapping for both BDs.
10097 * So far this has only been observed to happen
10098 * in Other Operating Systems(TM)
10100 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10101 struct bnx2x_fastpath *fp,
10102 struct eth_tx_bd **tx_bd, u16 hlen,
10103 u16 bd_prod, int nbd)
10105 struct eth_tx_bd *h_tx_bd = *tx_bd;
10106 struct eth_tx_bd *d_tx_bd;
10107 dma_addr_t mapping;
10108 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10110 /* first fix first BD */
10111 h_tx_bd->nbd = cpu_to_le16(nbd);
10112 h_tx_bd->nbytes = cpu_to_le16(hlen);
10114 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10115 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10116 h_tx_bd->addr_lo, h_tx_bd->nbd);
10118 /* now get a new data BD
10119 * (after the pbd) and fill it */
10120 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10121 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10123 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10124 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10126 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10127 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10128 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10130 /* this marks the BD as one that has no individual mapping
10131 * the FW ignores this flag in a BD not marked start
10133 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10134 DP(NETIF_MSG_TX_QUEUED,
10135 "TSO split data size is %d (%x:%x)\n",
10136 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10138 /* update tx_bd for marking the last BD flag */
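/* Adjust a partial checksum computed by the stack when the hardware
 * checksum start differs from the transport header by 'fix' bytes, and
 * return it byte-swapped for the parsing BD.
 */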
10144 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10147 csum = (u16) ~csum_fold(csum_sub(csum,
10148 csum_partial(t_header - fix, fix, 0)));
10151 csum = (u16) ~csum_fold(csum_add(csum,
10152 csum_partial(t_header, -fix, 0)));
10154 return swab16(csum);
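/* Classify an outgoing skb into XMIT_* flags: which checksum offload it
 * needs (IPv4/IPv6, TCP) and whether it is a GSO (TSO v4/v6) packet.
 */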
10157 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10161 if (skb->ip_summed != CHECKSUM_PARTIAL)
10165 if (skb->protocol == htons(ETH_P_IPV6)) {
10167 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10168 rc |= XMIT_CSUM_TCP;
10172 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10173 rc |= XMIT_CSUM_TCP;
10177 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10180 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10186 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10187 /* check if the packet requires linearization (i.e. it is too fragmented);
10188 no need to check fragmentation if the page size > 8K (there can be no
10189 violation of the FW restrictions) */
10190 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10195 int first_bd_sz = 0;
10197 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10198 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10200 if (xmit_type & XMIT_GSO) {
10201 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10202 /* Check if LSO packet needs to be copied:
10203 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10204 int wnd_size = MAX_FETCH_BD - 3;
10205 /* Number of windows to check */
10206 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10211 /* Headers length */
10212 hlen = (int)(skb_transport_header(skb) - skb->data) +
10215 /* Amount of data (w/o headers) on linear part of SKB*/
10216 first_bd_sz = skb_headlen(skb) - hlen;
10218 wnd_sum = first_bd_sz;
10220 /* Calculate the first sum - it's special */
10221 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10223 skb_shinfo(skb)->frags[frag_idx].size;
10225 /* If there was data in the linear part of the skb - check it */
10226 if (first_bd_sz > 0) {
10227 if (unlikely(wnd_sum < lso_mss)) {
10232 wnd_sum -= first_bd_sz;
10235 /* Others are easier: run through the frag list and
10236 check all windows */
10237 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10239 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10241 if (unlikely(wnd_sum < lso_mss)) {
10246 skb_shinfo(skb)->frags[wnd_idx].size;
10249 /* in the non-LSO case a too fragmented packet should always be linearized */
10256 if (unlikely(to_copy))
10257 DP(NETIF_MSG_TX_QUEUED,
10258 "Linearization IS REQUIRED for %s packet. "
10259 "num_frags %d hlen %d first_bd_sz %d\n",
10260 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10261 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10267 /* called with netif_tx_lock
10268 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10269 * netif_wake_queue()
10271 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10273 struct bnx2x *bp = netdev_priv(dev);
10274 struct bnx2x_fastpath *fp;
10275 struct netdev_queue *txq;
10276 struct sw_tx_bd *tx_buf;
10277 struct eth_tx_bd *tx_bd;
10278 struct eth_tx_parse_bd *pbd = NULL;
10279 u16 pkt_prod, bd_prod;
10281 dma_addr_t mapping;
10282 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10283 int vlan_off = (bp->e1hov ? 4 : 0);
10287 #ifdef BNX2X_STOP_ON_ERROR
10288 if (unlikely(bp->panic))
10289 return NETDEV_TX_BUSY;
10292 fp_index = skb_get_queue_mapping(skb);
10293 txq = netdev_get_tx_queue(dev, fp_index);
10295 fp = &bp->fp[fp_index];
10297 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10298 fp->eth_q_stats.driver_xoff++,
10299 netif_tx_stop_queue(txq);
10300 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10301 return NETDEV_TX_BUSY;
10304 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10305 " gso type %x xmit_type %x\n",
10306 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10307 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10309 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10310 /* First, check if we need to linearize the skb (due to FW
10311 restrictions). No need to check fragmentation if the page size > 8K
10312 (there can be no violation of the FW restrictions) */
10313 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10314 /* Statistics of linearization */
10316 if (skb_linearize(skb) != 0) {
10317 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10318 "silently dropping this SKB\n");
10319 dev_kfree_skb_any(skb);
10320 return NETDEV_TX_OK;
10326 Please read carefully. First we use one BD which we mark as start,
10327 then for TSO or xsum we have a parsing info BD,
10328 and only then we have the rest of the TSO BDs.
10329 (don't forget to mark the last one as last,
10330 and to unmap only AFTER you write to the BD ...)
10331 And above all, all PBD sizes are in words - NOT DWORDS!
10334 pkt_prod = fp->tx_pkt_prod++;
10335 bd_prod = TX_BD(fp->tx_bd_prod);
10337 /* get a tx_buf and first BD */
10338 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10339 tx_bd = &fp->tx_desc_ring[bd_prod];
10341 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10342 tx_bd->general_data = (UNICAST_ADDRESS <<
10343 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10345 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10347 /* remember the first BD of the packet */
10348 tx_buf->first_bd = fp->tx_bd_prod;
10351 DP(NETIF_MSG_TX_QUEUED,
10352 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10353 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10356 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10357 (bp->flags & HW_VLAN_TX_FLAG)) {
10358 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10359 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10363 tx_bd->vlan = cpu_to_le16(pkt_prod);
10366 /* turn on parsing and get a BD */
10367 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10368 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10370 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10373 if (xmit_type & XMIT_CSUM) {
10374 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10376 /* for now NS flag is not used in Linux */
10378 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10379 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10381 pbd->ip_hlen = (skb_transport_header(skb) -
10382 skb_network_header(skb)) / 2;
10384 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10386 pbd->total_hlen = cpu_to_le16(hlen);
10387 hlen = hlen*2 - vlan_off;
10389 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10391 if (xmit_type & XMIT_CSUM_V4)
10392 tx_bd->bd_flags.as_bitfield |=
10393 ETH_TX_BD_FLAGS_IP_CSUM;
10395 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10397 if (xmit_type & XMIT_CSUM_TCP) {
10398 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10401 s8 fix = SKB_CS_OFF(skb); /* signed! */
10403 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10404 pbd->cs_offset = fix / 2;
10406 DP(NETIF_MSG_TX_QUEUED,
10407 "hlen %d offset %d fix %d csum before fix %x\n",
10408 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10411 /* HW bug: fixup the CSUM */
10412 pbd->tcp_pseudo_csum =
10413 bnx2x_csum_fix(skb_transport_header(skb),
10416 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10417 pbd->tcp_pseudo_csum);
10421 mapping = pci_map_single(bp->pdev, skb->data,
10422 skb_headlen(skb), PCI_DMA_TODEVICE);
10424 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10425 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10426 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10427 tx_bd->nbd = cpu_to_le16(nbd);
10428 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10430 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10431 " nbytes %d flags %x vlan %x\n",
10432 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10433 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10434 le16_to_cpu(tx_bd->vlan));
10436 if (xmit_type & XMIT_GSO) {
10438 DP(NETIF_MSG_TX_QUEUED,
10439 "TSO packet len %d hlen %d total len %d tso size %d\n",
10440 skb->len, hlen, skb_headlen(skb),
10441 skb_shinfo(skb)->gso_size);
10443 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10445 if (unlikely(skb_headlen(skb) > hlen))
10446 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10449 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10450 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10451 pbd->tcp_flags = pbd_tcp_flags(skb);
10453 if (xmit_type & XMIT_GSO_V4) {
10454 pbd->ip_id = swab16(ip_hdr(skb)->id);
10455 pbd->tcp_pseudo_csum =
10456 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10457 ip_hdr(skb)->daddr,
10458 0, IPPROTO_TCP, 0));
10461 pbd->tcp_pseudo_csum =
10462 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10463 &ipv6_hdr(skb)->daddr,
10464 0, IPPROTO_TCP, 0));
10466 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10469 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10470 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10472 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10473 tx_bd = &fp->tx_desc_ring[bd_prod];
10475 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10476 frag->size, PCI_DMA_TODEVICE);
10478 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10479 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10480 tx_bd->nbytes = cpu_to_le16(frag->size);
10481 tx_bd->vlan = cpu_to_le16(pkt_prod);
10482 tx_bd->bd_flags.as_bitfield = 0;
10484 DP(NETIF_MSG_TX_QUEUED,
10485 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10486 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10487 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10490 /* now at last mark the BD as the last BD */
10491 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10493 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10494 tx_bd, tx_bd->bd_flags.as_bitfield);
10496 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10498 /* now send a tx doorbell, counting the next BD
10499 * if the packet contains or ends with it
10501 if (TX_BD_POFF(bd_prod) < nbd)
10505 DP(NETIF_MSG_TX_QUEUED,
10506 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10507 " tcp_flags %x xsum %x seq %u hlen %u\n",
10508 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10509 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10510 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10512 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10515 * Make sure that the BD data is updated before updating the producer
10516 * since FW might read the BD right after the producer is updated.
10517 * This is only applicable for weak-ordered memory model archs such
10518 * as IA-64. The following barrier is also mandatory since the FW
10519 * assumes packets must have BDs.
10523 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10524 mb(); /* FW restriction: must not reorder writing nbd and packets */
10525 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10526 DOORBELL(bp, fp->index, 0);
10530 fp->tx_bd_prod += nbd;
10531 dev->trans_start = jiffies;
10533 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10534 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10535 if we put Tx into XOFF state. */
10537 netif_tx_stop_queue(txq);
10538 fp->eth_q_stats.driver_xoff++;
10539 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10540 netif_tx_wake_queue(txq);
10544 return NETDEV_TX_OK;
10547 /* called with rtnl_lock */
10548 static int bnx2x_open(struct net_device *dev)
10550 struct bnx2x *bp = netdev_priv(dev);
10552 netif_carrier_off(dev);
10554 bnx2x_set_power_state(bp, PCI_D0);
10556 return bnx2x_nic_load(bp, LOAD_OPEN);
10559 /* called with rtnl_lock */
10560 static int bnx2x_close(struct net_device *dev)
10562 struct bnx2x *bp = netdev_priv(dev);
10564 /* Unload the driver, release IRQs */
10565 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10566 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10567 if (!CHIP_REV_IS_SLOW(bp))
10568 bnx2x_set_power_state(bp, PCI_D3hot);
10573 /* called with netif_tx_lock from dev_mcast.c */
10574 static void bnx2x_set_rx_mode(struct net_device *dev)
10576 struct bnx2x *bp = netdev_priv(dev);
10577 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10578 int port = BP_PORT(bp);
10580 if (bp->state != BNX2X_STATE_OPEN) {
10581 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10585 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10587 if (dev->flags & IFF_PROMISC)
10588 rx_mode = BNX2X_RX_MODE_PROMISC;
10590 else if ((dev->flags & IFF_ALLMULTI) ||
10591 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10592 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10594 else { /* some multicasts */
10595 if (CHIP_IS_E1(bp)) {
10596 int i, old, offset;
10597 struct dev_mc_list *mclist;
10598 struct mac_configuration_cmd *config =
10599 bnx2x_sp(bp, mcast_config);
10601 for (i = 0, mclist = dev->mc_list;
10602 mclist && (i < dev->mc_count);
10603 i++, mclist = mclist->next) {
10605 config->config_table[i].
10606 cam_entry.msb_mac_addr =
10607 swab16(*(u16 *)&mclist->dmi_addr[0]);
10608 config->config_table[i].
10609 cam_entry.middle_mac_addr =
10610 swab16(*(u16 *)&mclist->dmi_addr[2]);
10611 config->config_table[i].
10612 cam_entry.lsb_mac_addr =
10613 swab16(*(u16 *)&mclist->dmi_addr[4]);
10614 config->config_table[i].cam_entry.flags =
10616 config->config_table[i].
10617 target_table_entry.flags = 0;
10618 config->config_table[i].
10619 target_table_entry.client_id = 0;
10620 config->config_table[i].
10621 target_table_entry.vlan_id = 0;
10624 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10625 config->config_table[i].
10626 cam_entry.msb_mac_addr,
10627 config->config_table[i].
10628 cam_entry.middle_mac_addr,
10629 config->config_table[i].
10630 cam_entry.lsb_mac_addr);
10632 old = config->hdr.length;
10634 for (; i < old; i++) {
10635 if (CAM_IS_INVALID(config->
10636 config_table[i])) {
10637 /* already invalidated */
10641 CAM_INVALIDATE(config->
10646 if (CHIP_REV_IS_SLOW(bp))
10647 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10649 offset = BNX2X_MAX_MULTICAST*(1 + port);
10651 config->hdr.length = i;
10652 config->hdr.offset = offset;
10653 config->hdr.client_id = bp->fp->cl_id;
10654 config->hdr.reserved1 = 0;
10656 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10657 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10658 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10661 /* Accept one or more multicasts */
10662 struct dev_mc_list *mclist;
10663 u32 mc_filter[MC_HASH_SIZE];
10664 u32 crc, bit, regidx;
10667 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10669 for (i = 0, mclist = dev->mc_list;
10670 mclist && (i < dev->mc_count);
10671 i++, mclist = mclist->next) {
10673 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10676 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10677 bit = (crc >> 24) & 0xff;
10680 mc_filter[regidx] |= (1 << bit);
10683 for (i = 0; i < MC_HASH_SIZE; i++)
10684 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10689 bp->rx_mode = rx_mode;
10690 bnx2x_set_storm_rx_mode(bp);
10693 /* called with rtnl_lock */
10694 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10696 struct sockaddr *addr = p;
10697 struct bnx2x *bp = netdev_priv(dev);
10699 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10702 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10703 if (netif_running(dev)) {
10704 if (CHIP_IS_E1(bp))
10705 bnx2x_set_mac_addr_e1(bp, 1);
10707 bnx2x_set_mac_addr_e1h(bp, 1);
10713 /* called with rtnl_lock */
10714 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10716 struct mii_ioctl_data *data = if_mii(ifr);
10717 struct bnx2x *bp = netdev_priv(dev);
10718 int port = BP_PORT(bp);
10723 data->phy_id = bp->port.phy_addr;
10727 case SIOCGMIIREG: {
10730 if (!netif_running(dev))
10733 mutex_lock(&bp->port.phy_mutex);
10734 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10735 DEFAULT_PHY_DEV_ADDR,
10736 (data->reg_num & 0x1f), &mii_regval);
10737 data->val_out = mii_regval;
10738 mutex_unlock(&bp->port.phy_mutex);
10743 if (!capable(CAP_NET_ADMIN))
10746 if (!netif_running(dev))
10749 mutex_lock(&bp->port.phy_mutex);
10750 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10751 DEFAULT_PHY_DEV_ADDR,
10752 (data->reg_num & 0x1f), data->val_in);
10753 mutex_unlock(&bp->port.phy_mutex);
10761 return -EOPNOTSUPP;
10764 /* called with rtnl_lock */
10765 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10767 struct bnx2x *bp = netdev_priv(dev);
10770 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10771 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10774 /* This does not race with packet allocation
10775 * because the actual alloc size is
10776 * only updated as part of load
10778 dev->mtu = new_mtu;
10780 if (netif_running(dev)) {
10781 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10782 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10788 static void bnx2x_tx_timeout(struct net_device *dev)
10790 struct bnx2x *bp = netdev_priv(dev);
10792 #ifdef BNX2X_STOP_ON_ERROR
10796 /* This allows the netif to be shutdown gracefully before resetting */
10797 schedule_work(&bp->reset_task);
10801 /* called with rtnl_lock */
10802 static void bnx2x_vlan_rx_register(struct net_device *dev,
10803 struct vlan_group *vlgrp)
10805 struct bnx2x *bp = netdev_priv(dev);
10809 /* Set flags according to the required capabilities */
10810 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10812 if (dev->features & NETIF_F_HW_VLAN_TX)
10813 bp->flags |= HW_VLAN_TX_FLAG;
10815 if (dev->features & NETIF_F_HW_VLAN_RX)
10816 bp->flags |= HW_VLAN_RX_FLAG;
10818 if (netif_running(dev))
10819 bnx2x_set_client_config(bp);
10824 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10825 static void poll_bnx2x(struct net_device *dev)
10827 struct bnx2x *bp = netdev_priv(dev);
10829 disable_irq(bp->pdev->irq);
10830 bnx2x_interrupt(bp->pdev->irq, dev);
10831 enable_irq(bp->pdev->irq);
10835 static const struct net_device_ops bnx2x_netdev_ops = {
10836 .ndo_open = bnx2x_open,
10837 .ndo_stop = bnx2x_close,
10838 .ndo_start_xmit = bnx2x_start_xmit,
10839 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10840 .ndo_set_mac_address = bnx2x_change_mac_addr,
10841 .ndo_validate_addr = eth_validate_addr,
10842 .ndo_do_ioctl = bnx2x_ioctl,
10843 .ndo_change_mtu = bnx2x_change_mtu,
10844 .ndo_tx_timeout = bnx2x_tx_timeout,
10846 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10848 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10849 .ndo_poll_controller = poll_bnx2x,
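/* One-time PCI and netdev setup: enable the device, claim its two memory
 * BARs, verify the PM and PCI Express capabilities, set the DMA mask, map
 * the register and doorbell windows and wire up the netdev/ethtool ops.
 */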
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

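/* PCI probe entry point: allocate the multi-queue net_device, perform the
 * low-level PCI setup in bnx2x_init_dev(), initialize the driver state in
 * bnx2x_init_bp() and register the interface with the networking core.
 */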
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

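/* PCI error recovery (EEH) support: bnx2x_eeh_nic_unload() tears down the
 * driver state after a fatal PCI error, and bnx2x_eeh_recover() re-reads
 * the shared memory base and firmware sequence once the slot has been
 * reset, before the NIC is loaded again in bnx2x_io_resume().
 */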
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

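/* The private workqueue is created before registering with the PCI core and
 * destroyed only after unregistering, so no queued driver work can outlive
 * the module.
 */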
static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);