1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
55 #include "bnx2x_init.h"
57 #define DRV_MODULE_VERSION "1.48.102"
58 #define DRV_MODULE_RELDATE "2009/02/12"
59 #define BNX2X_BC_VER 0x040200
61 /* Time in jiffies before concluding the transmitter is hung */
62 #define TX_TIMEOUT (5*HZ)
64 static char version[] __devinitdata =
65 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
66 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68 MODULE_AUTHOR("Eliezer Tamir");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
73 static int multi_mode = 1;
74 module_param(multi_mode, int, 0);
76 static int disable_tpa;
77 module_param(disable_tpa, int, 0);
78 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
81 module_param(int_mode, int, 0);
82 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
85 module_param(poll, int, 0);
86 MODULE_PARM_DESC(poll, " Use polling (for debug)");
89 module_param(mrrs, int, 0);
90 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
93 module_param(debug, int, 0);
94 MODULE_PARM_DESC(debug, " Default debug msglevel");
96 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
98 static struct workqueue_struct *bnx2x_wq;
100 enum bnx2x_board_type {
106 /* indexed by board_type, above */
109 } board_info[] __devinitdata = {
110 { "Broadcom NetXtreme II BCM57710 XGb" },
111 { "Broadcom NetXtreme II BCM57711 XGb" },
112 { "Broadcom NetXtreme II BCM57711E XGb" }
116 static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
126 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
128 /****************************************************************************
129 * General service functions
130 ****************************************************************************/
/* locking is done by mcp */
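/*
 * The two indirect access helpers below program the target GRC address
 * into PCICFG_GRC_ADDRESS in PCI config space and move the data through
 * PCICFG_GRC_DATA; the window is then pointed back at
 * PCICFG_VENDOR_ID_OFFSET, presumably so a stray config cycle does not
 * hit an internal register.  Illustrative use (sketch only):
 *
 *	bnx2x_reg_wr_ind(bp, addr, val);
 *	val = bnx2x_reg_rd_ind(bp, addr);
 */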
135 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
139 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
140 PCICFG_VENDOR_ID_OFFSET);
143 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
148 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
149 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
150 PCICFG_VENDOR_ID_OFFSET);
155 static const u32 dmae_reg_go_c[] = {
156 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
157 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
158 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
159 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
162 /* copy command into DMAE command memory and set DMAE command go */
163 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
169 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
170 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
171 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
173 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
176 REG_WR(bp, dmae_reg_go_c[idx], 1);
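/*
 * bnx2x_write_dmae() below DMAs len32 dwords from the host buffer at
 * dma_addr into GRC address dst_addr.  While the DMAE block is not yet
 * ready it falls back to indirect writes; otherwise the command is posted
 * on the init channel under dmae_mutex and completion is detected by
 * polling the wb_comp word in the slowpath area.  Illustrative use
 * (sketch only, staging the data in the slowpath wb_data buffer):
 *
 *	memcpy(bnx2x_sp(bp, wb_data[0]), src, 2 * sizeof(u32));
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), reg, 2);
 */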
179 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
182 struct dmae_command *dmae = &bp->init_dmae;
183 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
186 if (!bp->dmae_ready) {
187 u32 *data = bnx2x_sp(bp, wb_data[0]);
189 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
190 " using indirect\n", dst_addr, len32);
191 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
195 mutex_lock(&bp->dmae_mutex);
197 memset(dmae, 0, sizeof(struct dmae_command));
199 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
200 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
201 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
207 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
208 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
209 dmae->src_addr_lo = U64_LO(dma_addr);
210 dmae->src_addr_hi = U64_HI(dma_addr);
211 dmae->dst_addr_lo = dst_addr >> 2;
212 dmae->dst_addr_hi = 0;
214 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
215 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
216 dmae->comp_val = DMAE_COMP_VAL;
218 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
219 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
220 "dst_addr [%x:%08x (%08x)]\n"
221 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
222 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
225 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
226 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
231 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
235 while (*wb_comp != DMAE_COMP_VAL) {
236 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
239 BNX2X_ERR("dmae timeout!\n");
243 /* adjust delay for emulation/FPGA */
244 if (CHIP_REV_IS_SLOW(bp))
250 mutex_unlock(&bp->dmae_mutex);
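/*
 * bnx2x_read_dmae() below is the mirror image: it DMAs len32 dwords from
 * GRC address src_addr into the slowpath wb_data[] buffer (falling back
 * to indirect reads while DMAE is not ready), again serialized by
 * dmae_mutex and completed by polling wb_comp.  Callers such as
 * bnx2x_wb_rd() then pick the result out of the wb_data words.
 */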
253 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
255 struct dmae_command *dmae = &bp->init_dmae;
256 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
259 if (!bp->dmae_ready) {
260 u32 *data = bnx2x_sp(bp, wb_data[0]);
263 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
264 " using indirect\n", src_addr, len32);
265 for (i = 0; i < len32; i++)
266 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
270 mutex_lock(&bp->dmae_mutex);
272 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
273 memset(dmae, 0, sizeof(struct dmae_command));
275 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
276 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
277 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
283 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
284 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
285 dmae->src_addr_lo = src_addr >> 2;
286 dmae->src_addr_hi = 0;
287 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
288 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
290 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
291 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
292 dmae->comp_val = DMAE_COMP_VAL;
294 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
295 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
296 "dst_addr [%x:%08x (%08x)]\n"
297 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
298 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
299 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
300 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
304 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
308 while (*wb_comp != DMAE_COMP_VAL) {
311 BNX2X_ERR("dmae timeout!\n");
315 /* adjust delay for emulation/FPGA */
316 if (CHIP_REV_IS_SLOW(bp))
321 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
322 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
323 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
325 mutex_unlock(&bp->dmae_mutex);
328 /* used only for slowpath so not inlined */
329 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
333 wb_write[0] = val_hi;
334 wb_write[1] = val_lo;
335 REG_WR_DMAE(bp, reg, wb_write, 2);
339 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
343 REG_RD_DMAE(bp, reg, wb_data, 2);
345 return HILO_U64(wb_data[0], wb_data[1]);
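/*
 * bnx2x_mc_assert() below walks the assert lists kept by the storm
 * processors (XSTORM, TSTORM, CSTORM, USTORM) in their internal memories.
 * Each list entry is four dwords; entries are printed until one starting
 * with COMMON_ASM_INVALID_ASSERT_OPCODE is found.
 */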
349 static int bnx2x_mc_assert(struct bnx2x *bp)
353 u32 row0, row1, row2, row3;
356 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
357 XSTORM_ASSERT_LIST_INDEX_OFFSET);
359 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
361 /* print the asserts */
362 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
364 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i));
366 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
368 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
370 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
373 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
374 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
375 " 0x%08x 0x%08x 0x%08x\n",
376 i, row3, row2, row1, row0);
384 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
385 TSTORM_ASSERT_LIST_INDEX_OFFSET);
387 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
389 /* print the asserts */
390 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
392 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i));
394 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
396 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
398 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
401 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
402 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
403 " 0x%08x 0x%08x 0x%08x\n",
404 i, row3, row2, row1, row0);
412 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
413 CSTORM_ASSERT_LIST_INDEX_OFFSET);
415 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
417 /* print the asserts */
418 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
420 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i));
422 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
424 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
426 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
429 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
430 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
431 " 0x%08x 0x%08x 0x%08x\n",
432 i, row3, row2, row1, row0);
440 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
441 USTORM_ASSERT_LIST_INDEX_OFFSET);
443 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
445 /* print the asserts */
446 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
448 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i));
450 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 4);
452 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 8);
454 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
455 USTORM_ASSERT_LIST_OFFSET(i) + 12);
457 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
458 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
459 " 0x%08x 0x%08x 0x%08x\n",
460 i, row3, row2, row1, row0);
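/*
 * bnx2x_fw_dump() below prints the MCP scratchpad trace buffer.  The
 * "mark" word read from offset 0xf104 locates the wrap point, so the
 * buffer is dumped in two passes (from the mark to the end of the region,
 * then from its start up to the mark) to keep the messages in order.
 */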
470 static void bnx2x_fw_dump(struct bnx2x *bp)
476 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
477 mark = ((mark + 0x3) & ~0x3);
478 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
480 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
481 for (word = 0; word < 8; word++)
482 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
485 printk(KERN_CONT "%s", (char *)data);
487 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
488 for (word = 0; word < 8; word++)
489 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
492 printk(KERN_CONT "%s", (char *)data);
494 printk("\n" KERN_ERR PFX "end of fw dump\n");
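/*
 * bnx2x_panic_dump() below freezes statistics gathering and then dumps
 * the driver's view of the hardware at crash time: slowpath indices,
 * per-queue Rx/Tx indices, and the ring entries around the current
 * consumers.
 */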
497 static void bnx2x_panic_dump(struct bnx2x *bp)
502 bp->stats_state = STATS_STATE_DISABLED;
503 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
505 BNX2X_ERR("begin crash dump -----------------\n");
509 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
510 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
511 " spq_prod_idx(%u)\n",
512 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
513 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
516 for_each_rx_queue(bp, i) {
517 struct bnx2x_fastpath *fp = &bp->fp[i];
519 BNX2X_ERR("queue[%d]: rx_bd_prod(%x) rx_bd_cons(%x)"
520 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
521 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
522 i, fp->rx_bd_prod, fp->rx_bd_cons,
523 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
524 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
525 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
526 " fp_u_idx(%x) *sb_u_idx(%x)\n",
527 fp->rx_sge_prod, fp->last_max_sge,
528 le16_to_cpu(fp->fp_u_idx),
529 fp->status_blk->u_status_block.status_block_index);
533 for_each_tx_queue(bp, i) {
534 struct bnx2x_fastpath *fp = &bp->fp[i];
535 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
537 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
538 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
539 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
540 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
541 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
542 " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
543 fp->status_blk->c_status_block.status_block_index,
544 hw_prods->packets_prod, hw_prods->bds_prod);
549 for_each_rx_queue(bp, i) {
550 struct bnx2x_fastpath *fp = &bp->fp[i];
552 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
553 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
554 for (j = start; j != end; j = RX_BD(j + 1)) {
555 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
556 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
558 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
559 j, rx_bd[1], rx_bd[0], sw_bd->skb);
562 start = RX_SGE(fp->rx_sge_prod);
563 end = RX_SGE(fp->last_max_sge);
564 for (j = start; j != end; j = RX_SGE(j + 1)) {
565 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
566 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
568 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
569 j, rx_sge[1], rx_sge[0], sw_page->page);
572 start = RCQ_BD(fp->rx_comp_cons - 10);
573 end = RCQ_BD(fp->rx_comp_cons + 503);
574 for (j = start; j != end; j = RCQ_BD(j + 1)) {
575 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
577 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
578 j, cqe[0], cqe[1], cqe[2], cqe[3]);
583 for_each_tx_queue(bp, i) {
584 struct bnx2x_fastpath *fp = &bp->fp[i];
586 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
587 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
588 for (j = start; j != end; j = TX_BD(j + 1)) {
589 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
591 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
592 sw_bd->skb, sw_bd->first_bd);
595 start = TX_BD(fp->tx_bd_cons - 10);
596 end = TX_BD(fp->tx_bd_cons + 254);
597 for (j = start; j != end; j = TX_BD(j + 1)) {
598 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
600 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
601 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
607 BNX2X_ERR("end crash dump -----------------\n");
610 static void bnx2x_int_enable(struct bnx2x *bp)
612 int port = BP_PORT(bp);
613 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
614 u32 val = REG_RD(bp, addr);
615 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
616 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
if (msix) {
val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
HC_CONFIG_0_REG_INT_LINE_EN_0);
val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
HC_CONFIG_0_REG_ATTN_BIT_EN_0);
} else if (msi) {
val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
HC_CONFIG_0_REG_ATTN_BIT_EN_0);
} else {
val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
HC_CONFIG_0_REG_INT_LINE_EN_0 |
HC_CONFIG_0_REG_ATTN_BIT_EN_0);
634 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
637 REG_WR(bp, addr, val);
val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
}
642 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
643 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
645 REG_WR(bp, addr, val);
647 if (CHIP_IS_E1H(bp)) {
648 /* init leading/trailing edge */
650 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
652 /* enable nig and gpio3 attention */
657 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
658 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
662 static void bnx2x_int_disable(struct bnx2x *bp)
664 int port = BP_PORT(bp);
665 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
666 u32 val = REG_RD(bp, addr);
668 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
669 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
670 HC_CONFIG_0_REG_INT_LINE_EN_0 |
671 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
673 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
676 /* flush all outstanding writes */
679 REG_WR(bp, addr, val);
680 if (REG_RD(bp, addr) != val)
681 BNX2X_ERR("BUG! proper val not read from IGU!\n");
685 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
687 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
690 /* disable interrupt handling */
691 atomic_inc(&bp->intr_sem);
693 /* prevent the HW from sending interrupts */
694 bnx2x_int_disable(bp);
696 /* make sure all ISRs are done */
if (msix) {
synchronize_irq(bp->msix_table[0].vector);
offset = 1;
for_each_queue(bp, i)
synchronize_irq(bp->msix_table[i + offset].vector);
} else
synchronize_irq(bp->pdev->irq);
705 /* make sure sp_task is not running */
706 cancel_delayed_work(&bp->sp_task);
707 flush_workqueue(bnx2x_wq);
/*
 * General service functions
 */
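/*
 * bnx2x_ack_sb() below acknowledges a status block to the HC/IGU: the
 * storm id, status block id, new index and interrupt mode are packed into
 * an igu_ack_register and written as a single dword to the per-port HC
 * command register.
 */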
716 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
717 u8 storm, u16 index, u8 op, u8 update)
719 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
720 COMMAND_REG_INT_ACK);
721 struct igu_ack_register igu_ack;
723 igu_ack.status_block_index = index;
724 igu_ack.sb_id_and_flags =
725 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
726 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
727 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
728 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
730 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
731 (*(u32 *)&igu_ack), hc_addr);
732 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
735 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
737 struct host_status_block *fpsb = fp->status_blk;
740 barrier(); /* status block is written to by the chip */
741 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
742 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
745 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
746 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
752 static u16 bnx2x_ack_int(struct bnx2x *bp)
754 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
755 COMMAND_REG_SIMD_MASK);
756 u32 result = REG_RD(bp, hc_addr);
758 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
/*
 * fast path service functions
 */
769 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
/* Tell compiler that status block fields can change */
barrier();
775 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
776 return (fp->tx_pkt_cons != tx_cons_sb);
779 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
/* Tell compiler that consumer and producer can change */
barrier();
783 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
789 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
792 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
793 struct eth_tx_bd *tx_bd;
794 struct sk_buff *skb = tx_buf->skb;
795 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
798 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
802 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
803 tx_bd = &fp->tx_desc_ring[bd_idx];
804 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
805 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
807 nbd = le16_to_cpu(tx_bd->nbd) - 1;
808 new_cons = nbd + tx_buf->first_bd;
809 #ifdef BNX2X_STOP_ON_ERROR
810 if (nbd > (MAX_SKB_FRAGS + 2)) {
811 BNX2X_ERR("BAD nbd!\n");
816 /* Skip a parse bd and the TSO split header bd
817 since they have no mapping */
819 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
821 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
822 ETH_TX_BD_FLAGS_TCP_CSUM |
823 ETH_TX_BD_FLAGS_SW_LSO)) {
825 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
826 tx_bd = &fp->tx_desc_ring[bd_idx];
827 /* is this a TSO split header bd? */
828 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
830 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
837 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
838 tx_bd = &fp->tx_desc_ring[bd_idx];
839 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
840 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
842 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
848 tx_buf->first_bd = 0;
854 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
860 barrier(); /* Tell compiler that prod and cons can change */
861 prod = fp->tx_bd_prod;
862 cons = fp->tx_bd_cons;
864 /* NUM_TX_RINGS = number of "next-page" entries
865 It will be used as a threshold */
866 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
868 #ifdef BNX2X_STOP_ON_ERROR
870 WARN_ON(used > fp->bp->tx_ring_size);
871 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
874 return (s16)(fp->bp->tx_ring_size) - used;
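/*
 * bnx2x_tx_int() below reclaims completed Tx packets: it walks from the
 * software consumer up to the consumer reported in the status block,
 * freeing each packet's BDs and skb, and then wakes the netdev Tx queue
 * if it was stopped and enough descriptors have become available.
 */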
877 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
879 struct bnx2x *bp = fp->bp;
880 struct netdev_queue *txq;
881 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
884 #ifdef BNX2X_STOP_ON_ERROR
885 if (unlikely(bp->panic))
889 txq = netdev_get_tx_queue(bp->dev, fp->index);
890 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
891 sw_cons = fp->tx_pkt_cons;
893 while (sw_cons != hw_cons) {
896 pkt_cons = TX_BD(sw_cons);
898 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
900 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
901 hw_cons, sw_cons, pkt_cons);
903 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
905 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
908 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
916 fp->tx_pkt_cons = sw_cons;
917 fp->tx_bd_cons = bd_cons;
/* Need to make the tx_bd_cons update visible to start_xmit()
 * before checking for netif_tx_queue_stopped(). Without the
 * memory barrier, there is a small possibility that start_xmit()
 * will miss it and cause the queue to be stopped forever.
 */
smp_mb();
926 /* TBD need a thresh? */
927 if (unlikely(netif_tx_queue_stopped(txq))) {
929 __netif_tx_lock(txq, smp_processor_id());
931 if ((netif_tx_queue_stopped(txq)) &&
932 (bp->state == BNX2X_STATE_OPEN) &&
933 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
934 netif_tx_wake_queue(txq);
936 __netif_tx_unlock(txq);
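/*
 * bnx2x_sp_event() below handles slowpath (ramrod) completions delivered
 * on the fastpath RCQ: depending on the command and on the current
 * fastpath or driver state it advances the corresponding state machine
 * (client setup/halt, port setup, CFC delete, set-MAC, ...).
 */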
941 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
942 union eth_rx_cqe *rr_cqe)
944 struct bnx2x *bp = fp->bp;
945 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
946 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
949 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
950 fp->index, cid, command, bp->state,
951 rr_cqe->ramrod_cqe.ramrod_type);
956 switch (command | fp->state) {
957 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
958 BNX2X_FP_STATE_OPENING):
959 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
961 fp->state = BNX2X_FP_STATE_OPEN;
964 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
965 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
967 fp->state = BNX2X_FP_STATE_HALTED;
971 BNX2X_ERR("unexpected MC reply (%d) "
972 "fp->state is %x\n", command, fp->state);
975 mb(); /* force bnx2x_wait_ramrod() to see the change */
979 switch (command | bp->state) {
980 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
981 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
982 bp->state = BNX2X_STATE_OPEN;
985 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
986 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
987 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
988 fp->state = BNX2X_FP_STATE_HALTED;
991 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
992 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
993 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
997 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
998 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
999 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1000 bp->set_mac_pending = 0;
1003 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1004 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1008 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1009 command, bp->state);
1012 mb(); /* force bnx2x_wait_ramrod() to see the change */
1015 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1016 struct bnx2x_fastpath *fp, u16 index)
1018 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1019 struct page *page = sw_buf->page;
1020 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1022 /* Skip "next page" elements */
1026 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1027 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1028 __free_pages(page, PAGES_PER_SGE_SHIFT);
1030 sw_buf->page = NULL;
1035 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1036 struct bnx2x_fastpath *fp, int last)
1040 for (i = 0; i < last; i++)
1041 bnx2x_free_rx_sge(bp, fp, i);
1044 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1045 struct bnx2x_fastpath *fp, u16 index)
1047 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1048 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1049 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1052 if (unlikely(page == NULL))
1055 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1056 PCI_DMA_FROMDEVICE);
1057 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1058 __free_pages(page, PAGES_PER_SGE_SHIFT);
1062 sw_buf->page = page;
1063 pci_unmap_addr_set(sw_buf, mapping, mapping);
1065 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1066 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1071 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1072 struct bnx2x_fastpath *fp, u16 index)
1074 struct sk_buff *skb;
1075 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1076 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1079 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1080 if (unlikely(skb == NULL))
1083 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1084 PCI_DMA_FROMDEVICE);
1085 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1091 pci_unmap_addr_set(rx_buf, mapping, mapping);
1093 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1094 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
1104 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1105 struct sk_buff *skb, u16 cons, u16 prod)
1107 struct bnx2x *bp = fp->bp;
1108 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1109 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1110 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1111 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1113 pci_dma_sync_single_for_device(bp->pdev,
1114 pci_unmap_addr(cons_rx_buf, mapping),
1115 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1117 prod_rx_buf->skb = cons_rx_buf->skb;
1118 pci_unmap_addr_set(prod_rx_buf, mapping,
1119 pci_unmap_addr(cons_rx_buf, mapping));
1120 *prod_bd = *cons_bd;
1123 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1126 u16 last_max = fp->last_max_sge;
1128 if (SUB_S16(idx, last_max) > 0)
1129 fp->last_max_sge = idx;
1132 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1136 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1137 int idx = RX_SGE_CNT * i - 1;
1139 for (j = 0; j < 2; j++) {
1140 SGE_MASK_CLEAR_BIT(fp, idx);
1146 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1147 struct eth_fast_path_rx_cqe *fp_cqe)
1149 struct bnx2x *bp = fp->bp;
1150 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1151 le16_to_cpu(fp_cqe->len_on_bd)) >>
1153 u16 last_max, last_elem, first_elem;
1160 /* First mark all used pages */
1161 for (i = 0; i < sge_len; i++)
1162 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1164 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1165 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1167 /* Here we assume that the last SGE index is the biggest */
1168 prefetch((void *)(fp->sge_mask));
1169 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1171 last_max = RX_SGE(fp->last_max_sge);
1172 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1173 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1175 /* If ring is not full */
1176 if (last_elem + 1 != first_elem)
1179 /* Now update the prod */
1180 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1181 if (likely(fp->sge_mask[i]))
1184 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1185 delta += RX_SGE_MASK_ELEM_SZ;
1189 fp->rx_sge_prod += delta;
1190 /* clear page-end entries */
1191 bnx2x_clear_sge_mask_next_elems(fp);
1194 DP(NETIF_MSG_RX_STATUS,
1195 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1196 fp->last_max_sge, fp->rx_sge_prod);
1199 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1201 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1202 memset(fp->sge_mask, 0xff,
1203 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1205 /* Clear the two last indices in the page to 1:
1206 these are the indices that correspond to the "next" element,
1207 hence will never be indicated and should be removed from
1208 the calculations. */
1209 bnx2x_clear_sge_mask_next_elems(fp);
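/*
 * TPA (LRO) support: bnx2x_tpa_start() parks the skb that opens an
 * aggregation in a per-queue bin (tpa_pool) and puts the bin's spare skb
 * back on the ring at the producer; bnx2x_fill_frag_skb() later attaches
 * the SGE pages of the aggregation to that skb as page fragments; and
 * bnx2x_tpa_stop() fixes the IP checksum and hands the aggregated packet
 * to the stack, or drops it and keeps the buffer in the bin if a
 * replacement skb cannot be allocated.
 */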
1212 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1213 struct sk_buff *skb, u16 cons, u16 prod)
1215 struct bnx2x *bp = fp->bp;
1216 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1217 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1218 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1221 /* move empty skb from pool to prod and map it */
1222 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1223 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1224 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1225 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1227 /* move partial skb from cons to pool (don't unmap yet) */
1228 fp->tpa_pool[queue] = *cons_rx_buf;
1230 /* mark bin state as start - print error if current state != stop */
1231 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1232 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1234 fp->tpa_state[queue] = BNX2X_TPA_START;
1236 /* point prod_bd to new skb */
1237 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1238 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1240 #ifdef BNX2X_STOP_ON_ERROR
1241 fp->tpa_queue_used |= (1 << queue);
1242 #ifdef __powerpc64__
1243 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1245 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1247 fp->tpa_queue_used);
1251 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1252 struct sk_buff *skb,
1253 struct eth_fast_path_rx_cqe *fp_cqe,
1256 struct sw_rx_page *rx_pg, old_rx_pg;
1257 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1258 u32 i, frag_len, frag_size, pages;
1262 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1263 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1265 /* This is needed in order to enable forwarding support */
1267 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1268 max(frag_size, (u32)len_on_bd));
1270 #ifdef BNX2X_STOP_ON_ERROR
1272 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1273 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1275 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1276 fp_cqe->pkt_len, len_on_bd);
1282 /* Run through the SGL and compose the fragmented skb */
1283 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1284 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1286 /* FW gives the indices of the SGE as if the ring is an array
1287 (meaning that "next" element will consume 2 indices) */
1288 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1289 rx_pg = &fp->rx_page_ring[sge_idx];
1292 /* If we fail to allocate a substitute page, we simply stop
1293 where we are and drop the whole packet */
1294 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1295 if (unlikely(err)) {
1296 fp->eth_q_stats.rx_skb_alloc_failed++;
/* Unmap the page as we are going to pass it to the stack */
1301 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1302 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1304 /* Add one frag and update the appropriate fields in the skb */
1305 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1307 skb->data_len += frag_len;
1308 skb->truesize += frag_len;
1309 skb->len += frag_len;
1311 frag_size -= frag_len;
1317 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1318 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1321 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1322 struct sk_buff *skb = rx_buf->skb;
1324 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
/* Unmap skb in the pool anyway, as we are going to change
   pool entry status to BNX2X_TPA_STOP even if new skb allocation
   fails. */
1329 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1330 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1332 if (likely(new_skb)) {
1333 /* fix ip xsum and give it to the stack */
1334 /* (no need to map the new skb) */
1337 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1338 PARSING_FLAGS_VLAN);
1339 int is_not_hwaccel_vlan_cqe =
1340 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1344 prefetch(((char *)(skb)) + 128);
1346 #ifdef BNX2X_STOP_ON_ERROR
1347 if (pad + len > bp->rx_buf_size) {
1348 BNX2X_ERR("skb_put is about to fail... "
1349 "pad %d len %d rx_buf_size %d\n",
1350 pad, len, bp->rx_buf_size);
1356 skb_reserve(skb, pad);
1359 skb->protocol = eth_type_trans(skb, bp->dev);
1360 skb->ip_summed = CHECKSUM_UNNECESSARY;
1365 iph = (struct iphdr *)skb->data;
/* If there is no Rx VLAN offloading -
   take VLAN tag into account */
1369 if (unlikely(is_not_hwaccel_vlan_cqe))
1370 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1373 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1376 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1377 &cqe->fast_path_cqe, cqe_idx)) {
1379 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1380 (!is_not_hwaccel_vlan_cqe))
1381 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1382 le16_to_cpu(cqe->fast_path_cqe.
1386 netif_receive_skb(skb);
1388 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1389 " - dropping packet!\n");
1394 /* put new skb in bin */
1395 fp->tpa_pool[queue].skb = new_skb;
1398 /* else drop the packet and keep the buffer in the bin */
1399 DP(NETIF_MSG_RX_STATUS,
1400 "Failed to allocate new skb - dropping packet!\n");
1401 fp->eth_q_stats.rx_skb_alloc_failed++;
1404 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1407 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1408 struct bnx2x_fastpath *fp,
1409 u16 bd_prod, u16 rx_comp_prod,
1412 struct ustorm_eth_rx_producers rx_prods = {0};
1415 /* Update producers */
1416 rx_prods.bd_prod = bd_prod;
1417 rx_prods.cqe_prod = rx_comp_prod;
1418 rx_prods.sge_prod = rx_sge_prod;
/*
 * Make sure that the BD and SGE data is updated before updating the
 * producers, since the FW might read the BD/SGE right after the producer
 * is updated.
 * This is only applicable for weak-ordered memory model archs such
 * as IA-64. The following barrier is also mandatory since the FW
 * assumes BDs must have buffers.
 */
wmb();
1430 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1431 REG_WR(bp, BAR_USTRORM_INTMEM +
1432 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1433 ((u32 *)&rx_prods)[i]);
1435 mmiowb(); /* keep prod updates ordered */
1437 DP(NETIF_MSG_RX_STATUS,
1438 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1439 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
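/*
 * bnx2x_rx_int() below is the NAPI Rx handler for one fastpath queue: it
 * consumes up to "budget" completions from the RCQ, dispatching slowpath
 * CQEs to bnx2x_sp_event(), driving TPA start/stop for aggregated
 * packets, copying small packets when the MTU is large, and passing
 * everything else up the stack before writing the new producers back to
 * the chip.
 */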
1442 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1444 struct bnx2x *bp = fp->bp;
1445 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1446 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1449 #ifdef BNX2X_STOP_ON_ERROR
1450 if (unlikely(bp->panic))
1454 /* CQ "next element" is of the size of the regular element,
1455 that's why it's ok here */
1456 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1457 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1460 bd_cons = fp->rx_bd_cons;
1461 bd_prod = fp->rx_bd_prod;
1462 bd_prod_fw = bd_prod;
1463 sw_comp_cons = fp->rx_comp_cons;
1464 sw_comp_prod = fp->rx_comp_prod;
/* Memory barrier necessary as speculative reads of the rx
 * buffer can be ahead of the index in the status block
 */
rmb();
1471 DP(NETIF_MSG_RX_STATUS,
1472 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1473 fp->index, hw_comp_cons, sw_comp_cons);
1475 while (sw_comp_cons != hw_comp_cons) {
1476 struct sw_rx_bd *rx_buf = NULL;
1477 struct sk_buff *skb;
1478 union eth_rx_cqe *cqe;
1482 comp_ring_cons = RCQ_BD(sw_comp_cons);
1483 bd_prod = RX_BD(bd_prod);
1484 bd_cons = RX_BD(bd_cons);
1486 cqe = &fp->rx_comp_ring[comp_ring_cons];
1487 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1489 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1490 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1491 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1492 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1493 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1494 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1496 /* is this a slowpath msg? */
1497 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1498 bnx2x_sp_event(fp, cqe);
1501 /* this is an rx packet */
1503 rx_buf = &fp->rx_buf_ring[bd_cons];
1505 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1506 pad = cqe->fast_path_cqe.placement_offset;
1508 /* If CQE is marked both TPA_START and TPA_END
1509 it is a non-TPA CQE */
1510 if ((!fp->disable_tpa) &&
1511 (TPA_TYPE(cqe_fp_flags) !=
1512 (TPA_TYPE_START | TPA_TYPE_END))) {
1513 u16 queue = cqe->fast_path_cqe.queue_index;
1515 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1516 DP(NETIF_MSG_RX_STATUS,
1517 "calling tpa_start on queue %d\n",
1520 bnx2x_tpa_start(fp, queue, skb,
1525 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1526 DP(NETIF_MSG_RX_STATUS,
1527 "calling tpa_stop on queue %d\n",
1530 if (!BNX2X_RX_SUM_FIX(cqe))
1531 BNX2X_ERR("STOP on none TCP "
/* This is the size of the linear data
   on this skb */
1536 len = le16_to_cpu(cqe->fast_path_cqe.
1538 bnx2x_tpa_stop(bp, fp, queue, pad,
1539 len, cqe, comp_ring_cons);
1540 #ifdef BNX2X_STOP_ON_ERROR
1545 bnx2x_update_sge_prod(fp,
1546 &cqe->fast_path_cqe);
1551 pci_dma_sync_single_for_device(bp->pdev,
1552 pci_unmap_addr(rx_buf, mapping),
1553 pad + RX_COPY_THRESH,
1554 PCI_DMA_FROMDEVICE);
1556 prefetch(((char *)(skb)) + 128);
1558 /* is this an error packet? */
1559 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1560 DP(NETIF_MSG_RX_ERR,
1561 "ERROR flags %x rx packet %u\n",
1562 cqe_fp_flags, sw_comp_cons);
1563 fp->eth_q_stats.rx_err_discard_pkt++;
/* Since we don't have a jumbo ring
 * copy small packets if mtu > 1500
 */
1570 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1571 (len <= RX_COPY_THRESH)) {
1572 struct sk_buff *new_skb;
1574 new_skb = netdev_alloc_skb(bp->dev,
1576 if (new_skb == NULL) {
1577 DP(NETIF_MSG_RX_ERR,
1578 "ERROR packet dropped "
1579 "because of alloc failure\n");
1580 fp->eth_q_stats.rx_skb_alloc_failed++;
1585 skb_copy_from_linear_data_offset(skb, pad,
1586 new_skb->data + pad, len);
1587 skb_reserve(new_skb, pad);
1588 skb_put(new_skb, len);
1590 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1594 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1595 pci_unmap_single(bp->pdev,
1596 pci_unmap_addr(rx_buf, mapping),
1598 PCI_DMA_FROMDEVICE);
1599 skb_reserve(skb, pad);
1603 DP(NETIF_MSG_RX_ERR,
1604 "ERROR packet dropped because "
1605 "of alloc failure\n");
1606 fp->eth_q_stats.rx_skb_alloc_failed++;
1608 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1612 skb->protocol = eth_type_trans(skb, bp->dev);
1614 skb->ip_summed = CHECKSUM_NONE;
1616 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1617 skb->ip_summed = CHECKSUM_UNNECESSARY;
1619 fp->eth_q_stats.hw_csum_err++;
1623 skb_record_rx_queue(skb, fp->index);
1625 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1626 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1627 PARSING_FLAGS_VLAN))
1628 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1629 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1632 netif_receive_skb(skb);
1638 bd_cons = NEXT_RX_IDX(bd_cons);
1639 bd_prod = NEXT_RX_IDX(bd_prod);
1640 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1643 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1644 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1646 if (rx_pkt == budget)
1650 fp->rx_bd_cons = bd_cons;
1651 fp->rx_bd_prod = bd_prod_fw;
1652 fp->rx_comp_cons = sw_comp_cons;
1653 fp->rx_comp_prod = sw_comp_prod;
1655 /* Update producers */
1656 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1659 fp->rx_pkt += rx_pkt;
1665 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1667 struct bnx2x_fastpath *fp = fp_cookie;
1668 struct bnx2x *bp = fp->bp;
1669 int index = fp->index;
1671 /* Return here if interrupt is disabled */
1672 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1673 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1677 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1679 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1681 #ifdef BNX2X_STOP_ON_ERROR
1682 if (unlikely(bp->panic))
1686 prefetch(fp->rx_cons_sb);
1687 prefetch(fp->tx_cons_sb);
1688 prefetch(&fp->status_blk->c_status_block.status_block_index);
1689 prefetch(&fp->status_blk->u_status_block.status_block_index);
1691 napi_schedule(&bnx2x_fp(bp, index, napi));
1696 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1698 struct bnx2x *bp = netdev_priv(dev_instance);
1699 u16 status = bnx2x_ack_int(bp);
1702 /* Return here if interrupt is shared and it's not for us */
1703 if (unlikely(status == 0)) {
1704 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1707 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1709 /* Return here if interrupt is disabled */
1710 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1715 #ifdef BNX2X_STOP_ON_ERROR
1716 if (unlikely(bp->panic))
1720 mask = 0x2 << bp->fp[0].sb_id;
1721 if (status & mask) {
1722 struct bnx2x_fastpath *fp = &bp->fp[0];
1724 prefetch(fp->rx_cons_sb);
1725 prefetch(fp->tx_cons_sb);
1726 prefetch(&fp->status_blk->c_status_block.status_block_index);
1727 prefetch(&fp->status_blk->u_status_block.status_block_index);
1729 napi_schedule(&bnx2x_fp(bp, 0, napi));
1735 if (unlikely(status & 0x1)) {
1736 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1744 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1750 /* end of fast path */
1752 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/*
 * General service functions
 */
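/*
 * The hardware resource locks below arbitrate resources shared between
 * the PCI functions (MDIO, GPIO, SPIO, ...) through the MISC
 * driver-control registers: bnx2x_acquire_hw_lock() retries every 5 ms
 * for up to 5 seconds, and bnx2x_release_hw_lock() clears the resource
 * bit again.
 */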
1760 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1763 u32 resource_bit = (1 << resource);
1764 int func = BP_FUNC(bp);
1765 u32 hw_lock_control_reg;
1768 /* Validating that the resource is within range */
1769 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1771 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1772 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1777 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1779 hw_lock_control_reg =
1780 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1783 /* Validating that the resource is not already taken */
1784 lock_status = REG_RD(bp, hw_lock_control_reg);
1785 if (lock_status & resource_bit) {
1786 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1787 lock_status, resource_bit);
/* Try for 5 seconds, every 5 ms */
1792 for (cnt = 0; cnt < 1000; cnt++) {
1793 /* Try to acquire the lock */
1794 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1795 lock_status = REG_RD(bp, hw_lock_control_reg);
1796 if (lock_status & resource_bit)
1801 DP(NETIF_MSG_HW, "Timeout\n");
1805 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1808 u32 resource_bit = (1 << resource);
1809 int func = BP_FUNC(bp);
1810 u32 hw_lock_control_reg;
1812 /* Validating that the resource is within range */
1813 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1815 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1816 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1821 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1823 hw_lock_control_reg =
1824 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1827 /* Validating that the resource is currently taken */
1828 lock_status = REG_RD(bp, hw_lock_control_reg);
1829 if (!(lock_status & resource_bit)) {
1830 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1831 lock_status, resource_bit);
1835 REG_WR(bp, hw_lock_control_reg, resource_bit);
1839 /* HW Lock for shared dual port PHYs */
1840 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1842 mutex_lock(&bp->port.phy_mutex);
1844 if (bp->port.need_hw_lock)
1845 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1848 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1850 if (bp->port.need_hw_lock)
1851 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1853 mutex_unlock(&bp->port.phy_mutex);
1856 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1858 /* The GPIO should be swapped if swap register is set and active */
1859 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1860 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1861 int gpio_shift = gpio_num +
1862 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1863 u32 gpio_mask = (1 << gpio_shift);
1867 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1868 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1872 /* read GPIO value */
1873 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1875 /* get the requested pin value */
1876 if ((gpio_reg & gpio_mask) == gpio_mask)
1881 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1886 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1888 /* The GPIO should be swapped if swap register is set and active */
1889 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1890 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1891 int gpio_shift = gpio_num +
1892 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1893 u32 gpio_mask = (1 << gpio_shift);
1896 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1897 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1901 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/* read GPIO and mask out everything except the float bits */
1903 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1906 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1907 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1908 gpio_num, gpio_shift);
1909 /* clear FLOAT and set CLR */
1910 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1911 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1914 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1915 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1916 gpio_num, gpio_shift);
1917 /* clear FLOAT and set SET */
1918 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1919 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1922 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1923 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1924 gpio_num, gpio_shift);
1926 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1933 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1934 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1939 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1941 /* The GPIO should be swapped if swap register is set and active */
1942 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1943 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1944 int gpio_shift = gpio_num +
1945 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1946 u32 gpio_mask = (1 << gpio_shift);
1949 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1950 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1954 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1956 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1959 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1960 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1961 "output low\n", gpio_num, gpio_shift);
1962 /* clear SET and set CLR */
1963 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1964 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1967 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1968 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1969 "output high\n", gpio_num, gpio_shift);
1970 /* clear CLR and set SET */
1971 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1972 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1979 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1980 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1985 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1987 u32 spio_mask = (1 << spio_num);
1990 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1991 (spio_num > MISC_REGISTERS_SPIO_7)) {
1992 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1996 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
/* read SPIO and mask out everything except the float bits */
1998 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2001 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2002 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2003 /* clear FLOAT and set CLR */
2004 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2005 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2008 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2009 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2010 /* clear FLOAT and set SET */
2011 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2012 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2015 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2016 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2018 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2025 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2026 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2031 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2033 switch (bp->link_vars.ieee_fc &
2034 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2035 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2036 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2040 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2041 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2045 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2046 bp->port.advertising |= ADVERTISED_Asym_Pause;
2050 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2056 static void bnx2x_link_report(struct bnx2x *bp)
2058 if (bp->link_vars.link_up) {
2059 if (bp->state == BNX2X_STATE_OPEN)
2060 netif_carrier_on(bp->dev);
2061 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2063 printk("%d Mbps ", bp->link_vars.line_speed);
2065 if (bp->link_vars.duplex == DUPLEX_FULL)
2066 printk("full duplex");
2068 printk("half duplex");
2070 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2071 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2072 printk(", receive ");
2073 if (bp->link_vars.flow_ctrl &
2075 printk("& transmit ");
2077 printk(", transmit ");
2079 printk("flow control ON");
2083 } else { /* link_down */
2084 netif_carrier_off(bp->dev);
2085 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2089 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2091 if (!BP_NOMCP(bp)) {
2094 /* Initialize link parameters structure variables */
2095 /* It is recommended to turn off RX FC for jumbo frames
2096 for better performance */
2098 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2099 else if (bp->dev->mtu > 5000)
2100 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2102 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2104 bnx2x_acquire_phy_lock(bp);
2106 if (load_mode == LOAD_DIAG)
2107 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2109 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2111 bnx2x_release_phy_lock(bp);
2113 bnx2x_calc_fc_adv(bp);
2115 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2116 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2117 bnx2x_link_report(bp);
2122 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2126 static void bnx2x_link_set(struct bnx2x *bp)
2128 if (!BP_NOMCP(bp)) {
2129 bnx2x_acquire_phy_lock(bp);
2130 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2131 bnx2x_release_phy_lock(bp);
2133 bnx2x_calc_fc_adv(bp);
2135 BNX2X_ERR("Bootcode is missing - can not set link\n");
2138 static void bnx2x__link_reset(struct bnx2x *bp)
2140 if (!BP_NOMCP(bp)) {
2141 bnx2x_acquire_phy_lock(bp);
2142 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2143 bnx2x_release_phy_lock(bp);
2145 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2148 static u8 bnx2x_link_test(struct bnx2x *bp)
2152 bnx2x_acquire_phy_lock(bp);
2153 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2154 bnx2x_release_phy_lock(bp);
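/*
 * Congestion management (rate shaping and fairness) setup:
 * bnx2x_init_port_minmax() derives the per-port timer thresholds from the
 * current line speed, while bnx2x_init_vn_minmax() turns the per-function
 * min/max bandwidth from the MF configuration into rate-shaping and
 * fairness parameters and stores them in XSTORM internal memory.
 */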
2159 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2161 u32 r_param = bp->link_vars.line_speed / 8;
2162 u32 fair_periodic_timeout_usec;
2165 memset(&(bp->cmng.rs_vars), 0,
2166 sizeof(struct rate_shaping_vars_per_port));
2167 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2169 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2170 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
/* this is the threshold below which no timer arming will occur;
   the 1.25 coefficient makes the threshold a little bigger
   than the real time, to compensate for timer inaccuracy */
2175 bp->cmng.rs_vars.rs_threshold =
2176 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2178 /* resolution of fairness timer */
2179 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2180 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2181 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2183 /* this is the threshold below which we won't arm the timer anymore */
2184 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
/* we multiply by 1e3/8 to get bytes/msec.
   We don't want the credits to exceed the
   t_fair*FAIR_MEM credit (the algorithm resolution) */
2189 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2190 /* since each tick is 4 usec */
2191 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2194 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2196 struct rate_shaping_vars_per_vn m_rs_vn;
2197 struct fairness_vars_per_vn m_fair_vn;
2198 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2199 u16 vn_min_rate, vn_max_rate;
2202 /* If function is hidden - set min and max to zeroes */
2203 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2208 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2209 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2210 /* If fairness is enabled (not all min rates are zeroes) and
2211 if current min rate is zero - set it to 1.
2212 This is a requirement of the algorithm. */
2213 if (bp->vn_weight_sum && (vn_min_rate == 0))
2214 vn_min_rate = DEF_MIN_RATE;
2215 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2216 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2220 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2221 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2223 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2224 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2226 /* global vn counter - maximal Mbps for this vn */
2227 m_rs_vn.vn_counter.rate = vn_max_rate;
2229 /* quota - number of bytes transmitted in this period */
2230 m_rs_vn.vn_counter.quota =
2231 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2233 if (bp->vn_weight_sum) {
2234 /* credit for each period of the fairness algorithm:
2235 number of bytes in T_FAIR (the vn's share of the port rate).
2236 vn_weight_sum should not be larger than 10000, thus
2237 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater than zero */
2239 m_fair_vn.vn_credit_delta =
2240 max((u32)(vn_min_rate * (T_FAIR_COEF /
2241 (8 * bp->vn_weight_sum))),
2242 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2243 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2244 m_fair_vn.vn_credit_delta);
2247 /* Store it to internal memory */
2248 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2249 REG_WR(bp, BAR_XSTRORM_INTMEM +
2250 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2251 ((u32 *)(&m_rs_vn))[i]);
2253 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2254 REG_WR(bp, BAR_XSTRORM_INTMEM +
2255 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2256 ((u32 *)(&m_fair_vn))[i]);
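/*
 * Illustration of the per-VN credit computed above (hypothetical
 * numbers, not taken from any real configuration): with
 * vn_min_rate = 100 Mbps and vn_weight_sum = 10000 the first term is
 *   100 * (T_FAIR_COEF / (8 * 10000)),
 * and vn_credit_delta is that value or twice fair_threshold, whichever
 * is larger, so a VN with a tiny minimum rate still gets a usable
 * credit each fairness period.
 */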
2260 /* This function is called upon link interrupt */
2261 static void bnx2x_link_attn(struct bnx2x *bp)
2263 /* Make sure that we are synced with the current statistics */
2264 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2266 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2268 if (bp->link_vars.link_up) {
2270 /* dropless flow control */
2271 if (CHIP_IS_E1H(bp)) {
2272 int port = BP_PORT(bp);
2273 u32 pause_enabled = 0;
2275 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2278 REG_WR(bp, BAR_USTRORM_INTMEM +
2279 USTORM_PAUSE_ENABLED_OFFSET(port),
2283 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2284 struct host_port_stats *pstats;
2286 pstats = bnx2x_sp(bp, port_stats);
2287 /* reset old bmac stats */
2288 memset(&(pstats->mac_stx[0]), 0,
2289 sizeof(struct mac_stx));
2291 if ((bp->state == BNX2X_STATE_OPEN) ||
2292 (bp->state == BNX2X_STATE_DISABLED))
2293 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2296 /* indicate link status */
2297 bnx2x_link_report(bp);
2300 int port = BP_PORT(bp);
2304 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2305 if (vn == BP_E1HVN(bp))
2308 func = ((vn << 1) | port);
2310 /* Set the attention towards other drivers
2312 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2313 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2316 if (bp->link_vars.link_up) {
2319 /* Init rate shaping and fairness contexts */
2320 bnx2x_init_port_minmax(bp);
2322 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2323 bnx2x_init_vn_minmax(bp, 2*vn + port);
2325 /* Store it to internal memory */
2327 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2328 REG_WR(bp, BAR_XSTRORM_INTMEM +
2329 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2330 ((u32 *)(&bp->cmng))[i]);
2335 static void bnx2x__link_status_update(struct bnx2x *bp)
2337 if (bp->state != BNX2X_STATE_OPEN)
2340 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2342 if (bp->link_vars.link_up)
2343 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2345 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2347 /* indicate link status */
2348 bnx2x_link_report(bp);
2351 static void bnx2x_pmf_update(struct bnx2x *bp)
2353 int port = BP_PORT(bp);
2357 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2359 /* enable nig attention */
2360 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2361 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2362 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2364 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2372 * General service functions
2375 /* the slow path queue is odd since completions arrive on the fastpath ring */
2376 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2377 u32 data_hi, u32 data_lo, int common)
2379 int func = BP_FUNC(bp);
2381 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2382 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2383 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2384 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2385 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2387 #ifdef BNX2X_STOP_ON_ERROR
2388 if (unlikely(bp->panic))
2392 spin_lock_bh(&bp->spq_lock);
2394 if (!bp->spq_left) {
2395 BNX2X_ERR("BUG! SPQ ring full!\n");
2396 spin_unlock_bh(&bp->spq_lock);
2401 /* CID needs port number to be encoded in it */
2402 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2403 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2405 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2407 bp->spq_prod_bd->hdr.type |=
2408 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2410 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2411 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2415 if (bp->spq_prod_bd == bp->spq_last_bd) {
2416 bp->spq_prod_bd = bp->spq;
2417 bp->spq_prod_idx = 0;
2418 DP(NETIF_MSG_TIMER, "end of spq\n");
2425 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2428 spin_unlock_bh(&bp->spq_lock);
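/*
 * Typical use of bnx2x_sp_post(): bnx2x_storm_stats_post() below posts
 * the statistics query ramrod with
 *   bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *                 ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);
 * i.e. CID 0, the 64-bit payload split into hi/lo words, and
 * common == 0 (a per-connection ramrod, not a common one).
 */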
2432 /* acquire split MCP access lock register */
2433 static int bnx2x_acquire_alr(struct bnx2x *bp)
2440 for (j = 0; j < i*10; j++) {
2442 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2443 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2444 if (val & (1L << 31))
2449 if (!(val & (1L << 31))) {
2450 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2457 /* release split MCP access lock register */
2458 static void bnx2x_release_alr(struct bnx2x *bp)
2462 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
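/*
 * The split MCP access lock ("ALR") lives at GRC offset
 * GRCBASE_MCP + 0x9c.  bnx2x_acquire_alr() spins, writing the register
 * and reading it back until bit 31 reads back as set (lock owned);
 * bnx2x_release_alr() writes the register once to drop the lock so the
 * MCP or the other port's driver can take it.
 */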
2465 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2467 struct host_def_status_block *def_sb = bp->def_status_blk;
2470 barrier(); /* status block is written to by the chip */
2471 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2472 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2475 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2476 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2479 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2480 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2483 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2484 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2487 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2488 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
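/*
 * bnx2x_update_dsb_idx() caches the latest indices of the default
 * status block.  bnx2x_sp_task() later acknowledges exactly these
 * cached values (def_att_idx, def_u_idx, def_c_idx, def_x_idx,
 * def_t_idx) back to the IGU, so the chip knows which updates the
 * driver has already consumed.
 */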
2495 * slow path service functions
2498 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2500 int port = BP_PORT(bp);
2501 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2502 COMMAND_REG_ATTN_BITS_SET);
2503 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2504 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2505 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2506 NIG_REG_MASK_INTERRUPT_PORT0;
2510 if (bp->attn_state & asserted)
2511 BNX2X_ERR("IGU ERROR\n");
2513 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2514 aeu_mask = REG_RD(bp, aeu_addr);
2516 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2517 aeu_mask, asserted);
2518 aeu_mask &= ~(asserted & 0xff);
2519 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2521 REG_WR(bp, aeu_addr, aeu_mask);
2522 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2524 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2525 bp->attn_state |= asserted;
2526 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2528 if (asserted & ATTN_HARD_WIRED_MASK) {
2529 if (asserted & ATTN_NIG_FOR_FUNC) {
2531 bnx2x_acquire_phy_lock(bp);
2533 /* save nig interrupt mask */
2534 nig_mask = REG_RD(bp, nig_int_mask_addr);
2535 REG_WR(bp, nig_int_mask_addr, 0);
2537 bnx2x_link_attn(bp);
2539 /* handle unicore attn? */
2541 if (asserted & ATTN_SW_TIMER_4_FUNC)
2542 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2544 if (asserted & GPIO_2_FUNC)
2545 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2547 if (asserted & GPIO_3_FUNC)
2548 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2550 if (asserted & GPIO_4_FUNC)
2551 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2554 if (asserted & ATTN_GENERAL_ATTN_1) {
2555 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2556 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2558 if (asserted & ATTN_GENERAL_ATTN_2) {
2559 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2560 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2562 if (asserted & ATTN_GENERAL_ATTN_3) {
2563 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2564 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2567 if (asserted & ATTN_GENERAL_ATTN_4) {
2568 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2569 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2571 if (asserted & ATTN_GENERAL_ATTN_5) {
2572 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2573 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2575 if (asserted & ATTN_GENERAL_ATTN_6) {
2576 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2577 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2581 } /* if hardwired */
2583 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2585 REG_WR(bp, hc_addr, asserted);
2587 /* now set back the mask */
2588 if (asserted & ATTN_NIG_FOR_FUNC) {
2589 REG_WR(bp, nig_int_mask_addr, nig_mask);
2590 bnx2x_release_phy_lock(bp);
2594 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2596 int port = BP_PORT(bp);
2600 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2601 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2603 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2605 val = REG_RD(bp, reg_offset);
2606 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2607 REG_WR(bp, reg_offset, val);
2609 BNX2X_ERR("SPIO5 hw attention\n");
2611 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2612 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2613 /* Fan failure attention */
2615 /* The PHY reset is controlled by GPIO 1 */
2616 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2617 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2618 /* Low power mode is controlled by GPIO 2 */
2619 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2620 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2621 /* mark the failure */
2622 bp->link_params.ext_phy_config &=
2623 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2624 bp->link_params.ext_phy_config |=
2625 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2627 dev_info.port_hw_config[port].
2628 external_phy_config,
2629 bp->link_params.ext_phy_config);
2630 /* log the failure */
2631 printk(KERN_ERR PFX "Fan Failure on Network"
2632 " Controller %s has caused the driver to"
2633 " shutdown the card to prevent permanent"
2634 " damage. Please contact Dell Support for"
2635 " assistance\n", bp->dev->name);
2643 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2644 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2645 bnx2x_acquire_phy_lock(bp);
2646 bnx2x_handle_module_detect_int(&bp->link_params);
2647 bnx2x_release_phy_lock(bp);
2650 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2652 val = REG_RD(bp, reg_offset);
2653 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2654 REG_WR(bp, reg_offset, val);
2656 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2657 (attn & HW_INTERRUT_ASSERT_SET_0));
2662 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2666 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2668 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2669 BNX2X_ERR("DB hw attention 0x%x\n", val);
2670 /* DORQ discard attention */
2672 BNX2X_ERR("FATAL error from DORQ\n");
2675 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2677 int port = BP_PORT(bp);
2680 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2681 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2683 val = REG_RD(bp, reg_offset);
2684 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2685 REG_WR(bp, reg_offset, val);
2687 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2688 (attn & HW_INTERRUT_ASSERT_SET_1));
2693 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2697 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2699 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2700 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2701 /* CFC error attention */
2703 BNX2X_ERR("FATAL error from CFC\n");
2706 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2708 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2709 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2710 /* RQ_USDMDP_FIFO_OVERFLOW */
2712 BNX2X_ERR("FATAL error from PXP\n");
2715 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2717 int port = BP_PORT(bp);
2720 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2721 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2723 val = REG_RD(bp, reg_offset);
2724 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2725 REG_WR(bp, reg_offset, val);
2727 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2728 (attn & HW_INTERRUT_ASSERT_SET_2));
2733 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2737 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2739 if (attn & BNX2X_PMF_LINK_ASSERT) {
2740 int func = BP_FUNC(bp);
2742 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2743 bnx2x__link_status_update(bp);
2744 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2746 bnx2x_pmf_update(bp);
2748 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2750 BNX2X_ERR("MC assert!\n");
2751 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2752 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2753 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2754 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2757 } else if (attn & BNX2X_MCP_ASSERT) {
2759 BNX2X_ERR("MCP assert!\n");
2760 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2764 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2767 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2768 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2769 if (attn & BNX2X_GRC_TIMEOUT) {
2770 val = CHIP_IS_E1H(bp) ?
2771 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2772 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2774 if (attn & BNX2X_GRC_RSV) {
2775 val = CHIP_IS_E1H(bp) ?
2776 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2777 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2779 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2783 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2785 struct attn_route attn;
2786 struct attn_route group_mask;
2787 int port = BP_PORT(bp);
2793 /* need to take HW lock because MCP or other port might also
2794 try to handle this event */
2795 bnx2x_acquire_alr(bp);
2797 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2798 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2799 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2800 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2801 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2802 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2804 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2805 if (deasserted & (1 << index)) {
2806 group_mask = bp->attn_group[index];
2808 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2809 index, group_mask.sig[0], group_mask.sig[1],
2810 group_mask.sig[2], group_mask.sig[3]);
2812 bnx2x_attn_int_deasserted3(bp,
2813 attn.sig[3] & group_mask.sig[3]);
2814 bnx2x_attn_int_deasserted1(bp,
2815 attn.sig[1] & group_mask.sig[1]);
2816 bnx2x_attn_int_deasserted2(bp,
2817 attn.sig[2] & group_mask.sig[2]);
2818 bnx2x_attn_int_deasserted0(bp,
2819 attn.sig[0] & group_mask.sig[0]);
2821 if ((attn.sig[0] & group_mask.sig[0] &
2822 HW_PRTY_ASSERT_SET_0) ||
2823 (attn.sig[1] & group_mask.sig[1] &
2824 HW_PRTY_ASSERT_SET_1) ||
2825 (attn.sig[2] & group_mask.sig[2] &
2826 HW_PRTY_ASSERT_SET_2))
2827 BNX2X_ERR("FATAL HW block parity attention\n");
2831 bnx2x_release_alr(bp);
2833 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2836 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2838 REG_WR(bp, reg_addr, val);
2840 if (~bp->attn_state & deasserted)
2841 BNX2X_ERR("IGU ERROR\n");
2843 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2844 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2846 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2847 aeu_mask = REG_RD(bp, reg_addr);
2849 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2850 aeu_mask, deasserted);
2851 aeu_mask |= (deasserted & 0xff);
2852 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2854 REG_WR(bp, reg_addr, aeu_mask);
2855 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2857 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2858 bp->attn_state &= ~deasserted;
2859 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2862 static void bnx2x_attn_int(struct bnx2x *bp)
2864 /* read local copy of bits */
2865 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2867 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2869 u32 attn_state = bp->attn_state;
2871 /* look for changed bits */
2872 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2873 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2876 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2877 attn_bits, attn_ack, asserted, deasserted);
2879 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2880 BNX2X_ERR("BAD attention state\n");
2882 /* handle bits that were raised */
2884 bnx2x_attn_int_asserted(bp, asserted);
2887 bnx2x_attn_int_deasserted(bp, deasserted);
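/*
 * For a single attention bit the logic above works out to:
 *   newly asserted   - set in attn_bits but not yet in attn_ack or
 *                      attn_state;
 *   newly deasserted - clear in attn_bits but still set in attn_ack
 *                      and attn_state;
 * the "BAD attention state" warning fires when attn_bits and attn_ack
 * agree with each other but disagree with the driver's attn_state.
 */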
2890 static void bnx2x_sp_task(struct work_struct *work)
2892 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2896 /* Return here if interrupt is disabled */
2897 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2898 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2902 status = bnx2x_update_dsb_idx(bp);
2903 /* if (status == 0) */
2904 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2906 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2912 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2914 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2916 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2918 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2920 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2925 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2927 struct net_device *dev = dev_instance;
2928 struct bnx2x *bp = netdev_priv(dev);
2930 /* Return here if interrupt is disabled */
2931 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2932 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2936 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2938 #ifdef BNX2X_STOP_ON_ERROR
2939 if (unlikely(bp->panic))
2943 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2948 /* end of slow path */
2952 /****************************************************************************
2954 ****************************************************************************/
2956 /* sum[hi:lo] += add[hi:lo] */
2957 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2960 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2963 /* difference = minuend - subtrahend */
2964 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2966 if (m_lo < s_lo) { \
2968 d_hi = m_hi - s_hi; \
2970 /* we can 'loan' 1 */ \
2972 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2974 /* m_hi <= s_hi */ \
2979 /* m_lo >= s_lo */ \
2980 if (m_hi < s_hi) { \
2984 /* m_hi >= s_hi */ \
2985 d_hi = m_hi - s_hi; \
2986 d_lo = m_lo - s_lo; \
2991 #define UPDATE_STAT64(s, t) \
2993 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2994 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2995 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2996 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2997 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2998 pstats->mac_stx[1].t##_lo, diff.lo); \
3001 #define UPDATE_STAT64_NIG(s, t) \
3003 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3004 diff.lo, new->s##_lo, old->s##_lo); \
3005 ADD_64(estats->t##_hi, diff.hi, \
3006 estats->t##_lo, diff.lo); \
3009 /* sum[hi:lo] += add */
3010 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3013 s_hi += (s_lo < a) ? 1 : 0; \
3016 #define UPDATE_EXTEND_STAT(s) \
3018 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3019 pstats->mac_stx[1].s##_lo, \
3023 #define UPDATE_EXTEND_TSTAT(s, t) \
3025 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3026 old_tclient->s = tclient->s; \
3027 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3030 #define UPDATE_EXTEND_USTAT(s, t) \
3032 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3033 old_uclient->s = uclient->s; \
3034 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3037 #define UPDATE_EXTEND_XSTAT(s, t) \
3039 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3040 old_xclient->s = xclient->s; \
3041 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3044 /* minuend -= subtrahend */
3045 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3047 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3050 /* minuend[hi:lo] -= subtrahend */
3051 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3053 SUB_64(m_hi, 0, m_lo, s); \
3056 #define SUB_EXTEND_USTAT(s, t) \
3058 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3059 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
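/*
 * All of the macros above keep 64-bit counters as two u32 words
 * (_hi/_lo) because the hardware and firmware export them that way;
 * UPDATE_STAT64, for example, uses mac_stx[0] as the last raw MAC
 * snapshot and accumulates the running 64-bit total in mac_stx[1].
 * A minimal sketch of the same carry handling on plain variables
 * (illustration only, not driver code; the helper name is made up):
 */
#if 0
static inline void example_add_hilo(u32 *s_hi, u32 *s_lo, u32 a_hi, u32 a_lo)
{
	u32 old_lo = *s_lo;

	*s_lo += a_lo;				/* may wrap around zero */
	*s_hi += a_hi + (*s_lo < old_lo);	/* carry into the high word */
}
#endif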
3063 * General service functions
3066 static inline long bnx2x_hilo(u32 *hiref)
3068 u32 lo = *(hiref + 1);
3069 #if (BITS_PER_LONG == 64)
3072 return HILO_U64(hi, lo);
3079 * Init service functions
3082 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3084 if (!bp->stats_pending) {
3085 struct eth_query_ramrod_data ramrod_data = {0};
3088 ramrod_data.drv_counter = bp->stats_counter++;
3089 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3090 for_each_queue(bp, i)
3091 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3093 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3094 ((u32 *)&ramrod_data)[1],
3095 ((u32 *)&ramrod_data)[0], 0);
3097 /* stats ramrod has its own slot on the spq */
3099 bp->stats_pending = 1;
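/*
 * stats_pending acts as a one-deep gate: once a STAT_QUERY ramrod is in
 * flight, no new one is posted until bnx2x_storm_stats_update() sees the
 * storms report the matching stats_counter and clears the flag; if that
 * does not happen after several update events, bnx2x_stats_update()
 * complains (see the "storm stats were not updated" error below).
 */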
3104 static void bnx2x_stats_init(struct bnx2x *bp)
3106 int port = BP_PORT(bp);
3109 bp->stats_pending = 0;
3110 bp->executer_idx = 0;
3111 bp->stats_counter = 0;
3115 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3117 bp->port.port_stx = 0;
3118 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3120 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3121 bp->port.old_nig_stats.brb_discard =
3122 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3123 bp->port.old_nig_stats.brb_truncate =
3124 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3125 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3126 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3127 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3128 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3130 /* function stats */
3131 for_each_queue(bp, i) {
3132 struct bnx2x_fastpath *fp = &bp->fp[i];
3134 memset(&fp->old_tclient, 0,
3135 sizeof(struct tstorm_per_client_stats));
3136 memset(&fp->old_uclient, 0,
3137 sizeof(struct ustorm_per_client_stats));
3138 memset(&fp->old_xclient, 0,
3139 sizeof(struct xstorm_per_client_stats));
3140 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3143 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3144 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3146 bp->stats_state = STATS_STATE_DISABLED;
3147 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3148 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3151 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3153 struct dmae_command *dmae = &bp->stats_dmae;
3154 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3156 *stats_comp = DMAE_COMP_VAL;
3157 if (CHIP_REV_IS_SLOW(bp))
3161 if (bp->executer_idx) {
3162 int loader_idx = PMF_DMAE_C(bp);
3164 memset(dmae, 0, sizeof(struct dmae_command));
3166 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3167 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3168 DMAE_CMD_DST_RESET |
3170 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3172 DMAE_CMD_ENDIANITY_DW_SWAP |
3174 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3176 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3177 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3178 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3179 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3180 sizeof(struct dmae_command) *
3181 (loader_idx + 1)) >> 2;
3182 dmae->dst_addr_hi = 0;
3183 dmae->len = sizeof(struct dmae_command) >> 2;
3186 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3187 dmae->comp_addr_hi = 0;
3191 bnx2x_post_dmae(bp, dmae, loader_idx);
3193 } else if (bp->func_stx) {
3195 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3199 static int bnx2x_stats_comp(struct bnx2x *bp)
3201 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3205 while (*stats_comp != DMAE_COMP_VAL) {
3207 BNX2X_ERR("timeout waiting for stats finished\n");
3217 * Statistics service functions
3220 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3222 struct dmae_command *dmae;
3224 int loader_idx = PMF_DMAE_C(bp);
3225 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3228 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3229 BNX2X_ERR("BUG!\n");
3233 bp->executer_idx = 0;
3235 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3237 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3239 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3241 DMAE_CMD_ENDIANITY_DW_SWAP |
3243 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3244 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3246 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3247 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3248 dmae->src_addr_lo = bp->port.port_stx >> 2;
3249 dmae->src_addr_hi = 0;
3250 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3251 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3252 dmae->len = DMAE_LEN32_RD_MAX;
3253 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3254 dmae->comp_addr_hi = 0;
3257 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3258 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3259 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3260 dmae->src_addr_hi = 0;
3261 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3262 DMAE_LEN32_RD_MAX * 4);
3263 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3264 DMAE_LEN32_RD_MAX * 4);
3265 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3266 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3267 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3268 dmae->comp_val = DMAE_COMP_VAL;
3271 bnx2x_hw_stats_post(bp);
3272 bnx2x_stats_comp(bp);
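/*
 * The port statistics area is wider than a single DMAE transfer
 * (DMAE_LEN32_RD_MAX dwords), so the PMF update above reads it in two
 * chunks: the first command completes to GRC (advancing the loader),
 * the second completes to host memory and writes DMAE_COMP_VAL into
 * stats_comp, which bnx2x_stats_comp() polls for.
 */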
3275 static void bnx2x_port_stats_init(struct bnx2x *bp)
3277 struct dmae_command *dmae;
3278 int port = BP_PORT(bp);
3279 int vn = BP_E1HVN(bp);
3281 int loader_idx = PMF_DMAE_C(bp);
3283 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3286 if (!bp->link_vars.link_up || !bp->port.pmf) {
3287 BNX2X_ERR("BUG!\n");
3291 bp->executer_idx = 0;
3294 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3295 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3296 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3298 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3300 DMAE_CMD_ENDIANITY_DW_SWAP |
3302 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3303 (vn << DMAE_CMD_E1HVN_SHIFT));
3305 if (bp->port.port_stx) {
3307 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3308 dmae->opcode = opcode;
3309 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3310 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3311 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3312 dmae->dst_addr_hi = 0;
3313 dmae->len = sizeof(struct host_port_stats) >> 2;
3314 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3315 dmae->comp_addr_hi = 0;
3321 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322 dmae->opcode = opcode;
3323 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3324 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3325 dmae->dst_addr_lo = bp->func_stx >> 2;
3326 dmae->dst_addr_hi = 0;
3327 dmae->len = sizeof(struct host_func_stats) >> 2;
3328 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329 dmae->comp_addr_hi = 0;
3334 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3335 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3336 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3338 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3340 DMAE_CMD_ENDIANITY_DW_SWAP |
3342 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3343 (vn << DMAE_CMD_E1HVN_SHIFT));
3345 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3347 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3348 NIG_REG_INGRESS_BMAC0_MEM);
3350 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3351 BIGMAC_REGISTER_TX_STAT_GTBYT */
3352 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353 dmae->opcode = opcode;
3354 dmae->src_addr_lo = (mac_addr +
3355 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3356 dmae->src_addr_hi = 0;
3357 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3358 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3359 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3360 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3361 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3362 dmae->comp_addr_hi = 0;
3365 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3366 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3367 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368 dmae->opcode = opcode;
3369 dmae->src_addr_lo = (mac_addr +
3370 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3371 dmae->src_addr_hi = 0;
3372 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3373 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3374 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3375 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3376 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3377 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3378 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3379 dmae->comp_addr_hi = 0;
3382 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3384 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3386 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3387 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3388 dmae->opcode = opcode;
3389 dmae->src_addr_lo = (mac_addr +
3390 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3391 dmae->src_addr_hi = 0;
3392 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3393 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3394 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3395 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3396 dmae->comp_addr_hi = 0;
3399 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3400 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3401 dmae->opcode = opcode;
3402 dmae->src_addr_lo = (mac_addr +
3403 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3404 dmae->src_addr_hi = 0;
3405 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3406 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3407 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3408 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3410 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3411 dmae->comp_addr_hi = 0;
3414 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3415 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3416 dmae->opcode = opcode;
3417 dmae->src_addr_lo = (mac_addr +
3418 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3419 dmae->src_addr_hi = 0;
3420 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3421 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3422 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3423 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3424 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3425 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3426 dmae->comp_addr_hi = 0;
3431 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3432 dmae->opcode = opcode;
3433 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3434 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3435 dmae->src_addr_hi = 0;
3436 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3437 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3438 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3439 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3440 dmae->comp_addr_hi = 0;
3443 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3444 dmae->opcode = opcode;
3445 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3446 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3447 dmae->src_addr_hi = 0;
3448 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3449 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3450 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3451 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3452 dmae->len = (2*sizeof(u32)) >> 2;
3453 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3454 dmae->comp_addr_hi = 0;
3457 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3458 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3459 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3460 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3462 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3464 DMAE_CMD_ENDIANITY_DW_SWAP |
3466 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3467 (vn << DMAE_CMD_E1HVN_SHIFT));
3468 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3469 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3470 dmae->src_addr_hi = 0;
3471 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3472 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3473 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3474 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3475 dmae->len = (2*sizeof(u32)) >> 2;
3476 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3477 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3478 dmae->comp_val = DMAE_COMP_VAL;
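/*
 * Note how the chain built above is terminated: every intermediate DMAE
 * command completes by writing to a DMAE "go" register
 * (dmae_reg_go_c[loader_idx]), which is what advances the chain, while
 * only the last command completes to the stats_comp word in host memory
 * with DMAE_COMP_VAL, so a single completion marks the whole
 * port/MAC/NIG statistics sweep as done.
 */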
3483 static void bnx2x_func_stats_init(struct bnx2x *bp)
3485 struct dmae_command *dmae = &bp->stats_dmae;
3486 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3489 if (!bp->func_stx) {
3490 BNX2X_ERR("BUG!\n");
3494 bp->executer_idx = 0;
3495 memset(dmae, 0, sizeof(struct dmae_command));
3497 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3498 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3499 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3501 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3503 DMAE_CMD_ENDIANITY_DW_SWAP |
3505 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3506 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3507 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3508 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3509 dmae->dst_addr_lo = bp->func_stx >> 2;
3510 dmae->dst_addr_hi = 0;
3511 dmae->len = sizeof(struct host_func_stats) >> 2;
3512 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3513 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3514 dmae->comp_val = DMAE_COMP_VAL;
3519 static void bnx2x_stats_start(struct bnx2x *bp)
3522 bnx2x_port_stats_init(bp);
3524 else if (bp->func_stx)
3525 bnx2x_func_stats_init(bp);
3527 bnx2x_hw_stats_post(bp);
3528 bnx2x_storm_stats_post(bp);
3531 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3533 bnx2x_stats_comp(bp);
3534 bnx2x_stats_pmf_update(bp);
3535 bnx2x_stats_start(bp);
3538 static void bnx2x_stats_restart(struct bnx2x *bp)
3540 bnx2x_stats_comp(bp);
3541 bnx2x_stats_start(bp);
3544 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3546 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3547 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3548 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3554 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3555 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3556 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3557 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3558 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3559 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3560 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3561 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3562 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3563 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3564 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3565 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3566 UPDATE_STAT64(tx_stat_gt127,
3567 tx_stat_etherstatspkts65octetsto127octets);
3568 UPDATE_STAT64(tx_stat_gt255,
3569 tx_stat_etherstatspkts128octetsto255octets);
3570 UPDATE_STAT64(tx_stat_gt511,
3571 tx_stat_etherstatspkts256octetsto511octets);
3572 UPDATE_STAT64(tx_stat_gt1023,
3573 tx_stat_etherstatspkts512octetsto1023octets);
3574 UPDATE_STAT64(tx_stat_gt1518,
3575 tx_stat_etherstatspkts1024octetsto1522octets);
3576 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3577 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3578 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3579 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3580 UPDATE_STAT64(tx_stat_gterr,
3581 tx_stat_dot3statsinternalmactransmiterrors);
3582 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3584 estats->pause_frames_received_hi =
3585 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3586 estats->pause_frames_received_lo =
3587 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3589 estats->pause_frames_sent_hi =
3590 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3591 estats->pause_frames_sent_lo =
3592 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3595 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3597 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3598 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3599 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3601 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3602 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3603 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3604 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3605 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3606 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3607 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3608 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3609 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3610 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3611 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3612 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3613 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3614 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3615 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3616 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3617 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3618 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3619 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3620 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3621 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3622 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3623 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3624 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3626 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3627 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3628 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3629 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3630 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3631 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3633 estats->pause_frames_received_hi =
3634 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3635 estats->pause_frames_received_lo =
3636 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3637 ADD_64(estats->pause_frames_received_hi,
3638 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3639 estats->pause_frames_received_lo,
3640 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3642 estats->pause_frames_sent_hi =
3643 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3644 estats->pause_frames_sent_lo =
3645 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3646 ADD_64(estats->pause_frames_sent_hi,
3647 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3648 estats->pause_frames_sent_lo,
3649 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3652 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3654 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3655 struct nig_stats *old = &(bp->port.old_nig_stats);
3656 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3657 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3664 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3665 bnx2x_bmac_stats_update(bp);
3667 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3668 bnx2x_emac_stats_update(bp);
3670 else { /* unreached */
3671 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3675 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3676 new->brb_discard - old->brb_discard);
3677 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3678 new->brb_truncate - old->brb_truncate);
3680 UPDATE_STAT64_NIG(egress_mac_pkt0,
3681 etherstatspkts1024octetsto1522octets);
3682 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3684 memcpy(old, new, sizeof(struct nig_stats));
3686 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3687 sizeof(struct mac_stx));
3688 estats->brb_drop_hi = pstats->brb_drop_hi;
3689 estats->brb_drop_lo = pstats->brb_drop_lo;
3691 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3693 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3694 if (nig_timer_max != estats->nig_timer_max) {
3695 estats->nig_timer_max = nig_timer_max;
3696 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3702 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3704 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3705 struct tstorm_per_port_stats *tport =
3706 &stats->tstorm_common.port_statistics;
3707 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3708 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3711 memset(&(fstats->total_bytes_received_hi), 0,
3712 sizeof(struct host_func_stats) - 2*sizeof(u32));
3713 estats->error_bytes_received_hi = 0;
3714 estats->error_bytes_received_lo = 0;
3715 estats->etherstatsoverrsizepkts_hi = 0;
3716 estats->etherstatsoverrsizepkts_lo = 0;
3717 estats->no_buff_discard_hi = 0;
3718 estats->no_buff_discard_lo = 0;
3720 for_each_queue(bp, i) {
3721 struct bnx2x_fastpath *fp = &bp->fp[i];
3722 int cl_id = fp->cl_id;
3723 struct tstorm_per_client_stats *tclient =
3724 &stats->tstorm_common.client_statistics[cl_id];
3725 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3726 struct ustorm_per_client_stats *uclient =
3727 &stats->ustorm_common.client_statistics[cl_id];
3728 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3729 struct xstorm_per_client_stats *xclient =
3730 &stats->xstorm_common.client_statistics[cl_id];
3731 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3732 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3735 /* are storm stats valid? */
3736 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3737 bp->stats_counter) {
3738 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3739 " xstorm counter (%d) != stats_counter (%d)\n",
3740 i, xclient->stats_counter, bp->stats_counter);
3743 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3744 bp->stats_counter) {
3745 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3746 " tstorm counter (%d) != stats_counter (%d)\n",
3747 i, tclient->stats_counter, bp->stats_counter);
3750 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3751 bp->stats_counter) {
3752 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3753 " ustorm counter (%d) != stats_counter (%d)\n",
3754 i, uclient->stats_counter, bp->stats_counter);
3758 qstats->total_bytes_received_hi =
3759 qstats->valid_bytes_received_hi =
3760 le32_to_cpu(tclient->total_rcv_bytes.hi);
3761 qstats->total_bytes_received_lo =
3762 qstats->valid_bytes_received_lo =
3763 le32_to_cpu(tclient->total_rcv_bytes.lo);
3765 qstats->error_bytes_received_hi =
3766 le32_to_cpu(tclient->rcv_error_bytes.hi);
3767 qstats->error_bytes_received_lo =
3768 le32_to_cpu(tclient->rcv_error_bytes.lo);
3770 ADD_64(qstats->total_bytes_received_hi,
3771 qstats->error_bytes_received_hi,
3772 qstats->total_bytes_received_lo,
3773 qstats->error_bytes_received_lo);
3775 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3776 total_unicast_packets_received);
3777 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3778 total_multicast_packets_received);
3779 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3780 total_broadcast_packets_received);
3781 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3782 etherstatsoverrsizepkts);
3783 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3785 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3786 total_unicast_packets_received);
3787 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3788 total_multicast_packets_received);
3789 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3790 total_broadcast_packets_received);
3791 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3792 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3793 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3795 qstats->total_bytes_transmitted_hi =
3796 le32_to_cpu(xclient->total_sent_bytes.hi);
3797 qstats->total_bytes_transmitted_lo =
3798 le32_to_cpu(xclient->total_sent_bytes.lo);
3800 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3801 total_unicast_packets_transmitted);
3802 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3803 total_multicast_packets_transmitted);
3804 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3805 total_broadcast_packets_transmitted);
3807 old_tclient->checksum_discard = tclient->checksum_discard;
3808 old_tclient->ttl0_discard = tclient->ttl0_discard;
3810 ADD_64(fstats->total_bytes_received_hi,
3811 qstats->total_bytes_received_hi,
3812 fstats->total_bytes_received_lo,
3813 qstats->total_bytes_received_lo);
3814 ADD_64(fstats->total_bytes_transmitted_hi,
3815 qstats->total_bytes_transmitted_hi,
3816 fstats->total_bytes_transmitted_lo,
3817 qstats->total_bytes_transmitted_lo);
3818 ADD_64(fstats->total_unicast_packets_received_hi,
3819 qstats->total_unicast_packets_received_hi,
3820 fstats->total_unicast_packets_received_lo,
3821 qstats->total_unicast_packets_received_lo);
3822 ADD_64(fstats->total_multicast_packets_received_hi,
3823 qstats->total_multicast_packets_received_hi,
3824 fstats->total_multicast_packets_received_lo,
3825 qstats->total_multicast_packets_received_lo);
3826 ADD_64(fstats->total_broadcast_packets_received_hi,
3827 qstats->total_broadcast_packets_received_hi,
3828 fstats->total_broadcast_packets_received_lo,
3829 qstats->total_broadcast_packets_received_lo);
3830 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3831 qstats->total_unicast_packets_transmitted_hi,
3832 fstats->total_unicast_packets_transmitted_lo,
3833 qstats->total_unicast_packets_transmitted_lo);
3834 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3835 qstats->total_multicast_packets_transmitted_hi,
3836 fstats->total_multicast_packets_transmitted_lo,
3837 qstats->total_multicast_packets_transmitted_lo);
3838 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3839 qstats->total_broadcast_packets_transmitted_hi,
3840 fstats->total_broadcast_packets_transmitted_lo,
3841 qstats->total_broadcast_packets_transmitted_lo);
3842 ADD_64(fstats->valid_bytes_received_hi,
3843 qstats->valid_bytes_received_hi,
3844 fstats->valid_bytes_received_lo,
3845 qstats->valid_bytes_received_lo);
3847 ADD_64(estats->error_bytes_received_hi,
3848 qstats->error_bytes_received_hi,
3849 estats->error_bytes_received_lo,
3850 qstats->error_bytes_received_lo);
3851 ADD_64(estats->etherstatsoverrsizepkts_hi,
3852 qstats->etherstatsoverrsizepkts_hi,
3853 estats->etherstatsoverrsizepkts_lo,
3854 qstats->etherstatsoverrsizepkts_lo);
3855 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3856 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3859 ADD_64(fstats->total_bytes_received_hi,
3860 estats->rx_stat_ifhcinbadoctets_hi,
3861 fstats->total_bytes_received_lo,
3862 estats->rx_stat_ifhcinbadoctets_lo);
3864 memcpy(estats, &(fstats->total_bytes_received_hi),
3865 sizeof(struct host_func_stats) - 2*sizeof(u32));
3867 ADD_64(estats->etherstatsoverrsizepkts_hi,
3868 estats->rx_stat_dot3statsframestoolong_hi,
3869 estats->etherstatsoverrsizepkts_lo,
3870 estats->rx_stat_dot3statsframestoolong_lo);
3871 ADD_64(estats->error_bytes_received_hi,
3872 estats->rx_stat_ifhcinbadoctets_hi,
3873 estats->error_bytes_received_lo,
3874 estats->rx_stat_ifhcinbadoctets_lo);
3877 estats->mac_filter_discard =
3878 le32_to_cpu(tport->mac_filter_discard);
3879 estats->xxoverflow_discard =
3880 le32_to_cpu(tport->xxoverflow_discard);
3881 estats->brb_truncate_discard =
3882 le32_to_cpu(tport->brb_truncate_discard);
3883 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3886 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3888 bp->stats_pending = 0;
3893 static void bnx2x_net_stats_update(struct bnx2x *bp)
3895 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3896 struct net_device_stats *nstats = &bp->dev->stats;
3899 nstats->rx_packets =
3900 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3901 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3902 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3904 nstats->tx_packets =
3905 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3906 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3907 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3909 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3911 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3913 nstats->rx_dropped = estats->mac_discard;
3914 for_each_queue(bp, i)
3915 nstats->rx_dropped +=
3916 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3918 nstats->tx_dropped = 0;
3921 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3923 nstats->collisions =
3924 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3926 nstats->rx_length_errors =
3927 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3928 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3929 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3930 bnx2x_hilo(&estats->brb_truncate_hi);
3931 nstats->rx_crc_errors =
3932 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3933 nstats->rx_frame_errors =
3934 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3935 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3936 nstats->rx_missed_errors = estats->xxoverflow_discard;
3938 nstats->rx_errors = nstats->rx_length_errors +
3939 nstats->rx_over_errors +
3940 nstats->rx_crc_errors +
3941 nstats->rx_frame_errors +
3942 nstats->rx_fifo_errors +
3943 nstats->rx_missed_errors;
3945 nstats->tx_aborted_errors =
3946 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3947 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3948 nstats->tx_carrier_errors =
3949 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3950 nstats->tx_fifo_errors = 0;
3951 nstats->tx_heartbeat_errors = 0;
3952 nstats->tx_window_errors = 0;
3954 nstats->tx_errors = nstats->tx_aborted_errors +
3955 nstats->tx_carrier_errors +
3956 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3959 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3961 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3964 estats->driver_xoff = 0;
3965 estats->rx_err_discard_pkt = 0;
3966 estats->rx_skb_alloc_failed = 0;
3967 estats->hw_csum_err = 0;
3968 for_each_queue(bp, i) {
3969 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3971 estats->driver_xoff += qstats->driver_xoff;
3972 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3973 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3974 estats->hw_csum_err += qstats->hw_csum_err;
3978 static void bnx2x_stats_update(struct bnx2x *bp)
3980 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3982 if (*stats_comp != DMAE_COMP_VAL)
3986 bnx2x_hw_stats_update(bp);
3988 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3989 BNX2X_ERR("storm stats were not updated for 3 times\n");
3994 bnx2x_net_stats_update(bp);
3995 bnx2x_drv_stats_update(bp);
3997 if (bp->msglevel & NETIF_MSG_TIMER) {
3998 struct tstorm_per_client_stats *old_tclient =
3999 &bp->fp->old_tclient;
4000 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4001 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4002 struct net_device_stats *nstats = &bp->dev->stats;
4005 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4006 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4008 bnx2x_tx_avail(bp->fp),
4009 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4010 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4012 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4013 bp->fp->rx_comp_cons),
4014 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4015 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4016 "brb truncate %u\n",
4017 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4018 qstats->driver_xoff,
4019 estats->brb_drop_lo, estats->brb_truncate_lo);
4020 printk(KERN_DEBUG "tstats: checksum_discard %u "
4021 "packets_too_big_discard %lu no_buff_discard %lu "
4022 "mac_discard %u mac_filter_discard %u "
4023 "xxovrflow_discard %u brb_truncate_discard %u "
4024 "ttl0_discard %u\n",
4025 le32_to_cpu(old_tclient->checksum_discard),
4026 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4027 bnx2x_hilo(&qstats->no_buff_discard_hi),
4028 estats->mac_discard, estats->mac_filter_discard,
4029 estats->xxoverflow_discard, estats->brb_truncate_discard,
4030 le32_to_cpu(old_tclient->ttl0_discard));
4032 for_each_queue(bp, i) {
4033 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4034 bnx2x_fp(bp, i, tx_pkt),
4035 bnx2x_fp(bp, i, rx_pkt),
4036 bnx2x_fp(bp, i, rx_calls));
4040 bnx2x_hw_stats_post(bp);
4041 bnx2x_storm_stats_post(bp);
4044 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4046 struct dmae_command *dmae;
4048 int loader_idx = PMF_DMAE_C(bp);
4049 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4051 bp->executer_idx = 0;
4053 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4055 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4057 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4059 DMAE_CMD_ENDIANITY_DW_SWAP |
4061 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4062 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4064 if (bp->port.port_stx) {
4066 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4068 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4070 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4071 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4072 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4073 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4074 dmae->dst_addr_hi = 0;
4075 dmae->len = sizeof(struct host_port_stats) >> 2;
4077 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4078 dmae->comp_addr_hi = 0;
4081 dmae->comp_addr_lo =
4082 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4083 dmae->comp_addr_hi =
4084 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4085 dmae->comp_val = DMAE_COMP_VAL;
4093 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4095 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4096 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4097 dmae->dst_addr_lo = bp->func_stx >> 2;
4098 dmae->dst_addr_hi = 0;
4099 dmae->len = sizeof(struct host_func_stats) >> 2;
4100 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4101 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4102 dmae->comp_val = DMAE_COMP_VAL;
4108 static void bnx2x_stats_stop(struct bnx2x *bp)
4112 bnx2x_stats_comp(bp);
4115 update = (bnx2x_hw_stats_update(bp) == 0);
4117 update |= (bnx2x_storm_stats_update(bp) == 0);
4120 bnx2x_net_stats_update(bp);
4123 bnx2x_port_stats_stop(bp);
4125 bnx2x_hw_stats_post(bp);
4126 bnx2x_stats_comp(bp);
4130 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4134 static const struct {
4135 void (*action)(struct bnx2x *bp);
4136 enum bnx2x_stats_state next_state;
4137 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4140 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4141 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4142 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4143 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4146 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4147 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4148 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4149 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4153 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4155 enum bnx2x_stats_state state = bp->stats_state;
4157 bnx2x_stats_stm[state][event].action(bp);
4158 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4160 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4161 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4162 state, event, bp->stats_state);
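/*
 * Example transition through the table above: with statistics DISABLED,
 * a LINK_UP event runs bnx2x_stats_start() and moves the state machine
 * to ENABLED; a later STOP event runs bnx2x_stats_stop() and returns it
 * to DISABLED, while UPDATE events in the DISABLED state are ignored
 * (bnx2x_stats_do_nothing).
 */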
4165 static void bnx2x_timer(unsigned long data)
4167 struct bnx2x *bp = (struct bnx2x *) data;
4169 if (!netif_running(bp->dev))
4172 if (atomic_read(&bp->intr_sem) != 0)
4176 struct bnx2x_fastpath *fp = &bp->fp[0];
4179 bnx2x_tx_int(fp, 1000);
4180 rc = bnx2x_rx_int(fp, 1000);
4183 if (!BP_NOMCP(bp)) {
4184 int func = BP_FUNC(bp);
4188 ++bp->fw_drv_pulse_wr_seq;
4189 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4190 /* TBD - add SYSTEM_TIME */
4191 drv_pulse = bp->fw_drv_pulse_wr_seq;
4192 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4194 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4195 MCP_PULSE_SEQ_MASK);
4196 /* The delta between driver pulse and mcp response
4197 * should be 1 (before mcp response) or 0 (after mcp response)
4199 if ((drv_pulse != mcp_pulse) &&
4200 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4201 /* someone lost a heartbeat... */
4202 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4203 drv_pulse, mcp_pulse);
4207 if ((bp->state == BNX2X_STATE_OPEN) ||
4208 (bp->state == BNX2X_STATE_DISABLED))
4209 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4212 mod_timer(&bp->timer, jiffies + bp->current_interval);
4215 /* end of Statistics */
4220 * nic init service functions
4223 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4225 int port = BP_PORT(bp);
4227 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4228 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4229 sizeof(struct ustorm_status_block)/4);
4230 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4231 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4232 sizeof(struct cstorm_status_block)/4);
4235 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4236 dma_addr_t mapping, int sb_id)
4238 int port = BP_PORT(bp);
4239 int func = BP_FUNC(bp);
4244 section = ((u64)mapping) + offsetof(struct host_status_block,
4246 sb->u_status_block.status_block_id = sb_id;
4248 REG_WR(bp, BAR_USTRORM_INTMEM +
4249 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4250 REG_WR(bp, BAR_USTRORM_INTMEM +
4251 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4253 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4254 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4256 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4257 REG_WR16(bp, BAR_USTRORM_INTMEM +
4258 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4261 section = ((u64)mapping) + offsetof(struct host_status_block,
4263 sb->c_status_block.status_block_id = sb_id;
4265 REG_WR(bp, BAR_CSTRORM_INTMEM +
4266 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4267 REG_WR(bp, BAR_CSTRORM_INTMEM +
4268 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4270 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4271 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4273 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4274 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4275 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4277 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4280 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4282 int func = BP_FUNC(bp);
4284 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4285 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4286 sizeof(struct ustorm_def_status_block)/4);
4287 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4288 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4289 sizeof(struct cstorm_def_status_block)/4);
4290 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4291 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4292 sizeof(struct xstorm_def_status_block)/4);
4293 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4294 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4295 sizeof(struct tstorm_def_status_block)/4);
4298 static void bnx2x_init_def_sb(struct bnx2x *bp,
4299 struct host_def_status_block *def_sb,
4300 dma_addr_t mapping, int sb_id)
4302 int port = BP_PORT(bp);
4303 int func = BP_FUNC(bp);
4304 int index, val, reg_offset;
4308 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4309 atten_status_block);
4310 def_sb->atten_status_block.status_block_id = sb_id;
4314 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4315 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4317 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4318 bp->attn_group[index].sig[0] = REG_RD(bp,
4319 reg_offset + 0x10*index);
4320 bp->attn_group[index].sig[1] = REG_RD(bp,
4321 reg_offset + 0x4 + 0x10*index);
4322 bp->attn_group[index].sig[2] = REG_RD(bp,
4323 reg_offset + 0x8 + 0x10*index);
4324 bp->attn_group[index].sig[3] = REG_RD(bp,
4325 reg_offset + 0xc + 0x10*index);
4328 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4329 HC_REG_ATTN_MSG0_ADDR_L);
4331 REG_WR(bp, reg_offset, U64_LO(section));
4332 REG_WR(bp, reg_offset + 4, U64_HI(section));
4334 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4336 val = REG_RD(bp, reg_offset);
4338 REG_WR(bp, reg_offset, val);
4341 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4342 u_def_status_block);
4343 def_sb->u_def_status_block.status_block_id = sb_id;
4345 REG_WR(bp, BAR_USTRORM_INTMEM +
4346 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4347 REG_WR(bp, BAR_USTRORM_INTMEM +
4348 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4350 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4351 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4353 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4354 REG_WR16(bp, BAR_USTRORM_INTMEM +
4355 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4358 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4359 c_def_status_block);
4360 def_sb->c_def_status_block.status_block_id = sb_id;
4362 REG_WR(bp, BAR_CSTRORM_INTMEM +
4363 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4364 REG_WR(bp, BAR_CSTRORM_INTMEM +
4365 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4367 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4368 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4370 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4371 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4372 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4375 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4376 t_def_status_block);
4377 def_sb->t_def_status_block.status_block_id = sb_id;
4379 REG_WR(bp, BAR_TSTRORM_INTMEM +
4380 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4381 REG_WR(bp, BAR_TSTRORM_INTMEM +
4382 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4384 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4385 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4387 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4388 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4389 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4392 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4393 x_def_status_block);
4394 def_sb->x_def_status_block.status_block_id = sb_id;
4396 REG_WR(bp, BAR_XSTRORM_INTMEM +
4397 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4398 REG_WR(bp, BAR_XSTRORM_INTMEM +
4399 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4401 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4402 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4404 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4405 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4406 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4408 bp->stats_pending = 0;
4409 bp->set_mac_pending = 0;
4411 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
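/* Program host coalescing for every queue: a timeout is written for the
 * Rx CQ index (USTORM) and the Tx CQ index (CSTORM) of each status
 * block, and coalescing on an index is disabled whenever the
 * corresponding rx_ticks/tx_ticks value is zero.
 */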
4414 static void bnx2x_update_coalesce(struct bnx2x *bp)
4416 int port = BP_PORT(bp);
4419 for_each_queue(bp, i) {
4420 int sb_id = bp->fp[i].sb_id;
4422 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4423 REG_WR8(bp, BAR_USTRORM_INTMEM +
4424 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4425 U_SB_ETH_RX_CQ_INDEX),
4427 REG_WR16(bp, BAR_USTRORM_INTMEM +
4428 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4429 U_SB_ETH_RX_CQ_INDEX),
4430 bp->rx_ticks ? 0 : 1);
4432 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4433 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4434 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4435 C_SB_ETH_TX_CQ_INDEX),
4437 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4438 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4439 C_SB_ETH_TX_CQ_INDEX),
4440 bp->tx_ticks ? 0 : 1);
4444 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4445 struct bnx2x_fastpath *fp, int last)
4449 for (i = 0; i < last; i++) {
4450 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4451 struct sk_buff *skb = rx_buf->skb;
4454 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4458 if (fp->tpa_state[i] == BNX2X_TPA_START)
4459 pci_unmap_single(bp->pdev,
4460 pci_unmap_addr(rx_buf, mapping),
4461 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
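/* Rx ring initialization: size the Rx buffers from the MTU, pre-allocate
 * the per-queue TPA skb pool when TPA is enabled (falling back to
 * non-TPA on allocation failure), chain the "next page" elements of the
 * SGE, BD and CQE rings, fill the rings with SGE pages and skbs, and
 * finally publish the producers to the chip.
 */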
4468 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4470 int func = BP_FUNC(bp);
4471 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4472 ETH_MAX_AGGREGATION_QUEUES_E1H;
4473 u16 ring_prod, cqe_ring_prod;
4476 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4478 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4480 if (bp->flags & TPA_ENABLE_FLAG) {
4482 for_each_rx_queue(bp, j) {
4483 struct bnx2x_fastpath *fp = &bp->fp[j];
4485 for (i = 0; i < max_agg_queues; i++) {
4486 fp->tpa_pool[i].skb =
4487 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4488 if (!fp->tpa_pool[i].skb) {
4489 BNX2X_ERR("Failed to allocate TPA "
4490 "skb pool for queue[%d] - "
4491 "disabling TPA on this "
4493 bnx2x_free_tpa_pool(bp, fp, i);
4494 fp->disable_tpa = 1;
4497 pci_unmap_addr_set((struct sw_rx_bd *)
4498 &bp->fp->tpa_pool[i],
4500 fp->tpa_state[i] = BNX2X_TPA_STOP;
4505 for_each_rx_queue(bp, j) {
4506 struct bnx2x_fastpath *fp = &bp->fp[j];
4509 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4510 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4512 /* "next page" elements initialization */
4514 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4515 struct eth_rx_sge *sge;
4517 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4519 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4520 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4522 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4523 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4526 bnx2x_init_sge_ring_bit_mask(fp);
4529 for (i = 1; i <= NUM_RX_RINGS; i++) {
4530 struct eth_rx_bd *rx_bd;
4532 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4534 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4535 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4537 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4538 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4542 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4543 struct eth_rx_cqe_next_page *nextpg;
4545 nextpg = (struct eth_rx_cqe_next_page *)
4546 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4548 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4549 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4551 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4552 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4555 /* Allocate SGEs and initialize the ring elements */
4556 for (i = 0, ring_prod = 0;
4557 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4559 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4560 BNX2X_ERR("was only able to allocate "
4562 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4563 /* Cleanup already allocated elements */
4564 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4565 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4566 fp->disable_tpa = 1;
4570 ring_prod = NEXT_SGE_IDX(ring_prod);
4572 fp->rx_sge_prod = ring_prod;
4574 /* Allocate BDs and initialize BD ring */
4575 fp->rx_comp_cons = 0;
4576 cqe_ring_prod = ring_prod = 0;
4577 for (i = 0; i < bp->rx_ring_size; i++) {
4578 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4579 BNX2X_ERR("was only able to allocate "
4580 "%d rx skbs on queue[%d]\n", i, j);
4581 fp->eth_q_stats.rx_skb_alloc_failed++;
4584 ring_prod = NEXT_RX_IDX(ring_prod);
4585 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4586 WARN_ON(ring_prod <= i);
4589 fp->rx_bd_prod = ring_prod;
4590 /* must not have more available CQEs than BDs */
4591 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4593 fp->rx_pkt = fp->rx_calls = 0;
4596 * this will generate an interrupt (to the TSTORM)
4597 * must only be done after chip is initialized
4599 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4604 REG_WR(bp, BAR_USTRORM_INTMEM +
4605 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4606 U64_LO(fp->rx_comp_mapping));
4607 REG_WR(bp, BAR_USTRORM_INTMEM +
4608 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4609 U64_HI(fp->rx_comp_mapping));
4613 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4617 for_each_tx_queue(bp, j) {
4618 struct bnx2x_fastpath *fp = &bp->fp[j];
4620 for (i = 1; i <= NUM_TX_RINGS; i++) {
4621 struct eth_tx_bd *tx_bd =
4622 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4625 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4626 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4628 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4629 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4632 fp->tx_pkt_prod = 0;
4633 fp->tx_pkt_cons = 0;
4636 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4641 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4643 int func = BP_FUNC(bp);
4645 spin_lock_init(&bp->spq_lock);
4647 bp->spq_left = MAX_SPQ_PENDING;
4648 bp->spq_prod_idx = 0;
4649 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4650 bp->spq_prod_bd = bp->spq;
4651 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4653 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4654 U64_LO(bp->spq_mapping));
4656 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4657 U64_HI(bp->spq_mapping));
4659 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4663 static void bnx2x_init_context(struct bnx2x *bp)
4667 for_each_queue(bp, i) {
4668 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4669 struct bnx2x_fastpath *fp = &bp->fp[i];
4670 u8 cl_id = fp->cl_id;
4671 u8 sb_id = fp->sb_id;
4673 context->ustorm_st_context.common.sb_index_numbers =
4674 BNX2X_RX_SB_INDEX_NUM;
4675 context->ustorm_st_context.common.clientId = cl_id;
4676 context->ustorm_st_context.common.status_block_id = sb_id;
4677 context->ustorm_st_context.common.flags =
4678 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4679 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4680 context->ustorm_st_context.common.statistics_counter_id =
4682 context->ustorm_st_context.common.mc_alignment_log_size =
4683 BNX2X_RX_ALIGN_SHIFT;
4684 context->ustorm_st_context.common.bd_buff_size =
4686 context->ustorm_st_context.common.bd_page_base_hi =
4687 U64_HI(fp->rx_desc_mapping);
4688 context->ustorm_st_context.common.bd_page_base_lo =
4689 U64_LO(fp->rx_desc_mapping);
4690 if (!fp->disable_tpa) {
4691 context->ustorm_st_context.common.flags |=
4692 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4693 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4694 context->ustorm_st_context.common.sge_buff_size =
4695 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4697 context->ustorm_st_context.common.sge_page_base_hi =
4698 U64_HI(fp->rx_sge_mapping);
4699 context->ustorm_st_context.common.sge_page_base_lo =
4700 U64_LO(fp->rx_sge_mapping);
4703 context->ustorm_ag_context.cdu_usage =
4704 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4705 CDU_REGION_NUMBER_UCM_AG,
4706 ETH_CONNECTION_TYPE);
4708 context->xstorm_st_context.tx_bd_page_base_hi =
4709 U64_HI(fp->tx_desc_mapping);
4710 context->xstorm_st_context.tx_bd_page_base_lo =
4711 U64_LO(fp->tx_desc_mapping);
4712 context->xstorm_st_context.db_data_addr_hi =
4713 U64_HI(fp->tx_prods_mapping);
4714 context->xstorm_st_context.db_data_addr_lo =
4715 U64_LO(fp->tx_prods_mapping);
4716 context->xstorm_st_context.statistics_data = (cl_id |
4717 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4718 context->cstorm_st_context.sb_index_number =
4719 C_SB_ETH_TX_CQ_INDEX;
4720 context->cstorm_st_context.status_block_id = sb_id;
4722 context->xstorm_ag_context.cdu_reserved =
4723 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4724 CDU_REGION_NUMBER_XCM_AG,
4725 ETH_CONNECTION_TYPE);
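/* RSS indirection table: when multi-queue RSS is enabled, each of the
 * TSTORM_INDIRECTION_TABLE_SIZE slots is filled with a client id,
 * distributing hash results round-robin across the Rx queues.
 */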
4729 static void bnx2x_init_ind_table(struct bnx2x *bp)
4731 int func = BP_FUNC(bp);
4734 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4738 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4739 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4740 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4741 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4742 bp->fp->cl_id + (i % bp->num_rx_queues));
4745 static void bnx2x_set_client_config(struct bnx2x *bp)
4747 struct tstorm_eth_client_config tstorm_client = {0};
4748 int port = BP_PORT(bp);
4751 tstorm_client.mtu = bp->dev->mtu;
4752 tstorm_client.config_flags =
4753 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4754 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4756 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4757 tstorm_client.config_flags |=
4758 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4759 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4763 if (bp->flags & TPA_ENABLE_FLAG) {
4764 tstorm_client.max_sges_for_packet =
4765 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4766 tstorm_client.max_sges_for_packet =
4767 ((tstorm_client.max_sges_for_packet +
4768 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4769 PAGES_PER_SGE_SHIFT;
4771 tstorm_client.config_flags |=
4772 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4775 for_each_queue(bp, i) {
4776 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4778 REG_WR(bp, BAR_TSTRORM_INTMEM +
4779 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4780 ((u32 *)&tstorm_client)[0]);
4781 REG_WR(bp, BAR_TSTRORM_INTMEM +
4782 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4783 ((u32 *)&tstorm_client)[1]);
4786 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4787 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
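/* The Rx mode is applied by writing per-function accept/drop masks into
 * the TSTORM MAC filter configuration: NONE drops all unicast, multicast
 * and broadcast; NORMAL accepts broadcast; ALLMULTI additionally accepts
 * all multicast; PROMISC accepts everything.  The mask selects this
 * function's bit (BP_L_ID).
 */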
4790 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4792 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4793 int mode = bp->rx_mode;
4794 int mask = (1 << BP_L_ID(bp));
4795 int func = BP_FUNC(bp);
4798 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4801 case BNX2X_RX_MODE_NONE: /* no Rx */
4802 tstorm_mac_filter.ucast_drop_all = mask;
4803 tstorm_mac_filter.mcast_drop_all = mask;
4804 tstorm_mac_filter.bcast_drop_all = mask;
4807 case BNX2X_RX_MODE_NORMAL:
4808 tstorm_mac_filter.bcast_accept_all = mask;
4811 case BNX2X_RX_MODE_ALLMULTI:
4812 tstorm_mac_filter.mcast_accept_all = mask;
4813 tstorm_mac_filter.bcast_accept_all = mask;
4816 case BNX2X_RX_MODE_PROMISC:
4817 tstorm_mac_filter.ucast_accept_all = mask;
4818 tstorm_mac_filter.mcast_accept_all = mask;
4819 tstorm_mac_filter.bcast_accept_all = mask;
4823 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4827 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4828 REG_WR(bp, BAR_TSTRORM_INTMEM +
4829 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4830 ((u32 *)&tstorm_mac_filter)[i]);
4832 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4833 ((u32 *)&tstorm_mac_filter)[i]); */
4836 if (mode != BNX2X_RX_MODE_NONE)
4837 bnx2x_set_client_config(bp);
4840 static void bnx2x_init_internal_common(struct bnx2x *bp)
4844 if (bp->flags & TPA_ENABLE_FLAG) {
4845 struct tstorm_eth_tpa_exist tpa = {0};
4849 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4851 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4855 /* Zero this manually as its initialization is
4856 currently missing in the initTool */
4857 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4858 REG_WR(bp, BAR_USTRORM_INTMEM +
4859 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4862 static void bnx2x_init_internal_port(struct bnx2x *bp)
4864 int port = BP_PORT(bp);
4866 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4867 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4868 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4869 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4872 /* Calculates the sum of vn_min_rates.
4873 It's needed for further normalizing of the min_rates.
4875 sum of vn_min_rates.
4877 0 - if all the min_rates are 0.
4878 In the latter case the fairness algorithm should be deactivated.
4879 If not all min_rates are zero then those that are zeroes will be set to 1.
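/* Illustrative example: if the configured min BW fields of the four VNs
 * were 25, 0, 50 and 0, the two zero entries would be raised to
 * DEF_MIN_RATE before summing, so fairness stays enabled; only when
 * every non-hidden VN reports 0 is vn_weight_sum forced back to 0.
 */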
4881 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4884 int port = BP_PORT(bp);
4887 bp->vn_weight_sum = 0;
4888 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4889 int func = 2*vn + port;
4891 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4892 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4893 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4895 /* Skip hidden vns */
4896 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4899 /* If min rate is zero - set it to 1 */
4901 vn_min_rate = DEF_MIN_RATE;
4905 bp->vn_weight_sum += vn_min_rate;
4908 /* ... only if all min rates are zeros - disable fairness */
4910 bp->vn_weight_sum = 0;
4913 static void bnx2x_init_internal_func(struct bnx2x *bp)
4915 struct tstorm_eth_function_common_config tstorm_config = {0};
4916 struct stats_indication_flags stats_flags = {0};
4917 int port = BP_PORT(bp);
4918 int func = BP_FUNC(bp);
4924 tstorm_config.config_flags = MULTI_FLAGS(bp);
4925 tstorm_config.rss_result_mask = MULTI_MASK;
4928 tstorm_config.config_flags |=
4929 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4931 tstorm_config.leading_client_id = BP_L_ID(bp);
4933 REG_WR(bp, BAR_TSTRORM_INTMEM +
4934 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4935 (*(u32 *)&tstorm_config));
4937 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4938 bnx2x_set_storm_rx_mode(bp);
4940 for_each_queue(bp, i) {
4941 u8 cl_id = bp->fp[i].cl_id;
4943 /* reset xstorm per client statistics */
4944 offset = BAR_XSTRORM_INTMEM +
4945 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4947 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4948 REG_WR(bp, offset + j*4, 0);
4950 /* reset tstorm per client statistics */
4951 offset = BAR_TSTRORM_INTMEM +
4952 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4954 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4955 REG_WR(bp, offset + j*4, 0);
4957 /* reset ustorm per client statistics */
4958 offset = BAR_USTRORM_INTMEM +
4959 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4961 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4962 REG_WR(bp, offset + j*4, 0);
4965 /* Init statistics related context */
4966 stats_flags.collect_eth = 1;
4968 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4969 ((u32 *)&stats_flags)[0]);
4970 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4971 ((u32 *)&stats_flags)[1]);
4973 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4974 ((u32 *)&stats_flags)[0]);
4975 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4976 ((u32 *)&stats_flags)[1]);
4978 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4979 ((u32 *)&stats_flags)[0]);
4980 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4981 ((u32 *)&stats_flags)[1]);
4983 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4984 ((u32 *)&stats_flags)[0]);
4985 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4986 ((u32 *)&stats_flags)[1]);
4988 REG_WR(bp, BAR_XSTRORM_INTMEM +
4989 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4990 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4991 REG_WR(bp, BAR_XSTRORM_INTMEM +
4992 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4993 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4995 REG_WR(bp, BAR_TSTRORM_INTMEM +
4996 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998 REG_WR(bp, BAR_TSTRORM_INTMEM +
4999 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5002 REG_WR(bp, BAR_USTRORM_INTMEM +
5003 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5004 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5005 REG_WR(bp, BAR_USTRORM_INTMEM +
5006 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5007 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5009 if (CHIP_IS_E1H(bp)) {
5010 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5012 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5014 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5016 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5019 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5023 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5025 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5026 SGE_PAGE_SIZE * PAGES_PER_SGE),
5028 for_each_rx_queue(bp, i) {
5029 struct bnx2x_fastpath *fp = &bp->fp[i];
5031 REG_WR(bp, BAR_USTRORM_INTMEM +
5032 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5033 U64_LO(fp->rx_comp_mapping));
5034 REG_WR(bp, BAR_USTRORM_INTMEM +
5035 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5036 U64_HI(fp->rx_comp_mapping));
5038 REG_WR16(bp, BAR_USTRORM_INTMEM +
5039 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5043 /* dropless flow control */
5044 if (CHIP_IS_E1H(bp)) {
5045 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5047 rx_pause.bd_thr_low = 250;
5048 rx_pause.cqe_thr_low = 250;
5050 rx_pause.sge_thr_low = 0;
5051 rx_pause.bd_thr_high = 350;
5052 rx_pause.cqe_thr_high = 350;
5053 rx_pause.sge_thr_high = 0;
5055 for_each_rx_queue(bp, i) {
5056 struct bnx2x_fastpath *fp = &bp->fp[i];
5058 if (!fp->disable_tpa) {
5059 rx_pause.sge_thr_low = 150;
5060 rx_pause.sge_thr_high = 250;
5064 offset = BAR_USTRORM_INTMEM +
5065 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5068 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5070 REG_WR(bp, offset + j*4,
5071 ((u32 *)&rx_pause)[j]);
5075 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5077 /* Init rate shaping and fairness contexts */
5081 /* During init there is no active link
5082 Until link is up, set link rate to 10Gbps */
5083 bp->link_vars.line_speed = SPEED_10000;
5084 bnx2x_init_port_minmax(bp);
5086 bnx2x_calc_vn_weight_sum(bp);
5088 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5089 bnx2x_init_vn_minmax(bp, 2*vn + port);
5091 /* Enable rate shaping and fairness */
5092 bp->cmng.flags.cmng_enables =
5093 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5094 if (bp->vn_weight_sum)
5095 bp->cmng.flags.cmng_enables |=
5096 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5098 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5099 " fairness will be disabled\n");
5101 /* rate shaping and fairness are disabled */
5103 "single function mode minmax will be disabled\n");
5107 /* Store it to internal memory */
5109 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5110 REG_WR(bp, BAR_XSTRORM_INTMEM +
5111 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5112 ((u32 *)(&bp->cmng))[i]);
5115 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5117 switch (load_code) {
5118 case FW_MSG_CODE_DRV_LOAD_COMMON:
5119 bnx2x_init_internal_common(bp);
5122 case FW_MSG_CODE_DRV_LOAD_PORT:
5123 bnx2x_init_internal_port(bp);
5126 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5127 bnx2x_init_internal_func(bp);
5131 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5136 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5140 for_each_queue(bp, i) {
5141 struct bnx2x_fastpath *fp = &bp->fp[i];
5144 fp->state = BNX2X_FP_STATE_CLOSED;
5146 fp->cl_id = BP_L_ID(bp) + i;
5147 fp->sb_id = fp->cl_id;
5149 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5150 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5151 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5153 bnx2x_update_fpsb_idx(fp);
5156 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5158 bnx2x_update_dsb_idx(bp);
5159 bnx2x_update_coalesce(bp);
5160 bnx2x_init_rx_rings(bp);
5161 bnx2x_init_tx_ring(bp);
5162 bnx2x_init_sp_ring(bp);
5163 bnx2x_init_context(bp);
5164 bnx2x_init_internal(bp, load_code);
5165 bnx2x_init_ind_table(bp);
5166 bnx2x_stats_init(bp);
5168 /* At this point, we are ready for interrupts */
5169 atomic_set(&bp->intr_sem, 0);
5171 /* flush all before enabling interrupts */
5175 bnx2x_int_enable(bp);
5178 /* end of nic init */
5181 * gzip service functions
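/* Compressed firmware/init data handling: bnx2x_gunzip_init() allocates
 * a DMA-consistent output buffer and a zlib stream (with its inflate
 * workspace), bnx2x_gunzip() skips the gzip header and inflates the raw
 * deflate payload into that buffer, and bnx2x_gunzip_end() releases
 * everything again.
 */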
5184 static int bnx2x_gunzip_init(struct bnx2x *bp)
5186 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5187 &bp->gunzip_mapping);
5188 if (bp->gunzip_buf == NULL)
5191 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5192 if (bp->strm == NULL)
5195 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5197 if (bp->strm->workspace == NULL)
5207 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5208 bp->gunzip_mapping);
5209 bp->gunzip_buf = NULL;
5212 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5213 " un-compression\n", bp->dev->name);
5217 static void bnx2x_gunzip_end(struct bnx2x *bp)
5219 kfree(bp->strm->workspace);
5224 if (bp->gunzip_buf) {
5225 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5226 bp->gunzip_mapping);
5227 bp->gunzip_buf = NULL;
5231 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5235 /* check gzip header */
5236 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5243 if (zbuf[3] & FNAME)
5244 while ((zbuf[n++] != 0) && (n < len));
5246 bp->strm->next_in = zbuf + n;
5247 bp->strm->avail_in = len - n;
5248 bp->strm->next_out = bp->gunzip_buf;
5249 bp->strm->avail_out = FW_BUF_SIZE;
5251 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5255 rc = zlib_inflate(bp->strm, Z_FINISH);
5256 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5257 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5258 bp->dev->name, bp->strm->msg);
5260 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5261 if (bp->gunzip_outlen & 0x3)
5262 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5263 " gunzip_outlen (%d) not aligned\n",
5264 bp->dev->name, bp->gunzip_outlen);
5265 bp->gunzip_outlen >>= 2;
5267 zlib_inflateEnd(bp->strm);
5269 if (rc == Z_STREAM_END)
5275 /* nic load/unload */
5278 * General service functions
5281 /* send a NIG loopback debug packet */
5282 static void bnx2x_lb_pckt(struct bnx2x *bp)
5286 /* Ethernet source and destination addresses */
5287 wb_write[0] = 0x55555555;
5288 wb_write[1] = 0x55555555;
5289 wb_write[2] = 0x20; /* SOP */
5290 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5292 /* NON-IP protocol */
5293 wb_write[0] = 0x09000000;
5294 wb_write[1] = 0x55555555;
5295 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5296 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5299 /* some of the internal memories
5300 * are not directly readable from the driver
5301 * to test them we send debug packets
5303 static int bnx2x_int_mem_test(struct bnx2x *bp)
5309 if (CHIP_REV_IS_FPGA(bp))
5311 else if (CHIP_REV_IS_EMUL(bp))
5316 DP(NETIF_MSG_HW, "start part1\n");
5318 /* Disable inputs of parser neighbor blocks */
5319 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5320 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5321 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5322 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5324 /* Write 0 to parser credits for CFC search request */
5325 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5327 /* send Ethernet packet */
5330 /* TODO: do I need to reset the NIG statistics here? */
5331 /* Wait until NIG register shows 1 packet of size 0x10 */
5332 count = 1000 * factor;
5335 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5336 val = *bnx2x_sp(bp, wb_data[0]);
5344 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5348 /* Wait until PRS register shows 1 packet */
5349 count = 1000 * factor;
5351 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5359 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5363 /* Reset and init BRB, PRS */
5364 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5366 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5368 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5369 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5371 DP(NETIF_MSG_HW, "part2\n");
5373 /* Disable inputs of parser neighbor blocks */
5374 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5375 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5376 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5377 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5379 /* Write 0 to parser credits for CFC search request */
5380 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5382 /* send 10 Ethernet packets */
5383 for (i = 0; i < 10; i++)
5386 /* Wait until NIG register shows 10 + 1
5387 packets of size 11*0x10 = 0xb0 */
5388 count = 1000 * factor;
5391 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5392 val = *bnx2x_sp(bp, wb_data[0]);
5400 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5404 /* Wait until PRS register shows 2 packets */
5405 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5407 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5409 /* Write 1 to parser credits for CFC search request */
5410 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5412 /* Wait until PRS register shows 3 packets */
5413 msleep(10 * factor);
5414 /* Wait until NIG register shows 1 packet of size 0x10 */
5415 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5417 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5419 /* clear NIG EOP FIFO */
5420 for (i = 0; i < 11; i++)
5421 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5422 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5424 BNX2X_ERR("clear of NIG failed\n");
5428 /* Reset and init BRB, PRS, NIG */
5429 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5431 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5433 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5434 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5437 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5440 /* Enable inputs of parser neighbor blocks */
5441 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5442 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5443 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5444 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5446 DP(NETIF_MSG_HW, "done\n");
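/* Unmask the per-block interrupt/attention sources: writing 0 to a
 * block's *_INT_MASK register enables all of its attention bits (the
 * commented-out SEM/MISC masks are intentionally left untouched, and
 * PBF keeps bits 3-4 masked).
 */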
5451 static void enable_blocks_attention(struct bnx2x *bp)
5453 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5454 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5455 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5456 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5457 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5458 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5459 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5460 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5461 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5462 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5463 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5464 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5465 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5466 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5467 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5468 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5469 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5470 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5471 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5472 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5473 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5474 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5475 if (CHIP_REV_IS_FPGA(bp))
5476 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5478 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5479 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5480 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5481 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5482 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5483 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5484 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5485 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5486 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5487 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5491 static void bnx2x_reset_common(struct bnx2x *bp)
5494 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5496 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5499 static int bnx2x_init_common(struct bnx2x *bp)
5503 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5505 bnx2x_reset_common(bp);
5506 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5507 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5509 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5510 if (CHIP_IS_E1H(bp))
5511 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5513 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5515 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5517 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5518 if (CHIP_IS_E1(bp)) {
5519 /* enable HW interrupt from PXP on USDM overflow
5520 bit 16 on INT_MASK_0 */
5521 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5524 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5528 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5529 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5530 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5531 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5532 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5533 /* make sure this value is 0 */
5534 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5536 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5537 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5538 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5539 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5540 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5543 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5545 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5546 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5547 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5550 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5551 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5553 /* let the HW do its magic ... */
5555 /* finish PXP init */
5556 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5558 BNX2X_ERR("PXP2 CFG failed\n");
5561 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5563 BNX2X_ERR("PXP2 RD_INIT failed\n");
5567 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5568 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5570 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5572 /* clean the DMAE memory */
5574 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5576 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5577 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5578 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5579 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5581 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5582 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5583 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5584 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5586 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5587 /* soft reset pulse */
5588 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5589 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5592 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5595 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5596 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5597 if (!CHIP_REV_IS_SLOW(bp)) {
5598 /* enable hw interrupt from doorbell Q */
5599 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5602 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5603 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5604 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5606 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5607 if (CHIP_IS_E1H(bp))
5608 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5610 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5611 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5612 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5613 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5615 if (CHIP_IS_E1H(bp)) {
5616 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5617 STORM_INTMEM_SIZE_E1H/2);
5619 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5620 0, STORM_INTMEM_SIZE_E1H/2);
5621 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5622 STORM_INTMEM_SIZE_E1H/2);
5624 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5625 0, STORM_INTMEM_SIZE_E1H/2);
5626 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5627 STORM_INTMEM_SIZE_E1H/2);
5629 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5630 0, STORM_INTMEM_SIZE_E1H/2);
5631 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5632 STORM_INTMEM_SIZE_E1H/2);
5634 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5635 0, STORM_INTMEM_SIZE_E1H/2);
5637 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5638 STORM_INTMEM_SIZE_E1);
5639 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5640 STORM_INTMEM_SIZE_E1);
5641 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5642 STORM_INTMEM_SIZE_E1);
5643 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5644 STORM_INTMEM_SIZE_E1);
5647 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5648 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5649 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5650 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5653 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5655 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5658 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5659 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5660 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5662 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5663 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5664 REG_WR(bp, i, 0xc0cac01a);
5665 /* TODO: replace with something meaningful */
5667 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5668 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5670 if (sizeof(union cdu_context) != 1024)
5671 /* we currently assume that a context is 1024 bytes */
5672 printk(KERN_ALERT PFX "please adjust the size of"
5673 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5675 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5676 val = (4 << 24) + (0 << 12) + 1024;
5677 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5678 if (CHIP_IS_E1(bp)) {
5679 /* !!! fix pxp client credit until excel update */
5680 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5681 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5684 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5685 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5686 /* enable context validation interrupt from CFC */
5687 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5689 /* set the thresholds to prevent CFC/CDU race */
5690 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5692 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5693 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5695 /* PXPCS COMMON comes here */
5696 /* Reset PCIE errors for debug */
5697 REG_WR(bp, 0x2814, 0xffffffff);
5698 REG_WR(bp, 0x3820, 0xffffffff);
5700 /* EMAC0 COMMON comes here */
5701 /* EMAC1 COMMON comes here */
5702 /* DBU COMMON comes here */
5703 /* DBG COMMON comes here */
5705 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5706 if (CHIP_IS_E1H(bp)) {
5707 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5708 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5711 if (CHIP_REV_IS_SLOW(bp))
5714 /* finish CFC init */
5715 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5717 BNX2X_ERR("CFC LL_INIT failed\n");
5720 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5722 BNX2X_ERR("CFC AC_INIT failed\n");
5725 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5727 BNX2X_ERR("CFC CAM_INIT failed\n");
5730 REG_WR(bp, CFC_REG_DEBUG0, 0);
5732 /* read NIG statistic
5733 to see if this is our first up since powerup */
5734 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5735 val = *bnx2x_sp(bp, wb_data[0]);
5737 /* do internal memory self test */
5738 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5739 BNX2X_ERR("internal mem self test failed\n");
5743 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5744 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5745 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5746 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5747 bp->port.need_hw_lock = 1;
5750 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5751 /* Fan failure is indicated by SPIO 5 */
5752 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5753 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5755 /* set to active low mode */
5756 val = REG_RD(bp, MISC_REG_SPIO_INT);
5757 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5758 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5759 REG_WR(bp, MISC_REG_SPIO_INT, val);
5761 /* enable interrupt to signal the IGU */
5762 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5763 val |= (1 << MISC_REGISTERS_SPIO_5);
5764 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5771 /* clear PXP2 attentions */
5772 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5774 enable_blocks_attention(bp);
5776 if (!BP_NOMCP(bp)) {
5777 bnx2x_acquire_phy_lock(bp);
5778 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5779 bnx2x_release_phy_lock(bp);
5781 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5786 static int bnx2x_init_port(struct bnx2x *bp)
5788 int port = BP_PORT(bp);
5792 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5794 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5796 /* Port PXP comes here */
5797 /* Port PXP2 comes here */
5802 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5803 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5804 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5805 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5810 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5811 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5812 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5813 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5818 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5819 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5820 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5821 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5823 /* Port CMs come here */
5824 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5825 (port ? XCM_PORT1_END : XCM_PORT0_END));
5827 /* Port QM comes here */
5829 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5830 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5832 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5833 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5835 /* Port DQ comes here */
5837 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5838 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5839 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5840 /* no pause for emulation and FPGA */
5845 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5846 else if (bp->dev->mtu > 4096) {
5847 if (bp->flags & ONE_PORT_FLAG)
5851 /* (24*1024 + val*4)/256 */
5852 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5855 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5856 high = low + 56; /* 14*1024/256 */
5858 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5859 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5862 /* Port PRS comes here */
5863 /* Port TSDM comes here */
5864 /* Port CSDM comes here */
5865 /* Port USDM comes here */
5866 /* Port XSDM comes here */
5868 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5869 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5870 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5871 port ? USEM_PORT1_END : USEM_PORT0_END);
5872 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5873 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5874 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5875 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5877 /* Port UPB comes here */
5878 /* Port XPB comes here */
5880 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5881 port ? PBF_PORT1_END : PBF_PORT0_END);
5883 /* configure PBF to work without PAUSE mtu 9000 */
5884 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5886 /* update threshold */
5887 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5888 /* update init credit */
5889 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5892 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5894 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5897 /* tell the searcher where the T2 table is */
5898 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5900 wb_write[0] = U64_LO(bp->t2_mapping);
5901 wb_write[1] = U64_HI(bp->t2_mapping);
5902 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5903 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5904 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5905 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5907 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5908 /* Port SRCH comes here */
5910 /* Port CDU comes here */
5911 /* Port CFC comes here */
5913 if (CHIP_IS_E1(bp)) {
5914 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5915 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5917 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5918 port ? HC_PORT1_END : HC_PORT0_END);
5920 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5921 MISC_AEU_PORT0_START,
5922 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5923 /* init aeu_mask_attn_func_0/1:
5924 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5925 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5926 * bits 4-7 are used for "per vn group attention" */
5927 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5928 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5930 /* Port PXPCS comes here */
5931 /* Port EMAC0 comes here */
5932 /* Port EMAC1 comes here */
5933 /* Port DBU comes here */
5934 /* Port DBG comes here */
5936 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5937 port ? NIG_PORT1_END : NIG_PORT0_END);
5939 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5941 if (CHIP_IS_E1H(bp)) {
5942 /* 0x2 disable e1hov, 0x1 enable */
5943 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5944 (IS_E1HMF(bp) ? 0x1 : 0x2));
5946 /* support pause requests from USDM, TSDM and BRB */
5947 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5950 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5951 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5952 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5956 /* Port MCP comes here */
5957 /* Port DMAE comes here */
5959 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5962 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5964 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5965 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5967 /* The GPIO should be swapped if the swap register is
5969 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5970 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5972 /* Select function upon port-swap configuration */
5974 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5975 aeu_gpio_mask = (swap_val && swap_override) ?
5976 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5977 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5979 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5980 aeu_gpio_mask = (swap_val && swap_override) ?
5981 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5982 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5984 val = REG_RD(bp, offset);
5985 /* add GPIO3 to group */
5986 val |= aeu_gpio_mask;
5987 REG_WR(bp, offset, val);
5991 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5992 /* add SPIO 5 to group 0 */
5993 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5994 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5995 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6002 bnx2x__link_reset(bp);
6007 #define ILT_PER_FUNC (768/2)
6008 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6009 /* the phys address is shifted right 12 bits and has a
6010 1=valid bit added in the 53rd bit
6011 then since this is a wide register(TM)
6012 we split it into two 32 bit writes
6014 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6015 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6016 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6017 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
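/* Example of the encoding above: for a DMA address A, ONCHIP_ADDR1(A)
 * carries A[43:12] and ONCHIP_ADDR2(A) carries A[63:44] plus the valid
 * flag in bit 20 of the upper word (bit 52 of the full 64-bit entry).
 * PXP_ONE_ILT(x) is simply PXP_ILT_RANGE(x, x), i.e. a one-line range.
 */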
6019 #define CNIC_ILT_LINES 0
6021 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6025 if (CHIP_IS_E1H(bp))
6026 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6028 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6030 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6033 static int bnx2x_init_func(struct bnx2x *bp)
6035 int port = BP_PORT(bp);
6036 int func = BP_FUNC(bp);
6040 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6042 /* set MSI reconfigure capability */
6043 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6044 val = REG_RD(bp, addr);
6045 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6046 REG_WR(bp, addr, val);
6048 i = FUNC_ILT_BASE(func);
6050 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6051 if (CHIP_IS_E1H(bp)) {
6052 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6053 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6055 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6056 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6059 if (CHIP_IS_E1H(bp)) {
6060 for (i = 0; i < 9; i++)
6061 bnx2x_init_block(bp,
6062 cm_start[func][i], cm_end[func][i]);
6064 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6065 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6068 /* HC init per function */
6069 if (CHIP_IS_E1H(bp)) {
6070 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6072 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6073 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6075 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6077 /* Reset PCIE errors for debug */
6078 REG_WR(bp, 0x2114, 0xffffffff);
6079 REG_WR(bp, 0x2120, 0xffffffff);
6084 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6088 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6089 BP_FUNC(bp), load_code);
6092 mutex_init(&bp->dmae_mutex);
6093 bnx2x_gunzip_init(bp);
6095 switch (load_code) {
6096 case FW_MSG_CODE_DRV_LOAD_COMMON:
6097 rc = bnx2x_init_common(bp);
6102 case FW_MSG_CODE_DRV_LOAD_PORT:
6104 rc = bnx2x_init_port(bp);
6109 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6111 rc = bnx2x_init_func(bp);
6117 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6121 if (!BP_NOMCP(bp)) {
6122 int func = BP_FUNC(bp);
6124 bp->fw_drv_pulse_wr_seq =
6125 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6126 DRV_PULSE_SEQ_MASK);
6127 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6128 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6129 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6133 /* this needs to be done before gunzip end */
6134 bnx2x_zero_def_sb(bp);
6135 for_each_queue(bp, i)
6136 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6139 bnx2x_gunzip_end(bp);
6144 /* send the MCP a request, block until there is a reply */
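/* The handshake works as follows: the driver writes the command OR'ed
 * with a per-function sequence number into drv_mb_header, then polls
 * fw_mb_header until the firmware echoes the same sequence number
 * (waiting up to roughly 2 seconds).  On success the firmware's message
 * code is returned; if the firmware never responds the error is logged.
 */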
6145 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6147 int func = BP_FUNC(bp);
6148 u32 seq = ++bp->fw_seq;
6151 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6153 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6154 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6157 /* let the FW do its magic ... */
6160 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6162 /* Give the FW up to 2 second (200*10ms) */
6163 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6165 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6166 cnt*delay, rc, seq);
6168 /* is this a reply to our command? */
6169 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6170 rc &= FW_MSG_CODE_MASK;
6174 BNX2X_ERR("FW failed to respond!\n");
6182 static void bnx2x_free_mem(struct bnx2x *bp)
6185 #define BNX2X_PCI_FREE(x, y, size) \
6188 pci_free_consistent(bp->pdev, size, x, y); \
6194 #define BNX2X_FREE(x) \
6206 for_each_queue(bp, i) {
6209 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6210 bnx2x_fp(bp, i, status_blk_mapping),
6211 sizeof(struct host_status_block) +
6212 sizeof(struct eth_tx_db_data));
6215 for_each_rx_queue(bp, i) {
6217 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6218 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6219 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6220 bnx2x_fp(bp, i, rx_desc_mapping),
6221 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6223 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6224 bnx2x_fp(bp, i, rx_comp_mapping),
6225 sizeof(struct eth_fast_path_rx_cqe) *
6229 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6230 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6231 bnx2x_fp(bp, i, rx_sge_mapping),
6232 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6235 for_each_tx_queue(bp, i) {
6237 /* fastpath tx rings: tx_buf tx_desc */
6238 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6239 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6240 bnx2x_fp(bp, i, tx_desc_mapping),
6241 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6243 /* end of fastpath */
6245 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6246 sizeof(struct host_def_status_block));
6248 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6249 sizeof(struct bnx2x_slowpath));
6252 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6253 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6254 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6255 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6257 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6259 #undef BNX2X_PCI_FREE
6263 static int bnx2x_alloc_mem(struct bnx2x *bp)
6266 #define BNX2X_PCI_ALLOC(x, y, size) \
6268 x = pci_alloc_consistent(bp->pdev, size, y); \
6270 goto alloc_mem_err; \
6271 memset(x, 0, size); \
6274 #define BNX2X_ALLOC(x, size) \
6276 x = vmalloc(size); \
6278 goto alloc_mem_err; \
6279 memset(x, 0, size); \
6286 for_each_queue(bp, i) {
6287 bnx2x_fp(bp, i, bp) = bp;
6290 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6291 &bnx2x_fp(bp, i, status_blk_mapping),
6292 sizeof(struct host_status_block) +
6293 sizeof(struct eth_tx_db_data));
6296 for_each_rx_queue(bp, i) {
6298 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6299 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6300 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6301 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6302 &bnx2x_fp(bp, i, rx_desc_mapping),
6303 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6305 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6306 &bnx2x_fp(bp, i, rx_comp_mapping),
6307 sizeof(struct eth_fast_path_rx_cqe) *
6311 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6312 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6313 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6314 &bnx2x_fp(bp, i, rx_sge_mapping),
6315 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6318 for_each_tx_queue(bp, i) {
6320 bnx2x_fp(bp, i, hw_tx_prods) =
6321 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6323 bnx2x_fp(bp, i, tx_prods_mapping) =
6324 bnx2x_fp(bp, i, status_blk_mapping) +
6325 sizeof(struct host_status_block);
6327 /* fastpath tx rings: tx_buf tx_desc */
6328 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6329 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6330 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6331 &bnx2x_fp(bp, i, tx_desc_mapping),
6332 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6334 /* end of fastpath */
6336 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6337 sizeof(struct host_def_status_block));
6339 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6340 sizeof(struct bnx2x_slowpath));
6343 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6346 for (i = 0; i < 64*1024; i += 64) {
6347 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6348 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6351 /* allocate searcher T2 table
6352 we allocate 1/4 of alloc num for T2
6353 (which is not entered into the ILT) */
6354 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6357 for (i = 0; i < 16*1024; i += 64)
6358 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6360 /* now fixup the last line in the block to point to the next block */
6361 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6363 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6364 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6366 /* QM queues (128*MAX_CONN) */
6367 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6370 /* Slow path ring */
6371 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6379 #undef BNX2X_PCI_ALLOC
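/* Ring drain helpers used on unload/error: any skbs still owned by the
 * driver between the consumer and producer indices are unmapped and freed.
 */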
6383 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6387 for_each_tx_queue(bp, i) {
6388 struct bnx2x_fastpath *fp = &bp->fp[i];
6390 u16 bd_cons = fp->tx_bd_cons;
6391 u16 sw_prod = fp->tx_pkt_prod;
6392 u16 sw_cons = fp->tx_pkt_cons;
6394 while (sw_cons != sw_prod) {
6395 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6401 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6405 for_each_rx_queue(bp, j) {
6406 struct bnx2x_fastpath *fp = &bp->fp[j];
6408 for (i = 0; i < NUM_RX_BD; i++) {
6409 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6410 struct sk_buff *skb = rx_buf->skb;
6415 pci_unmap_single(bp->pdev,
6416 pci_unmap_addr(rx_buf, mapping),
6417 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6422 if (!fp->disable_tpa)
6423 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6424 ETH_MAX_AGGREGATION_QUEUES_E1 :
6425 ETH_MAX_AGGREGATION_QUEUES_E1H);
6429 static void bnx2x_free_skbs(struct bnx2x *bp)
6431 bnx2x_free_tx_skbs(bp);
6432 bnx2x_free_rx_skbs(bp);
6435 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6439 free_irq(bp->msix_table[0].vector, bp->dev);
6440 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6441 bp->msix_table[0].vector);
6443 for_each_queue(bp, i) {
6444 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6445 "state %x\n", i, bp->msix_table[i + offset].vector,
6446 bnx2x_fp(bp, i, state));
6448 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6452 static void bnx2x_free_irq(struct bnx2x *bp)
6454 if (bp->flags & USING_MSIX_FLAG) {
6455 bnx2x_free_msix_irqs(bp);
6456 pci_disable_msix(bp->pdev);
6457 bp->flags &= ~USING_MSIX_FLAG;
6459 } else if (bp->flags & USING_MSI_FLAG) {
6460 free_irq(bp->pdev->irq, bp->dev);
6461 pci_disable_msi(bp->pdev);
6462 bp->flags &= ~USING_MSI_FLAG;
6465 free_irq(bp->pdev->irq, bp->dev);
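/* MSI-X vector layout: entry 0 is the slowpath (default status block)
 * interrupt, entries 1..n map one vector per fastpath queue.  If the PCI
 * layer cannot grant the full table the caller falls back to MSI/INTx.
 */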
6468 static int bnx2x_enable_msix(struct bnx2x *bp)
6470 int i, rc, offset = 1;
6473 bp->msix_table[0].entry = igu_vec;
6474 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6476 for_each_queue(bp, i) {
6477 igu_vec = BP_L_ID(bp) + offset + i;
6478 bp->msix_table[i + offset].entry = igu_vec;
6479 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6480 "(fastpath #%u)\n", i + offset, igu_vec, i);
6483 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6484 BNX2X_NUM_QUEUES(bp) + offset);
6486 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6490 bp->flags |= USING_MSIX_FLAG;
6495 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6497 int i, rc, offset = 1;
6499 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6500 bp->dev->name, bp->dev);
6502 BNX2X_ERR("request sp irq failed\n");
6506 for_each_queue(bp, i) {
6507 struct bnx2x_fastpath *fp = &bp->fp[i];
6509 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6510 rc = request_irq(bp->msix_table[i + offset].vector,
6511 bnx2x_msix_fp_int, 0, fp->name, fp);
6513 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6514 bnx2x_free_msix_irqs(bp);
6518 fp->state = BNX2X_FP_STATE_IRQ;
6521 i = BNX2X_NUM_QUEUES(bp);
6523 printk(KERN_INFO PFX
6524 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6525 bp->dev->name, bp->msix_table[0].vector,
6526 bp->msix_table[offset].vector,
6527 bp->msix_table[offset + i - 1].vector);
6529 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6530 bp->dev->name, bp->msix_table[0].vector,
6531 bp->msix_table[offset + i - 1].vector);
6536 static int bnx2x_enable_msi(struct bnx2x *bp)
6540 rc = pci_enable_msi(bp->pdev);
6542 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6545 bp->flags |= USING_MSI_FLAG;
6550 static int bnx2x_req_irq(struct bnx2x *bp)
6552 unsigned long flags;
6555 if (bp->flags & USING_MSI_FLAG)
6558 flags = IRQF_SHARED;
6560 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6561 bp->dev->name, bp->dev);
6563 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6568 static void bnx2x_napi_enable(struct bnx2x *bp)
6572 for_each_rx_queue(bp, i)
6573 napi_enable(&bnx2x_fp(bp, i, napi));
6576 static void bnx2x_napi_disable(struct bnx2x *bp)
6580 for_each_rx_queue(bp, i)
6581 napi_disable(&bnx2x_fp(bp, i, napi));
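/* netif_start/stop pair: interrupts and NAPI are only re-enabled once the
 * interrupt semaphore drops back to zero, so a stop/start sequence nested
 * inside another stop does not enable the hardware prematurely.
 */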
6584 static void bnx2x_netif_start(struct bnx2x *bp)
6586 if (atomic_dec_and_test(&bp->intr_sem)) {
6587 if (netif_running(bp->dev)) {
6588 bnx2x_napi_enable(bp);
6589 bnx2x_int_enable(bp);
6590 if (bp->state == BNX2X_STATE_OPEN)
6591 netif_tx_wake_all_queues(bp->dev);
6596 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6598 bnx2x_int_disable_sync(bp, disable_hw);
6599 bnx2x_napi_disable(bp);
6600 if (netif_running(bp->dev)) {
6601 netif_tx_disable(bp->dev);
6602 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6607 * Init service functions
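/* Program the E1 CAM via a SET_MAC ramrod: entry 0 carries the device MAC,
 * entry 1 the broadcast address; when 'set' is clear both entries are
 * invalidated instead.
 */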
6610 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6612 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6613 int port = BP_PORT(bp);
6616 /* CAM allocation: unicasts 0-31:port0 32-63:port1
6617 * multicast 64-127:port0 128-191:port1 */
6619 config->hdr.length = 2;
6620 config->hdr.offset = port ? 32 : 0;
6621 config->hdr.client_id = bp->fp->cl_id;
6622 config->hdr.reserved1 = 0;
6625 config->config_table[0].cam_entry.msb_mac_addr =
6626 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6627 config->config_table[0].cam_entry.middle_mac_addr =
6628 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6629 config->config_table[0].cam_entry.lsb_mac_addr =
6630 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6631 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6633 config->config_table[0].target_table_entry.flags = 0;
6635 CAM_INVALIDATE(config->config_table[0]);
6636 config->config_table[0].target_table_entry.client_id = 0;
6637 config->config_table[0].target_table_entry.vlan_id = 0;
6639 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6640 (set ? "setting" : "clearing"),
6641 config->config_table[0].cam_entry.msb_mac_addr,
6642 config->config_table[0].cam_entry.middle_mac_addr,
6643 config->config_table[0].cam_entry.lsb_mac_addr);
6646 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6647 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6648 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6649 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6651 config->config_table[1].target_table_entry.flags =
6652 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6654 CAM_INVALIDATE(config->config_table[1]);
6655 config->config_table[1].target_table_entry.client_id = 0;
6656 config->config_table[1].target_table_entry.vlan_id = 0;
6658 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6659 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6660 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6663 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6665 struct mac_configuration_cmd_e1h *config =
6666 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6668 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6669 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6673 /* CAM allocation for E1H
6674 * unicasts: by func number
6675 * multicast: 20+FUNC*20, 20 each */
6677 config->hdr.length = 1;
6678 config->hdr.offset = BP_FUNC(bp);
6679 config->hdr.client_id = bp->fp->cl_id;
6680 config->hdr.reserved1 = 0;
6683 config->config_table[0].msb_mac_addr =
6684 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6685 config->config_table[0].middle_mac_addr =
6686 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6687 config->config_table[0].lsb_mac_addr =
6688 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6689 config->config_table[0].client_id = BP_L_ID(bp);
6690 config->config_table[0].vlan_id = 0;
6691 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6693 config->config_table[0].flags = BP_PORT(bp);
6695 config->config_table[0].flags =
6696 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6698 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6699 (set ? "setting" : "clearing"),
6700 config->config_table[0].msb_mac_addr,
6701 config->config_table[0].middle_mac_addr,
6702 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6704 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6705 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6706 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
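/* Poll until the state variable (updated from the ramrod completion in
 * bnx2x_sp_event()) reaches the requested value.  In polling mode the rx
 * completion ring is serviced manually since interrupts may not be
 * available yet.
 */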
6709 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6710 int *state_p, int poll)
6712 /* can take a while if any port is running */
6715 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6716 poll ? "polling" : "waiting", state, idx);
6721 bnx2x_rx_int(bp->fp, 10);
6722 /* if index is different from 0
6723 * the reply for some commands will
6724 * be on the non default queue */
6727 bnx2x_rx_int(&bp->fp[idx], 10);
6730 mb(); /* state is changed by bnx2x_sp_event() */
6731 if (*state_p == state) {
6732 #ifdef BNX2X_STOP_ON_ERROR
6733 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6742 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6743 poll ? "polling" : "waiting", state, idx);
6744 #ifdef BNX2X_STOP_ON_ERROR
6751 static int bnx2x_setup_leading(struct bnx2x *bp)
6755 /* reset IGU state */
6756 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6759 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6761 /* Wait for completion */
6762 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6767 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6769 struct bnx2x_fastpath *fp = &bp->fp[index];
6771 /* reset IGU state */
6772 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6775 fp->state = BNX2X_FP_STATE_OPENING;
6776 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6779 /* Wait for completion */
6780 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6784 static int bnx2x_poll(struct napi_struct *napi, int budget);
6786 static void bnx2x_set_int_mode(struct bnx2x *bp)
6794 bp->num_rx_queues = num_queues;
6795 bp->num_tx_queues = num_queues;
6797 "set number of queues to %d\n", num_queues);
6802 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6803 num_queues = min_t(u32, num_online_cpus(),
6804 BNX2X_MAX_QUEUES(bp));
6807 bp->num_rx_queues = num_queues;
6808 bp->num_tx_queues = num_queues;
6809 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6810 " number of tx queues to %d\n",
6811 bp->num_rx_queues, bp->num_tx_queues);
6812 /* if we can't use MSI-X we only need one fp,
6813 * so try to enable MSI-X with the requested number of fp's
6814 * and fall back to MSI or legacy INTx with one fp */
6816 if (bnx2x_enable_msix(bp)) {
6817 /* failed to enable MSI-X */
6819 bp->num_rx_queues = num_queues;
6820 bp->num_tx_queues = num_queues;
6822 BNX2X_ERR("Multi requested but failed to "
6823 "enable MSI-X; setting number of "
6824 "queues to %d\n", num_queues);
6828 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6831 static void bnx2x_set_rx_mode(struct net_device *dev);
6833 /* must be called with rtnl_lock */
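/* Load sequence: pick the interrupt mode and queue count, allocate memory,
 * request IRQs, negotiate the LOAD_REQ/LOAD_DONE handshake with the MCP
 * (or emulate it with load_count[] when no MCP is present), initialize the
 * HW and NIC internals, bring up the leading and non-default connections,
 * program the MAC, start the PHY and finally open the fast path.
 */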
6834 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6838 #ifdef BNX2X_STOP_ON_ERROR
6839 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6840 if (unlikely(bp->panic))
6844 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6846 bnx2x_set_int_mode(bp);
6848 if (bnx2x_alloc_mem(bp))
6851 for_each_rx_queue(bp, i)
6852 bnx2x_fp(bp, i, disable_tpa) =
6853 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6855 for_each_rx_queue(bp, i)
6856 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6859 #ifdef BNX2X_STOP_ON_ERROR
6860 for_each_rx_queue(bp, i) {
6861 struct bnx2x_fastpath *fp = &bp->fp[i];
6863 fp->poll_no_work = 0;
6865 fp->poll_max_calls = 0;
6866 fp->poll_complete = 0;
6870 bnx2x_napi_enable(bp);
6872 if (bp->flags & USING_MSIX_FLAG) {
6873 rc = bnx2x_req_msix_irqs(bp);
6875 pci_disable_msix(bp->pdev);
6879 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6880 bnx2x_enable_msi(bp);
6882 rc = bnx2x_req_irq(bp);
6884 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6885 if (bp->flags & USING_MSI_FLAG)
6886 pci_disable_msi(bp->pdev);
6889 if (bp->flags & USING_MSI_FLAG) {
6890 bp->dev->irq = bp->pdev->irq;
6891 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6892 bp->dev->name, bp->pdev->irq);
6896 /* Send LOAD_REQUEST command to MCP
6897 Returns the type of LOAD command:
6898 if it is the first port to be initialized
6899 common blocks should be initialized, otherwise - not */
6901 if (!BP_NOMCP(bp)) {
6902 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6904 BNX2X_ERR("MCP response failure, aborting\n");
6908 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6909 rc = -EBUSY; /* other port in diagnostic mode */
6914 int port = BP_PORT(bp);
6916 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
6917 load_count[0], load_count[1], load_count[2]);
6919 load_count[1 + port]++;
6920 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
6921 load_count[0], load_count[1], load_count[2]);
6922 if (load_count[0] == 1)
6923 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6924 else if (load_count[1 + port] == 1)
6925 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6927 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6930 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6931 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6935 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6938 rc = bnx2x_init_hw(bp, load_code);
6940 BNX2X_ERR("HW init failed, aborting\n");
6944 /* Setup NIC internals and enable interrupts */
6945 bnx2x_nic_init(bp, load_code);
6947 /* Send LOAD_DONE command to MCP */
6948 if (!BP_NOMCP(bp)) {
6949 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6951 BNX2X_ERR("MCP response failure, aborting\n");
6957 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6959 rc = bnx2x_setup_leading(bp);
6961 BNX2X_ERR("Setup leading failed!\n");
6965 if (CHIP_IS_E1H(bp))
6966 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6967 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6968 bp->state = BNX2X_STATE_DISABLED;
6971 if (bp->state == BNX2X_STATE_OPEN)
6972 for_each_nondefault_queue(bp, i) {
6973 rc = bnx2x_setup_multi(bp, i);
6979 bnx2x_set_mac_addr_e1(bp, 1);
6981 bnx2x_set_mac_addr_e1h(bp, 1);
6984 bnx2x_initial_phy_init(bp, load_mode);
6986 /* Start fast path */
6987 switch (load_mode) {
6989 /* Tx queues should only be re-enabled */
6990 netif_tx_wake_all_queues(bp->dev);
6991 /* Initialize the receive filter. */
6992 bnx2x_set_rx_mode(bp->dev);
6996 netif_tx_start_all_queues(bp->dev);
6997 /* Initialize the receive filter. */
6998 bnx2x_set_rx_mode(bp->dev);
7002 /* Initialize the receive filter. */
7003 bnx2x_set_rx_mode(bp->dev);
7004 bp->state = BNX2X_STATE_DIAG;
7012 bnx2x__link_status_update(bp);
7014 /* start the timer */
7015 mod_timer(&bp->timer, jiffies + bp->current_interval);
7021 bnx2x_int_disable_sync(bp, 1);
7022 if (!BP_NOMCP(bp)) {
7023 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7024 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7027 /* Free SKBs, SGEs, TPA pool and driver internals */
7028 bnx2x_free_skbs(bp);
7029 for_each_rx_queue(bp, i)
7030 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7035 bnx2x_napi_disable(bp);
7036 for_each_rx_queue(bp, i)
7037 netif_napi_del(&bnx2x_fp(bp, i, napi));
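/* Tear down a non-default connection: send a HALT ramrod, wait for the
 * HALTED state, then delete the CFC entry and wait for CLOSED.
 */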
7043 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7045 struct bnx2x_fastpath *fp = &bp->fp[index];
7048 /* halt the connection */
7049 fp->state = BNX2X_FP_STATE_HALTING;
7050 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7052 /* Wait for completion */
7053 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7055 if (rc) /* timeout */
7058 /* delete cfc entry */
7059 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7061 /* Wait for completion */
7062 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7067 static int bnx2x_stop_leading(struct bnx2x *bp)
7069 __le16 dsb_sp_prod_idx;
7070 /* if the other port is handling traffic,
7071 this can take a lot of time */
7077 /* Send HALT ramrod */
7078 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7079 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7081 /* Wait for completion */
7082 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7083 &(bp->fp[0].state), 1);
7084 if (rc) /* timeout */
7087 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7089 /* Send PORT_DELETE ramrod */
7090 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7092 /* Wait for completion to arrive on default status block
7093 we are going to reset the chip anyway
7094 so there is not much to do if this times out */
7096 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7098 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7099 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7100 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7101 #ifdef BNX2X_STOP_ON_ERROR
7109 rmb(); /* Refresh the dsb_sp_prod */
7111 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7112 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7117 static void bnx2x_reset_func(struct bnx2x *bp)
7119 int port = BP_PORT(bp);
7120 int func = BP_FUNC(bp);
7124 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7125 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7128 base = FUNC_ILT_BASE(func);
7129 for (i = base; i < base + ILT_PER_FUNC; i++)
7130 bnx2x_ilt_wr(bp, i, 0);
7133 static void bnx2x_reset_port(struct bnx2x *bp)
7135 int port = BP_PORT(bp);
7138 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7140 /* Do not rcv packets to BRB */
7141 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7142 /* Do not direct rcv packets that are not for MCP to the BRB */
7143 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7144 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7147 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7150 /* Check for BRB port occupancy */
7151 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7153 DP(NETIF_MSG_IFDOWN,
7154 "BRB1 is not empty %d blocks are occupied\n", val);
7156 /* TODO: Close Doorbell port? */
7159 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7161 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7162 BP_FUNC(bp), reset_code);
7164 switch (reset_code) {
7165 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7166 bnx2x_reset_port(bp);
7167 bnx2x_reset_func(bp);
7168 bnx2x_reset_common(bp);
7171 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7172 bnx2x_reset_port(bp);
7173 bnx2x_reset_func(bp);
7176 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7177 bnx2x_reset_func(bp);
7181 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7186 /* must be called with rtnl_lock */
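/* Unload sequence: stop the rx filters, interrupts and timer, drain the tx
 * queues, clear the CAM, pick a reset code according to the WoL settings,
 * close all connections, report UNLOAD to the MCP (or update load_count[]),
 * reset the chip and free all driver memory.
 */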
7187 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7189 int port = BP_PORT(bp);
7193 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7195 bp->rx_mode = BNX2X_RX_MODE_NONE;
7196 bnx2x_set_storm_rx_mode(bp);
7198 bnx2x_netif_stop(bp, 1);
7200 del_timer_sync(&bp->timer);
7201 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7202 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7203 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7208 /* Wait until tx fastpath tasks complete */
7209 for_each_tx_queue(bp, i) {
7210 struct bnx2x_fastpath *fp = &bp->fp[i];
7213 while (bnx2x_has_tx_work_unload(fp)) {
7215 bnx2x_tx_int(fp, 1000);
7217 BNX2X_ERR("timeout waiting for queue[%d]\n",
7219 #ifdef BNX2X_STOP_ON_ERROR
7230 /* Give HW time to discard old tx messages */
7233 if (CHIP_IS_E1(bp)) {
7234 struct mac_configuration_cmd *config =
7235 bnx2x_sp(bp, mcast_config);
7237 bnx2x_set_mac_addr_e1(bp, 0);
7239 for (i = 0; i < config->hdr.length; i++)
7240 CAM_INVALIDATE(config->config_table[i]);
7242 config->hdr.length = i;
7243 if (CHIP_REV_IS_SLOW(bp))
7244 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7246 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7247 config->hdr.client_id = bp->fp->cl_id;
7248 config->hdr.reserved1 = 0;
7250 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7251 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7252 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7255 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7257 bnx2x_set_mac_addr_e1h(bp, 0);
7259 for (i = 0; i < MC_HASH_SIZE; i++)
7260 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7263 if (unload_mode == UNLOAD_NORMAL)
7264 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7266 else if (bp->flags & NO_WOL_FLAG) {
7267 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7268 if (CHIP_IS_E1H(bp))
7269 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7271 } else if (bp->wol) {
7272 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7273 u8 *mac_addr = bp->dev->dev_addr;
7275 /* The mac address is written to entries 1-4 to
7276 preserve entry 0 which is used by the PMF */
7277 u8 entry = (BP_E1HVN(bp) + 1)*8;
7279 val = (mac_addr[0] << 8) | mac_addr[1];
7280 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7282 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7283 (mac_addr[4] << 8) | mac_addr[5];
7284 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7286 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7289 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7291 /* Close multi and leading connections
7292 Completions for ramrods are collected in a synchronous way */
7293 for_each_nondefault_queue(bp, i)
7294 if (bnx2x_stop_multi(bp, i))
7297 rc = bnx2x_stop_leading(bp);
7299 BNX2X_ERR("Stop leading failed!\n");
7300 #ifdef BNX2X_STOP_ON_ERROR
7309 reset_code = bnx2x_fw_command(bp, reset_code);
7311 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7312 load_count[0], load_count[1], load_count[2]);
7314 load_count[1 + port]--;
7315 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7316 load_count[0], load_count[1], load_count[2]);
7317 if (load_count[0] == 0)
7318 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7319 else if (load_count[1 + port] == 0)
7320 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7322 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7325 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7326 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7327 bnx2x__link_reset(bp);
7329 /* Reset the chip */
7330 bnx2x_reset_chip(bp, reset_code);
7332 /* Report UNLOAD_DONE to MCP */
7334 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7338 /* Free SKBs, SGEs, TPA pool and driver internals */
7339 bnx2x_free_skbs(bp);
7340 for_each_rx_queue(bp, i)
7341 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7342 for_each_rx_queue(bp, i)
7343 netif_napi_del(&bnx2x_fp(bp, i, napi));
7346 bp->state = BNX2X_STATE_CLOSED;
7348 netif_carrier_off(bp->dev);
7353 static void bnx2x_reset_task(struct work_struct *work)
7355 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7357 #ifdef BNX2X_STOP_ON_ERROR
7358 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7359 " so reset not done to allow debug dump,\n"
7360 KERN_ERR " you will need to reboot when done\n");
7366 if (!netif_running(bp->dev))
7367 goto reset_task_exit;
7369 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7370 bnx2x_nic_load(bp, LOAD_NORMAL);
7376 /* end of nic load/unload */
7381 * Init service functions
7384 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7387 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7388 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7389 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7390 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7391 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7392 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7393 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7394 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7396 BNX2X_ERR("Unsupported function index: %d\n", func);
7401 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7403 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7405 /* Flush all outstanding writes */
7408 /* Pretend to be function 0 */
7410 /* Flush the GRC transaction (in the chip) */
7411 new_val = REG_RD(bp, reg);
7413 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7418 /* From now we are in the "like-E1" mode */
7419 bnx2x_int_disable(bp);
7421 /* Flush all outstanding writes */
7424 /* Restore the original function settings */
7425 REG_WR(bp, reg, orig_func);
7426 new_val = REG_RD(bp, reg);
7427 if (new_val != orig_func) {
7428 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7429 orig_func, new_val);
7434 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7436 if (CHIP_IS_E1H(bp))
7437 bnx2x_undi_int_disable_e1h(bp, func);
7439 bnx2x_int_disable(bp);
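/* Detect a pre-boot (UNDI) driver left active before this driver loads:
 * UNDI sets the normal doorbell CID offset to 0x7, so if that value is
 * found the device is unloaded on both ports and reset before the driver
 * takes over.
 */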
7442 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7446 /* Check if there is any driver already loaded */
7447 val = REG_RD(bp, MISC_REG_UNPREPARED);
7449 /* Check if it is the UNDI driver
7450 * UNDI driver initializes CID offset for normal bell to 0x7 */
7452 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7453 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7455 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7457 int func = BP_FUNC(bp);
7461 /* clear the UNDI indication */
7462 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7464 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7466 /* try unload UNDI on port 0 */
7469 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7470 DRV_MSG_SEQ_NUMBER_MASK);
7471 reset_code = bnx2x_fw_command(bp, reset_code);
7473 /* if UNDI is loaded on the other port */
7474 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7476 /* send "DONE" for previous unload */
7477 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7479 /* unload UNDI on port 1 */
7482 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7483 DRV_MSG_SEQ_NUMBER_MASK);
7484 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7486 bnx2x_fw_command(bp, reset_code);
7489 /* now it's safe to release the lock */
7490 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7492 bnx2x_undi_int_disable(bp, func);
7494 /* close input traffic and wait for it */
7495 /* Do not rcv packets to BRB */
7497 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7498 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7499 /* Do not direct rcv packets that are not for MCP to the BRB */
7502 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7503 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7506 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7507 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7510 /* save NIG port swap info */
7511 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7512 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7515 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7518 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7520 /* take the NIG out of reset and restore swap values */
7522 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7523 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7524 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7525 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7527 /* send unload done to the MCP */
7528 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7530 /* restore our func and fw_seq */
7533 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7534 DRV_MSG_SEQ_NUMBER_MASK);
7537 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
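/* Read the chip identification registers and locate the shared memory
 * (shmem) region published by the MCP; if shmem is missing or out of range
 * the device is driven in NO_MCP mode.
 */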
7541 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7543 u32 val, val2, val3, val4, id;
7546 /* Get the chip revision id and number. */
7547 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7548 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7549 id = ((val & 0xffff) << 16);
7550 val = REG_RD(bp, MISC_REG_CHIP_REV);
7551 id |= ((val & 0xf) << 12);
7552 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7553 id |= ((val & 0xff) << 4);
7554 val = REG_RD(bp, MISC_REG_BOND_ID);
7556 bp->common.chip_id = id;
7557 bp->link_params.chip_id = bp->common.chip_id;
7558 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7560 val = (REG_RD(bp, 0x2874) & 0x55);
7561 if ((bp->common.chip_id & 0x1) ||
7562 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7563 bp->flags |= ONE_PORT_FLAG;
7564 BNX2X_DEV_INFO("single port device\n");
7567 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7568 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7569 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7570 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7571 bp->common.flash_size, bp->common.flash_size);
7573 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7574 bp->link_params.shmem_base = bp->common.shmem_base;
7575 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7577 if (!bp->common.shmem_base ||
7578 (bp->common.shmem_base < 0xA0000) ||
7579 (bp->common.shmem_base >= 0xC0000)) {
7580 BNX2X_DEV_INFO("MCP not active\n");
7581 bp->flags |= NO_MCP_FLAG;
7585 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7586 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7587 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7588 BNX2X_ERR("BAD MCP validity signature\n");
7590 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7591 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7593 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7594 SHARED_HW_CFG_LED_MODE_MASK) >>
7595 SHARED_HW_CFG_LED_MODE_SHIFT);
7597 bp->link_params.feature_config_flags = 0;
7598 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7599 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7600 bp->link_params.feature_config_flags |=
7601 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7603 bp->link_params.feature_config_flags &=
7604 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7606 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7607 bp->common.bc_ver = val;
7608 BNX2X_DEV_INFO("bc_ver %X\n", val);
7609 if (val < BNX2X_BC_VER) {
7610 /* for now only warn
7611 * later we might need to enforce this */
7612 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7613 " please upgrade BC\n", BNX2X_BC_VER, val);
7616 if (BP_E1HVN(bp) == 0) {
7617 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7618 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7620 /* no WOL capability for E1HVN != 0 */
7621 bp->flags |= NO_WOL_FLAG;
7623 BNX2X_DEV_INFO("%sWoL capable\n",
7624 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7626 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7627 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7628 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7629 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7631 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7632 val, val2, val3, val4);
7635 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7638 int port = BP_PORT(bp);
7641 switch (switch_cfg) {
7643 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7646 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7647 switch (ext_phy_type) {
7648 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7649 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7652 bp->port.supported |= (SUPPORTED_10baseT_Half |
7653 SUPPORTED_10baseT_Full |
7654 SUPPORTED_100baseT_Half |
7655 SUPPORTED_100baseT_Full |
7656 SUPPORTED_1000baseT_Full |
7657 SUPPORTED_2500baseX_Full |
7662 SUPPORTED_Asym_Pause);
7665 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7666 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7669 bp->port.supported |= (SUPPORTED_10baseT_Half |
7670 SUPPORTED_10baseT_Full |
7671 SUPPORTED_100baseT_Half |
7672 SUPPORTED_100baseT_Full |
7673 SUPPORTED_1000baseT_Full |
7678 SUPPORTED_Asym_Pause);
7682 BNX2X_ERR("NVRAM config error. "
7683 "BAD SerDes ext_phy_config 0x%x\n",
7684 bp->link_params.ext_phy_config);
7688 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7690 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7693 case SWITCH_CFG_10G:
7694 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7697 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7698 switch (ext_phy_type) {
7699 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7700 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7703 bp->port.supported |= (SUPPORTED_10baseT_Half |
7704 SUPPORTED_10baseT_Full |
7705 SUPPORTED_100baseT_Half |
7706 SUPPORTED_100baseT_Full |
7707 SUPPORTED_1000baseT_Full |
7708 SUPPORTED_2500baseX_Full |
7709 SUPPORTED_10000baseT_Full |
7714 SUPPORTED_Asym_Pause);
7717 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7718 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7721 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7722 SUPPORTED_1000baseT_Full |
7726 SUPPORTED_Asym_Pause);
7729 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7730 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7733 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7734 SUPPORTED_2500baseX_Full |
7735 SUPPORTED_1000baseT_Full |
7739 SUPPORTED_Asym_Pause);
7742 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7743 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7746 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7749 SUPPORTED_Asym_Pause);
7752 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7753 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7756 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7757 SUPPORTED_1000baseT_Full |
7760 SUPPORTED_Asym_Pause);
7763 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7764 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7767 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7768 SUPPORTED_1000baseT_Full |
7772 SUPPORTED_Asym_Pause);
7775 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7776 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7779 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7783 SUPPORTED_Asym_Pause);
7786 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7787 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7790 bp->port.supported |= (SUPPORTED_10baseT_Half |
7791 SUPPORTED_10baseT_Full |
7792 SUPPORTED_100baseT_Half |
7793 SUPPORTED_100baseT_Full |
7794 SUPPORTED_1000baseT_Full |
7795 SUPPORTED_10000baseT_Full |
7799 SUPPORTED_Asym_Pause);
7802 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7803 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7804 bp->link_params.ext_phy_config);
7808 BNX2X_ERR("NVRAM config error. "
7809 "BAD XGXS ext_phy_config 0x%x\n",
7810 bp->link_params.ext_phy_config);
7814 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7816 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7821 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7822 bp->port.link_config);
7825 bp->link_params.phy_addr = bp->port.phy_addr;
7827 /* mask what we support according to speed_cap_mask */
7828 if (!(bp->link_params.speed_cap_mask &
7829 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7830 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7832 if (!(bp->link_params.speed_cap_mask &
7833 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7834 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7836 if (!(bp->link_params.speed_cap_mask &
7837 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7838 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7840 if (!(bp->link_params.speed_cap_mask &
7841 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7842 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7844 if (!(bp->link_params.speed_cap_mask &
7845 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7846 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7847 SUPPORTED_1000baseT_Full);
7849 if (!(bp->link_params.speed_cap_mask &
7850 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7851 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7853 if (!(bp->link_params.speed_cap_mask &
7854 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7855 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7857 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7860 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7862 bp->link_params.req_duplex = DUPLEX_FULL;
7864 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7865 case PORT_FEATURE_LINK_SPEED_AUTO:
7866 if (bp->port.supported & SUPPORTED_Autoneg) {
7867 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7868 bp->port.advertising = bp->port.supported;
7871 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7873 if ((ext_phy_type ==
7874 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7876 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7877 /* force 10G, no AN */
7878 bp->link_params.req_line_speed = SPEED_10000;
7879 bp->port.advertising =
7880 (ADVERTISED_10000baseT_Full |
7884 BNX2X_ERR("NVRAM config error. "
7885 "Invalid link_config 0x%x"
7886 " Autoneg not supported\n",
7887 bp->port.link_config);
7892 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7893 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7894 bp->link_params.req_line_speed = SPEED_10;
7895 bp->port.advertising = (ADVERTISED_10baseT_Full |
7898 BNX2X_ERR("NVRAM config error. "
7899 "Invalid link_config 0x%x"
7900 " speed_cap_mask 0x%x\n",
7901 bp->port.link_config,
7902 bp->link_params.speed_cap_mask);
7907 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7908 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7909 bp->link_params.req_line_speed = SPEED_10;
7910 bp->link_params.req_duplex = DUPLEX_HALF;
7911 bp->port.advertising = (ADVERTISED_10baseT_Half |
7914 BNX2X_ERR("NVRAM config error. "
7915 "Invalid link_config 0x%x"
7916 " speed_cap_mask 0x%x\n",
7917 bp->port.link_config,
7918 bp->link_params.speed_cap_mask);
7923 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7924 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7925 bp->link_params.req_line_speed = SPEED_100;
7926 bp->port.advertising = (ADVERTISED_100baseT_Full |
7929 BNX2X_ERR("NVRAM config error. "
7930 "Invalid link_config 0x%x"
7931 " speed_cap_mask 0x%x\n",
7932 bp->port.link_config,
7933 bp->link_params.speed_cap_mask);
7938 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7939 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7940 bp->link_params.req_line_speed = SPEED_100;
7941 bp->link_params.req_duplex = DUPLEX_HALF;
7942 bp->port.advertising = (ADVERTISED_100baseT_Half |
7945 BNX2X_ERR("NVRAM config error. "
7946 "Invalid link_config 0x%x"
7947 " speed_cap_mask 0x%x\n",
7948 bp->port.link_config,
7949 bp->link_params.speed_cap_mask);
7954 case PORT_FEATURE_LINK_SPEED_1G:
7955 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7956 bp->link_params.req_line_speed = SPEED_1000;
7957 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7960 BNX2X_ERR("NVRAM config error. "
7961 "Invalid link_config 0x%x"
7962 " speed_cap_mask 0x%x\n",
7963 bp->port.link_config,
7964 bp->link_params.speed_cap_mask);
7969 case PORT_FEATURE_LINK_SPEED_2_5G:
7970 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7971 bp->link_params.req_line_speed = SPEED_2500;
7972 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7975 BNX2X_ERR("NVRAM config error. "
7976 "Invalid link_config 0x%x"
7977 " speed_cap_mask 0x%x\n",
7978 bp->port.link_config,
7979 bp->link_params.speed_cap_mask);
7984 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7985 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7986 case PORT_FEATURE_LINK_SPEED_10G_KR:
7987 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7988 bp->link_params.req_line_speed = SPEED_10000;
7989 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7992 BNX2X_ERR("NVRAM config error. "
7993 "Invalid link_config 0x%x"
7994 " speed_cap_mask 0x%x\n",
7995 bp->port.link_config,
7996 bp->link_params.speed_cap_mask);
8002 BNX2X_ERR("NVRAM config error. "
8003 "BAD link speed link_config 0x%x\n",
8004 bp->port.link_config);
8005 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8006 bp->port.advertising = bp->port.supported;
8010 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8011 PORT_FEATURE_FLOW_CONTROL_MASK);
8012 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8013 !(bp->port.supported & SUPPORTED_Autoneg))
8014 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8016 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8017 " advertising 0x%x\n",
8018 bp->link_params.req_line_speed,
8019 bp->link_params.req_duplex,
8020 bp->link_params.req_flow_ctrl, bp->port.advertising);
8023 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8025 int port = BP_PORT(bp);
8030 bp->link_params.bp = bp;
8031 bp->link_params.port = port;
8033 bp->link_params.lane_config =
8034 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8035 bp->link_params.ext_phy_config =
8037 dev_info.port_hw_config[port].external_phy_config);
8038 bp->link_params.speed_cap_mask =
8040 dev_info.port_hw_config[port].speed_capability_mask);
8042 bp->port.link_config =
8043 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8045 /* Get the 4 lanes xgxs config rx and tx */
8046 for (i = 0; i < 2; i++) {
8048 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8049 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8050 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8053 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8054 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8055 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8058 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8059 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8060 bp->link_params.feature_config_flags |=
8061 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8063 bp->link_params.feature_config_flags &=
8064 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8066 /* If the device is capable of WoL, set the default state according to the port feature configuration */
8069 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8070 (config & PORT_FEATURE_WOL_ENABLED));
8072 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8073 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8074 bp->link_params.lane_config,
8075 bp->link_params.ext_phy_config,
8076 bp->link_params.speed_cap_mask, bp->port.link_config);
8078 bp->link_params.switch_cfg = (bp->port.link_config &
8079 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8080 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8082 bnx2x_link_settings_requested(bp);
8084 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8085 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8086 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8087 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8088 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8089 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8090 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8091 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8092 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8093 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8096 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8098 int func = BP_FUNC(bp);
8102 bnx2x_get_common_hwinfo(bp);
8106 if (CHIP_IS_E1H(bp)) {
8108 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8110 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8111 FUNC_MF_CFG_E1HOV_TAG_MASK);
8112 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8116 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8118 func, bp->e1hov, bp->e1hov);
8120 BNX2X_DEV_INFO("single function mode\n");
8122 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8123 " aborting\n", func);
8129 if (!BP_NOMCP(bp)) {
8130 bnx2x_get_port_hwinfo(bp);
8132 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8133 DRV_MSG_SEQ_NUMBER_MASK);
8134 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8138 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8139 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8140 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8141 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8142 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8143 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8144 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8145 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8146 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8147 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8148 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8150 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8158 /* only supposed to happen on emulation/FPGA */
8159 BNX2X_ERR("warning random MAC workaround active\n");
8160 random_ether_addr(bp->dev->dev_addr);
8161 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8167 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8169 int func = BP_FUNC(bp);
8173 /* Disable interrupt handling until HW is initialized */
8174 atomic_set(&bp->intr_sem, 1);
8176 mutex_init(&bp->port.phy_mutex);
8178 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8179 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8181 rc = bnx2x_get_hwinfo(bp);
8183 /* need to reset chip if undi was active */
8185 bnx2x_undi_unload(bp);
8187 if (CHIP_REV_IS_FPGA(bp))
8188 printk(KERN_ERR PFX "FPGA detected\n");
8190 if (BP_NOMCP(bp) && (func == 0))
8192 "MCP disabled, must load devices in order!\n");
8194 /* Set multi queue mode */
8195 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8196 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8198 "Multi disabled since int_mode requested is not MSI-X\n");
8199 multi_mode = ETH_RSS_MODE_DISABLED;
8201 bp->multi_mode = multi_mode;
8206 bp->flags &= ~TPA_ENABLE_FLAG;
8207 bp->dev->features &= ~NETIF_F_LRO;
8209 bp->flags |= TPA_ENABLE_FLAG;
8210 bp->dev->features |= NETIF_F_LRO;
8215 bp->tx_ring_size = MAX_TX_AVAIL;
8216 bp->rx_ring_size = MAX_RX_AVAIL;
8223 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8224 bp->current_interval = (poll ? poll : timer_interval);
8226 init_timer(&bp->timer);
8227 bp->timer.expires = jiffies + bp->current_interval;
8228 bp->timer.data = (unsigned long) bp;
8229 bp->timer.function = bnx2x_timer;
8235 * ethtool service functions
8238 /* All ethtool functions called with rtnl_lock */
8240 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8242 struct bnx2x *bp = netdev_priv(dev);
8244 cmd->supported = bp->port.supported;
8245 cmd->advertising = bp->port.advertising;
8247 if (netif_carrier_ok(dev)) {
8248 cmd->speed = bp->link_vars.line_speed;
8249 cmd->duplex = bp->link_vars.duplex;
8251 cmd->speed = bp->link_params.req_line_speed;
8252 cmd->duplex = bp->link_params.req_duplex;
8257 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8258 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8259 if (vn_max_rate < cmd->speed)
8260 cmd->speed = vn_max_rate;
8263 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8265 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8267 switch (ext_phy_type) {
8268 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8269 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8270 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8271 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8272 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8273 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8274 cmd->port = PORT_FIBRE;
8277 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8278 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8279 cmd->port = PORT_TP;
8282 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8283 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8284 bp->link_params.ext_phy_config);
8288 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8289 bp->link_params.ext_phy_config);
8293 cmd->port = PORT_TP;
8295 cmd->phy_address = bp->port.phy_addr;
8296 cmd->transceiver = XCVR_INTERNAL;
8298 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8299 cmd->autoneg = AUTONEG_ENABLE;
8301 cmd->autoneg = AUTONEG_DISABLE;
8306 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8307 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8308 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8309 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8310 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8311 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8312 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8317 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8319 struct bnx2x *bp = netdev_priv(dev);
8325 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8326 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8327 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8328 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8329 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8330 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8331 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8333 if (cmd->autoneg == AUTONEG_ENABLE) {
8334 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8335 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8339 /* advertise the requested speed and duplex if supported */
8340 cmd->advertising &= bp->port.supported;
8342 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8343 bp->link_params.req_duplex = DUPLEX_FULL;
8344 bp->port.advertising |= (ADVERTISED_Autoneg |
8347 } else { /* forced speed */
8348 /* advertise the requested speed and duplex if supported */
8349 switch (cmd->speed) {
8351 if (cmd->duplex == DUPLEX_FULL) {
8352 if (!(bp->port.supported &
8353 SUPPORTED_10baseT_Full)) {
8355 "10M full not supported\n");
8359 advertising = (ADVERTISED_10baseT_Full |
8362 if (!(bp->port.supported &
8363 SUPPORTED_10baseT_Half)) {
8365 "10M half not supported\n");
8369 advertising = (ADVERTISED_10baseT_Half |
8375 if (cmd->duplex == DUPLEX_FULL) {
8376 if (!(bp->port.supported &
8377 SUPPORTED_100baseT_Full)) {
8379 "100M full not supported\n");
8383 advertising = (ADVERTISED_100baseT_Full |
8386 if (!(bp->port.supported &
8387 SUPPORTED_100baseT_Half)) {
8389 "100M half not supported\n");
8393 advertising = (ADVERTISED_100baseT_Half |
8399 if (cmd->duplex != DUPLEX_FULL) {
8400 DP(NETIF_MSG_LINK, "1G half not supported\n");
8404 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8405 DP(NETIF_MSG_LINK, "1G full not supported\n");
8409 advertising = (ADVERTISED_1000baseT_Full |
8414 if (cmd->duplex != DUPLEX_FULL) {
8416 "2.5G half not supported\n");
8420 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8422 "2.5G full not supported\n");
8426 advertising = (ADVERTISED_2500baseX_Full |
8431 if (cmd->duplex != DUPLEX_FULL) {
8432 DP(NETIF_MSG_LINK, "10G half not supported\n");
8436 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8437 DP(NETIF_MSG_LINK, "10G full not supported\n");
8441 advertising = (ADVERTISED_10000baseT_Full |
8446 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8450 bp->link_params.req_line_speed = cmd->speed;
8451 bp->link_params.req_duplex = cmd->duplex;
8452 bp->port.advertising = advertising;
8455 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8456 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8457 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8458 bp->port.advertising);
8460 if (netif_running(dev)) {
8461 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8468 #define PHY_FW_VER_LEN 10
8470 static void bnx2x_get_drvinfo(struct net_device *dev,
8471 struct ethtool_drvinfo *info)
8473 struct bnx2x *bp = netdev_priv(dev);
8474 u8 phy_fw_ver[PHY_FW_VER_LEN];
8476 strcpy(info->driver, DRV_MODULE_NAME);
8477 strcpy(info->version, DRV_MODULE_VERSION);
8479 phy_fw_ver[0] = '\0';
8481 bnx2x_acquire_phy_lock(bp);
8482 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8483 (bp->state != BNX2X_STATE_CLOSED),
8484 phy_fw_ver, PHY_FW_VER_LEN);
8485 bnx2x_release_phy_lock(bp);
8488 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8489 (bp->common.bc_ver & 0xff0000) >> 16,
8490 (bp->common.bc_ver & 0xff00) >> 8,
8491 (bp->common.bc_ver & 0xff),
8492 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8493 strcpy(info->bus_info, pci_name(bp->pdev));
8494 info->n_stats = BNX2X_NUM_STATS;
8495 info->testinfo_len = BNX2X_NUM_TESTS;
8496 info->eedump_len = bp->common.flash_size;
8497 info->regdump_len = 0;
8500 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8502 struct bnx2x *bp = netdev_priv(dev);
8504 if (bp->flags & NO_WOL_FLAG) {
8508 wol->supported = WAKE_MAGIC;
8510 wol->wolopts = WAKE_MAGIC;
8514 memset(&wol->sopass, 0, sizeof(wol->sopass));
8517 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8519 struct bnx2x *bp = netdev_priv(dev);
8521 if (wol->wolopts & ~WAKE_MAGIC)
8524 if (wol->wolopts & WAKE_MAGIC) {
8525 if (bp->flags & NO_WOL_FLAG)
8535 static u32 bnx2x_get_msglevel(struct net_device *dev)
8537 struct bnx2x *bp = netdev_priv(dev);
8539 return bp->msglevel;
8542 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8544 struct bnx2x *bp = netdev_priv(dev);
8546 if (capable(CAP_NET_ADMIN))
8547 bp->msglevel = level;
8550 static int bnx2x_nway_reset(struct net_device *dev)
8552 struct bnx2x *bp = netdev_priv(dev);
8557 if (netif_running(dev)) {
8558 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8565 static int bnx2x_get_eeprom_len(struct net_device *dev)
8567 struct bnx2x *bp = netdev_priv(dev);
8569 return bp->common.flash_size;
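/* NVRAM access is arbitrated per port through the MCPR_NVM_SW_ARB
 * register: request the port's arbitration bit and poll until it is
 * granted, scaling the timeout up on emulation/FPGA platforms.
 */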
8572 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8574 int port = BP_PORT(bp);
8578 /* adjust timeout for emulation/FPGA */
8579 count = NVRAM_TIMEOUT_COUNT;
8580 if (CHIP_REV_IS_SLOW(bp))
8583 /* request access to nvram interface */
8584 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8585 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8587 for (i = 0; i < count*10; i++) {
8588 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8589 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8595 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8596 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8603 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8605 int port = BP_PORT(bp);
8609 /* adjust timeout for emulation/FPGA */
8610 count = NVRAM_TIMEOUT_COUNT;
8611 if (CHIP_REV_IS_SLOW(bp))
8614 /* relinquish nvram interface */
8615 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8616 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8618 for (i = 0; i < count*10; i++) {
8619 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8620 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8626 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8627 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8634 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8638 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8640 /* enable both bits, even on read */
8641 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8642 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8643 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8646 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8650 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8652 /* disable both bits, even after read */
8653 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8654 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8655 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
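/* Single dword NVRAM read: clear the DONE bit, program the address, issue
 * the DOIT command and poll for DONE; the result is returned big-endian so
 * ethtool sees a plain byte stream.
 */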
8658 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8664 /* build the command word */
8665 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8667 /* need to clear DONE bit separately */
8668 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8670 /* address of the NVRAM to read from */
8671 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8672 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8674 /* issue a read command */
8675 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8677 /* adjust timeout for emulation/FPGA */
8678 count = NVRAM_TIMEOUT_COUNT;
8679 if (CHIP_REV_IS_SLOW(bp))
8682 /* wait for completion */
8685 for (i = 0; i < count; i++) {
8687 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8689 if (val & MCPR_NVM_COMMAND_DONE) {
8690 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8691 /* we read nvram data in cpu order
8692 * but ethtool sees it as an array of bytes
8693 * converting to big-endian gives ethtool the layout it expects */
8694 *ret_val = cpu_to_be32(val);
8703 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8710 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8712 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8717 if (offset + buf_size > bp->common.flash_size) {
8718 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8719 " buf_size (0x%x) > flash_size (0x%x)\n",
8720 offset, buf_size, bp->common.flash_size);
8724 /* request access to nvram interface */
8725 rc = bnx2x_acquire_nvram_lock(bp);
8729 /* enable access to nvram interface */
8730 bnx2x_enable_nvram_access(bp);
8732 /* read the first word(s) */
8733 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8734 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8735 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8736 memcpy(ret_buf, &val, 4);
8738 /* advance to the next dword */
8739 offset += sizeof(u32);
8740 ret_buf += sizeof(u32);
8741 buf_size -= sizeof(u32);
8746 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8747 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8748 memcpy(ret_buf, &val, 4);
8751 /* disable access to nvram interface */
8752 bnx2x_disable_nvram_access(bp);
8753 bnx2x_release_nvram_lock(bp);
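/* A rough caller-side sketch of the read path above (it mirrors what
 * bnx2x_test_nvram() does further down; the local variables here are
 * illustrative only):
 *
 *	__be32 magic;
 *	int rc;
 *
 *	rc = bnx2x_nvram_read(bp, 0, (u8 *)&magic, sizeof(magic));
 *	if (rc == 0 && be32_to_cpu(magic) != 0x669955aa)
 *		rc = -ENODEV;		(no valid NVRAM image)
 *
 * Internally each access takes the per-port SW arbitration bit in
 * MCP_REG_MCPR_NVM_SW_ARB, enables the interface, issues dword
 * commands flagged FIRST/LAST and polls MCPR_NVM_COMMAND_DONE (the
 * poll count is scaled up for emulation/FPGA), then disables the
 * interface and releases the lock.
 */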
8758 static int bnx2x_get_eeprom(struct net_device *dev,
8759 struct ethtool_eeprom *eeprom, u8 *eebuf)
8761 struct bnx2x *bp = netdev_priv(dev);
8764 if (!netif_running(dev))
8767 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8768 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8769 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8770 eeprom->len, eeprom->len);
8772 /* parameters already validated in ethtool_get_eeprom */
8774 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8779 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8784 /* build the command word */
8785 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8787 /* need to clear DONE bit separately */
8788 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8790 /* write the data */
8791 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8793 /* address of the NVRAM to write to */
8794 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8795 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8797 /* issue the write command */
8798 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8800 /* adjust timeout for emulation/FPGA */
8801 count = NVRAM_TIMEOUT_COUNT;
8802 if (CHIP_REV_IS_SLOW(bp))
8805 /* wait for completion */
8807 for (i = 0; i < count; i++) {
8809 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8810 if (val & MCPR_NVM_COMMAND_DONE) {
8819 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8821 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8829 if (offset + buf_size > bp->common.flash_size) {
8830 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8831 " buf_size (0x%x) > flash_size (0x%x)\n",
8832 offset, buf_size, bp->common.flash_size);
8836 /* request access to nvram interface */
8837 rc = bnx2x_acquire_nvram_lock(bp);
8841 /* enable access to nvram interface */
8842 bnx2x_enable_nvram_access(bp);
8844 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8845 align_offset = (offset & ~0x03);
8846 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8849 val &= ~(0xff << BYTE_OFFSET(offset));
8850 val |= (*data_buf << BYTE_OFFSET(offset));
8852 /* nvram data is returned as an array of bytes
8853 * convert it back to cpu order */
8854 val = be32_to_cpu(val);
8856 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8860 /* disable access to nvram interface */
8861 bnx2x_disable_nvram_access(bp);
8862 bnx2x_release_nvram_lock(bp);
8867 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8875 if (buf_size == 1) /* ethtool */
8876 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8878 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8880 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8885 if (offset + buf_size > bp->common.flash_size) {
8886 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8887 " buf_size (0x%x) > flash_size (0x%x)\n",
8888 offset, buf_size, bp->common.flash_size);
8892 /* request access to nvram interface */
8893 rc = bnx2x_acquire_nvram_lock(bp);
8897 /* enable access to nvram interface */
8898 bnx2x_enable_nvram_access(bp);
8901 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8902 while ((written_so_far < buf_size) && (rc == 0)) {
8903 if (written_so_far == (buf_size - sizeof(u32)))
8904 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8905 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8906 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8907 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8908 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8910 memcpy(&val, data_buf, 4);
8912 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8914 /* advance to the next dword */
8915 offset += sizeof(u32);
8916 data_buf += sizeof(u32);
8917 written_so_far += sizeof(u32);
8921 /* disable access to nvram interface */
8922 bnx2x_disable_nvram_access(bp);
8923 bnx2x_release_nvram_lock(bp);
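/* Notes on the write path above (summary, not new behaviour): bulk
 * writes must be dword aligned, and MCPR_NVM_COMMAND_FIRST/LAST are
 * re-asserted at every NVRAM_PAGE_SIZE boundary so the flash sees one
 * command sequence per page.  Single-byte writes (the ethtool case) go
 * through bnx2x_nvram_write1(), which read-modify-writes the containing
 * dword, using BYTE_OFFSET() to pick the byte lane: e.g. for offset
 * 0x12, BYTE_OFFSET() = 8 * (0x12 & 3) = 16, so bits 23:16 are replaced.
 */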
8928 static int bnx2x_set_eeprom(struct net_device *dev,
8929 struct ethtool_eeprom *eeprom, u8 *eebuf)
8931 struct bnx2x *bp = netdev_priv(dev);
8934 if (!netif_running(dev))
8937 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8938 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8939 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8940 eeprom->len, eeprom->len);
8942 /* parameters already validated in ethtool_set_eeprom */
8944 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8945 if (eeprom->magic == 0x00504859)
8948 bnx2x_acquire_phy_lock(bp);
8949 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8950 bp->link_params.ext_phy_config,
8951 (bp->state != BNX2X_STATE_CLOSED),
8952 eebuf, eeprom->len);
8953 if ((bp->state == BNX2X_STATE_OPEN) ||
8954 (bp->state == BNX2X_STATE_DISABLED)) {
8955 rc |= bnx2x_link_reset(&bp->link_params,
8957 rc |= bnx2x_phy_init(&bp->link_params,
8960 bnx2x_release_phy_lock(bp);
8962 } else /* Only the PMF can access the PHY */
8965 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8970 static int bnx2x_get_coalesce(struct net_device *dev,
8971 struct ethtool_coalesce *coal)
8973 struct bnx2x *bp = netdev_priv(dev);
8975 memset(coal, 0, sizeof(struct ethtool_coalesce));
8977 coal->rx_coalesce_usecs = bp->rx_ticks;
8978 coal->tx_coalesce_usecs = bp->tx_ticks;
8983 static int bnx2x_set_coalesce(struct net_device *dev,
8984 struct ethtool_coalesce *coal)
8986 struct bnx2x *bp = netdev_priv(dev);
8988 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8989 if (bp->rx_ticks > 3000)
8990 bp->rx_ticks = 3000;
8992 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8993 if (bp->tx_ticks > 0x3000)
8994 bp->tx_ticks = 0x3000;
8996 if (netif_running(dev))
8997 bnx2x_update_coalesce(bp);
9002 static void bnx2x_get_ringparam(struct net_device *dev,
9003 struct ethtool_ringparam *ering)
9005 struct bnx2x *bp = netdev_priv(dev);
9007 ering->rx_max_pending = MAX_RX_AVAIL;
9008 ering->rx_mini_max_pending = 0;
9009 ering->rx_jumbo_max_pending = 0;
9011 ering->rx_pending = bp->rx_ring_size;
9012 ering->rx_mini_pending = 0;
9013 ering->rx_jumbo_pending = 0;
9015 ering->tx_max_pending = MAX_TX_AVAIL;
9016 ering->tx_pending = bp->tx_ring_size;
9019 static int bnx2x_set_ringparam(struct net_device *dev,
9020 struct ethtool_ringparam *ering)
9022 struct bnx2x *bp = netdev_priv(dev);
9025 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9026 (ering->tx_pending > MAX_TX_AVAIL) ||
9027 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9030 bp->rx_ring_size = ering->rx_pending;
9031 bp->tx_ring_size = ering->tx_pending;
9033 if (netif_running(dev)) {
9034 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9035 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9041 static void bnx2x_get_pauseparam(struct net_device *dev,
9042 struct ethtool_pauseparam *epause)
9044 struct bnx2x *bp = netdev_priv(dev);
9046 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9047 BNX2X_FLOW_CTRL_AUTO) &&
9048 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9050 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9051 BNX2X_FLOW_CTRL_RX);
9052 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9053 BNX2X_FLOW_CTRL_TX);
9055 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9056 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9057 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9060 static int bnx2x_set_pauseparam(struct net_device *dev,
9061 struct ethtool_pauseparam *epause)
9063 struct bnx2x *bp = netdev_priv(dev);
9068 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9069 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9070 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9072 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9074 if (epause->rx_pause)
9075 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9077 if (epause->tx_pause)
9078 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9080 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9081 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9083 if (epause->autoneg) {
9084 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9085 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9089 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9090 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9094 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9096 if (netif_running(dev)) {
9097 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9104 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9106 struct bnx2x *bp = netdev_priv(dev);
9110 /* TPA requires Rx CSUM offloading */
9111 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9112 if (!(dev->features & NETIF_F_LRO)) {
9113 dev->features |= NETIF_F_LRO;
9114 bp->flags |= TPA_ENABLE_FLAG;
9118 } else if (dev->features & NETIF_F_LRO) {
9119 dev->features &= ~NETIF_F_LRO;
9120 bp->flags &= ~TPA_ENABLE_FLAG;
9124 if (changed && netif_running(dev)) {
9125 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9126 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9132 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9134 struct bnx2x *bp = netdev_priv(dev);
9139 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9141 struct bnx2x *bp = netdev_priv(dev);
9146 /* Disable TPA when Rx CSUM is disabled; otherwise all
9147 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9149 u32 flags = ethtool_op_get_flags(dev);
9151 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9157 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9160 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9161 dev->features |= NETIF_F_TSO6;
9163 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9164 dev->features &= ~NETIF_F_TSO6;
9170 static const struct {
9171 char string[ETH_GSTRING_LEN];
9172 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9173 { "register_test (offline)" },
9174 { "memory_test (offline)" },
9175 { "loopback_test (offline)" },
9176 { "nvram_test (online)" },
9177 { "interrupt_test (online)" },
9178 { "link_test (online)" },
9179 { "idle check (online)" }
9182 static int bnx2x_self_test_count(struct net_device *dev)
9184 return BNX2X_NUM_TESTS;
9187 static int bnx2x_test_registers(struct bnx2x *bp)
9189 int idx, i, rc = -ENODEV;
9191 int port = BP_PORT(bp);
9192 static const struct {
9197 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9198 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9199 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9200 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9201 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9202 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9203 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9204 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9205 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9206 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9207 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9208 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9209 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9210 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9211 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9212 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9213 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9214 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9215 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9216 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9217 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9218 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9219 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9220 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9221 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9222 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9223 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9224 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9225 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9226 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9227 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9228 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9229 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9230 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9231 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9232 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9233 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9234 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9236 { 0xffffffff, 0, 0x00000000 }
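/* Each reg_tbl[] entry is { offset0, per-port stride, r/w mask }: the
 * register exercised is offset0 + port * offset1 and only the bits in
 * the mask are compared.  The loop below saves the current value,
 * writes the pattern (0x00000000, then 0xffffffff), reads back under
 * the mask and restores the original value.
 */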
9239 if (!netif_running(bp->dev))
9242 /* Run the test twice:
9243 first writing 0x00000000, then writing 0xffffffff */
9244 for (idx = 0; idx < 2; idx++) {
9251 wr_val = 0xffffffff;
9255 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9256 u32 offset, mask, save_val, val;
9258 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9259 mask = reg_tbl[i].mask;
9261 save_val = REG_RD(bp, offset);
9263 REG_WR(bp, offset, wr_val);
9264 val = REG_RD(bp, offset);
9266 /* Restore the original register's value */
9267 REG_WR(bp, offset, save_val);
9269 /* verify the value is as expected */
9270 if ((val & mask) != (wr_val & mask))
9281 static int bnx2x_test_memory(struct bnx2x *bp)
9283 int i, j, rc = -ENODEV;
9285 static const struct {
9289 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9290 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9291 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9292 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9293 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9294 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9295 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9299 static const struct {
9305 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9306 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9307 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9308 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9309 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9310 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9312 { NULL, 0xffffffff, 0, 0 }
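/* The memory test only reads: walking every word of the blocks in
 * mem_tbl[] lets the parity logic check each line, and any latched
 * error then shows up in the *_PRTY_STS registers listed above.  The
 * e1_mask/e1h_mask columns name bits that may legitimately be set on
 * the respective chips and are therefore ignored.
 */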
9315 if (!netif_running(bp->dev))
9318 /* Go through all the memories */
9319 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9320 for (j = 0; j < mem_tbl[i].size; j++)
9321 REG_RD(bp, mem_tbl[i].offset + j*4);
9323 /* Check the parity status */
9324 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9325 val = REG_RD(bp, prty_tbl[i].offset);
9326 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9327 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9329 "%s is 0x%x\n", prty_tbl[i].name, val);
9340 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9345 while (bnx2x_link_test(bp) && cnt--)
9349 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9351 unsigned int pkt_size, num_pkts, i;
9352 struct sk_buff *skb;
9353 unsigned char *packet;
9354 struct bnx2x_fastpath *fp = &bp->fp[0];
9355 u16 tx_start_idx, tx_idx;
9356 u16 rx_start_idx, rx_idx;
9358 struct sw_tx_bd *tx_buf;
9359 struct eth_tx_bd *tx_bd;
9361 union eth_rx_cqe *cqe;
9363 struct sw_rx_bd *rx_buf;
9367 /* check the loopback mode */
9368 switch (loopback_mode) {
9369 case BNX2X_PHY_LOOPBACK:
9370 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9373 case BNX2X_MAC_LOOPBACK:
9374 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9375 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9381 /* prepare the loopback packet */
9382 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9383 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9384 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9387 goto test_loopback_exit;
9389 packet = skb_put(skb, pkt_size);
9390 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9391 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9392 for (i = ETH_HLEN; i < pkt_size; i++)
9393 packet[i] = (unsigned char) (i & 0xff);
9395 /* send the loopback packet */
9397 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9398 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9400 pkt_prod = fp->tx_pkt_prod++;
9401 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9402 tx_buf->first_bd = fp->tx_bd_prod;
9405 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9406 mapping = pci_map_single(bp->pdev, skb->data,
9407 skb_headlen(skb), PCI_DMA_TODEVICE);
9408 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9409 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9410 tx_bd->nbd = cpu_to_le16(1);
9411 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9412 tx_bd->vlan = cpu_to_le16(pkt_prod);
9413 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9414 ETH_TX_BD_FLAGS_END_BD);
9415 tx_bd->general_data = ((UNICAST_ADDRESS <<
9416 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9420 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9421 mb(); /* FW restriction: must not reorder writing nbd and packets */
9422 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9423 DOORBELL(bp, fp->index, 0);
9429 bp->dev->trans_start = jiffies;
9433 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9434 if (tx_idx != tx_start_idx + num_pkts)
9435 goto test_loopback_exit;
9437 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9438 if (rx_idx != rx_start_idx + num_pkts)
9439 goto test_loopback_exit;
9441 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9442 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9443 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9444 goto test_loopback_rx_exit;
9446 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9447 if (len != pkt_size)
9448 goto test_loopback_rx_exit;
9450 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9452 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9453 for (i = ETH_HLEN; i < pkt_size; i++)
9454 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9455 goto test_loopback_rx_exit;
9459 test_loopback_rx_exit:
9461 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9462 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9463 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9464 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9466 /* Update producers */
9467 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9471 bp->link_params.loopback_mode = LOOPBACK_NONE;
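/* Loopback test flow, for reference: a single packet is built with the
 * device's own MAC as destination and a counting-byte payload, posted
 * as one TX BD and kicked with a doorbell.  The test then checks that
 * the TX and RX status-block consumers each advanced by num_pkts, that
 * the completion carries no error flags and the expected length, and
 * that every payload byte survived the round trip.
 */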
9476 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9480 if (!netif_running(bp->dev))
9481 return BNX2X_LOOPBACK_FAILED;
9483 bnx2x_netif_stop(bp, 1);
9484 bnx2x_acquire_phy_lock(bp);
9486 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9488 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9489 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9492 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9494 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9495 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9498 bnx2x_release_phy_lock(bp);
9499 bnx2x_netif_start(bp);
9504 #define CRC32_RESIDUAL 0xdebb20e3
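/* Standard CRC-32 property: checksumming a block together with the
 * (little-endian) CRC appended to it yields the constant residual
 * 0xdebb20e3.  bnx2x_test_nvram() relies on this, so every region in
 * nvram_tbl[] is expected to carry its CRC at the end.
 */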
9506 static int bnx2x_test_nvram(struct bnx2x *bp)
9508 static const struct {
9512 { 0, 0x14 }, /* bootstrap */
9513 { 0x14, 0xec }, /* dir */
9514 { 0x100, 0x350 }, /* manuf_info */
9515 { 0x450, 0xf0 }, /* feature_info */
9516 { 0x640, 0x64 }, /* upgrade_key_info */
9518 { 0x708, 0x70 }, /* manuf_key_info */
9522 __be32 buf[0x350 / 4];
9523 u8 *data = (u8 *)buf;
9527 rc = bnx2x_nvram_read(bp, 0, data, 4);
9529 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9530 goto test_nvram_exit;
9533 magic = be32_to_cpu(buf[0]);
9534 if (magic != 0x669955aa) {
9535 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9537 goto test_nvram_exit;
9540 for (i = 0; nvram_tbl[i].size; i++) {
9542 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9546 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9547 goto test_nvram_exit;
9550 csum = ether_crc_le(nvram_tbl[i].size, data);
9551 if (csum != CRC32_RESIDUAL) {
9553 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9555 goto test_nvram_exit;
9563 static int bnx2x_test_intr(struct bnx2x *bp)
9565 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9568 if (!netif_running(bp->dev))
9571 config->hdr.length = 0;
9573 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9575 config->hdr.offset = BP_FUNC(bp);
9576 config->hdr.client_id = bp->fp->cl_id;
9577 config->hdr.reserved1 = 0;
9579 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9580 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9581 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9583 bp->set_mac_pending++;
9584 for (i = 0; i < 10; i++) {
9585 if (!bp->set_mac_pending)
9587 msleep_interruptible(10);
9596 static void bnx2x_self_test(struct net_device *dev,
9597 struct ethtool_test *etest, u64 *buf)
9599 struct bnx2x *bp = netdev_priv(dev);
9601 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9603 if (!netif_running(dev))
9606 /* offline tests are not supported in MF mode */
9608 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9610 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9613 link_up = bp->link_vars.link_up;
9614 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9615 bnx2x_nic_load(bp, LOAD_DIAG);
9616 /* wait until link state is restored */
9617 bnx2x_wait_for_link(bp, link_up);
9619 if (bnx2x_test_registers(bp) != 0) {
9621 etest->flags |= ETH_TEST_FL_FAILED;
9623 if (bnx2x_test_memory(bp) != 0) {
9625 etest->flags |= ETH_TEST_FL_FAILED;
9627 buf[2] = bnx2x_test_loopback(bp, link_up);
9629 etest->flags |= ETH_TEST_FL_FAILED;
9631 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9632 bnx2x_nic_load(bp, LOAD_NORMAL);
9633 /* wait until link state is restored */
9634 bnx2x_wait_for_link(bp, link_up);
9636 if (bnx2x_test_nvram(bp) != 0) {
9638 etest->flags |= ETH_TEST_FL_FAILED;
9640 if (bnx2x_test_intr(bp) != 0) {
9642 etest->flags |= ETH_TEST_FL_FAILED;
9645 if (bnx2x_link_test(bp) != 0) {
9647 etest->flags |= ETH_TEST_FL_FAILED;
9650 #ifdef BNX2X_EXTRA_DEBUG
9651 bnx2x_panic_dump(bp);
9655 static const struct {
9658 u8 string[ETH_GSTRING_LEN];
9659 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9660 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9661 { Q_STATS_OFFSET32(error_bytes_received_hi),
9662 8, "[%d]: rx_error_bytes" },
9663 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9664 8, "[%d]: rx_ucast_packets" },
9665 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9666 8, "[%d]: rx_mcast_packets" },
9667 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9668 8, "[%d]: rx_bcast_packets" },
9669 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9670 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9671 4, "[%d]: rx_phy_ip_err_discards"},
9672 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9673 4, "[%d]: rx_skb_alloc_discard" },
9674 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9676 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9677 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9678 8, "[%d]: tx_packets" }
9681 static const struct {
9685 #define STATS_FLAGS_PORT 1
9686 #define STATS_FLAGS_FUNC 2
9687 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9688 u8 string[ETH_GSTRING_LEN];
9689 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9690 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9691 8, STATS_FLAGS_BOTH, "rx_bytes" },
9692 { STATS_OFFSET32(error_bytes_received_hi),
9693 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9694 { STATS_OFFSET32(total_unicast_packets_received_hi),
9695 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9696 { STATS_OFFSET32(total_multicast_packets_received_hi),
9697 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9698 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9699 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9700 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9701 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9702 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9703 8, STATS_FLAGS_PORT, "rx_align_errors" },
9704 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9705 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9706 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9707 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9708 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9709 8, STATS_FLAGS_PORT, "rx_fragments" },
9710 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9711 8, STATS_FLAGS_PORT, "rx_jabbers" },
9712 { STATS_OFFSET32(no_buff_discard_hi),
9713 8, STATS_FLAGS_BOTH, "rx_discards" },
9714 { STATS_OFFSET32(mac_filter_discard),
9715 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9716 { STATS_OFFSET32(xxoverflow_discard),
9717 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9718 { STATS_OFFSET32(brb_drop_hi),
9719 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9720 { STATS_OFFSET32(brb_truncate_hi),
9721 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9722 { STATS_OFFSET32(pause_frames_received_hi),
9723 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9724 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9725 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9726 { STATS_OFFSET32(nig_timer_max),
9727 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9728 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9729 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9730 { STATS_OFFSET32(rx_skb_alloc_failed),
9731 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9732 { STATS_OFFSET32(hw_csum_err),
9733 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9735 { STATS_OFFSET32(total_bytes_transmitted_hi),
9736 8, STATS_FLAGS_BOTH, "tx_bytes" },
9737 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9738 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9739 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9740 8, STATS_FLAGS_BOTH, "tx_packets" },
9741 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9742 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9743 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9744 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9745 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9746 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9747 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9748 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9749 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9750 8, STATS_FLAGS_PORT, "tx_deferred" },
9751 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9752 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9753 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9754 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9755 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9756 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9757 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9758 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9759 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9760 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9761 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9762 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9763 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9764 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9765 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9766 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9767 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9768 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9769 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9770 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9771 { STATS_OFFSET32(pause_frames_sent_hi),
9772 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9775 #define IS_PORT_STAT(i) \
9776 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9777 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9778 #define IS_E1HMF_MODE_STAT(bp) \
9779 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
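/* IS_E1HMF_MODE_STAT() hides the port-wide counters when the device is
 * in E1H multi-function mode (several functions share the port, so
 * port statistics are not meaningful per function); setting
 * BNX2X_MSG_STATS in msglevel overrides this for debugging.  The
 * string and value loops below apply the same filter so names and
 * numbers stay in step.
 */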
9781 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9783 struct bnx2x *bp = netdev_priv(dev);
9786 switch (stringset) {
9790 for_each_queue(bp, i) {
9791 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9792 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9793 bnx2x_q_stats_arr[j].string, i);
9794 k += BNX2X_NUM_Q_STATS;
9796 if (IS_E1HMF_MODE_STAT(bp))
9798 for (j = 0; j < BNX2X_NUM_STATS; j++)
9799 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9800 bnx2x_stats_arr[j].string);
9802 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9803 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9805 strcpy(buf + j*ETH_GSTRING_LEN,
9806 bnx2x_stats_arr[i].string);
9813 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9818 static int bnx2x_get_stats_count(struct net_device *dev)
9820 struct bnx2x *bp = netdev_priv(dev);
9824 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9825 if (!IS_E1HMF_MODE_STAT(bp))
9826 num_stats += BNX2X_NUM_STATS;
9828 if (IS_E1HMF_MODE_STAT(bp)) {
9830 for (i = 0; i < BNX2X_NUM_STATS; i++)
9831 if (IS_FUNC_STAT(i))
9834 num_stats = BNX2X_NUM_STATS;
9840 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9841 struct ethtool_stats *stats, u64 *buf)
9843 struct bnx2x *bp = netdev_priv(dev);
9844 u32 *hw_stats, *offset;
9849 for_each_queue(bp, i) {
9850 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9851 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9852 if (bnx2x_q_stats_arr[j].size == 0) {
9853 /* skip this counter */
9857 offset = (hw_stats +
9858 bnx2x_q_stats_arr[j].offset);
9859 if (bnx2x_q_stats_arr[j].size == 4) {
9860 /* 4-byte counter */
9861 buf[k + j] = (u64) *offset;
9864 /* 8-byte counter */
9865 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9867 k += BNX2X_NUM_Q_STATS;
9869 if (IS_E1HMF_MODE_STAT(bp))
9871 hw_stats = (u32 *)&bp->eth_stats;
9872 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9873 if (bnx2x_stats_arr[j].size == 0) {
9874 /* skip this counter */
9878 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9879 if (bnx2x_stats_arr[j].size == 4) {
9880 /* 4-byte counter */
9881 buf[k + j] = (u64) *offset;
9884 /* 8-byte counter */
9885 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9888 hw_stats = (u32 *)&bp->eth_stats;
9889 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9890 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9892 if (bnx2x_stats_arr[i].size == 0) {
9893 /* skip this counter */
9898 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9899 if (bnx2x_stats_arr[i].size == 4) {
9900 /* 4-byte counter */
9901 buf[j] = (u64) *offset;
9905 /* 8-byte counter */
9906 buf[j] = HILO_U64(*offset, *(offset + 1));
9912 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9914 struct bnx2x *bp = netdev_priv(dev);
9915 int port = BP_PORT(bp);
9918 if (!netif_running(dev))
9927 for (i = 0; i < (data * 2); i++) {
9929 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9930 bp->link_params.hw_led_mode,
9931 bp->link_params.chip_id);
9933 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9934 bp->link_params.hw_led_mode,
9935 bp->link_params.chip_id);
9937 msleep_interruptible(500);
9938 if (signal_pending(current))
9942 if (bp->link_vars.link_up)
9943 bnx2x_set_led(bp, port, LED_MODE_OPER,
9944 bp->link_vars.line_speed,
9945 bp->link_params.hw_led_mode,
9946 bp->link_params.chip_id);
9951 static struct ethtool_ops bnx2x_ethtool_ops = {
9952 .get_settings = bnx2x_get_settings,
9953 .set_settings = bnx2x_set_settings,
9954 .get_drvinfo = bnx2x_get_drvinfo,
9955 .get_wol = bnx2x_get_wol,
9956 .set_wol = bnx2x_set_wol,
9957 .get_msglevel = bnx2x_get_msglevel,
9958 .set_msglevel = bnx2x_set_msglevel,
9959 .nway_reset = bnx2x_nway_reset,
9960 .get_link = ethtool_op_get_link,
9961 .get_eeprom_len = bnx2x_get_eeprom_len,
9962 .get_eeprom = bnx2x_get_eeprom,
9963 .set_eeprom = bnx2x_set_eeprom,
9964 .get_coalesce = bnx2x_get_coalesce,
9965 .set_coalesce = bnx2x_set_coalesce,
9966 .get_ringparam = bnx2x_get_ringparam,
9967 .set_ringparam = bnx2x_set_ringparam,
9968 .get_pauseparam = bnx2x_get_pauseparam,
9969 .set_pauseparam = bnx2x_set_pauseparam,
9970 .get_rx_csum = bnx2x_get_rx_csum,
9971 .set_rx_csum = bnx2x_set_rx_csum,
9972 .get_tx_csum = ethtool_op_get_tx_csum,
9973 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9974 .set_flags = bnx2x_set_flags,
9975 .get_flags = ethtool_op_get_flags,
9976 .get_sg = ethtool_op_get_sg,
9977 .set_sg = ethtool_op_set_sg,
9978 .get_tso = ethtool_op_get_tso,
9979 .set_tso = bnx2x_set_tso,
9980 .self_test_count = bnx2x_self_test_count,
9981 .self_test = bnx2x_self_test,
9982 .get_strings = bnx2x_get_strings,
9983 .phys_id = bnx2x_phys_id,
9984 .get_stats_count = bnx2x_get_stats_count,
9985 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9988 /* end of ethtool_ops */
9990 /****************************************************************************
9991 * General service functions
9992 ****************************************************************************/
9994 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9998 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10002 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10003 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10004 PCI_PM_CTRL_PME_STATUS));
10006 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10007 /* delay required during transition out of D3hot */
10012 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10016 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10018 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10021 /* No more memory access after this point until
10022 * device is brought back to D0.
10032 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10036 /* Tell compiler that status block fields can change */
10038 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10039 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10041 return (fp->rx_comp_cons != rx_cons_sb);
10045 * net_device service functions
10048 static int bnx2x_poll(struct napi_struct *napi, int budget)
10050 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10052 struct bnx2x *bp = fp->bp;
10055 #ifdef BNX2X_STOP_ON_ERROR
10056 if (unlikely(bp->panic))
10060 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10061 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10062 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10064 bnx2x_update_fpsb_idx(fp);
10066 if (bnx2x_has_tx_work(fp))
10067 bnx2x_tx_int(fp, budget);
10069 if (bnx2x_has_rx_work(fp))
10070 work_done = bnx2x_rx_int(fp, budget);
10072 rmb(); /* BNX2X_HAS_WORK() reads the status block */
10074 /* must not complete if we consumed full budget */
10075 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
10077 #ifdef BNX2X_STOP_ON_ERROR
10080 napi_complete(napi);
10082 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10083 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10084 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10085 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10092 /* we split the first BD into header and data BDs
10093 * to ease the pain of our fellow microcode engineers;
10094 * we use one mapping for both BDs.
10095 * So far this has only been observed to happen
10096 * in Other Operating Systems(TM)
10098 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10099 struct bnx2x_fastpath *fp,
10100 struct eth_tx_bd **tx_bd, u16 hlen,
10101 u16 bd_prod, int nbd)
10103 struct eth_tx_bd *h_tx_bd = *tx_bd;
10104 struct eth_tx_bd *d_tx_bd;
10105 dma_addr_t mapping;
10106 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10108 /* first fix first BD */
10109 h_tx_bd->nbd = cpu_to_le16(nbd);
10110 h_tx_bd->nbytes = cpu_to_le16(hlen);
10112 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10113 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10114 h_tx_bd->addr_lo, h_tx_bd->nbd);
10116 /* now get a new data BD
10117 * (after the pbd) and fill it */
10118 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10119 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10121 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10122 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10124 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10125 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10126 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10128 /* this marks the BD as one that has no individual mapping
10129 * the FW ignores this flag in a BD not marked start
10131 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10132 DP(NETIF_MSG_TX_QUEUED,
10133 "TSO split data size is %d (%x:%x)\n",
10134 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10136 /* update the caller's tx_bd so the last BD flag is set on the data BD */
10142 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10145 csum = (u16) ~csum_fold(csum_sub(csum,
10146 csum_partial(t_header - fix, fix, 0)));
10149 csum = (u16) ~csum_fold(csum_add(csum,
10150 csum_partial(t_header, -fix, 0)));
10152 return swab16(csum);
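/* bnx2x_csum_fix() compensates for the difference between where the
 * stack started its partial checksum and where the HW expects it to
 * start (the transport header): a positive 'fix' folds the extra bytes
 * out, a negative one folds the missing bytes in, and the result is
 * byte-swapped for the parsing BD.  It is used on the non-TCP (CS_ANY)
 * offload path in bnx2x_start_xmit() below.
 */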
10155 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10159 if (skb->ip_summed != CHECKSUM_PARTIAL)
10163 if (skb->protocol == htons(ETH_P_IPV6)) {
10165 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10166 rc |= XMIT_CSUM_TCP;
10170 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10171 rc |= XMIT_CSUM_TCP;
10175 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10178 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10184 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10185 /* check if the packet requires linearization (packet is too fragmented);
10186 no need to check fragmentation if page size > 8K (there will be no
10187 violation of FW restrictions) */
10188 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10193 int first_bd_sz = 0;
10195 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10196 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10198 if (xmit_type & XMIT_GSO) {
10199 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10200 /* Check if LSO packet needs to be copied:
10201 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10202 int wnd_size = MAX_FETCH_BD - 3;
10203 /* Number of windows to check */
10204 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10209 /* Headers length */
10210 hlen = (int)(skb_transport_header(skb) - skb->data) +
10213 /* Amount of data (w/o headers) in the linear part of the SKB */
10214 first_bd_sz = skb_headlen(skb) - hlen;
10216 wnd_sum = first_bd_sz;
10218 /* Calculate the first sum - it's special */
10219 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10221 skb_shinfo(skb)->frags[frag_idx].size;
10223 /* If there is data in the linear part of the skb - check it */
10224 if (first_bd_sz > 0) {
10225 if (unlikely(wnd_sum < lso_mss)) {
10230 wnd_sum -= first_bd_sz;
10233 /* Others are easier: run through the frag list and
10234 check all windows */
10235 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10237 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10239 if (unlikely(wnd_sum < lso_mss)) {
10244 skb_shinfo(skb)->frags[wnd_idx].size;
10247 /* a non-LSO packet that is too fragmented must always be linearized */
10254 if (unlikely(to_copy))
10255 DP(NETIF_MSG_TX_QUEUED,
10256 "Linearization IS REQUIRED for %s packet. "
10257 "num_frags %d hlen %d first_bd_sz %d\n",
10258 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10259 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
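/* In short: the FW fetches at most MAX_FETCH_BD BDs per packet, so for
 * an LSO skb every window of (MAX_FETCH_BD - 3) consecutive frags
 * (counting the linear part for the first window) must carry at least
 * gso_size bytes; if any window falls short, or a non-LSO skb simply
 * has too many frags, the caller linearizes the skb with
 * skb_linearize() before building BDs.
 */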
10265 /* called with netif_tx_lock
10266 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10267 * netif_wake_queue()
10269 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10271 struct bnx2x *bp = netdev_priv(dev);
10272 struct bnx2x_fastpath *fp;
10273 struct netdev_queue *txq;
10274 struct sw_tx_bd *tx_buf;
10275 struct eth_tx_bd *tx_bd;
10276 struct eth_tx_parse_bd *pbd = NULL;
10277 u16 pkt_prod, bd_prod;
10279 dma_addr_t mapping;
10280 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10281 int vlan_off = (bp->e1hov ? 4 : 0);
10285 #ifdef BNX2X_STOP_ON_ERROR
10286 if (unlikely(bp->panic))
10287 return NETDEV_TX_BUSY;
10290 fp_index = skb_get_queue_mapping(skb);
10291 txq = netdev_get_tx_queue(dev, fp_index);
10293 fp = &bp->fp[fp_index];
10295 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10296 fp->eth_q_stats.driver_xoff++,
10297 netif_tx_stop_queue(txq);
10298 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10299 return NETDEV_TX_BUSY;
10302 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10303 " gso type %x xmit_type %x\n",
10304 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10305 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10307 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10308 /* First, check if we need to linearize the skb (due to FW
10309 restrictions). No need to check fragmentation if page size > 8K
10310 (there will be no violation of FW restrictions) */
10311 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10312 /* Statistics of linearization */
10314 if (skb_linearize(skb) != 0) {
10315 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10316 "silently dropping this SKB\n");
10317 dev_kfree_skb_any(skb);
10318 return NETDEV_TX_OK;
10324 Please read carefully. First we use one BD which we mark as start,
10325 then for TSO or xsum we have a parsing info BD,
10326 and only then we have the rest of the TSO BDs.
10327 (don't forget to mark the last one as last,
10328 and to unmap only AFTER you write to the BD ...)
10329 And above all, all pbd sizes are in words - NOT DWORDS!
10332 pkt_prod = fp->tx_pkt_prod++;
10333 bd_prod = TX_BD(fp->tx_bd_prod);
10335 /* get a tx_buf and first BD */
10336 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10337 tx_bd = &fp->tx_desc_ring[bd_prod];
10339 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10340 tx_bd->general_data = (UNICAST_ADDRESS <<
10341 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10343 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10345 /* remember the first BD of the packet */
10346 tx_buf->first_bd = fp->tx_bd_prod;
10349 DP(NETIF_MSG_TX_QUEUED,
10350 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10351 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10354 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10355 (bp->flags & HW_VLAN_TX_FLAG)) {
10356 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10357 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10361 tx_bd->vlan = cpu_to_le16(pkt_prod);
10364 /* turn on parsing and get a BD */
10365 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10366 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10368 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10371 if (xmit_type & XMIT_CSUM) {
10372 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10374 /* for now NS flag is not used in Linux */
10376 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10377 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10379 pbd->ip_hlen = (skb_transport_header(skb) -
10380 skb_network_header(skb)) / 2;
10382 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10384 pbd->total_hlen = cpu_to_le16(hlen);
10385 hlen = hlen*2 - vlan_off;
10387 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10389 if (xmit_type & XMIT_CSUM_V4)
10390 tx_bd->bd_flags.as_bitfield |=
10391 ETH_TX_BD_FLAGS_IP_CSUM;
10393 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10395 if (xmit_type & XMIT_CSUM_TCP) {
10396 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10399 s8 fix = SKB_CS_OFF(skb); /* signed! */
10401 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10402 pbd->cs_offset = fix / 2;
10404 DP(NETIF_MSG_TX_QUEUED,
10405 "hlen %d offset %d fix %d csum before fix %x\n",
10406 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10409 /* HW bug: fixup the CSUM */
10410 pbd->tcp_pseudo_csum =
10411 bnx2x_csum_fix(skb_transport_header(skb),
10414 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10415 pbd->tcp_pseudo_csum);
10419 mapping = pci_map_single(bp->pdev, skb->data,
10420 skb_headlen(skb), PCI_DMA_TODEVICE);
10422 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10423 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10424 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10425 tx_bd->nbd = cpu_to_le16(nbd);
10426 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10428 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10429 " nbytes %d flags %x vlan %x\n",
10430 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10431 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10432 le16_to_cpu(tx_bd->vlan));
10434 if (xmit_type & XMIT_GSO) {
10436 DP(NETIF_MSG_TX_QUEUED,
10437 "TSO packet len %d hlen %d total len %d tso size %d\n",
10438 skb->len, hlen, skb_headlen(skb),
10439 skb_shinfo(skb)->gso_size);
10441 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10443 if (unlikely(skb_headlen(skb) > hlen))
10444 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10447 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10448 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10449 pbd->tcp_flags = pbd_tcp_flags(skb);
10451 if (xmit_type & XMIT_GSO_V4) {
10452 pbd->ip_id = swab16(ip_hdr(skb)->id);
10453 pbd->tcp_pseudo_csum =
10454 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10455 ip_hdr(skb)->daddr,
10456 0, IPPROTO_TCP, 0));
10459 pbd->tcp_pseudo_csum =
10460 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10461 &ipv6_hdr(skb)->daddr,
10462 0, IPPROTO_TCP, 0));
10464 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10467 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10468 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10470 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10471 tx_bd = &fp->tx_desc_ring[bd_prod];
10473 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10474 frag->size, PCI_DMA_TODEVICE);
10476 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10477 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10478 tx_bd->nbytes = cpu_to_le16(frag->size);
10479 tx_bd->vlan = cpu_to_le16(pkt_prod);
10480 tx_bd->bd_flags.as_bitfield = 0;
10482 DP(NETIF_MSG_TX_QUEUED,
10483 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10484 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10485 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10488 /* now at last mark the BD as the last BD */
10489 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10491 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10492 tx_bd, tx_bd->bd_flags.as_bitfield);
10494 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10496 /* now send a tx doorbell, counting the next BD
10497 * if the packet contains or ends with it
10499 if (TX_BD_POFF(bd_prod) < nbd)
10503 DP(NETIF_MSG_TX_QUEUED,
10504 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10505 " tcp_flags %x xsum %x seq %u hlen %u\n",
10506 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10507 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10508 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10510 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10513 * Make sure that the BD data is updated before updating the producer
10514 * since FW might read the BD right after the producer is updated.
10515 * This is only applicable for weak-ordered memory model archs such
10516 * as IA-64. The following barrier is also mandatory since FW will
10517 * assume packets must have BDs.
10521 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10522 mb(); /* FW restriction: must not reorder writing nbd and packets */
10523 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10524 DOORBELL(bp, fp->index, 0);
10528 fp->tx_bd_prod += nbd;
10529 dev->trans_start = jiffies;
10531 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10532 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10533 if we put Tx into XOFF state. */
10535 netif_tx_stop_queue(txq);
10536 fp->eth_q_stats.driver_xoff++;
10537 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10538 netif_tx_wake_queue(txq);
10542 return NETDEV_TX_OK;
10545 /* called with rtnl_lock */
10546 static int bnx2x_open(struct net_device *dev)
10548 struct bnx2x *bp = netdev_priv(dev);
10550 netif_carrier_off(dev);
10552 bnx2x_set_power_state(bp, PCI_D0);
10554 return bnx2x_nic_load(bp, LOAD_OPEN);
10557 /* called with rtnl_lock */
10558 static int bnx2x_close(struct net_device *dev)
10560 struct bnx2x *bp = netdev_priv(dev);
10562 /* Unload the driver, release IRQs */
10563 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10564 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10565 if (!CHIP_REV_IS_SLOW(bp))
10566 bnx2x_set_power_state(bp, PCI_D3hot);
10571 /* called with netif_tx_lock from dev_mcast.c */
10572 static void bnx2x_set_rx_mode(struct net_device *dev)
10574 struct bnx2x *bp = netdev_priv(dev);
10575 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10576 int port = BP_PORT(bp);
10578 if (bp->state != BNX2X_STATE_OPEN) {
10579 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10583 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10585 if (dev->flags & IFF_PROMISC)
10586 rx_mode = BNX2X_RX_MODE_PROMISC;
10588 else if ((dev->flags & IFF_ALLMULTI) ||
10589 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10590 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10592 else { /* some multicasts */
10593 if (CHIP_IS_E1(bp)) {
10594 int i, old, offset;
10595 struct dev_mc_list *mclist;
10596 struct mac_configuration_cmd *config =
10597 bnx2x_sp(bp, mcast_config);
10599 for (i = 0, mclist = dev->mc_list;
10600 mclist && (i < dev->mc_count);
10601 i++, mclist = mclist->next) {
10603 config->config_table[i].
10604 cam_entry.msb_mac_addr =
10605 swab16(*(u16 *)&mclist->dmi_addr[0]);
10606 config->config_table[i].
10607 cam_entry.middle_mac_addr =
10608 swab16(*(u16 *)&mclist->dmi_addr[2]);
10609 config->config_table[i].
10610 cam_entry.lsb_mac_addr =
10611 swab16(*(u16 *)&mclist->dmi_addr[4]);
10612 config->config_table[i].cam_entry.flags =
10614 config->config_table[i].
10615 target_table_entry.flags = 0;
10616 config->config_table[i].
10617 target_table_entry.client_id = 0;
10618 config->config_table[i].
10619 target_table_entry.vlan_id = 0;
10622 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10623 config->config_table[i].
10624 cam_entry.msb_mac_addr,
10625 config->config_table[i].
10626 cam_entry.middle_mac_addr,
10627 config->config_table[i].
10628 cam_entry.lsb_mac_addr);
10630 old = config->hdr.length;
10632 for (; i < old; i++) {
10633 if (CAM_IS_INVALID(config->
10634 config_table[i])) {
10635 /* already invalidated */
10639 CAM_INVALIDATE(config->
10644 if (CHIP_REV_IS_SLOW(bp))
10645 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10647 offset = BNX2X_MAX_MULTICAST*(1 + port);
10649 config->hdr.length = i;
10650 config->hdr.offset = offset;
10651 config->hdr.client_id = bp->fp->cl_id;
10652 config->hdr.reserved1 = 0;
10654 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10655 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10656 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10659 /* Accept one or more multicasts */
10660 struct dev_mc_list *mclist;
10661 u32 mc_filter[MC_HASH_SIZE];
10662 u32 crc, bit, regidx;
10665 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10667 for (i = 0, mclist = dev->mc_list;
10668 mclist && (i < dev->mc_count);
10669 i++, mclist = mclist->next) {
10671 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10674 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10675 bit = (crc >> 24) & 0xff;
10678 mc_filter[regidx] |= (1 << bit);
10681 for (i = 0; i < MC_HASH_SIZE; i++)
10682 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10687 bp->rx_mode = rx_mode;
10688 bnx2x_set_storm_rx_mode(bp);
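/* Multicast filtering summary: E1 programs explicit CAM entries via a
 * RAMROD_CMD_ID_ETH_SET_MAC slowpath command (invalidating stale
 * entries first), while the other chips use a 256-bit hash filter
 * spread over MC_HASH_SIZE 32-bit registers.  Roughly (illustrative
 * only, matching the loop above):
 *
 *	crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
 *	bit = (crc >> 24) & 0xff;
 *	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
 */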
10691 /* called with rtnl_lock */
10692 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10694 struct sockaddr *addr = p;
10695 struct bnx2x *bp = netdev_priv(dev);
10697 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10700 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10701 if (netif_running(dev)) {
10702 if (CHIP_IS_E1(bp))
10703 bnx2x_set_mac_addr_e1(bp, 1);
10705 bnx2x_set_mac_addr_e1h(bp, 1);
10711 /* called with rtnl_lock */
10712 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10714 struct mii_ioctl_data *data = if_mii(ifr);
10715 struct bnx2x *bp = netdev_priv(dev);
10716 int port = BP_PORT(bp);
10721 data->phy_id = bp->port.phy_addr;
10725 case SIOCGMIIREG: {
10728 if (!netif_running(dev))
10731 mutex_lock(&bp->port.phy_mutex);
10732 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10733 DEFAULT_PHY_DEV_ADDR,
10734 (data->reg_num & 0x1f), &mii_regval);
10735 data->val_out = mii_regval;
10736 mutex_unlock(&bp->port.phy_mutex);
10741 if (!capable(CAP_NET_ADMIN))
10744 if (!netif_running(dev))
10747 mutex_lock(&bp->port.phy_mutex);
10748 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10749 DEFAULT_PHY_DEV_ADDR,
10750 (data->reg_num & 0x1f), data->val_in);
10751 mutex_unlock(&bp->port.phy_mutex);
10759 return -EOPNOTSUPP;
10762 /* called with rtnl_lock */
10763 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10765 struct bnx2x *bp = netdev_priv(dev);
10768 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10769 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10772 /* This does not race with packet allocation
10773 * because the actual alloc size is
10774 * only updated as part of load
10776 dev->mtu = new_mtu;
10778 if (netif_running(dev)) {
10779 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10780 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10786 static void bnx2x_tx_timeout(struct net_device *dev)
10788 struct bnx2x *bp = netdev_priv(dev);
10790 #ifdef BNX2X_STOP_ON_ERROR
10794 /* This allows the netif to be shut down gracefully before resetting */
10795 schedule_work(&bp->reset_task);
10799 /* called with rtnl_lock */
10800 static void bnx2x_vlan_rx_register(struct net_device *dev,
10801 struct vlan_group *vlgrp)
10803 struct bnx2x *bp = netdev_priv(dev);
10807 /* Set flags according to the required capabilities */
10808 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10810 if (dev->features & NETIF_F_HW_VLAN_TX)
10811 bp->flags |= HW_VLAN_TX_FLAG;
10813 if (dev->features & NETIF_F_HW_VLAN_RX)
10814 bp->flags |= HW_VLAN_RX_FLAG;
10816 if (netif_running(dev))
10817 bnx2x_set_client_config(bp);
10822 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10823 static void poll_bnx2x(struct net_device *dev)
10825 struct bnx2x *bp = netdev_priv(dev);
10827 disable_irq(bp->pdev->irq);
10828 bnx2x_interrupt(bp->pdev->irq, dev);
10829 enable_irq(bp->pdev->irq);
10833 static const struct net_device_ops bnx2x_netdev_ops = {
10834 .ndo_open = bnx2x_open,
10835 .ndo_stop = bnx2x_close,
10836 .ndo_start_xmit = bnx2x_start_xmit,
10837 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10838 .ndo_set_mac_address = bnx2x_change_mac_addr,
10839 .ndo_validate_addr = eth_validate_addr,
10840 .ndo_do_ioctl = bnx2x_ioctl,
10841 .ndo_change_mtu = bnx2x_change_mtu,
10842 .ndo_tx_timeout = bnx2x_tx_timeout,
10844 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10846 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10847 .ndo_poll_controller = poll_bnx2x,
10851 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10852 struct net_device *dev)
10857 SET_NETDEV_DEV(dev, &pdev->dev);
10858 bp = netdev_priv(dev);
10863 bp->func = PCI_FUNC(pdev->devfn);
10865 rc = pci_enable_device(pdev);
10867 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10871 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10872 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10875 goto err_out_disable;
10878 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10879 printk(KERN_ERR PFX "Cannot find second PCI device"
10880 " base address, aborting\n");
10882 goto err_out_disable;
10885 if (atomic_read(&pdev->enable_cnt) == 1) {
10886 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10888 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10890 goto err_out_disable;
10893 pci_set_master(pdev);
10894 pci_save_state(pdev);
10897 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10898 if (bp->pm_cap == 0) {
10899 printk(KERN_ERR PFX "Cannot find power management"
10900 " capability, aborting\n");
10902 goto err_out_release;
10905 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10906 if (bp->pcie_cap == 0) {
10907 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10910 goto err_out_release;
10913 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10914 bp->flags |= USING_DAC_FLAG;
10915 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10916 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10917 " failed, aborting\n");
10918 rc = -EIO;
10919 goto err_out_release;
10920 }
10922 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10923 printk(KERN_ERR PFX "System does not support DMA,"
10924 " aborting\n");
10925 rc = -EIO;
10926 goto err_out_release;
10927 }
10929 dev->mem_start = pci_resource_start(pdev, 0);
10930 dev->base_addr = dev->mem_start;
10931 dev->mem_end = pci_resource_end(pdev, 0);
10933 dev->irq = pdev->irq;
10935 bp->regview = pci_ioremap_bar(pdev, 0);
10936 if (!bp->regview) {
10937 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10938 rc = -ENOMEM;
10939 goto err_out_release;
10942 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10943 min_t(u64, BNX2X_DB_SIZE,
10944 pci_resource_len(pdev, 2)));
10945 if (!bp->doorbells) {
10946 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10947 rc = -ENOMEM;
10948 goto err_out_unmap;
10951 bnx2x_set_power_state(bp, PCI_D0);
10953 /* clean indirect addresses */
10954 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10955 PCICFG_VENDOR_ID_OFFSET);
10956 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10957 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10958 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10959 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10961 dev->watchdog_timeo = TX_TIMEOUT;
10963 dev->netdev_ops = &bnx2x_netdev_ops;
10964 dev->ethtool_ops = &bnx2x_ethtool_ops;
10965 dev->features |= NETIF_F_SG;
10966 dev->features |= NETIF_F_HW_CSUM;
10967 if (bp->flags & USING_DAC_FLAG)
10968 dev->features |= NETIF_F_HIGHDMA;
10970 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10971 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10973 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10974 dev->features |= NETIF_F_TSO6;
10976 return 0;
10978 err_out_unmap:
10979 if (bp->regview) {
10980 iounmap(bp->regview);
10981 bp->regview = NULL;
10982 }
10983 if (bp->doorbells) {
10984 iounmap(bp->doorbells);
10985 bp->doorbells = NULL;
10986 }
10988 err_out_release:
10989 if (atomic_read(&pdev->enable_cnt) == 1)
10990 pci_release_regions(pdev);
10992 err_out_disable:
10993 pci_disable_device(pdev);
10994 pci_set_drvdata(pdev, NULL);
10996 err_out:
10997 return rc;
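/*
 * Annotation (not part of the driver): bnx2x_init_dev() above uses the
 * conventional PCI bring-up order -- enable the device, check/request the
 * BARs, set the DMA mask, ioremap -- with a goto ladder that unwinds the
 * steps in reverse order on failure.  Stripped-down hypothetical sketch of
 * that shape (BAR 0 only, DMA-mask handling omitted):
 */
static void __iomem *foo_pci_setup(struct pci_dev *pdev)
{
        void __iomem *regs;
        int rc;

        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out;

        rc = pci_request_regions(pdev, "foo");
        if (rc)
                goto err_out_disable;

        pci_set_master(pdev);

        regs = pci_ioremap_bar(pdev, 0);        /* map BAR 0 */
        if (!regs)
                goto err_out_release;

        return regs;                            /* caller keeps the mapping */

err_out_release:
        pci_release_regions(pdev);
err_out_disable:
        pci_disable_device(pdev);
err_out:
        return NULL;
}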
11000 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11002 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11004 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11006 return val;
11007 }
11008 /* return value of 1=2.5GHz 2=5GHz */
11009 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11011 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11013 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11015 return val;
11016 }
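/*
 * Annotation (not part of the driver): both helpers above simply isolate a
 * bit-field from the PCIe link control/status dword with a mask and a shift.
 * Worked example with made-up field positions (the real PCICFG_LINK_* masks
 * and shifts are defined in the bnx2x register headers):
 */
#define EX_LINK_WIDTH           0x03f00000      /* example: bits 25:20 */
#define EX_LINK_WIDTH_SHIFT     20
#define EX_LINK_SPEED           0x000f0000      /* example: bits 19:16 */
#define EX_LINK_SPEED_SHIFT     16

static unsigned int ex_link_width(u32 link_status)
{
        /* e.g. 0x00810000: width field = 8, reported as "PCI-E x8" */
        return (link_status & EX_LINK_WIDTH) >> EX_LINK_WIDTH_SHIFT;
}

static unsigned int ex_link_speed(u32 link_status)
{
        /* 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2), matching the comment above */
        return (link_status & EX_LINK_SPEED) >> EX_LINK_SPEED_SHIFT;
}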
11017 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11018 const struct pci_device_id *ent)
11020 static int version_printed;
11021 struct net_device *dev = NULL;
11025 if (version_printed++ == 0)
11026 printk(KERN_INFO "%s", version);
11028 /* dev zeroed in init_etherdev */
11029 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11030 if (dev == NULL) {
11031 printk(KERN_ERR PFX "Cannot allocate net device\n");
11032 return -ENOMEM;
11033 }
11035 bp = netdev_priv(dev);
11036 bp->msglevel = debug;
11038 rc = bnx2x_init_dev(pdev, dev);
11039 if (rc < 0) {
11040 free_netdev(dev);
11041 return rc;
11042 }
11044 pci_set_drvdata(pdev, dev);
11046 rc = bnx2x_init_bp(bp);
11047 if (rc)
11048 goto init_one_exit;
11050 rc = register_netdev(dev);
11051 if (rc) {
11052 dev_err(&pdev->dev, "Cannot register net device\n");
11053 goto init_one_exit;
11054 }
11056 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11057 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11058 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11059 bnx2x_get_pcie_width(bp),
11060 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11061 dev->base_addr, bp->pdev->irq);
11062 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11064 return 0;
11065 init_one_exit:
11066 if (bp->regview)
11067 iounmap(bp->regview);
11069 if (bp->doorbells)
11070 iounmap(bp->doorbells);
11072 free_netdev(dev);
11074 if (atomic_read(&pdev->enable_cnt) == 1)
11075 pci_release_regions(pdev);
11077 pci_disable_device(pdev);
11078 pci_set_drvdata(pdev, NULL);
11080 return rc;
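/*
 * Annotation (not part of the driver): bnx2x_init_one() above is the
 * standard netdev probe flow -- allocate the net_device together with its
 * private area, initialise it, register it, and on failure unwind and
 * free_netdev().  Condensed hypothetical sketch:
 */
struct foo_nic { struct pci_dev *pdev; };

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct foo_nic *fn;
        int rc;

        dev = alloc_etherdev(sizeof(*fn));      /* private area follows dev */
        if (!dev)
                return -ENOMEM;

        fn = netdev_priv(dev);
        fn->pdev = pdev;
        pci_set_drvdata(pdev, dev);

        /* ... PCI/HW setup as in bnx2x_init_dev() would go here ... */

        rc = register_netdev(dev);              /* makes the device visible */
        if (rc) {
                pci_set_drvdata(pdev, NULL);
                free_netdev(dev);               /* nothing was registered */
        }
        return rc;
}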
11083 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11085 struct net_device *dev = pci_get_drvdata(pdev);
11086 struct bnx2x *bp;
11088 if (!dev) {
11089 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11090 return;
11091 }
11092 bp = netdev_priv(dev);
11094 unregister_netdev(dev);
11096 if (bp->regview)
11097 iounmap(bp->regview);
11099 if (bp->doorbells)
11100 iounmap(bp->doorbells);
11102 free_netdev(dev);
11104 if (atomic_read(&pdev->enable_cnt) == 1)
11105 pci_release_regions(pdev);
11107 pci_disable_device(pdev);
11108 pci_set_drvdata(pdev, NULL);
11111 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11113 struct net_device *dev = pci_get_drvdata(pdev);
11117 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11120 bp = netdev_priv(dev);
11122 rtnl_lock();
11124 pci_save_state(pdev);
11126 if (!netif_running(dev)) {
11127 rtnl_unlock();
11128 return 0;
11129 }
11131 netif_device_detach(dev);
11133 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11135 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11142 static int bnx2x_resume(struct pci_dev *pdev)
11144 struct net_device *dev = pci_get_drvdata(pdev);
11149 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11152 bp = netdev_priv(dev);
11154 rtnl_lock();
11156 pci_restore_state(pdev);
11158 if (!netif_running(dev)) {
11159 rtnl_unlock();
11160 return 0;
11161 }
11163 bnx2x_set_power_state(bp, PCI_D0);
11164 netif_device_attach(dev);
11166 rc = bnx2x_nic_load(bp, LOAD_OPEN);
11168 rtnl_unlock();
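/*
 * Annotation (not part of the driver): the legacy PCI suspend/resume hooks
 * above follow the usual shape -- detach the netdev and unload the NIC when
 * suspending, restore PCI state and reload when resuming.  Hypothetical
 * sketch with foo_down()/foo_up() standing in for the driver's own
 * unload/load routines:
 */
static void foo_down(struct net_device *dev) { }
static int foo_up(struct net_device *dev) { return 0; }

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        pci_save_state(pdev);
        if (!netif_running(dev))
                return 0;

        netif_device_detach(dev);               /* quiesce the stack first */
        foo_down(dev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int foo_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        pci_restore_state(pdev);
        if (!netif_running(dev))
                return 0;

        pci_set_power_state(pdev, PCI_D0);
        netif_device_attach(dev);
        return foo_up(dev);
}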
11173 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11177 bp->state = BNX2X_STATE_ERROR;
11179 bp->rx_mode = BNX2X_RX_MODE_NONE;
11181 bnx2x_netif_stop(bp, 0);
11183 del_timer_sync(&bp->timer);
11184 bp->stats_state = STATS_STATE_DISABLED;
11185 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11188 bnx2x_free_irq(bp);
11190 if (CHIP_IS_E1(bp)) {
11191 struct mac_configuration_cmd *config =
11192 bnx2x_sp(bp, mcast_config);
11194 for (i = 0; i < config->hdr.length; i++)
11195 CAM_INVALIDATE(config->config_table[i]);
11198 /* Free SKBs, SGEs, TPA pool and driver internals */
11199 bnx2x_free_skbs(bp);
11200 for_each_rx_queue(bp, i)
11201 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11202 for_each_rx_queue(bp, i)
11203 netif_napi_del(&bnx2x_fp(bp, i, napi));
11204 bnx2x_free_mem(bp);
11206 bp->state = BNX2X_STATE_CLOSED;
11208 netif_carrier_off(bp->dev);
11213 static void bnx2x_eeh_recover(struct bnx2x *bp)
11217 mutex_init(&bp->port.phy_mutex);
11219 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11220 bp->link_params.shmem_base = bp->common.shmem_base;
11221 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11223 if (!bp->common.shmem_base ||
11224 (bp->common.shmem_base < 0xA0000) ||
11225 (bp->common.shmem_base >= 0xC0000)) {
11226 BNX2X_DEV_INFO("MCP not active\n");
11227 bp->flags |= NO_MCP_FLAG;
11228 return;
11229 }
11231 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11232 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11233 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11234 BNX2X_ERR("BAD MCP validity signature\n");
11236 if (!BP_NOMCP(bp)) {
11237 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11238 & DRV_MSG_SEQ_NUMBER_MASK);
11239 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
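/*
 * Annotation (not part of the driver): the validity test above requires both
 * SHR_MEM_VALIDITY_DEV_INFO and SHR_MEM_VALIDITY_MB to be set, which is why
 * the masked value is compared against the full mask instead of merely being
 * tested for non-zero.  Plain-C illustration with example bit values:
 */
#define EX_VALID_DEV_INFO       0x1             /* example bit */
#define EX_VALID_MB             0x2             /* example bit */

static int ex_shmem_valid(u32 val)
{
        u32 required = EX_VALID_DEV_INFO | EX_VALID_MB;

        /* (val & required) != 0 would pass with only one bit set;
         * comparing against 'required' demands both bits. */
        return (val & required) == required;
}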
11243 /**
11244 * bnx2x_io_error_detected - called when PCI error is detected
11245 * @pdev: Pointer to PCI device
11246 * @state: The current pci connection state
11247 *
11248 * This function is called after a PCI bus error affecting
11249 * this device has been detected.
11250 */
11251 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11252 pci_channel_state_t state)
11254 struct net_device *dev = pci_get_drvdata(pdev);
11255 struct bnx2x *bp = netdev_priv(dev);
11259 netif_device_detach(dev);
11261 if (netif_running(dev))
11262 bnx2x_eeh_nic_unload(bp);
11264 pci_disable_device(pdev);
11268 /* Request a slot reset */
11269 return PCI_ERS_RESULT_NEED_RESET;
11272 /**
11273 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11274 * @pdev: Pointer to PCI device
11275 *
11276 * Restart the card from scratch, as if from a cold-boot.
11277 */
11278 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11280 struct net_device *dev = pci_get_drvdata(pdev);
11281 struct bnx2x *bp = netdev_priv(dev);
11285 if (pci_enable_device(pdev)) {
11286 dev_err(&pdev->dev,
11287 "Cannot re-enable PCI device after reset\n");
11289 return PCI_ERS_RESULT_DISCONNECT;
11292 pci_set_master(pdev);
11293 pci_restore_state(pdev);
11295 if (netif_running(dev))
11296 bnx2x_set_power_state(bp, PCI_D0);
11300 return PCI_ERS_RESULT_RECOVERED;
11303 /**
11304 * bnx2x_io_resume - called when traffic can start flowing again
11305 * @pdev: Pointer to PCI device
11306 *
11307 * This callback is called when the error recovery driver tells us that
11308 * it is OK to resume normal operation.
11309 */
11310 static void bnx2x_io_resume(struct pci_dev *pdev)
11312 struct net_device *dev = pci_get_drvdata(pdev);
11313 struct bnx2x *bp = netdev_priv(dev);
11317 bnx2x_eeh_recover(bp);
11319 if (netif_running(dev))
11320 bnx2x_nic_load(bp, LOAD_NORMAL);
11322 netif_device_attach(dev);
11327 static struct pci_error_handlers bnx2x_err_handler = {
11328 .error_detected = bnx2x_io_error_detected,
11329 .slot_reset = bnx2x_io_slot_reset,
11330 .resume = bnx2x_io_resume,
11331 };
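/*
 * Annotation (not part of the driver): once these handlers are registered
 * through the pci_driver below, the PCI error-recovery core (EEH on pSeries,
 * AER elsewhere) walks the device through error_detected -> slot_reset ->
 * resume, and the driver steers recovery with pci_ers_result_t codes.
 * Hypothetical minimal handler set:
 */
static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
                                           pci_channel_state_t state)
{
        netif_device_detach(pci_get_drvdata(pdev));
        pci_disable_device(pdev);
        return PCI_ERS_RESULT_NEED_RESET;       /* ask the core for a reset */
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
        if (pci_enable_device(pdev))
                return PCI_ERS_RESULT_DISCONNECT;  /* give up on the device */
        pci_set_master(pdev);
        pci_restore_state(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

static void foo_io_resume(struct pci_dev *pdev)
{
        netif_device_attach(pci_get_drvdata(pdev)); /* traffic may flow again */
}

static struct pci_error_handlers foo_err_handler = {
        .error_detected = foo_error_detected,
        .slot_reset     = foo_slot_reset,
        .resume         = foo_io_resume,
};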
11333 static struct pci_driver bnx2x_pci_driver = {
11334 .name = DRV_MODULE_NAME,
11335 .id_table = bnx2x_pci_tbl,
11336 .probe = bnx2x_init_one,
11337 .remove = __devexit_p(bnx2x_remove_one),
11338 .suspend = bnx2x_suspend,
11339 .resume = bnx2x_resume,
11340 .err_handler = &bnx2x_err_handler,
11341 };
11343 static int __init bnx2x_init(void)
11345 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11346 if (bnx2x_wq == NULL) {
11347 printk(KERN_ERR PFX "Cannot create workqueue\n");
11348 return -ENOMEM;
11349 }
11351 return pci_register_driver(&bnx2x_pci_driver);
11354 static void __exit bnx2x_cleanup(void)
11356 pci_unregister_driver(&bnx2x_pci_driver);
11358 destroy_workqueue(bnx2x_wq);
11361 module_init(bnx2x_init);
11362 module_exit(bnx2x_cleanup);
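/*
 * Annotation (not part of the driver): bnx2x_init() creates the slow-path
 * workqueue before registering the PCI driver so that probe can already
 * queue work, and bnx2x_cleanup() reverses the order -- unregister first so
 * no new work can be queued, then destroy the (now idle) workqueue.
 * Hypothetical skeleton of the same ordering, with the extra cleanup on
 * registration failure that a new driver would normally add:
 */
static struct workqueue_struct *foo_wq;
static struct pci_driver foo_pci_driver;        /* assumed filled in elsewhere */

static int __init foo_init(void)
{
        int rc;

        foo_wq = create_singlethread_workqueue("foo");
        if (!foo_wq)
                return -ENOMEM;

        rc = pci_register_driver(&foo_pci_driver);
        if (rc)
                destroy_workqueue(foo_wq);      /* don't leak the wq */
        return rc;
}

static void __exit foo_exit(void)
{
        pci_unregister_driver(&foo_pci_driver); /* no more users of foo_wq */
        destroy_workqueue(foo_wq);
}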