1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
58 #include "bnx2x_init.h"
60 #define DRV_MODULE_VERSION "1.45.26"
61 #define DRV_MODULE_RELDATE "2009/01/26"
62 #define BNX2X_BC_VER 0x040200
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int disable_tpa;
80 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
83 module_param(disable_tpa, int, 0);
84 module_param(use_inta, int, 0);
85 module_param(poll, int, 0);
86 module_param(debug, int, 0);
87 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
88 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
89 MODULE_PARM_DESC(poll, "use polling (for debug)");
90 MODULE_PARM_DESC(debug, "default debug msglevel");
93 module_param(use_multi, int, 0);
94 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
96 static struct workqueue_struct *bnx2x_wq;
98 enum bnx2x_board_type {
104 /* indexed by board_type, above */
107 } board_info[] __devinitdata = {
108 { "Broadcom NetXtreme II BCM57710 XGb" },
109 { "Broadcom NetXtreme II BCM57711 XGb" },
110 { "Broadcom NetXtreme II BCM57711E XGb" }
114 static const struct pci_device_id bnx2x_pci_tbl[] = {
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
124 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
126 /****************************************************************************
127 * General service functions
128 ****************************************************************************/
131 /* used only at init; locking is done by mcp */
133 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138 PCICFG_VENDOR_ID_OFFSET);
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 PCICFG_VENDOR_ID_OFFSET);
153 static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 /* copy command into DMAE command memory and set DMAE command go */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
171 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
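/* Write len32 dwords from host memory (dma_addr) to GRC address dst_addr
 * through the DMAE block: the command is built under dmae_mutex, posted via
 * bnx2x_post_dmae() and completion is detected by polling the wb_comp
 * slowpath word for DMAE_COMP_VAL.  If DMAE is not ready yet, the data is
 * written with indirect register accesses instead.
 */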
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180 struct dmae_command *dmae = &bp->init_dmae;
181 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
184 if (!bp->dmae_ready) {
185 u32 *data = bnx2x_sp(bp, wb_data[0]);
187 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
188 " using indirect\n", dst_addr, len32);
189 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
193 mutex_lock(&bp->dmae_mutex);
195 memset(dmae, 0, sizeof(struct dmae_command));
197 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
201 DMAE_CMD_ENDIANITY_B_DW_SWAP |
203 DMAE_CMD_ENDIANITY_DW_SWAP |
205 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207 dmae->src_addr_lo = U64_LO(dma_addr);
208 dmae->src_addr_hi = U64_HI(dma_addr);
209 dmae->dst_addr_lo = dst_addr >> 2;
210 dmae->dst_addr_hi = 0;
212 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214 dmae->comp_val = DMAE_COMP_VAL;
216 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
218 "dst_addr [%x:%08x (%08x)]\n"
219 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
220 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
229 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
233 while (*wb_comp != DMAE_COMP_VAL) {
234 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237 BNX2X_ERR("dmae timeout!\n");
241 /* adjust delay for emulation/FPGA */
242 if (CHIP_REV_IS_SLOW(bp))
248 mutex_unlock(&bp->dmae_mutex);
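/* Counterpart of bnx2x_write_dmae(): read len32 dwords from GRC address
 * src_addr into the slowpath wb_data buffer, again polling wb_comp for
 * completion and falling back to indirect reads while DMAE is not ready.
 */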
251 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
253 struct dmae_command *dmae = &bp->init_dmae;
254 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
257 if (!bp->dmae_ready) {
258 u32 *data = bnx2x_sp(bp, wb_data[0]);
261 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
262 " using indirect\n", src_addr, len32);
263 for (i = 0; i < len32; i++)
264 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
268 mutex_lock(&bp->dmae_mutex);
270 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271 memset(dmae, 0, sizeof(struct dmae_command));
273 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
277 DMAE_CMD_ENDIANITY_B_DW_SWAP |
279 DMAE_CMD_ENDIANITY_DW_SWAP |
281 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
283 dmae->src_addr_lo = src_addr >> 2;
284 dmae->src_addr_hi = 0;
285 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
288 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
290 dmae->comp_val = DMAE_COMP_VAL;
292 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
293 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
294 "dst_addr [%x:%08x (%08x)]\n"
295 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
296 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
302 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
306 while (*wb_comp != DMAE_COMP_VAL) {
309 BNX2X_ERR("dmae timeout!\n");
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
319 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
320 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
323 mutex_unlock(&bp->dmae_mutex);
326 /* used only for slowpath so not inlined */
327 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
331 wb_write[0] = val_hi;
332 wb_write[1] = val_lo;
333 REG_WR_DMAE(bp, reg, wb_write, 2);
337 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
341 REG_RD_DMAE(bp, reg, wb_data, 2);
343 return HILO_U64(wb_data[0], wb_data[1]);
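/* Scan the assert lists of the XSTORM/TSTORM/CSTORM/USTORM processors and
 * print every entry whose first word is not COMMON_ASM_INVALID_ASSERT_OPCODE,
 * together with the last asserted index of each storm.
 */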
347 static int bnx2x_mc_assert(struct bnx2x *bp)
351 u32 row0, row1, row2, row3;
354 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355 XSTORM_ASSERT_LIST_INDEX_OFFSET);
357 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
359 /* print the asserts */
360 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
362 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363 XSTORM_ASSERT_LIST_OFFSET(i));
364 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
371 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373 " 0x%08x 0x%08x 0x%08x\n",
374 i, row3, row2, row1, row0);
382 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383 TSTORM_ASSERT_LIST_INDEX_OFFSET);
385 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
387 /* print the asserts */
388 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
390 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391 TSTORM_ASSERT_LIST_OFFSET(i));
392 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
399 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401 " 0x%08x 0x%08x 0x%08x\n",
402 i, row3, row2, row1, row0);
410 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411 CSTORM_ASSERT_LIST_INDEX_OFFSET);
413 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
415 /* print the asserts */
416 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
418 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419 CSTORM_ASSERT_LIST_OFFSET(i));
420 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
427 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429 " 0x%08x 0x%08x 0x%08x\n",
430 i, row3, row2, row1, row0);
438 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439 USTORM_ASSERT_LIST_INDEX_OFFSET);
441 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
443 /* print the asserts */
444 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
446 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447 USTORM_ASSERT_LIST_OFFSET(i));
448 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i) + 4);
450 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 8);
452 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 12);
455 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457 " 0x%08x 0x%08x 0x%08x\n",
458 i, row3, row2, row1, row0);
468 static void bnx2x_fw_dump(struct bnx2x *bp)
474 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
475 mark = ((mark + 0x3) & ~0x3);
476 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
478 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479 for (word = 0; word < 8; word++)
480 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483 printk(KERN_CONT "%s", (char *)data);
485 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486 for (word = 0; word < 8; word++)
487 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490 printk(KERN_CONT "%s", (char *)data);
492 printk("\n" KERN_ERR PFX "end of fw dump\n");
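/* Dump the driver's view of every fastpath queue (Tx/Rx/CQ/SGE producers and
 * consumers plus a window of descriptors around the current consumer),
 * followed by the default status block indices.  Statistics are disabled
 * first so the dump does not race the statistics state machine.
 */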
495 static void bnx2x_panic_dump(struct bnx2x *bp)
500 bp->stats_state = STATS_STATE_DISABLED;
501 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
503 BNX2X_ERR("begin crash dump -----------------\n");
505 for_each_queue(bp, i) {
506 struct bnx2x_fastpath *fp = &bp->fp[i];
507 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
509 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
510 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
511 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
512 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
513 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
514 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
515 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
516 fp->rx_bd_prod, fp->rx_bd_cons,
517 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
520 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
521 " *sb_u_idx(%x) bd data(%x,%x)\n",
522 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523 fp->status_blk->c_status_block.status_block_index,
525 fp->status_blk->u_status_block.status_block_index,
526 hw_prods->packets_prod, hw_prods->bds_prod);
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 for (j = start; j < end; j++) {
531 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
533 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 sw_bd->skb, sw_bd->first_bd);
537 start = TX_BD(fp->tx_bd_cons - 10);
538 end = TX_BD(fp->tx_bd_cons + 254);
539 for (j = start; j < end; j++) {
540 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
542 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
546 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 for (j = start; j < end; j++) {
549 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
552 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
553 j, rx_bd[1], rx_bd[0], sw_bd->skb);
556 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE(fp->last_max_sge);
558 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
562 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
563 j, rx_sge[1], rx_sge[0], sw_page->page);
566 start = RCQ_BD(fp->rx_comp_cons - 10);
567 end = RCQ_BD(fp->rx_comp_cons + 503);
568 for (j = start; j < end; j++) {
569 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
571 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 j, cqe[0], cqe[1], cqe[2], cqe[3]);
576 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
577 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
578 " spq_prod_idx(%u)\n",
579 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
580 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
584 BNX2X_ERR("end crash dump -----------------\n");
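/* Program the HC configuration register of this port: in MSI-X mode the
 * single-ISR path is disabled and MSI/MSI-X plus attention bits are enabled,
 * while in INTA mode everything is routed through the single interrupt line.
 * On E1H the leading/trailing edge registers are also written so that NIG
 * attention and this VN's bit are latched.
 */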
587 static void bnx2x_int_enable(struct bnx2x *bp)
589 int port = BP_PORT(bp);
590 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591 u32 val = REG_RD(bp, addr);
592 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
595 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
596 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
597 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
599 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
600 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
601 HC_CONFIG_0_REG_INT_LINE_EN_0 |
602 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
604 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
605 val, port, addr, msix);
607 REG_WR(bp, addr, val);
609 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
612 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
613 val, port, addr, msix);
615 REG_WR(bp, addr, val);
617 if (CHIP_IS_E1H(bp)) {
618 /* init leading/trailing edge */
620 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
622 /* enable nig attention */
627 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
628 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
632 static void bnx2x_int_disable(struct bnx2x *bp)
634 int port = BP_PORT(bp);
635 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
636 u32 val = REG_RD(bp, addr);
638 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
639 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
640 HC_CONFIG_0_REG_INT_LINE_EN_0 |
641 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
646 REG_WR(bp, addr, val);
647 if (REG_RD(bp, addr) != val)
648 BNX2X_ERR("BUG! proper val not read from IGU!\n");
651 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
653 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
656 /* disable interrupt handling */
657 atomic_inc(&bp->intr_sem);
659 /* prevent the HW from sending interrupts */
660 bnx2x_int_disable(bp);
662 /* make sure all ISRs are done */
664 for_each_queue(bp, i)
665 synchronize_irq(bp->msix_table[i].vector);
667 /* one more for the Slow Path IRQ */
668 synchronize_irq(bp->msix_table[i].vector);
670 synchronize_irq(bp->pdev->irq);
672 /* make sure sp_task is not running */
673 cancel_delayed_work(&bp->sp_task);
674 flush_workqueue(bnx2x_wq);
680 /* General service functions */
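/* Acknowledge a status block towards the IGU: sb_id, storm id, the new index
 * and the interrupt mode (op) are packed into a single 32-bit write to the
 * per-port HC command register.
 */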
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
684 u8 storm, u16 index, u8 op, u8 update)
686 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687 COMMAND_REG_INT_ACK);
688 struct igu_ack_register igu_ack;
690 igu_ack.status_block_index = index;
691 igu_ack.sb_id_and_flags =
692 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
693 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
697 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698 (*(u32 *)&igu_ack), hc_addr);
699 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
702 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
704 struct host_status_block *fpsb = fp->status_blk;
707 barrier(); /* status block is written to by the chip */
708 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
712 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
719 static u16 bnx2x_ack_int(struct bnx2x *bp)
721 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722 COMMAND_REG_SIMD_MASK);
723 u32 result = REG_RD(bp, hc_addr);
725 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
733 /* fast path service functions */
736 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
740 /* Tell compiler that status block fields can change */
742 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
743 return (fp->tx_pkt_cons != tx_cons_sb);
746 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
748 /* Tell compiler that consumer and producer can change */
750 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
754 /* free skb in the packet ring at pos idx
755  * return idx of last bd freed */
757 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
760 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
761 struct eth_tx_bd *tx_bd;
762 struct sk_buff *skb = tx_buf->skb;
763 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
766 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
770 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
771 tx_bd = &fp->tx_desc_ring[bd_idx];
772 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
773 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
775 nbd = le16_to_cpu(tx_bd->nbd) - 1;
776 new_cons = nbd + tx_buf->first_bd;
777 #ifdef BNX2X_STOP_ON_ERROR
778 if (nbd > (MAX_SKB_FRAGS + 2)) {
779 BNX2X_ERR("BAD nbd!\n");
784 /* Skip a parse bd and the TSO split header bd
785 since they have no mapping */
787 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
789 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
790 ETH_TX_BD_FLAGS_TCP_CSUM |
791 ETH_TX_BD_FLAGS_SW_LSO)) {
793 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
794 tx_bd = &fp->tx_desc_ring[bd_idx];
795 /* is this a TSO split header bd? */
796 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
798 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
805 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
806 tx_bd = &fp->tx_desc_ring[bd_idx];
807 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
808 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
810 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
816 tx_buf->first_bd = 0;
822 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
828 barrier(); /* Tell compiler that prod and cons can change */
829 prod = fp->tx_bd_prod;
830 cons = fp->tx_bd_cons;
832 /* NUM_TX_RINGS = number of "next-page" entries
833 It will be used as a threshold */
834 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
836 #ifdef BNX2X_STOP_ON_ERROR
838 WARN_ON(used > fp->bp->tx_ring_size);
839 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
842 return (s16)(fp->bp->tx_ring_size) - used;
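/* Reclaim completed Tx work: walk from the software consumer up to the
 * consumer index reported in the status block, free each packet's BDs via
 * bnx2x_free_tx_pkt(), and wake the netdev queue if it was stopped and
 * enough BDs became available again.
 */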
845 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
847 struct bnx2x *bp = fp->bp;
848 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
851 #ifdef BNX2X_STOP_ON_ERROR
852 if (unlikely(bp->panic))
856 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
857 sw_cons = fp->tx_pkt_cons;
859 while (sw_cons != hw_cons) {
862 pkt_cons = TX_BD(sw_cons);
864 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
866 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
867 hw_cons, sw_cons, pkt_cons);
869 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
871 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
874 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
882 fp->tx_pkt_cons = sw_cons;
883 fp->tx_bd_cons = bd_cons;
885 /* Need to make the tx_cons update visible to start_xmit()
886 * before checking for netif_queue_stopped().  Without the
887 * memory barrier, there is a small possibility that start_xmit()
888 * will miss it and cause the queue to be stopped forever. */
892 /* TBD need a thresh? */
893 if (unlikely(netif_queue_stopped(bp->dev))) {
895 netif_tx_lock(bp->dev);
897 if (netif_queue_stopped(bp->dev) &&
898 (bp->state == BNX2X_STATE_OPEN) &&
899 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
900 netif_wake_queue(bp->dev);
902 netif_tx_unlock(bp->dev);
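/* Handle a slowpath (ramrod) completion that arrived on the fastpath CQ:
 * advance either the per-queue state (client setup/halt) or the global
 * bp->state machine according to the command echoed in the CQE.
 */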
907 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
908 union eth_rx_cqe *rr_cqe)
910 struct bnx2x *bp = fp->bp;
911 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
912 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
915 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
916 FP_IDX(fp), cid, command, bp->state,
917 rr_cqe->ramrod_cqe.ramrod_type);
922 switch (command | fp->state) {
923 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
924 BNX2X_FP_STATE_OPENING):
925 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
927 fp->state = BNX2X_FP_STATE_OPEN;
930 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
931 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
933 fp->state = BNX2X_FP_STATE_HALTED;
937 BNX2X_ERR("unexpected MC reply (%d) "
938 "fp->state is %x\n", command, fp->state);
941 mb(); /* force bnx2x_wait_ramrod() to see the change */
945 switch (command | bp->state) {
946 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
947 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
948 bp->state = BNX2X_STATE_OPEN;
951 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
952 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
953 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
954 fp->state = BNX2X_FP_STATE_HALTED;
957 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
958 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
959 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
963 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
964 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
965 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
966 bp->set_mac_pending = 0;
969 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
970 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
974 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
978 mb(); /* force bnx2x_wait_ramrod() to see the change */
981 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
982 struct bnx2x_fastpath *fp, u16 index)
984 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
985 struct page *page = sw_buf->page;
986 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
988 /* Skip "next page" elements */
992 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
993 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
994 __free_pages(page, PAGES_PER_SGE_SHIFT);
1001 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1002 struct bnx2x_fastpath *fp, int last)
1006 for (i = 0; i < last; i++)
1007 bnx2x_free_rx_sge(bp, fp, i);
1010 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1011 struct bnx2x_fastpath *fp, u16 index)
1013 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1014 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1015 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1018 if (unlikely(page == NULL))
1021 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1022 PCI_DMA_FROMDEVICE);
1023 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1024 __free_pages(page, PAGES_PER_SGE_SHIFT);
1028 sw_buf->page = page;
1029 pci_unmap_addr_set(sw_buf, mapping, mapping);
1031 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1032 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1037 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1038 struct bnx2x_fastpath *fp, u16 index)
1040 struct sk_buff *skb;
1041 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1042 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1045 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1046 if (unlikely(skb == NULL))
1049 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1050 PCI_DMA_FROMDEVICE);
1051 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1057 pci_unmap_addr_set(rx_buf, mapping, mapping);
1059 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1060 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1065 /* note that we are not allocating a new skb,
1066 * we are just moving one from cons to prod
1067 * we are not creating a new mapping,
1068 * so there is no need to check for dma_mapping_error(). */
1070 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1071 struct sk_buff *skb, u16 cons, u16 prod)
1073 struct bnx2x *bp = fp->bp;
1074 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1075 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1076 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1077 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1079 pci_dma_sync_single_for_device(bp->pdev,
1080 pci_unmap_addr(cons_rx_buf, mapping),
1081 bp->rx_offset + RX_COPY_THRESH,
1082 PCI_DMA_FROMDEVICE);
1084 prod_rx_buf->skb = cons_rx_buf->skb;
1085 pci_unmap_addr_set(prod_rx_buf, mapping,
1086 pci_unmap_addr(cons_rx_buf, mapping));
1087 *prod_bd = *cons_bd;
1090 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1093 u16 last_max = fp->last_max_sge;
1095 if (SUB_S16(idx, last_max) > 0)
1096 fp->last_max_sge = idx;
1099 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1103 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1104 int idx = RX_SGE_CNT * i - 1;
1106 for (j = 0; j < 2; j++) {
1107 SGE_MASK_CLEAR_BIT(fp, idx);
1113 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1114 struct eth_fast_path_rx_cqe *fp_cqe)
1116 struct bnx2x *bp = fp->bp;
1117 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1118 le16_to_cpu(fp_cqe->len_on_bd)) >>
1120 u16 last_max, last_elem, first_elem;
1127 /* First mark all used pages */
1128 for (i = 0; i < sge_len; i++)
1129 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1131 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1132 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1134 /* Here we assume that the last SGE index is the biggest */
1135 prefetch((void *)(fp->sge_mask));
1136 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1138 last_max = RX_SGE(fp->last_max_sge);
1139 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1140 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1142 /* If ring is not full */
1143 if (last_elem + 1 != first_elem)
1146 /* Now update the prod */
1147 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1148 if (likely(fp->sge_mask[i]))
1151 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1152 delta += RX_SGE_MASK_ELEM_SZ;
1156 fp->rx_sge_prod += delta;
1157 /* clear page-end entries */
1158 bnx2x_clear_sge_mask_next_elems(fp);
1161 DP(NETIF_MSG_RX_STATUS,
1162 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1163 fp->last_max_sge, fp->rx_sge_prod);
1166 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1168 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1169 memset(fp->sge_mask, 0xff,
1170 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1172 /* Clear the two last indices in the page to 1:
1173 these are the indices that correspond to the "next" element,
1174 hence will never be indicated and should be removed from
1175 the calculations. */
1176 bnx2x_clear_sge_mask_next_elems(fp);
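/* TPA start: move the pre-allocated skb from tpa_pool[queue] onto the
 * producer BD and park the skb that owns the consumer BD in the pool, still
 * mapped, until the corresponding TPA end CQE is processed.
 */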
1179 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1180 struct sk_buff *skb, u16 cons, u16 prod)
1182 struct bnx2x *bp = fp->bp;
1183 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1184 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1185 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1188 /* move empty skb from pool to prod and map it */
1189 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1190 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1191 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1192 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1194 /* move partial skb from cons to pool (don't unmap yet) */
1195 fp->tpa_pool[queue] = *cons_rx_buf;
1197 /* mark bin state as start - print error if current state != stop */
1198 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1199 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1201 fp->tpa_state[queue] = BNX2X_TPA_START;
1203 /* point prod_bd to new skb */
1204 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1205 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1207 #ifdef BNX2X_STOP_ON_ERROR
1208 fp->tpa_queue_used |= (1 << queue);
1209 #ifdef __powerpc64__
1210 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1212 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1214 fp->tpa_queue_used);
1218 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1219 struct sk_buff *skb,
1220 struct eth_fast_path_rx_cqe *fp_cqe,
1223 struct sw_rx_page *rx_pg, old_rx_pg;
1224 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1225 u32 i, frag_len, frag_size, pages;
1229 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1230 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1232 /* This is needed in order to enable forwarding support */
1234 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1235 max(frag_size, (u32)len_on_bd));
1237 #ifdef BNX2X_STOP_ON_ERROR
1239 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1240 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1242 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1243 fp_cqe->pkt_len, len_on_bd);
1249 /* Run through the SGL and compose the fragmented skb */
1250 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1251 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1253 /* FW gives the indices of the SGE as if the ring is an array
1254 (meaning that "next" element will consume 2 indices) */
1255 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1256 rx_pg = &fp->rx_page_ring[sge_idx];
1259 /* If we fail to allocate a substitute page, we simply stop
1260 where we are and drop the whole packet */
1261 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1262 if (unlikely(err)) {
1263 bp->eth_stats.rx_skb_alloc_failed++;
1267 /* Unmap the page as we are going to pass it to the stack */
1268 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1269 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1271 /* Add one frag and update the appropriate fields in the skb */
1272 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1274 skb->data_len += frag_len;
1275 skb->truesize += frag_len;
1276 skb->len += frag_len;
1278 frag_size -= frag_len;
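/* TPA end: unmap the aggregated skb, fix up its IP checksum, attach the SGE
 * pages collected by bnx2x_fill_frag_skb() and hand the packet to the stack;
 * the pool slot is refilled with a newly allocated skb, or the packet is
 * dropped when that allocation fails.
 */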
1284 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1285 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1288 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1289 struct sk_buff *skb = rx_buf->skb;
1291 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1293 /* Unmap skb in the pool anyway, as we are going to change
1294 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. */
1296 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1297 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1299 if (likely(new_skb)) {
1300 /* fix ip xsum and give it to the stack */
1301 /* (no need to map the new skb) */
1304 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1305 PARSING_FLAGS_VLAN);
1306 int is_not_hwaccel_vlan_cqe =
1307 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1311 prefetch(((char *)(skb)) + 128);
1313 #ifdef BNX2X_STOP_ON_ERROR
1314 if (pad + len > bp->rx_buf_size) {
1315 BNX2X_ERR("skb_put is about to fail... "
1316 "pad %d len %d rx_buf_size %d\n",
1317 pad, len, bp->rx_buf_size);
1323 skb_reserve(skb, pad);
1326 skb->protocol = eth_type_trans(skb, bp->dev);
1327 skb->ip_summed = CHECKSUM_UNNECESSARY;
1328 skb_record_rx_queue(skb, queue);
1333 iph = (struct iphdr *)skb->data;
1335 /* If there is no Rx VLAN offloading -
1336 take the VLAN tag into account */
1337 if (unlikely(is_not_hwaccel_vlan_cqe))
1338 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1341 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1344 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1345 &cqe->fast_path_cqe, cqe_idx)) {
1347 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1348 (!is_not_hwaccel_vlan_cqe))
1349 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1350 le16_to_cpu(cqe->fast_path_cqe.
1354 netif_receive_skb(skb);
1356 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1357 " - dropping packet!\n");
1362 /* put new skb in bin */
1363 fp->tpa_pool[queue].skb = new_skb;
1366 /* else drop the packet and keep the buffer in the bin */
1367 DP(NETIF_MSG_RX_STATUS,
1368 "Failed to allocate new skb - dropping packet!\n");
1369 bp->eth_stats.rx_skb_alloc_failed++;
1372 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1375 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1376 struct bnx2x_fastpath *fp,
1377 u16 bd_prod, u16 rx_comp_prod,
1380 struct tstorm_eth_rx_producers rx_prods = {0};
1383 /* Update producers */
1384 rx_prods.bd_prod = bd_prod;
1385 rx_prods.cqe_prod = rx_comp_prod;
1386 rx_prods.sge_prod = rx_sge_prod;
1389 /* Make sure that the BD and SGE data is updated before updating the
1390 * producers, since the FW might read the BD/SGE right after the producer
1392 * is updated.  This is only applicable for weak-ordered memory model
1393 * archs such as IA-64.  The following barrier is also mandatory since
1394 * the FW assumes that BDs already have buffers. */
1398 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1399 REG_WR(bp, BAR_TSTRORM_INTMEM +
1400 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1401 ((u32 *)&rx_prods)[i]);
1403 mmiowb(); /* keep prod updates ordered */
1405 DP(NETIF_MSG_RX_STATUS,
1406 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1407 bd_prod, rx_comp_prod, rx_sge_prod);
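/* NAPI Rx handler for one fastpath ring: consume up to 'budget' completion
 * entries, dispatching slowpath CQEs to bnx2x_sp_event(), TPA start/end CQEs
 * to the aggregation code, and plain packets to the stack, then publish the
 * new BD/CQE/SGE producers to the TSTORM.
 */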
1410 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1412 struct bnx2x *bp = fp->bp;
1413 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1414 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1417 #ifdef BNX2X_STOP_ON_ERROR
1418 if (unlikely(bp->panic))
1422 /* CQ "next element" is of the size of the regular element,
1423 that's why it's ok here */
1424 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1425 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1428 bd_cons = fp->rx_bd_cons;
1429 bd_prod = fp->rx_bd_prod;
1430 bd_prod_fw = bd_prod;
1431 sw_comp_cons = fp->rx_comp_cons;
1432 sw_comp_prod = fp->rx_comp_prod;
1434 /* Memory barrier necessary as speculative reads of the rx
1435 * buffer can be ahead of the index in the status block */
1439 DP(NETIF_MSG_RX_STATUS,
1440 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1441 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1443 while (sw_comp_cons != hw_comp_cons) {
1444 struct sw_rx_bd *rx_buf = NULL;
1445 struct sk_buff *skb;
1446 union eth_rx_cqe *cqe;
1450 comp_ring_cons = RCQ_BD(sw_comp_cons);
1451 bd_prod = RX_BD(bd_prod);
1452 bd_cons = RX_BD(bd_cons);
1454 cqe = &fp->rx_comp_ring[comp_ring_cons];
1455 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1457 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1458 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1459 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1460 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1461 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1462 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1464 /* is this a slowpath msg? */
1465 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1466 bnx2x_sp_event(fp, cqe);
1469 /* this is an rx packet */
1471 rx_buf = &fp->rx_buf_ring[bd_cons];
1473 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1474 pad = cqe->fast_path_cqe.placement_offset;
1476 /* If CQE is marked both TPA_START and TPA_END
1477 it is a non-TPA CQE */
1478 if ((!fp->disable_tpa) &&
1479 (TPA_TYPE(cqe_fp_flags) !=
1480 (TPA_TYPE_START | TPA_TYPE_END))) {
1481 u16 queue = cqe->fast_path_cqe.queue_index;
1483 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1484 DP(NETIF_MSG_RX_STATUS,
1485 "calling tpa_start on queue %d\n",
1488 bnx2x_tpa_start(fp, queue, skb,
1493 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1494 DP(NETIF_MSG_RX_STATUS,
1495 "calling tpa_stop on queue %d\n",
1498 if (!BNX2X_RX_SUM_FIX(cqe))
1499 BNX2X_ERR("STOP on none TCP "
1502 /* This is the size of the linear data on this skb */
1504 len = le16_to_cpu(cqe->fast_path_cqe.
1506 bnx2x_tpa_stop(bp, fp, queue, pad,
1507 len, cqe, comp_ring_cons);
1508 #ifdef BNX2X_STOP_ON_ERROR
1513 bnx2x_update_sge_prod(fp,
1514 &cqe->fast_path_cqe);
1519 pci_dma_sync_single_for_device(bp->pdev,
1520 pci_unmap_addr(rx_buf, mapping),
1521 pad + RX_COPY_THRESH,
1522 PCI_DMA_FROMDEVICE);
1524 prefetch(((char *)(skb)) + 128);
1526 /* is this an error packet? */
1527 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1528 DP(NETIF_MSG_RX_ERR,
1529 "ERROR flags %x rx packet %u\n",
1530 cqe_fp_flags, sw_comp_cons);
1531 bp->eth_stats.rx_err_discard_pkt++;
1535 /* Since we don't have a jumbo ring
1536 * copy small packets if mtu > 1500 */
1538 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1539 (len <= RX_COPY_THRESH)) {
1540 struct sk_buff *new_skb;
1542 new_skb = netdev_alloc_skb(bp->dev,
1544 if (new_skb == NULL) {
1545 DP(NETIF_MSG_RX_ERR,
1546 "ERROR packet dropped "
1547 "because of alloc failure\n");
1548 bp->eth_stats.rx_skb_alloc_failed++;
1553 skb_copy_from_linear_data_offset(skb, pad,
1554 new_skb->data + pad, len);
1555 skb_reserve(new_skb, pad);
1556 skb_put(new_skb, len);
1558 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1562 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1563 pci_unmap_single(bp->pdev,
1564 pci_unmap_addr(rx_buf, mapping),
1566 PCI_DMA_FROMDEVICE);
1567 skb_reserve(skb, pad);
1571 DP(NETIF_MSG_RX_ERR,
1572 "ERROR packet dropped because "
1573 "of alloc failure\n");
1574 bp->eth_stats.rx_skb_alloc_failed++;
1576 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1580 skb->protocol = eth_type_trans(skb, bp->dev);
1582 skb->ip_summed = CHECKSUM_NONE;
1584 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1585 skb->ip_summed = CHECKSUM_UNNECESSARY;
1587 bp->eth_stats.hw_csum_err++;
1592 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1593 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1594 PARSING_FLAGS_VLAN))
1595 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1596 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1599 netif_receive_skb(skb);
1605 bd_cons = NEXT_RX_IDX(bd_cons);
1606 bd_prod = NEXT_RX_IDX(bd_prod);
1607 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1610 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1611 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1613 if (rx_pkt == budget)
1617 fp->rx_bd_cons = bd_cons;
1618 fp->rx_bd_prod = bd_prod_fw;
1619 fp->rx_comp_cons = sw_comp_cons;
1620 fp->rx_comp_prod = sw_comp_prod;
1622 /* Update producers */
1623 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1626 fp->rx_pkt += rx_pkt;
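/* MSI-X handler for a fastpath vector: ack the status block with interrupts
 * kept disabled and schedule the queue's NAPI context, which does the actual
 * Tx/Rx work.
 */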
1632 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1634 struct bnx2x_fastpath *fp = fp_cookie;
1635 struct bnx2x *bp = fp->bp;
1636 int index = FP_IDX(fp);
1638 /* Return here if interrupt is disabled */
1639 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1640 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1644 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1645 index, FP_SB_ID(fp));
1646 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1648 #ifdef BNX2X_STOP_ON_ERROR
1649 if (unlikely(bp->panic))
1653 prefetch(fp->rx_cons_sb);
1654 prefetch(fp->tx_cons_sb);
1655 prefetch(&fp->status_blk->c_status_block.status_block_index);
1656 prefetch(&fp->status_blk->u_status_block.status_block_index);
1658 napi_schedule(&bnx2x_fp(bp, index, napi));
1663 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1665 struct net_device *dev = dev_instance;
1666 struct bnx2x *bp = netdev_priv(dev);
1667 u16 status = bnx2x_ack_int(bp);
1670 /* Return here if interrupt is shared and it's not for us */
1671 if (unlikely(status == 0)) {
1672 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1675 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1677 /* Return here if interrupt is disabled */
1678 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1679 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1683 #ifdef BNX2X_STOP_ON_ERROR
1684 if (unlikely(bp->panic))
1688 mask = 0x2 << bp->fp[0].sb_id;
1689 if (status & mask) {
1690 struct bnx2x_fastpath *fp = &bp->fp[0];
1692 prefetch(fp->rx_cons_sb);
1693 prefetch(fp->tx_cons_sb);
1694 prefetch(&fp->status_blk->c_status_block.status_block_index);
1695 prefetch(&fp->status_blk->u_status_block.status_block_index);
1697 napi_schedule(&bnx2x_fp(bp, 0, napi));
1703 if (unlikely(status & 0x1)) {
1704 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1712 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1718 /* end of fast path */
1720 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1725 /* General service functions */
1728 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1731 u32 resource_bit = (1 << resource);
1732 int func = BP_FUNC(bp);
1733 u32 hw_lock_control_reg;
1736 /* Validating that the resource is within range */
1737 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1739 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1740 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1745 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1747 hw_lock_control_reg =
1748 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1751 /* Validating that the resource is not already taken */
1752 lock_status = REG_RD(bp, hw_lock_control_reg);
1753 if (lock_status & resource_bit) {
1754 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1755 lock_status, resource_bit);
1759 /* Try for 5 seconds, every 5ms */
1760 for (cnt = 0; cnt < 1000; cnt++) {
1761 /* Try to acquire the lock */
1762 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1763 lock_status = REG_RD(bp, hw_lock_control_reg);
1764 if (lock_status & resource_bit)
1769 DP(NETIF_MSG_HW, "Timeout\n");
1773 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1776 u32 resource_bit = (1 << resource);
1777 int func = BP_FUNC(bp);
1778 u32 hw_lock_control_reg;
1780 /* Validating that the resource is within range */
1781 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1783 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1784 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1789 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1791 hw_lock_control_reg =
1792 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1795 /* Validating that the resource is currently taken */
1796 lock_status = REG_RD(bp, hw_lock_control_reg);
1797 if (!(lock_status & resource_bit)) {
1798 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1799 lock_status, resource_bit);
1803 REG_WR(bp, hw_lock_control_reg, resource_bit);
1807 /* HW Lock for shared dual port PHYs */
1808 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1810 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1812 mutex_lock(&bp->port.phy_mutex);
1814 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1815 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1816 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1819 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1821 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1823 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1824 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1825 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1827 mutex_unlock(&bp->port.phy_mutex);
1830 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1832 /* The GPIO should be swapped if swap register is set and active */
1833 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1834 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1835 int gpio_shift = gpio_num +
1836 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1837 u32 gpio_mask = (1 << gpio_shift);
1840 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1841 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1845 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1846 /* read GPIO and mask except the float bits */
1847 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1850 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1851 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1852 gpio_num, gpio_shift);
1853 /* clear FLOAT and set CLR */
1854 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1855 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1858 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1859 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1860 gpio_num, gpio_shift);
1861 /* clear FLOAT and set SET */
1862 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1863 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1866 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1867 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1868 gpio_num, gpio_shift);
1870 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1877 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1878 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1883 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1885 u32 spio_mask = (1 << spio_num);
1888 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1889 (spio_num > MISC_REGISTERS_SPIO_7)) {
1890 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1894 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1895 /* read SPIO and mask except the float bits */
1896 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1899 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1900 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1901 /* clear FLOAT and set CLR */
1902 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1903 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1906 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1907 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1908 /* clear FLOAT and set SET */
1909 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1910 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1913 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1914 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1916 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1923 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1924 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1929 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1931 switch (bp->link_vars.ieee_fc &
1932 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1933 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1934 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1937 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1938 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1941 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1942 bp->port.advertising |= ADVERTISED_Asym_Pause;
1945 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1951 static void bnx2x_link_report(struct bnx2x *bp)
1953 if (bp->link_vars.link_up) {
1954 if (bp->state == BNX2X_STATE_OPEN)
1955 netif_carrier_on(bp->dev);
1956 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1958 printk("%d Mbps ", bp->link_vars.line_speed);
1960 if (bp->link_vars.duplex == DUPLEX_FULL)
1961 printk("full duplex");
1963 printk("half duplex");
1965 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1966 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1967 printk(", receive ");
1968 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1969 printk("& transmit ");
1971 printk(", transmit ");
1973 printk("flow control ON");
1977 } else { /* link_down */
1978 netif_carrier_off(bp->dev);
1979 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1983 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1985 if (!BP_NOMCP(bp)) {
1988 /* Initialize link parameters structure variables */
1989 /* It is recommended to turn off RX FC for jumbo frames
1990 for better performance */
1992 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1993 else if (bp->dev->mtu > 5000)
1994 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1996 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1998 bnx2x_acquire_phy_lock(bp);
1999 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2000 bnx2x_release_phy_lock(bp);
2002 bnx2x_calc_fc_adv(bp);
2004 if (bp->link_vars.link_up)
2005 bnx2x_link_report(bp);
2010 BNX2X_ERR("Bootcode is missing -not initializing link\n");
2014 static void bnx2x_link_set(struct bnx2x *bp)
2016 if (!BP_NOMCP(bp)) {
2017 bnx2x_acquire_phy_lock(bp);
2018 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2019 bnx2x_release_phy_lock(bp);
2021 bnx2x_calc_fc_adv(bp);
2023 BNX2X_ERR("Bootcode is missing -not setting link\n");
2026 static void bnx2x__link_reset(struct bnx2x *bp)
2028 if (!BP_NOMCP(bp)) {
2029 bnx2x_acquire_phy_lock(bp);
2030 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2031 bnx2x_release_phy_lock(bp);
2033 BNX2X_ERR("Bootcode is missing -not resetting link\n");
2036 static u8 bnx2x_link_test(struct bnx2x *bp)
2040 bnx2x_acquire_phy_lock(bp);
2041 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2042 bnx2x_release_phy_lock(bp);
2047 /* Calculates the sum of vn_min_rates.
2048 It's needed for further normalizing of the min_rates.
2053 Returns 0 if all the min_rates are 0;
2054 in the latter case the fairness algorithm should be deactivated.
2055 If not all min_rates are zero then those that are zeroes will be set to 1. */
2058 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2060 int i, port = BP_PORT(bp);
2064 for (i = 0; i < E1HVN_MAX; i++) {
2066 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2067 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2068 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2069 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2070 /* If min rate is zero - set it to 1 */
2072 vn_min_rate = DEF_MIN_RATE;
2076 wsum += vn_min_rate;
2080 /* ... only if all min rates are zeros - disable FAIRNESS */
2087 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2090 struct cmng_struct_per_port *m_cmng_port)
2092 u32 r_param = port_rate / 8;
2093 int port = BP_PORT(bp);
2096 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2098 /* Enable minmax only if we are in e1hmf mode */
2100 u32 fair_periodic_timeout_usec;
2103 /* Enable rate shaping and fairness */
2104 m_cmng_port->flags.cmng_vn_enable = 1;
2105 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2106 m_cmng_port->flags.rate_shaping_enable = 1;
2109 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2110 " fairness will be disabled\n");
2112 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2113 m_cmng_port->rs_vars.rs_periodic_timeout =
2114 RS_PERIODIC_TIMEOUT_USEC / 4;
2116 /* this is the threshold below which no timer arming will occur.
2117 The 1.25 coefficient makes the threshold a little bigger than
2118 the real time, to compensate for timer inaccuracy */
2119 m_cmng_port->rs_vars.rs_threshold =
2120 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
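/* The "* 5 / 4" is the 1.25 coefficient mentioned above; assuming port_rate
 * is in Mbps, r_param = port_rate / 8 is bytes per usec, so the threshold is
 * 1.25 times the bytes that can arrive during one RS period at line rate.
 */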
2122 /* resolution of fairness timer */
2123 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2124 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2125 t_fair = T_FAIR_COEF / port_rate;
2127 /* this is the threshold below which we won't arm
2128 the timer anymore */
2129 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2131 /* we multiply by 1e3/8 to get bytes/msec.
2132 We don't want the credits to pass a credit
2133 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2134 m_cmng_port->fair_vars.upper_bound =
2135 r_param * t_fair * FAIR_MEM;
2136 /* since each tick is 4 usec */
2137 m_cmng_port->fair_vars.fairness_timeout =
2138 fair_periodic_timeout_usec / 4;
2141 /* Disable rate shaping and fairness */
2142 m_cmng_port->flags.cmng_vn_enable = 0;
2143 m_cmng_port->flags.fairness_enable = 0;
2144 m_cmng_port->flags.rate_shaping_enable = 0;
2147 "Single function mode minmax will be disabled\n");
2150 /* Store it to internal memory */
2151 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2152 REG_WR(bp, BAR_XSTRORM_INTMEM +
2153 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2154 ((u32 *)(m_cmng_port))[i]);
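/* Per-VN companion of bnx2x_init_port_minmax(): read this function's min/max
 * bandwidth from the MF configuration, turn them into a rate-shaping quota
 * and a fairness credit delta, and store both structures in XSTORM internal
 * memory.
 */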
2157 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2158 u32 wsum, u16 port_rate,
2159 struct cmng_struct_per_port *m_cmng_port)
2161 struct rate_shaping_vars_per_vn m_rs_vn;
2162 struct fairness_vars_per_vn m_fair_vn;
2163 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2164 u16 vn_min_rate, vn_max_rate;
2167 /* If function is hidden - set min and max to zeroes */
2168 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2173 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2174 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2175 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2176 if current min rate is zero - set it to 1.
2177 This is a requirement of the algorithm. */
2178 if ((vn_min_rate == 0) && wsum)
2179 vn_min_rate = DEF_MIN_RATE;
2180 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2181 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2184 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2185 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2187 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2188 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2190 /* global vn counter - maximal Mbps for this vn */
2191 m_rs_vn.vn_counter.rate = vn_max_rate;
2193 /* quota - number of bytes transmitted in this period */
2194 m_rs_vn.vn_counter.quota =
2195 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
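/* Worked example (assuming the 100 usec RS period noted in
 * bnx2x_init_port_minmax() above): a VN capped at vn_max_rate = 10000 Mbps
 * gets a quota of 10000 * 100 / 8 = 125000 bytes per period, since 1 Mbps
 * equals 1 bit per usec.
 */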
2197 #ifdef BNX2X_PER_PROT_QOS
2198 /* per protocol counter */
2199 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2200 /* maximal Mbps for this protocol */
2201 m_rs_vn.protocol_counters[protocol].rate =
2202 protocol_max_rate[protocol];
2203 /* the quota in each timer period -
2204 number of bytes transmitted in this period */
2205 m_rs_vn.protocol_counters[protocol].quota =
2206 (u32)(rs_periodic_timeout_usec *
2208 protocol_counters[protocol].rate/8));
2213 /* credit for each period of the fairness algorithm:
2214 number of bytes in T_FAIR (the vn's share of the port rate).
2215 wsum should not be larger than 10000, thus
2216 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2217 m_fair_vn.vn_credit_delta =
2218 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2219 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2220 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2221 m_fair_vn.vn_credit_delta);
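/* Note: the max() against twice the port fair_threshold (the "don't arm
 * the timer" threshold set above) presumably keeps a very small
 * vn_min_rate from yielding a per-period credit too small to be useful. */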
2224 #ifdef BNX2X_PER_PROT_QOS
2226 u32 protocolWeightSum = 0;
2228 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2229 protocolWeightSum +=
2230 drvInit.protocol_min_rate[protocol];
2231 /* per protocol counter -
2232 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2233 if (protocolWeightSum > 0) {
2235 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2236 /* credit for each period of the
2237 fairness algorithm - number of bytes in
2238 T_FAIR (the protocol share the vn rate) */
2239 m_fair_vn.protocol_credit_delta[protocol] =
2240 (u32)((vn_min_rate / 8) * t_fair *
2241 protocol_min_rate / protocolWeightSum);
}
#endif
2246 /* Store it to internal memory */
2247 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2248 REG_WR(bp, BAR_XSTRORM_INTMEM +
2249 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2250 ((u32 *)(&m_rs_vn))[i]);
2252 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2253 REG_WR(bp, BAR_XSTRORM_INTMEM +
2254 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2255 ((u32 *)(&m_fair_vn))[i]);
2258 /* This function is called upon link interrupt */
2259 static void bnx2x_link_attn(struct bnx2x *bp)
2263 /* Make sure that we are synced with the current statistics */
2264 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2266 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2268 if (bp->link_vars.link_up) {
2270 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2271 struct host_port_stats *pstats;
2273 pstats = bnx2x_sp(bp, port_stats);
2274 /* reset old bmac stats */
2275 memset(&(pstats->mac_stx[0]), 0,
2276 sizeof(struct mac_stx));
2278 if ((bp->state == BNX2X_STATE_OPEN) ||
2279 (bp->state == BNX2X_STATE_DISABLED))
2280 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2283 /* indicate link status */
2284 bnx2x_link_report(bp);
2289 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2290 if (vn == BP_E1HVN(bp))
continue;
2293 func = ((vn << 1) | BP_PORT(bp));
2295 /* Set the attention towards other drivers on the same port */
2297 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2298 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2302 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2303 struct cmng_struct_per_port m_cmng_port;
2305 int port = BP_PORT(bp);
2307 /* Init RATE SHAPING and FAIRNESS contexts */
2308 wsum = bnx2x_calc_vn_wsum(bp);
2309 bnx2x_init_port_minmax(bp, (int)wsum,
2310 bp->link_vars.line_speed,
&m_cmng_port);
2313 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2314 bnx2x_init_vn_minmax(bp, 2*vn + port,
2315 wsum, bp->link_vars.line_speed,
&m_cmng_port);
2320 static void bnx2x__link_status_update(struct bnx2x *bp)
2322 if (bp->state != BNX2X_STATE_OPEN)
2325 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2327 if (bp->link_vars.link_up)
2328 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2330 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2332 /* indicate link status */
2333 bnx2x_link_report(bp);
2336 static void bnx2x_pmf_update(struct bnx2x *bp)
2338 int port = BP_PORT(bp);
2342 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2344 /* enable nig attention */
2345 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2346 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2347 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2349 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2357 * General service functions
2360 /* the slow path queue is odd since completions arrive on the fastpath ring */
2361 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2362 u32 data_hi, u32 data_lo, int common)
2364 int func = BP_FUNC(bp);
2366 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2367 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2368 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2369 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2370 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2372 #ifdef BNX2X_STOP_ON_ERROR
2373 if (unlikely(bp->panic))
return -EIO;
#endif
2377 spin_lock_bh(&bp->spq_lock);
2379 if (!bp->spq_left) {
2380 BNX2X_ERR("BUG! SPQ ring full!\n");
2381 spin_unlock_bh(&bp->spq_lock);
2386 /* CID needs port number to be encoded in it */
2387 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2388 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid)));
2390 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
if (common)
2392 bp->spq_prod_bd->hdr.type |=
2393 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2395 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2396 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2400 if (bp->spq_prod_bd == bp->spq_last_bd) {
2401 bp->spq_prod_bd = bp->spq;
2402 bp->spq_prod_idx = 0;
2403 DP(NETIF_MSG_TIMER, "end of spq\n");
2410 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
bp->spq_prod_idx);
2413 spin_unlock_bh(&bp->spq_lock);
2417 /* acquire split MCP access lock register */
2418 static int bnx2x_acquire_alr(struct bnx2x *bp)
2425 for (j = 0; j < i*10; j++) {
2427 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2428 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2429 if (val & (1L << 31))
2434 if (!(val & (1L << 31))) {
2435 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2442 /* release split MCP access lock register */
2443 static void bnx2x_release_alr(struct bnx2x *bp)
2447 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2450 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2452 struct host_def_status_block *def_sb = bp->def_status_blk;
2455 barrier(); /* status block is written to by the chip */
2456 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2457 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2460 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2461 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2464 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2465 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2468 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2469 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2472 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2473 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
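/* The chip advances the per-storm indices of the default status block as
 * slowpath events arrive; caching them here lets the slowpath task see
 * which storms have new work to acknowledge. */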
2480 * slow path service functions
2483 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2485 int port = BP_PORT(bp);
2486 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2487 COMMAND_REG_ATTN_BITS_SET);
2488 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2489 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2490 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2491 NIG_REG_MASK_INTERRUPT_PORT0;
2494 if (bp->attn_state & asserted)
2495 BNX2X_ERR("IGU ERROR\n");
2497 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2498 aeu_mask = REG_RD(bp, aeu_addr);
2500 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2501 aeu_mask, asserted);
2502 aeu_mask &= ~(asserted & 0xff);
2503 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2505 REG_WR(bp, aeu_addr, aeu_mask);
2506 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2508 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2509 bp->attn_state |= asserted;
2510 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2512 if (asserted & ATTN_HARD_WIRED_MASK) {
2513 if (asserted & ATTN_NIG_FOR_FUNC) {
2515 bnx2x_acquire_phy_lock(bp);
2517 /* save nig interrupt mask */
2518 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2519 REG_WR(bp, nig_int_mask_addr, 0);
2521 bnx2x_link_attn(bp);
2523 /* handle unicore attn? */
2525 if (asserted & ATTN_SW_TIMER_4_FUNC)
2526 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2528 if (asserted & GPIO_2_FUNC)
2529 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2531 if (asserted & GPIO_3_FUNC)
2532 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2534 if (asserted & GPIO_4_FUNC)
2535 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2538 if (asserted & ATTN_GENERAL_ATTN_1) {
2539 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2540 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2542 if (asserted & ATTN_GENERAL_ATTN_2) {
2543 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2544 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2546 if (asserted & ATTN_GENERAL_ATTN_3) {
2547 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2548 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2551 if (asserted & ATTN_GENERAL_ATTN_4) {
2552 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2553 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2555 if (asserted & ATTN_GENERAL_ATTN_5) {
2556 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2557 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2559 if (asserted & ATTN_GENERAL_ATTN_6) {
2560 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2561 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2565 } /* if hardwired */
2567 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2569 REG_WR(bp, hc_addr, asserted);
2571 /* now set back the mask */
2572 if (asserted & ATTN_NIG_FOR_FUNC) {
2573 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2574 bnx2x_release_phy_lock(bp);
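/* The deasserted helpers below each cover one 32-bit group of attention
 * signals: broadly, they mask the offending AEU enable bits, read/clear
 * the per-block interrupt status registers and flag fatal HW block
 * attentions. */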
2578 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2580 int port = BP_PORT(bp);
2584 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2585 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2587 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2589 val = REG_RD(bp, reg_offset);
2590 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2591 REG_WR(bp, reg_offset, val);
2593 BNX2X_ERR("SPIO5 hw attention\n");
2595 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2596 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2597 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2598 /* Fan failure attention */
2600 /* The PHY reset is controlled by GPIO 1 */
2601 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2602 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2603 /* Low power mode is controlled by GPIO 2 */
2604 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2605 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2606 /* mark the failure */
2607 bp->link_params.ext_phy_config &=
2608 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2609 bp->link_params.ext_phy_config |=
2610 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
SHMEM_WR(bp,
2612 dev_info.port_hw_config[port].
2613 external_phy_config,
2614 bp->link_params.ext_phy_config);
2615 /* log the failure */
2616 printk(KERN_ERR PFX "Fan Failure on Network"
2617 " Controller %s has caused the driver to"
2618 " shutdown the card to prevent permanent"
2619 " damage. Please contact Dell Support for"
2620 " assistance\n", bp->dev->name);
2628 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2630 val = REG_RD(bp, reg_offset);
2631 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2632 REG_WR(bp, reg_offset, val);
2634 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2635 (attn & HW_INTERRUT_ASSERT_SET_0));
2640 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2644 if (attn & BNX2X_DOORQ_ASSERT) {
2646 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2647 BNX2X_ERR("DB hw attention 0x%x\n", val);
2648 /* DORQ discard attention */
2650 BNX2X_ERR("FATAL error from DORQ\n");
2653 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2655 int port = BP_PORT(bp);
2658 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2659 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2661 val = REG_RD(bp, reg_offset);
2662 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2663 REG_WR(bp, reg_offset, val);
2665 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2666 (attn & HW_INTERRUT_ASSERT_SET_1));
2671 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2675 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2677 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2678 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2679 /* CFC error attention */
2681 BNX2X_ERR("FATAL error from CFC\n");
2684 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2686 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2687 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2688 /* RQ_USDMDP_FIFO_OVERFLOW */
2690 BNX2X_ERR("FATAL error from PXP\n");
2693 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2695 int port = BP_PORT(bp);
2698 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2699 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2701 val = REG_RD(bp, reg_offset);
2702 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2703 REG_WR(bp, reg_offset, val);
2705 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2706 (attn & HW_INTERRUT_ASSERT_SET_2));
2711 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2715 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2717 if (attn & BNX2X_PMF_LINK_ASSERT) {
2718 int func = BP_FUNC(bp);
2720 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2721 bnx2x__link_status_update(bp);
2722 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2724 bnx2x_pmf_update(bp);
2726 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2728 BNX2X_ERR("MC assert!\n");
2729 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2731 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2732 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2735 } else if (attn & BNX2X_MCP_ASSERT) {
2737 BNX2X_ERR("MCP assert!\n");
2738 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2742 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2745 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2746 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2747 if (attn & BNX2X_GRC_TIMEOUT) {
2748 val = CHIP_IS_E1H(bp) ?
2749 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2750 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2752 if (attn & BNX2X_GRC_RSV) {
2753 val = CHIP_IS_E1H(bp) ?
2754 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2755 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2757 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
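/* Dispatcher for deasserted attentions: reads the after-invert AEU signals
 * under the split MCP access lock, runs every attention group whose bit
 * was deasserted through the helpers above, then clears the bits in the HC
 * and re-opens the AEU mask. */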
2761 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2763 struct attn_route attn;
2764 struct attn_route group_mask;
2765 int port = BP_PORT(bp);
2771 /* need to take HW lock because MCP or other port might also
2772 try to handle this event */
2773 bnx2x_acquire_alr(bp);
2775 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2776 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2777 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2778 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2779 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2780 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2782 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2783 if (deasserted & (1 << index)) {
2784 group_mask = bp->attn_group[index];
2786 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2787 index, group_mask.sig[0], group_mask.sig[1],
2788 group_mask.sig[2], group_mask.sig[3]);
2790 bnx2x_attn_int_deasserted3(bp,
2791 attn.sig[3] & group_mask.sig[3]);
2792 bnx2x_attn_int_deasserted1(bp,
2793 attn.sig[1] & group_mask.sig[1]);
2794 bnx2x_attn_int_deasserted2(bp,
2795 attn.sig[2] & group_mask.sig[2]);
2796 bnx2x_attn_int_deasserted0(bp,
2797 attn.sig[0] & group_mask.sig[0]);
2799 if ((attn.sig[0] & group_mask.sig[0] &
2800 HW_PRTY_ASSERT_SET_0) ||
2801 (attn.sig[1] & group_mask.sig[1] &
2802 HW_PRTY_ASSERT_SET_1) ||
2803 (attn.sig[2] & group_mask.sig[2] &
2804 HW_PRTY_ASSERT_SET_2))
2805 BNX2X_ERR("FATAL HW block parity attention\n");
2809 bnx2x_release_alr(bp);
2811 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2814 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2816 REG_WR(bp, reg_addr, val);
2818 if (~bp->attn_state & deasserted)
2819 BNX2X_ERR("IGU ERROR\n");
2821 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2822 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2824 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2825 aeu_mask = REG_RD(bp, reg_addr);
2827 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2828 aeu_mask, deasserted);
2829 aeu_mask |= (deasserted & 0xff);
2830 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2832 REG_WR(bp, reg_addr, aeu_mask);
2833 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2835 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2836 bp->attn_state &= ~deasserted;
2837 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2840 static void bnx2x_attn_int(struct bnx2x *bp)
2842 /* read local copy of bits */
2843 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2845 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2847 u32 attn_state = bp->attn_state;
2849 /* look for changed bits */
2850 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2851 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2854 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2855 attn_bits, attn_ack, asserted, deasserted);
2857 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2858 BNX2X_ERR("BAD attention state\n");
2860 /* handle bits that were raised */
2862 bnx2x_attn_int_asserted(bp, asserted);
2865 bnx2x_attn_int_deasserted(bp, deasserted);
2868 static void bnx2x_sp_task(struct work_struct *work)
2870 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2874 /* Return here if interrupt is disabled */
2875 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2876 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2880 status = bnx2x_update_dsb_idx(bp);
2881 /* if (status == 0) */
2882 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2884 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2890 /* CStorm events: query_stats, port delete ramrod */
2892 bp->stats_pending = 0;
2894 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2896 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2898 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2900 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2902 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2907 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2909 struct net_device *dev = dev_instance;
2910 struct bnx2x *bp = netdev_priv(dev);
2912 /* Return here if interrupt is disabled */
2913 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2914 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2918 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2920 #ifdef BNX2X_STOP_ON_ERROR
2921 if (unlikely(bp->panic))
return IRQ_HANDLED;
#endif
2925 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2930 /* end of slow path */
2934 /****************************************************************************
* Statistics
2936 ****************************************************************************/
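/* The statistics below are kept as {hi, lo} u32 pairs rather than u64:
 * ADD_64/DIFF_64/ADD_EXTEND_64 do the carry and borrow by hand, and the
 * UPDATE_* macros fold the raw (wrapping) hardware/firmware counters into
 * those pairs as deltas. */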
2938 /* sum[hi:lo] += add[hi:lo] */
2939 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2942 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2945 /* difference = minuend - subtrahend */
2946 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2948 if (m_lo < s_lo) { \
2950 d_hi = m_hi - s_hi; \
2952 /* we can 'loan' 1 */ \
2954 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2956 /* m_hi <= s_hi */ \
2961 /* m_lo >= s_lo */ \
2962 if (m_hi < s_hi) { \
2966 /* m_hi >= s_hi */ \
2967 d_hi = m_hi - s_hi; \
2968 d_lo = m_lo - s_lo; \
2973 #define UPDATE_STAT64(s, t) \
2975 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2976 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2977 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2978 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2979 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2980 pstats->mac_stx[1].t##_lo, diff.lo); \
2983 #define UPDATE_STAT64_NIG(s, t) \
2985 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2986 diff.lo, new->s##_lo, old->s##_lo); \
2987 ADD_64(estats->t##_hi, diff.hi, \
2988 estats->t##_lo, diff.lo); \
2991 /* sum[hi:lo] += add */
2992 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2995 s_hi += (s_lo < a) ? 1 : 0; \
2998 #define UPDATE_EXTEND_STAT(s) \
3000 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3001 pstats->mac_stx[1].s##_lo, \
3005 #define UPDATE_EXTEND_TSTAT(s, t) \
3007 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3008 old_tclient->s = le32_to_cpu(tclient->s); \
3009 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3012 #define UPDATE_EXTEND_XSTAT(s, t) \
3014 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3015 old_xclient->s = le32_to_cpu(xclient->s); \
3016 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3020 * General service functions
3023 static inline long bnx2x_hilo(u32 *hiref)
3025 u32 lo = *(hiref + 1);
3026 #if (BITS_PER_LONG == 64)
3029 return HILO_U64(hi, lo);
3036 * Init service functions
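/* bnx2x_storm_stats_post() asks the firmware, via a STAT_QUERY ramrod on
 * the slowpath queue, to dump the per-client storm statistics;
 * stats_pending keeps a single query in flight and is cleared from the
 * slowpath task when the CStorm completion arrives. */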
3039 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3041 if (!bp->stats_pending) {
3042 struct eth_query_ramrod_data ramrod_data = {0};
3045 ramrod_data.drv_counter = bp->stats_counter++;
3046 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3047 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3049 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3050 ((u32 *)&ramrod_data)[1],
3051 ((u32 *)&ramrod_data)[0], 0);
3053 /* stats ramrod has its own slot on the spq */
3055 bp->stats_pending = 1;
3060 static void bnx2x_stats_init(struct bnx2x *bp)
3062 int port = BP_PORT(bp);
3064 bp->executer_idx = 0;
3065 bp->stats_counter = 0;
if (!BP_NOMCP(bp))
3069 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
else
3071 bp->port.port_stx = 0;
3072 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3074 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3075 bp->port.old_nig_stats.brb_discard =
3076 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3077 bp->port.old_nig_stats.brb_truncate =
3078 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3079 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3080 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3081 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3082 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3084 /* function stats */
3085 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3086 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3087 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3088 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3090 bp->stats_state = STATS_STATE_DISABLED;
3091 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3092 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
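/* Kicks the DMAE engine for the statistics transfers prepared in the
 * slowpath buffer: when several commands are queued (executer_idx != 0) a
 * "loader" command is built that chains them through the DMAE command
 * memory, otherwise a lone function-stats command is posted directly. */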
3095 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3097 struct dmae_command *dmae = &bp->stats_dmae;
3098 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3100 *stats_comp = DMAE_COMP_VAL;
3103 if (bp->executer_idx) {
3104 int loader_idx = PMF_DMAE_C(bp);
3106 memset(dmae, 0, sizeof(struct dmae_command));
3108 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3109 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3110 DMAE_CMD_DST_RESET |
3112 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3114 DMAE_CMD_ENDIANITY_DW_SWAP |
3116 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3118 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3119 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3120 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3121 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3122 sizeof(struct dmae_command) *
3123 (loader_idx + 1)) >> 2;
3124 dmae->dst_addr_hi = 0;
3125 dmae->len = sizeof(struct dmae_command) >> 2;
3128 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3129 dmae->comp_addr_hi = 0;
3133 bnx2x_post_dmae(bp, dmae, loader_idx);
3135 } else if (bp->func_stx) {
3137 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3141 static int bnx2x_stats_comp(struct bnx2x *bp)
3143 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3147 while (*stats_comp != DMAE_COMP_VAL) {
3149 BNX2X_ERR("timeout waiting for stats finished\n");
3159 * Statistics service functions
3162 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3164 struct dmae_command *dmae;
3166 int loader_idx = PMF_DMAE_C(bp);
3167 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3170 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3171 BNX2X_ERR("BUG!\n");
3175 bp->executer_idx = 0;
3177 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3179 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3181 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3183 DMAE_CMD_ENDIANITY_DW_SWAP |
3185 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3186 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3188 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3189 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3190 dmae->src_addr_lo = bp->port.port_stx >> 2;
3191 dmae->src_addr_hi = 0;
3192 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3193 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3194 dmae->len = DMAE_LEN32_RD_MAX;
3195 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3196 dmae->comp_addr_hi = 0;
3199 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3200 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3201 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3202 dmae->src_addr_hi = 0;
3203 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3204 DMAE_LEN32_RD_MAX * 4);
3205 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3206 DMAE_LEN32_RD_MAX * 4);
3207 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3208 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3209 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3210 dmae->comp_val = DMAE_COMP_VAL;
3213 bnx2x_hw_stats_post(bp);
3214 bnx2x_stats_comp(bp);
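/* Builds the full per-port DMAE chain: host port/function stats out to
 * their shmem areas, then the active MAC block (BigMAC or EMAC, depending
 * on link_vars.mac_type) and the NIG counters back into host buffers, with
 * the last command completing into stats_comp. */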
3217 static void bnx2x_port_stats_init(struct bnx2x *bp)
3219 struct dmae_command *dmae;
3220 int port = BP_PORT(bp);
3221 int vn = BP_E1HVN(bp);
3223 int loader_idx = PMF_DMAE_C(bp);
3225 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3228 if (!bp->link_vars.link_up || !bp->port.pmf) {
3229 BNX2X_ERR("BUG!\n");
3233 bp->executer_idx = 0;
3236 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3237 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3238 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3240 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3242 DMAE_CMD_ENDIANITY_DW_SWAP |
3244 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3245 (vn << DMAE_CMD_E1HVN_SHIFT));
3247 if (bp->port.port_stx) {
3249 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3250 dmae->opcode = opcode;
3251 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3252 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3253 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3254 dmae->dst_addr_hi = 0;
3255 dmae->len = sizeof(struct host_port_stats) >> 2;
3256 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3257 dmae->comp_addr_hi = 0;
3263 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3264 dmae->opcode = opcode;
3265 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3266 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3267 dmae->dst_addr_lo = bp->func_stx >> 2;
3268 dmae->dst_addr_hi = 0;
3269 dmae->len = sizeof(struct host_func_stats) >> 2;
3270 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3271 dmae->comp_addr_hi = 0;
3276 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3277 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3278 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3280 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3282 DMAE_CMD_ENDIANITY_DW_SWAP |
3284 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3285 (vn << DMAE_CMD_E1HVN_SHIFT));
3287 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3289 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3290 NIG_REG_INGRESS_BMAC0_MEM);
3292 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3293 BIGMAC_REGISTER_TX_STAT_GTBYT */
3294 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3295 dmae->opcode = opcode;
3296 dmae->src_addr_lo = (mac_addr +
3297 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3298 dmae->src_addr_hi = 0;
3299 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3300 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3301 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3302 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3303 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3304 dmae->comp_addr_hi = 0;
3307 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3308 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3309 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3310 dmae->opcode = opcode;
3311 dmae->src_addr_lo = (mac_addr +
3312 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3313 dmae->src_addr_hi = 0;
3314 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3315 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3316 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3317 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3318 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3319 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3320 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3321 dmae->comp_addr_hi = 0;
3324 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3326 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3328 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3329 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3330 dmae->opcode = opcode;
3331 dmae->src_addr_lo = (mac_addr +
3332 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3333 dmae->src_addr_hi = 0;
3334 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3335 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3336 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3337 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3338 dmae->comp_addr_hi = 0;
3341 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3342 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3343 dmae->opcode = opcode;
3344 dmae->src_addr_lo = (mac_addr +
3345 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3346 dmae->src_addr_hi = 0;
3347 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3348 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3349 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3350 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3352 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3353 dmae->comp_addr_hi = 0;
3356 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3357 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3358 dmae->opcode = opcode;
3359 dmae->src_addr_lo = (mac_addr +
3360 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3361 dmae->src_addr_hi = 0;
3362 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3363 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3364 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3365 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3366 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3367 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3368 dmae->comp_addr_hi = 0;
3373 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3374 dmae->opcode = opcode;
3375 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3376 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3377 dmae->src_addr_hi = 0;
3378 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3379 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3380 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3381 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3382 dmae->comp_addr_hi = 0;
3385 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3386 dmae->opcode = opcode;
3387 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3388 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3389 dmae->src_addr_hi = 0;
3390 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3391 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3392 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3393 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3394 dmae->len = (2*sizeof(u32)) >> 2;
3395 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3396 dmae->comp_addr_hi = 0;
3399 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3400 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3401 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3402 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3404 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3406 DMAE_CMD_ENDIANITY_DW_SWAP |
3408 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3409 (vn << DMAE_CMD_E1HVN_SHIFT));
3410 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3411 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3412 dmae->src_addr_hi = 0;
3413 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3414 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3415 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3416 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3417 dmae->len = (2*sizeof(u32)) >> 2;
3418 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3419 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3420 dmae->comp_val = DMAE_COMP_VAL;
3425 static void bnx2x_func_stats_init(struct bnx2x *bp)
3427 struct dmae_command *dmae = &bp->stats_dmae;
3428 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3431 if (!bp->func_stx) {
3432 BNX2X_ERR("BUG!\n");
3436 bp->executer_idx = 0;
3437 memset(dmae, 0, sizeof(struct dmae_command));
3439 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3440 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3441 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3443 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3445 DMAE_CMD_ENDIANITY_DW_SWAP |
3447 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3448 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3449 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3450 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3451 dmae->dst_addr_lo = bp->func_stx >> 2;
3452 dmae->dst_addr_hi = 0;
3453 dmae->len = sizeof(struct host_func_stats) >> 2;
3454 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3455 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3456 dmae->comp_val = DMAE_COMP_VAL;
3461 static void bnx2x_stats_start(struct bnx2x *bp)
3464 bnx2x_port_stats_init(bp);
3466 else if (bp->func_stx)
3467 bnx2x_func_stats_init(bp);
3469 bnx2x_hw_stats_post(bp);
3470 bnx2x_storm_stats_post(bp);
3473 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3475 bnx2x_stats_comp(bp);
3476 bnx2x_stats_pmf_update(bp);
3477 bnx2x_stats_start(bp);
3480 static void bnx2x_stats_restart(struct bnx2x *bp)
3482 bnx2x_stats_comp(bp);
3483 bnx2x_stats_start(bp);
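/* MAC statistics update helpers: UPDATE_STAT64 (BigMAC) and
 * UPDATE_EXTEND_STAT (EMAC) translate the raw counters DMAed above into
 * the cumulative mac_stx[] entries of the host port stats. */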
3486 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3488 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3489 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3490 struct regpair diff;
3492 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3493 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3494 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3495 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3496 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3497 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3498 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3499 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3500 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3501 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3502 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3503 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3504 UPDATE_STAT64(tx_stat_gt127,
3505 tx_stat_etherstatspkts65octetsto127octets);
3506 UPDATE_STAT64(tx_stat_gt255,
3507 tx_stat_etherstatspkts128octetsto255octets);
3508 UPDATE_STAT64(tx_stat_gt511,
3509 tx_stat_etherstatspkts256octetsto511octets);
3510 UPDATE_STAT64(tx_stat_gt1023,
3511 tx_stat_etherstatspkts512octetsto1023octets);
3512 UPDATE_STAT64(tx_stat_gt1518,
3513 tx_stat_etherstatspkts1024octetsto1522octets);
3514 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3515 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3516 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3517 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3518 UPDATE_STAT64(tx_stat_gterr,
3519 tx_stat_dot3statsinternalmactransmiterrors);
3520 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3523 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3525 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3526 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3528 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3529 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3530 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3531 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3532 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3533 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3534 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3535 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3536 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3537 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3538 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3539 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3540 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3541 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3542 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3543 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3544 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3545 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3546 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3547 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3548 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3549 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3550 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3551 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3552 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3553 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3554 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3555 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3556 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3557 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3558 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3561 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3563 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3564 struct nig_stats *old = &(bp->port.old_nig_stats);
3565 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3566 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3567 struct regpair diff;
3569 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3570 bnx2x_bmac_stats_update(bp);
3572 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3573 bnx2x_emac_stats_update(bp);
3575 else { /* unreached */
3576 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3580 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3581 new->brb_discard - old->brb_discard);
3582 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3583 new->brb_truncate - old->brb_truncate);
3585 UPDATE_STAT64_NIG(egress_mac_pkt0,
3586 etherstatspkts1024octetsto1522octets);
3587 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3589 memcpy(old, new, sizeof(struct nig_stats));
3591 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3592 sizeof(struct mac_stx));
3593 estats->brb_drop_hi = pstats->brb_drop_hi;
3594 estats->brb_drop_lo = pstats->brb_drop_lo;
3596 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3601 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3603 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3604 int cl_id = BP_CL_ID(bp);
3605 struct tstorm_per_port_stats *tport =
3606 &stats->tstorm_common.port_statistics;
3607 struct tstorm_per_client_stats *tclient =
3608 &stats->tstorm_common.client_statistics[cl_id];
3609 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3610 struct xstorm_per_client_stats *xclient =
3611 &stats->xstorm_common.client_statistics[cl_id];
3612 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3613 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3614 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3617 /* are storm stats valid? */
3618 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3619 bp->stats_counter) {
3620 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3621 " tstorm counter (%d) != stats_counter (%d)\n",
3622 tclient->stats_counter, bp->stats_counter);
3625 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3626 bp->stats_counter) {
3627 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3628 " xstorm counter (%d) != stats_counter (%d)\n",
3629 xclient->stats_counter, bp->stats_counter);
3633 fstats->total_bytes_received_hi =
3634 fstats->valid_bytes_received_hi =
3635 le32_to_cpu(tclient->total_rcv_bytes.hi);
3636 fstats->total_bytes_received_lo =
3637 fstats->valid_bytes_received_lo =
3638 le32_to_cpu(tclient->total_rcv_bytes.lo);
3640 estats->error_bytes_received_hi =
3641 le32_to_cpu(tclient->rcv_error_bytes.hi);
3642 estats->error_bytes_received_lo =
3643 le32_to_cpu(tclient->rcv_error_bytes.lo);
3644 ADD_64(estats->error_bytes_received_hi,
3645 estats->rx_stat_ifhcinbadoctets_hi,
3646 estats->error_bytes_received_lo,
3647 estats->rx_stat_ifhcinbadoctets_lo);
3649 ADD_64(fstats->total_bytes_received_hi,
3650 estats->error_bytes_received_hi,
3651 fstats->total_bytes_received_lo,
3652 estats->error_bytes_received_lo);
3654 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3655 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3656 total_multicast_packets_received);
3657 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3658 total_broadcast_packets_received);
3660 fstats->total_bytes_transmitted_hi =
3661 le32_to_cpu(xclient->total_sent_bytes.hi);
3662 fstats->total_bytes_transmitted_lo =
3663 le32_to_cpu(xclient->total_sent_bytes.lo);
3665 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3666 total_unicast_packets_transmitted);
3667 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3668 total_multicast_packets_transmitted);
3669 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3670 total_broadcast_packets_transmitted);
3672 memcpy(estats, &(fstats->total_bytes_received_hi),
3673 sizeof(struct host_func_stats) - 2*sizeof(u32));
3675 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3676 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3677 estats->brb_truncate_discard =
3678 le32_to_cpu(tport->brb_truncate_discard);
3679 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3681 old_tclient->rcv_unicast_bytes.hi =
3682 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3683 old_tclient->rcv_unicast_bytes.lo =
3684 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3685 old_tclient->rcv_broadcast_bytes.hi =
3686 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3687 old_tclient->rcv_broadcast_bytes.lo =
3688 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3689 old_tclient->rcv_multicast_bytes.hi =
3690 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3691 old_tclient->rcv_multicast_bytes.lo =
3692 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3693 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3695 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3696 old_tclient->packets_too_big_discard =
3697 le32_to_cpu(tclient->packets_too_big_discard);
3698 estats->no_buff_discard =
3699 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3700 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3702 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3703 old_xclient->unicast_bytes_sent.hi =
3704 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3705 old_xclient->unicast_bytes_sent.lo =
3706 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3707 old_xclient->multicast_bytes_sent.hi =
3708 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3709 old_xclient->multicast_bytes_sent.lo =
3710 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3711 old_xclient->broadcast_bytes_sent.hi =
3712 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3713 old_xclient->broadcast_bytes_sent.lo =
3714 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3716 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
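/* bnx2x_net_stats_update() folds the accumulated eth_stats into the
 * standard struct net_device_stats counters seen by the stack; rx_errors
 * and tx_errors are simple sums of the individual error classes computed
 * there. */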
3721 static void bnx2x_net_stats_update(struct bnx2x *bp)
3723 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3724 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3725 struct net_device_stats *nstats = &bp->dev->stats;
3727 nstats->rx_packets =
3728 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3729 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3730 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3732 nstats->tx_packets =
3733 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3734 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3735 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3737 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3739 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3741 nstats->rx_dropped = old_tclient->checksum_discard +
3742 estats->mac_discard;
3743 nstats->tx_dropped = 0;
3746 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3748 nstats->collisions =
3749 estats->tx_stat_dot3statssinglecollisionframes_lo +
3750 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3751 estats->tx_stat_dot3statslatecollisions_lo +
3752 estats->tx_stat_dot3statsexcessivecollisions_lo;
3754 estats->jabber_packets_received =
3755 old_tclient->packets_too_big_discard +
3756 estats->rx_stat_dot3statsframestoolong_lo;
3758 nstats->rx_length_errors =
3759 estats->rx_stat_etherstatsundersizepkts_lo +
3760 estats->jabber_packets_received;
3761 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3762 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3763 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3764 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3765 nstats->rx_missed_errors = estats->xxoverflow_discard;
3767 nstats->rx_errors = nstats->rx_length_errors +
3768 nstats->rx_over_errors +
3769 nstats->rx_crc_errors +
3770 nstats->rx_frame_errors +
3771 nstats->rx_fifo_errors +
3772 nstats->rx_missed_errors;
3774 nstats->tx_aborted_errors =
3775 estats->tx_stat_dot3statslatecollisions_lo +
3776 estats->tx_stat_dot3statsexcessivecollisions_lo;
3777 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3778 nstats->tx_fifo_errors = 0;
3779 nstats->tx_heartbeat_errors = 0;
3780 nstats->tx_window_errors = 0;
3782 nstats->tx_errors = nstats->tx_aborted_errors +
3783 nstats->tx_carrier_errors;
3786 static void bnx2x_stats_update(struct bnx2x *bp)
3788 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3791 if (*stats_comp != DMAE_COMP_VAL)
3795 update = (bnx2x_hw_stats_update(bp) == 0);
3797 update |= (bnx2x_storm_stats_update(bp) == 0);
3800 bnx2x_net_stats_update(bp);
3803 if (bp->stats_pending) {
3804 bp->stats_pending++;
3805 if (bp->stats_pending == 3) {
3806 BNX2X_ERR("stats not updated for 3 times\n");
3813 if (bp->msglevel & NETIF_MSG_TIMER) {
3814 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3815 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3816 struct net_device_stats *nstats = &bp->dev->stats;
3819 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3820 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3822 bnx2x_tx_avail(bp->fp),
3823 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3824 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3826 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3827 bp->fp->rx_comp_cons),
3828 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3829 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3830 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3831 estats->driver_xoff, estats->brb_drop_lo);
3832 printk(KERN_DEBUG "tstats: checksum_discard %u "
3833 "packets_too_big_discard %u no_buff_discard %u "
3834 "mac_discard %u mac_filter_discard %u "
3835 "xxovrflow_discard %u brb_truncate_discard %u "
3836 "ttl0_discard %u\n",
3837 old_tclient->checksum_discard,
3838 old_tclient->packets_too_big_discard,
3839 old_tclient->no_buff_discard, estats->mac_discard,
3840 estats->mac_filter_discard, estats->xxoverflow_discard,
3841 estats->brb_truncate_discard,
3842 old_tclient->ttl0_discard);
3844 for_each_queue(bp, i) {
3845 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3846 bnx2x_fp(bp, i, tx_pkt),
3847 bnx2x_fp(bp, i, rx_pkt),
3848 bnx2x_fp(bp, i, rx_calls));
3852 bnx2x_hw_stats_post(bp);
3853 bnx2x_storm_stats_post(bp);
3856 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3858 struct dmae_command *dmae;
3860 int loader_idx = PMF_DMAE_C(bp);
3861 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3863 bp->executer_idx = 0;
3865 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3867 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3869 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3871 DMAE_CMD_ENDIANITY_DW_SWAP |
3873 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3874 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3876 if (bp->port.port_stx) {
3878 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3880 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3882 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3883 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3884 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3885 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3886 dmae->dst_addr_hi = 0;
3887 dmae->len = sizeof(struct host_port_stats) >> 2;
3889 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3890 dmae->comp_addr_hi = 0;
3893 dmae->comp_addr_lo =
3894 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3895 dmae->comp_addr_hi =
3896 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3897 dmae->comp_val = DMAE_COMP_VAL;
3905 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3906 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3907 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3908 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3909 dmae->dst_addr_lo = bp->func_stx >> 2;
3910 dmae->dst_addr_hi = 0;
3911 dmae->len = sizeof(struct host_func_stats) >> 2;
3912 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3913 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3914 dmae->comp_val = DMAE_COMP_VAL;
3920 static void bnx2x_stats_stop(struct bnx2x *bp)
3924 bnx2x_stats_comp(bp);
3927 update = (bnx2x_hw_stats_update(bp) == 0);
3929 update |= (bnx2x_storm_stats_update(bp) == 0);
3932 bnx2x_net_stats_update(bp);
3935 bnx2x_port_stats_stop(bp);
3937 bnx2x_hw_stats_post(bp);
3938 bnx2x_stats_comp(bp);
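/* What follows is the statistics state machine: bnx2x_stats_stm is indexed
 * by [current state][event]; each entry names the action to run and the
 * next state, with bnx2x_stats_do_nothing() as the no-op action. */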
3942 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3946 static const struct {
3947 void (*action)(struct bnx2x *bp);
3948 enum bnx2x_stats_state next_state;
3949 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3952 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3953 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3954 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3955 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3958 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3959 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3960 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3961 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3965 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3967 enum bnx2x_stats_state state = bp->stats_state;
3969 bnx2x_stats_stm[state][event].action(bp);
3970 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3972 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3973 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3974 state, event, bp->stats_state);
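/* Periodic driver timer: exchanges the heartbeat pulse with the MCP
 * firmware through shmem and feeds STATS_EVENT_UPDATE into the statistics
 * state machine while the device is up; in the polling (debug) path it
 * also services the first fastpath ring directly. */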
3977 static void bnx2x_timer(unsigned long data)
3979 struct bnx2x *bp = (struct bnx2x *) data;
3981 if (!netif_running(bp->dev))
3984 if (atomic_read(&bp->intr_sem) != 0)
3988 struct bnx2x_fastpath *fp = &bp->fp[0];
3991 bnx2x_tx_int(fp, 1000);
3992 rc = bnx2x_rx_int(fp, 1000);
3995 if (!BP_NOMCP(bp)) {
3996 int func = BP_FUNC(bp);
4000 ++bp->fw_drv_pulse_wr_seq;
4001 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4002 /* TBD - add SYSTEM_TIME */
4003 drv_pulse = bp->fw_drv_pulse_wr_seq;
4004 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4006 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4007 MCP_PULSE_SEQ_MASK);
4008 /* The delta between driver pulse and mcp response
4009 * should be 1 (before mcp response) or 0 (after mcp response) */
4011 if ((drv_pulse != mcp_pulse) &&
4012 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4013 /* someone lost a heartbeat... */
4014 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4015 drv_pulse, mcp_pulse);
4019 if ((bp->state == BNX2X_STATE_OPEN) ||
4020 (bp->state == BNX2X_STATE_DISABLED))
4021 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4024 mod_timer(&bp->timer, jiffies + bp->current_interval);
4027 /* end of Statistics */
4032 * nic init service functions
4035 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4037 int port = BP_PORT(bp);
4039 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4040 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4041 sizeof(struct ustorm_status_block)/4);
4042 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4043 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4044 sizeof(struct cstorm_status_block)/4);
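/* Programs the USTORM and CSTORM host addresses of a (non-default) status
 * block and starts with host coalescing disabled for every index; the
 * per-index timeouts are configured later by bnx2x_update_coalesce(). */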
4047 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4048 dma_addr_t mapping, int sb_id)
4050 int port = BP_PORT(bp);
4051 int func = BP_FUNC(bp);
4056 section = ((u64)mapping) + offsetof(struct host_status_block,
4058 sb->u_status_block.status_block_id = sb_id;
4060 REG_WR(bp, BAR_USTRORM_INTMEM +
4061 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4062 REG_WR(bp, BAR_USTRORM_INTMEM +
4063 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4065 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4066 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4068 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4069 REG_WR16(bp, BAR_USTRORM_INTMEM +
4070 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4073 section = ((u64)mapping) + offsetof(struct host_status_block,
4075 sb->c_status_block.status_block_id = sb_id;
4077 REG_WR(bp, BAR_CSTRORM_INTMEM +
4078 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4079 REG_WR(bp, BAR_CSTRORM_INTMEM +
4080 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4082 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4083 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4085 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4086 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4087 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4089 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4092 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4094 int func = BP_FUNC(bp);
4096 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4097 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4098 sizeof(struct ustorm_def_status_block)/4);
4099 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4100 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4101 sizeof(struct cstorm_def_status_block)/4);
4102 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4103 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4104 sizeof(struct xstorm_def_status_block)/4);
4105 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4106 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4107 sizeof(struct tstorm_def_status_block)/4);
4110 static void bnx2x_init_def_sb(struct bnx2x *bp,
4111 struct host_def_status_block *def_sb,
4112 dma_addr_t mapping, int sb_id)
4114 int port = BP_PORT(bp);
4115 int func = BP_FUNC(bp);
4116 int index, val, reg_offset;
4120 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4121 atten_status_block);
4122 def_sb->atten_status_block.status_block_id = sb_id;
4126 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4127 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4129 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4130 bp->attn_group[index].sig[0] = REG_RD(bp,
4131 reg_offset + 0x10*index);
4132 bp->attn_group[index].sig[1] = REG_RD(bp,
4133 reg_offset + 0x4 + 0x10*index);
4134 bp->attn_group[index].sig[2] = REG_RD(bp,
4135 reg_offset + 0x8 + 0x10*index);
4136 bp->attn_group[index].sig[3] = REG_RD(bp,
4137 reg_offset + 0xc + 0x10*index);
4140 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4141 HC_REG_ATTN_MSG0_ADDR_L);
4143 REG_WR(bp, reg_offset, U64_LO(section));
4144 REG_WR(bp, reg_offset + 4, U64_HI(section));
4146 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4148 val = REG_RD(bp, reg_offset);
4150 REG_WR(bp, reg_offset, val);
4153 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4154 u_def_status_block);
4155 def_sb->u_def_status_block.status_block_id = sb_id;
4157 REG_WR(bp, BAR_USTRORM_INTMEM +
4158 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4159 REG_WR(bp, BAR_USTRORM_INTMEM +
4160 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4162 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4163 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4165 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4166 REG_WR16(bp, BAR_USTRORM_INTMEM +
4167 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4170 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4171 c_def_status_block);
4172 def_sb->c_def_status_block.status_block_id = sb_id;
4174 REG_WR(bp, BAR_CSTRORM_INTMEM +
4175 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4176 REG_WR(bp, BAR_CSTRORM_INTMEM +
4177 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4179 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4180 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4182 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4183 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4184 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4187 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4188 t_def_status_block);
4189 def_sb->t_def_status_block.status_block_id = sb_id;
4191 REG_WR(bp, BAR_TSTRORM_INTMEM +
4192 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4193 REG_WR(bp, BAR_TSTRORM_INTMEM +
4194 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4196 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4197 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4199 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4200 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4201 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4204 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4205 x_def_status_block);
4206 def_sb->x_def_status_block.status_block_id = sb_id;
4208 REG_WR(bp, BAR_XSTRORM_INTMEM +
4209 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4210 REG_WR(bp, BAR_XSTRORM_INTMEM +
4211 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4213 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4214 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4216 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4217 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4218 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4220 bp->stats_pending = 0;
4221 bp->set_mac_pending = 0;
4223 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
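/* Program the interrupt coalescing timeouts for every queue: the Rx CQ/BD
 * indices live in the USTORM section and the Tx CQ index in the CSTORM
 * section.  When rx_ticks/tx_ticks is 0 the corresponding index is flagged
 * as disabled via the per-index HC_DISABLE word.
 */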
4226 static void bnx2x_update_coalesce(struct bnx2x *bp)
4228 int port = BP_PORT(bp);
4231 for_each_queue(bp, i) {
4232 int sb_id = bp->fp[i].sb_id;
4234 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4235 REG_WR8(bp, BAR_USTRORM_INTMEM +
4236 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4237 U_SB_ETH_RX_CQ_INDEX),
4239 REG_WR16(bp, BAR_USTRORM_INTMEM +
4240 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4241 U_SB_ETH_RX_CQ_INDEX),
4242 bp->rx_ticks ? 0 : 1);
4243 REG_WR16(bp, BAR_USTRORM_INTMEM +
4244 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4245 U_SB_ETH_RX_BD_INDEX),
4246 bp->rx_ticks ? 0 : 1);
4248 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4249 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4250 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4251 C_SB_ETH_TX_CQ_INDEX),
4253 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4254 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4255 C_SB_ETH_TX_CQ_INDEX),
4256 bp->tx_ticks ? 0 : 1);
4260 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4261 struct bnx2x_fastpath *fp, int last)
4265 for (i = 0; i < last; i++) {
4266 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4267 struct sk_buff *skb = rx_buf->skb;
4270 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4274 if (fp->tpa_state[i] == BNX2X_TPA_START)
4275 pci_unmap_single(bp->pdev,
4276 pci_unmap_addr(rx_buf, mapping),
4278 PCI_DMA_FROMDEVICE);
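/* Bring up the Rx rings for every queue: size the Rx buffers from the MTU
 * plus overhead and payload alignment, pre-allocate the TPA (LRO) skb pool
 * when TPA is enabled, chain the "next page" entries of the SGE, BD and CQE
 * rings, fill the rings with buffers and publish the initial producers to
 * the chip.  On E1 the CQ address is also written to the USTORM memory
 * workaround offset.
 */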
4285 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4287 int func = BP_FUNC(bp);
4288 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4289 ETH_MAX_AGGREGATION_QUEUES_E1H;
4290 u16 ring_prod, cqe_ring_prod;
4293 bp->rx_buf_size = bp->dev->mtu;
4294 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4295 BCM_RX_ETH_PAYLOAD_ALIGN;
4297 if (bp->flags & TPA_ENABLE_FLAG) {
4299 "rx_buf_size %d effective_mtu %d\n",
4300 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4302 for_each_queue(bp, j) {
4303 struct bnx2x_fastpath *fp = &bp->fp[j];
4305 for (i = 0; i < max_agg_queues; i++) {
4306 fp->tpa_pool[i].skb =
4307 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4308 if (!fp->tpa_pool[i].skb) {
4309 BNX2X_ERR("Failed to allocate TPA "
4310 "skb pool for queue[%d] - "
4311 "disabling TPA on this "
4313 bnx2x_free_tpa_pool(bp, fp, i);
4314 fp->disable_tpa = 1;
4317 pci_unmap_addr_set((struct sw_rx_bd *)
4318 &bp->fp->tpa_pool[i],
4320 fp->tpa_state[i] = BNX2X_TPA_STOP;
4325 for_each_queue(bp, j) {
4326 struct bnx2x_fastpath *fp = &bp->fp[j];
4329 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4330 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4332 /* "next page" elements initialization */
4334 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4335 struct eth_rx_sge *sge;
4337 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4339 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4340 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4342 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4343 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4346 bnx2x_init_sge_ring_bit_mask(fp);
4349 for (i = 1; i <= NUM_RX_RINGS; i++) {
4350 struct eth_rx_bd *rx_bd;
4352 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4354 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4355 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4357 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4358 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4362 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4363 struct eth_rx_cqe_next_page *nextpg;
4365 nextpg = (struct eth_rx_cqe_next_page *)
4366 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4368 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4369 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4371 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4372 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4375 /* Allocate SGEs and initialize the ring elements */
4376 for (i = 0, ring_prod = 0;
4377 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4379 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4380 BNX2X_ERR("was only able to allocate "
4382 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4383 /* Cleanup already allocated elements */
4384 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4385 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4386 fp->disable_tpa = 1;
4390 ring_prod = NEXT_SGE_IDX(ring_prod);
4392 fp->rx_sge_prod = ring_prod;
4394 /* Allocate BDs and initialize BD ring */
4395 fp->rx_comp_cons = 0;
4396 cqe_ring_prod = ring_prod = 0;
4397 for (i = 0; i < bp->rx_ring_size; i++) {
4398 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4399 BNX2X_ERR("was only able to allocate "
4401 bp->eth_stats.rx_skb_alloc_failed++;
4404 ring_prod = NEXT_RX_IDX(ring_prod);
4405 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4406 WARN_ON(ring_prod <= i);
4409 fp->rx_bd_prod = ring_prod;
4410 /* must not have more available CQEs than BDs */
4411 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4413 fp->rx_pkt = fp->rx_calls = 0;
4416 * this will generate an interrupt (to the TSTORM);
4417 * it must only be done after the chip is initialized
4419 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4424 REG_WR(bp, BAR_USTRORM_INTMEM +
4425 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4426 U64_LO(fp->rx_comp_mapping));
4427 REG_WR(bp, BAR_USTRORM_INTMEM +
4428 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4429 U64_HI(fp->rx_comp_mapping));
4433 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4437 for_each_queue(bp, j) {
4438 struct bnx2x_fastpath *fp = &bp->fp[j];
4440 for (i = 1; i <= NUM_TX_RINGS; i++) {
4441 struct eth_tx_bd *tx_bd =
4442 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4445 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4446 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4448 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4449 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4452 fp->tx_pkt_prod = 0;
4453 fp->tx_pkt_cons = 0;
4456 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
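/* Slowpath queue (SPQ) init: reset the producer index, point the producer
 * at the first BD and tell the XSTORM where the SPQ page lives.
 */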
4461 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4463 int func = BP_FUNC(bp);
4465 spin_lock_init(&bp->spq_lock);
4467 bp->spq_left = MAX_SPQ_PENDING;
4468 bp->spq_prod_idx = 0;
4469 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4470 bp->spq_prod_bd = bp->spq;
4471 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4473 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4474 U64_LO(bp->spq_mapping));
4476 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4477 U64_HI(bp->spq_mapping));
4479 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
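/* Fill the per-connection Ethernet context for each queue: Tx BD page and
 * doorbell addresses in the XSTORM section, Rx BD/SGE page addresses,
 * client id and alignment in the USTORM section, the Tx CQ index in the
 * CSTORM section, and the CDU validation words.
 */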
4483 static void bnx2x_init_context(struct bnx2x *bp)
4487 for_each_queue(bp, i) {
4488 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4489 struct bnx2x_fastpath *fp = &bp->fp[i];
4490 u8 sb_id = FP_SB_ID(fp);
4492 context->xstorm_st_context.tx_bd_page_base_hi =
4493 U64_HI(fp->tx_desc_mapping);
4494 context->xstorm_st_context.tx_bd_page_base_lo =
4495 U64_LO(fp->tx_desc_mapping);
4496 context->xstorm_st_context.db_data_addr_hi =
4497 U64_HI(fp->tx_prods_mapping);
4498 context->xstorm_st_context.db_data_addr_lo =
4499 U64_LO(fp->tx_prods_mapping);
4500 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4501 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4503 context->ustorm_st_context.common.sb_index_numbers =
4504 BNX2X_RX_SB_INDEX_NUM;
4505 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4506 context->ustorm_st_context.common.status_block_id = sb_id;
4507 context->ustorm_st_context.common.flags =
4508 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4509 context->ustorm_st_context.common.mc_alignment_size =
4510 BCM_RX_ETH_PAYLOAD_ALIGN;
4511 context->ustorm_st_context.common.bd_buff_size =
4513 context->ustorm_st_context.common.bd_page_base_hi =
4514 U64_HI(fp->rx_desc_mapping);
4515 context->ustorm_st_context.common.bd_page_base_lo =
4516 U64_LO(fp->rx_desc_mapping);
4517 if (!fp->disable_tpa) {
4518 context->ustorm_st_context.common.flags |=
4519 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4520 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4521 context->ustorm_st_context.common.sge_buff_size =
4522 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4523 context->ustorm_st_context.common.sge_page_base_hi =
4524 U64_HI(fp->rx_sge_mapping);
4525 context->ustorm_st_context.common.sge_page_base_lo =
4526 U64_LO(fp->rx_sge_mapping);
4529 context->cstorm_st_context.sb_index_number =
4530 C_SB_ETH_TX_CQ_INDEX;
4531 context->cstorm_st_context.status_block_id = sb_id;
4533 context->xstorm_ag_context.cdu_reserved =
4534 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4535 CDU_REGION_NUMBER_XCM_AG,
4536 ETH_CONNECTION_TYPE);
4537 context->ustorm_ag_context.cdu_usage =
4538 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4539 CDU_REGION_NUMBER_UCM_AG,
4540 ETH_CONNECTION_TYPE);
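/* Write the RSS indirection table into the TSTORM internal memory,
 * spreading the entries round-robin over the client ids of the enabled
 * queues.
 */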
4544 static void bnx2x_init_ind_table(struct bnx2x *bp)
4546 int func = BP_FUNC(bp);
4552 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4553 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4554 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4555 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4556 BP_CL_ID(bp) + (i % bp->num_queues));
4559 static void bnx2x_set_client_config(struct bnx2x *bp)
4561 struct tstorm_eth_client_config tstorm_client = {0};
4562 int port = BP_PORT(bp);
4565 tstorm_client.mtu = bp->dev->mtu;
4566 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4567 tstorm_client.config_flags =
4568 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4570 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4571 tstorm_client.config_flags |=
4572 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4573 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4577 if (bp->flags & TPA_ENABLE_FLAG) {
4578 tstorm_client.max_sges_for_packet =
4579 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4580 tstorm_client.max_sges_for_packet =
4581 ((tstorm_client.max_sges_for_packet +
4582 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4583 PAGES_PER_SGE_SHIFT;
4585 tstorm_client.config_flags |=
4586 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4589 for_each_queue(bp, i) {
4590 REG_WR(bp, BAR_TSTRORM_INTMEM +
4591 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4592 ((u32 *)&tstorm_client)[0]);
4593 REG_WR(bp, BAR_TSTRORM_INTMEM +
4594 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4595 ((u32 *)&tstorm_client)[1]);
4598 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4599 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
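/* Translate the driver Rx mode (none/normal/allmulti/promisc) into the
 * TSTORM MAC filter drop/accept masks for this function and, unless Rx is
 * fully disabled, refresh the per-client configuration as well.
 */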
4602 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4604 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4605 int mode = bp->rx_mode;
4606 int mask = (1 << BP_L_ID(bp));
4607 int func = BP_FUNC(bp);
4610 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4613 case BNX2X_RX_MODE_NONE: /* no Rx */
4614 tstorm_mac_filter.ucast_drop_all = mask;
4615 tstorm_mac_filter.mcast_drop_all = mask;
4616 tstorm_mac_filter.bcast_drop_all = mask;
4618 case BNX2X_RX_MODE_NORMAL:
4619 tstorm_mac_filter.bcast_accept_all = mask;
4621 case BNX2X_RX_MODE_ALLMULTI:
4622 tstorm_mac_filter.mcast_accept_all = mask;
4623 tstorm_mac_filter.bcast_accept_all = mask;
4625 case BNX2X_RX_MODE_PROMISC:
4626 tstorm_mac_filter.ucast_accept_all = mask;
4627 tstorm_mac_filter.mcast_accept_all = mask;
4628 tstorm_mac_filter.bcast_accept_all = mask;
4631 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4635 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4636 REG_WR(bp, BAR_TSTRORM_INTMEM +
4637 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4638 ((u32 *)&tstorm_mac_filter)[i]);
4640 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4641 ((u32 *)&tstorm_mac_filter)[i]); */
4644 if (mode != BNX2X_RX_MODE_NONE)
4645 bnx2x_set_client_config(bp);
4648 static void bnx2x_init_internal_common(struct bnx2x *bp)
4652 if (bp->flags & TPA_ENABLE_FLAG) {
4653 struct tstorm_eth_tpa_exist tpa = {0};
4657 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4659 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4663 /* Zero this manually as its initialization is
4664 currently missing in the initTool */
4665 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4666 REG_WR(bp, BAR_USTRORM_INTMEM +
4667 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4670 static void bnx2x_init_internal_port(struct bnx2x *bp)
4672 int port = BP_PORT(bp);
4674 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4675 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4676 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4677 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
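/* Per-function internal RAM init: RSS configuration, Rx mode (kept at NONE
 * until the link is up), zeroing of the per-client XSTORM/TSTORM statistics,
 * statistics flags and query addresses, E1H multi-function settings and the
 * per-queue CQ page base and maximal aggregation size.
 */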
4680 static void bnx2x_init_internal_func(struct bnx2x *bp)
4682 struct tstorm_eth_function_common_config tstorm_config = {0};
4683 struct stats_indication_flags stats_flags = {0};
4684 int port = BP_PORT(bp);
4685 int func = BP_FUNC(bp);
4690 tstorm_config.config_flags = MULTI_FLAGS;
4691 tstorm_config.rss_result_mask = MULTI_MASK;
4694 tstorm_config.leading_client_id = BP_L_ID(bp);
4696 REG_WR(bp, BAR_TSTRORM_INTMEM +
4697 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4698 (*(u32 *)&tstorm_config));
4700 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4701 bnx2x_set_storm_rx_mode(bp);
4703 /* reset xstorm per client statistics */
4704 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4705 REG_WR(bp, BAR_XSTRORM_INTMEM +
4706 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4709 /* reset tstorm per client statistics */
4710 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4711 REG_WR(bp, BAR_TSTRORM_INTMEM +
4712 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4716 /* Init statistics related context */
4717 stats_flags.collect_eth = 1;
4719 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4720 ((u32 *)&stats_flags)[0]);
4721 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4722 ((u32 *)&stats_flags)[1]);
4724 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4725 ((u32 *)&stats_flags)[0]);
4726 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4727 ((u32 *)&stats_flags)[1]);
4729 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4730 ((u32 *)&stats_flags)[0]);
4731 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4732 ((u32 *)&stats_flags)[1]);
4734 REG_WR(bp, BAR_XSTRORM_INTMEM +
4735 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4736 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4737 REG_WR(bp, BAR_XSTRORM_INTMEM +
4738 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4739 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4741 REG_WR(bp, BAR_TSTRORM_INTMEM +
4742 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4743 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4744 REG_WR(bp, BAR_TSTRORM_INTMEM +
4745 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4746 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4748 if (CHIP_IS_E1H(bp)) {
4749 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4751 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4753 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4755 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4758 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4762 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4764 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4765 SGE_PAGE_SIZE * PAGES_PER_SGE),
4767 for_each_queue(bp, i) {
4768 struct bnx2x_fastpath *fp = &bp->fp[i];
4770 REG_WR(bp, BAR_USTRORM_INTMEM +
4771 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4772 U64_LO(fp->rx_comp_mapping));
4773 REG_WR(bp, BAR_USTRORM_INTMEM +
4774 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4775 U64_HI(fp->rx_comp_mapping));
4777 REG_WR16(bp, BAR_USTRORM_INTMEM +
4778 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4783 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4785 switch (load_code) {
4786 case FW_MSG_CODE_DRV_LOAD_COMMON:
4787 bnx2x_init_internal_common(bp);
4790 case FW_MSG_CODE_DRV_LOAD_PORT:
4791 bnx2x_init_internal_port(bp);
4794 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4795 bnx2x_init_internal_func(bp);
4799 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4804 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4808 for_each_queue(bp, i) {
4809 struct bnx2x_fastpath *fp = &bp->fp[i];
4812 fp->state = BNX2X_FP_STATE_CLOSED;
4814 fp->cl_id = BP_L_ID(bp) + i;
4815 fp->sb_id = fp->cl_id;
4817 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4818 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4819 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4821 bnx2x_update_fpsb_idx(fp);
4824 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4826 bnx2x_update_dsb_idx(bp);
4827 bnx2x_update_coalesce(bp);
4828 bnx2x_init_rx_rings(bp);
4829 bnx2x_init_tx_ring(bp);
4830 bnx2x_init_sp_ring(bp);
4831 bnx2x_init_context(bp);
4832 bnx2x_init_internal(bp, load_code);
4833 bnx2x_init_ind_table(bp);
4834 bnx2x_stats_init(bp);
4836 /* At this point, we are ready for interrupts */
4837 atomic_set(&bp->intr_sem, 0);
4839 /* flush all before enabling interrupts */
4843 bnx2x_int_enable(bp);
4846 /* end of nic init */
4849 * gzip service functions
4852 static int bnx2x_gunzip_init(struct bnx2x *bp)
4854 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4855 &bp->gunzip_mapping);
4856 if (bp->gunzip_buf == NULL)
4859 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4860 if (bp->strm == NULL)
4863 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4865 if (bp->strm->workspace == NULL)
4875 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4876 bp->gunzip_mapping);
4877 bp->gunzip_buf = NULL;
4880 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4881 " decompression\n", bp->dev->name);
4885 static void bnx2x_gunzip_end(struct bnx2x *bp)
4887 kfree(bp->strm->workspace);
4892 if (bp->gunzip_buf) {
4893 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4894 bp->gunzip_mapping);
4895 bp->gunzip_buf = NULL;
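/* Decompress a gzip image into the pre-allocated gunzip buffer: validate
 * the gzip magic, skip the optional file-name field, then run a raw zlib
 * inflate (negative window bits) and report the output length in 32-bit
 * words.
 */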
4899 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4903 /* check gzip header */
4904 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4911 if (zbuf[3] & FNAME)
4912 while ((zbuf[n++] != 0) && (n < len));
4914 bp->strm->next_in = zbuf + n;
4915 bp->strm->avail_in = len - n;
4916 bp->strm->next_out = bp->gunzip_buf;
4917 bp->strm->avail_out = FW_BUF_SIZE;
4919 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4923 rc = zlib_inflate(bp->strm, Z_FINISH);
4924 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4925 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4926 bp->dev->name, bp->strm->msg);
4928 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4929 if (bp->gunzip_outlen & 0x3)
4930 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4931 " gunzip_outlen (%d) not aligned\n",
4932 bp->dev->name, bp->gunzip_outlen);
4933 bp->gunzip_outlen >>= 2;
4935 zlib_inflateEnd(bp->strm);
4937 if (rc == Z_STREAM_END)
4943 /* nic load/unload */
4946 * General service functions
4949 /* send a NIG loopback debug packet */
4950 static void bnx2x_lb_pckt(struct bnx2x *bp)
4954 /* Ethernet source and destination addresses */
4955 wb_write[0] = 0x55555555;
4956 wb_write[1] = 0x55555555;
4957 wb_write[2] = 0x20; /* SOP */
4958 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4960 /* NON-IP protocol */
4961 wb_write[0] = 0x09000000;
4962 wb_write[1] = 0x55555555;
4963 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4964 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4967 /* some of the internal memories
4968 * are not directly readable from the driver,
4969 * so to test them we send debug packets
4971 static int bnx2x_int_mem_test(struct bnx2x *bp)
4977 if (CHIP_REV_IS_FPGA(bp))
4979 else if (CHIP_REV_IS_EMUL(bp))
4984 DP(NETIF_MSG_HW, "start part1\n");
4986 /* Disable inputs of parser neighbor blocks */
4987 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4988 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4989 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4990 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4992 /* Write 0 to parser credits for CFC search request */
4993 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4995 /* send Ethernet packet */
4998 /* TODO: should the NIG statistics be reset here? */
4999 /* Wait until NIG register shows 1 packet of size 0x10 */
5000 count = 1000 * factor;
5003 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5004 val = *bnx2x_sp(bp, wb_data[0]);
5012 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5016 /* Wait until PRS register shows 1 packet */
5017 count = 1000 * factor;
5019 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5027 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5031 /* Reset and init BRB, PRS */
5032 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5034 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5036 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5037 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5039 DP(NETIF_MSG_HW, "part2\n");
5041 /* Disable inputs of parser neighbor blocks */
5042 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5043 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5044 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5045 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5047 /* Write 0 to parser credits for CFC search request */
5048 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5050 /* send 10 Ethernet packets */
5051 for (i = 0; i < 10; i++)
5054 /* Wait until NIG register shows 10 + 1
5055 packets of size 11*0x10 = 0xb0 */
5056 count = 1000 * factor;
5059 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5060 val = *bnx2x_sp(bp, wb_data[0]);
5068 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5072 /* Wait until PRS register shows 2 packets */
5073 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5075 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5077 /* Write 1 to parser credits for CFC search request */
5078 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5080 /* Wait until PRS register shows 3 packets */
5081 msleep(10 * factor);
5082 /* Wait until NIG register shows 1 packet of size 0x10 */
5083 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5085 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5087 /* clear NIG EOP FIFO */
5088 for (i = 0; i < 11; i++)
5089 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5090 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5092 BNX2X_ERR("clear of NIG failed\n");
5096 /* Reset and init BRB, PRS, NIG */
5097 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5099 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5101 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5102 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5105 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5108 /* Enable inputs of parser neighbor blocks */
5109 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5110 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5111 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5112 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5114 DP(NETIF_MSG_HW, "done\n");
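/* Unmask the attention interrupts of the HW blocks (a mask value of 0
 * enables all attentions); PBF keeps bits 3-4 masked.
 */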
5119 static void enable_blocks_attention(struct bnx2x *bp)
5121 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5122 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5123 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5124 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5125 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5126 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5127 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5128 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5129 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5130 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5131 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5132 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5133 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5134 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5135 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5136 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5137 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5138 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5139 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5140 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5141 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5142 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5143 if (CHIP_REV_IS_FPGA(bp))
5144 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5146 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5147 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5148 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5149 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5150 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5151 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5152 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5153 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5154 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5155 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5159 static void bnx2x_reset_common(struct bnx2x *bp)
5162 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5164 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
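/* Chip-wide init, executed only by the function that got LOAD_COMMON from
 * the MCP: take the blocks out of reset, init PXP/PXP2 (including the
 * endianness swap modes), zero the storm internal memories, init the
 * searcher, CDU, CFC and HC blocks, run the internal memory self test on
 * E1, set up the fan-failure SPIO on the affected boards and do the common
 * PHY init.
 */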
5167 static int bnx2x_init_common(struct bnx2x *bp)
5171 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5173 bnx2x_reset_common(bp);
5174 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5175 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5177 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5178 if (CHIP_IS_E1H(bp))
5179 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5181 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5183 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5185 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5186 if (CHIP_IS_E1(bp)) {
5187 /* enable HW interrupt from PXP on USDM overflow
5188 bit 16 on INT_MASK_0 */
5189 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5192 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5196 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5197 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5198 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5199 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5200 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5202 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5203 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5204 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5205 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5206 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5209 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5211 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5212 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5213 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5216 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5217 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5219 /* let the HW do its magic ... */
5221 /* finish PXP init */
5222 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5224 BNX2X_ERR("PXP2 CFG failed\n");
5227 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5229 BNX2X_ERR("PXP2 RD_INIT failed\n");
5233 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5234 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5236 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5238 /* clean the DMAE memory */
5240 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5242 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5243 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5244 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5245 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5247 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5248 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5249 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5250 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5252 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5253 /* soft reset pulse */
5254 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5255 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5258 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5261 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5262 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5263 if (!CHIP_REV_IS_SLOW(bp)) {
5264 /* enable hw interrupt from doorbell Q */
5265 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5268 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5269 if (CHIP_REV_IS_SLOW(bp)) {
5270 /* fix for emulation and FPGA for no pause */
5271 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5272 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5273 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5274 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5277 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5278 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5280 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5281 if (CHIP_IS_E1H(bp))
5282 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5284 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5285 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5286 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5287 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5289 if (CHIP_IS_E1H(bp)) {
5290 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5291 STORM_INTMEM_SIZE_E1H/2);
5293 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5294 0, STORM_INTMEM_SIZE_E1H/2);
5295 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5296 STORM_INTMEM_SIZE_E1H/2);
5298 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5299 0, STORM_INTMEM_SIZE_E1H/2);
5300 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5301 STORM_INTMEM_SIZE_E1H/2);
5303 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5304 0, STORM_INTMEM_SIZE_E1H/2);
5305 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5306 STORM_INTMEM_SIZE_E1H/2);
5308 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5309 0, STORM_INTMEM_SIZE_E1H/2);
5311 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5312 STORM_INTMEM_SIZE_E1);
5313 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5314 STORM_INTMEM_SIZE_E1);
5315 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5316 STORM_INTMEM_SIZE_E1);
5317 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5318 STORM_INTMEM_SIZE_E1);
5321 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5322 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5323 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5324 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5327 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5329 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5332 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5333 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5334 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5336 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5337 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5338 REG_WR(bp, i, 0xc0cac01a);
5339 /* TODO: replace with something meaningful */
5341 if (CHIP_IS_E1H(bp))
5342 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5343 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5345 if (sizeof(union cdu_context) != 1024)
5346 /* we currently assume that a context is 1024 bytes */
5347 printk(KERN_ALERT PFX "please adjust the size of"
5348 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5350 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5351 val = (4 << 24) + (0 << 12) + 1024;
5352 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5353 if (CHIP_IS_E1(bp)) {
5354 /* !!! fix pxp client credit until excel update */
5355 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5356 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5359 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5360 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5362 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5363 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5365 /* PXPCS COMMON comes here */
5366 /* Reset PCIE errors for debug */
5367 REG_WR(bp, 0x2814, 0xffffffff);
5368 REG_WR(bp, 0x3820, 0xffffffff);
5370 /* EMAC0 COMMON comes here */
5371 /* EMAC1 COMMON comes here */
5372 /* DBU COMMON comes here */
5373 /* DBG COMMON comes here */
5375 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5376 if (CHIP_IS_E1H(bp)) {
5377 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5378 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5381 if (CHIP_REV_IS_SLOW(bp))
5384 /* finish CFC init */
5385 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5387 BNX2X_ERR("CFC LL_INIT failed\n");
5390 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5392 BNX2X_ERR("CFC AC_INIT failed\n");
5395 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5397 BNX2X_ERR("CFC CAM_INIT failed\n");
5400 REG_WR(bp, CFC_REG_DEBUG0, 0);
5402 /* read NIG statistic
5403 to see if this is our first time up since power-up */
5404 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5405 val = *bnx2x_sp(bp, wb_data[0]);
5407 /* do internal memory self test */
5408 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5409 BNX2X_ERR("internal mem self test failed\n");
5413 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5414 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5415 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5416 /* Fan failure is indicated by SPIO 5 */
5417 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5418 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5420 /* set to active low mode */
5421 val = REG_RD(bp, MISC_REG_SPIO_INT);
5422 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5423 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5424 REG_WR(bp, MISC_REG_SPIO_INT, val);
5426 /* enable interrupt to signal the IGU */
5427 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5428 val |= (1 << MISC_REGISTERS_SPIO_5);
5429 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5436 /* clear PXP2 attentions */
5437 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5439 enable_blocks_attention(bp);
5441 if (!BP_NOMCP(bp)) {
5442 bnx2x_acquire_phy_lock(bp);
5443 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5444 bnx2x_release_phy_lock(bp);
5446 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5451 static int bnx2x_init_port(struct bnx2x *bp)
5453 int port = BP_PORT(bp);
5456 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5458 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5460 /* Port PXP comes here */
5461 /* Port PXP2 comes here */
5466 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5467 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5468 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5469 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5474 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5475 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5476 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5477 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5482 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5483 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5484 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5485 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5487 /* Port CMs come here */
5489 /* Port QM comes here */
5491 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5492 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5494 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5495 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5497 /* Port DQ comes here */
5498 /* Port BRB1 comes here */
5499 /* Port PRS comes here */
5500 /* Port TSDM comes here */
5501 /* Port CSDM comes here */
5502 /* Port USDM comes here */
5503 /* Port XSDM comes here */
5504 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5505 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5506 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5507 port ? USEM_PORT1_END : USEM_PORT0_END);
5508 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5509 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5510 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5511 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5512 /* Port UPB comes here */
5513 /* Port XPB comes here */
5515 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5516 port ? PBF_PORT1_END : PBF_PORT0_END);
5518 /* configure PBF to work without PAUSE, MTU 9000 */
5519 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5521 /* update threshold */
5522 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5523 /* update init credit */
5524 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5527 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5529 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5532 /* tell the searcher where the T2 table is */
5533 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5535 wb_write[0] = U64_LO(bp->t2_mapping);
5536 wb_write[1] = U64_HI(bp->t2_mapping);
5537 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5538 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5539 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5540 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5542 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5543 /* Port SRCH comes here */
5545 /* Port CDU comes here */
5546 /* Port CFC comes here */
5548 if (CHIP_IS_E1(bp)) {
5549 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5550 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5552 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5553 port ? HC_PORT1_END : HC_PORT0_END);
5555 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5556 MISC_AEU_PORT0_START,
5557 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5558 /* init aeu_mask_attn_func_0/1:
5559 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5560 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5561 * bits 4-7 are used for "per vn group attention" */
5562 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5563 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5565 /* Port PXPCS comes here */
5566 /* Port EMAC0 comes here */
5567 /* Port EMAC1 comes here */
5568 /* Port DBU comes here */
5569 /* Port DBG comes here */
5570 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5571 port ? NIG_PORT1_END : NIG_PORT0_END);
5573 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5575 if (CHIP_IS_E1H(bp)) {
5577 struct cmng_struct_per_port m_cmng_port;
5580 /* 0x2 disable e1hov, 0x1 enable */
5581 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5582 (IS_E1HMF(bp) ? 0x1 : 0x2));
5584 /* Init RATE SHAPING and FAIRNESS contexts.
5585 Initialize as if there is 10G link. */
5586 wsum = bnx2x_calc_vn_wsum(bp);
5587 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5589 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5590 bnx2x_init_vn_minmax(bp, 2*vn + port,
5591 wsum, 10000, &m_cmng_port);
5594 /* Port MCP comes here */
5595 /* Port DMAE comes here */
5597 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5598 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5599 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5600 /* add SPIO 5 to group 0 */
5601 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5602 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5603 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5610 bnx2x__link_reset(bp);
5615 #define ILT_PER_FUNC (768/2)
5616 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5617 /* the physical address is shifted right 12 bits and a valid bit (1)
5618 is added to the 53rd bit;
5619 then, since this is a wide register(TM),
5620 we split it into two 32-bit writes
5622 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5623 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5624 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5625 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5627 #define CNIC_ILT_LINES 0
5629 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5633 if (CHIP_IS_E1H(bp))
5634 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5636 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5638 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
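/* Per-function init: map the function's context page through the CDU ILT
 * line, on E1H enable the LLH for this function and program its outer VLAN
 * id, init the per-function HC ranges and clear the PCIE error registers.
 */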
5641 static int bnx2x_init_func(struct bnx2x *bp)
5643 int port = BP_PORT(bp);
5644 int func = BP_FUNC(bp);
5647 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5649 i = FUNC_ILT_BASE(func);
5651 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5652 if (CHIP_IS_E1H(bp)) {
5653 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5654 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5656 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5657 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5660 if (CHIP_IS_E1H(bp)) {
5661 for (i = 0; i < 9; i++)
5662 bnx2x_init_block(bp,
5663 cm_start[func][i], cm_end[func][i]);
5665 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5666 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5669 /* HC init per function */
5670 if (CHIP_IS_E1H(bp)) {
5671 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5673 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5674 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5676 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5678 if (CHIP_IS_E1H(bp))
5679 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5681 /* Reset PCIE errors for debug */
5682 REG_WR(bp, 0x2114, 0xffffffff);
5683 REG_WR(bp, 0x2120, 0xffffffff);
5688 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5692 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5693 BP_FUNC(bp), load_code);
5696 mutex_init(&bp->dmae_mutex);
5697 bnx2x_gunzip_init(bp);
5699 switch (load_code) {
5700 case FW_MSG_CODE_DRV_LOAD_COMMON:
5701 rc = bnx2x_init_common(bp);
5706 case FW_MSG_CODE_DRV_LOAD_PORT:
5708 rc = bnx2x_init_port(bp);
5713 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5715 rc = bnx2x_init_func(bp);
5721 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5725 if (!BP_NOMCP(bp)) {
5726 int func = BP_FUNC(bp);
5728 bp->fw_drv_pulse_wr_seq =
5729 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5730 DRV_PULSE_SEQ_MASK);
5731 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5732 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5733 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5737 /* this needs to be done before gunzip end */
5738 bnx2x_zero_def_sb(bp);
5739 for_each_queue(bp, i)
5740 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5743 bnx2x_gunzip_end(bp);
5748 /* send the MCP a request, block until there is a reply */
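/* The sequence number travels in the low bits of the mailbox header; the
 * driver polls fw_mb_header until the MCP echoes the same sequence back
 * (for up to ~2 seconds) and then returns the FW message code.
 */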
5749 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5751 int func = BP_FUNC(bp);
5752 u32 seq = ++bp->fw_seq;
5755 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5757 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5758 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5761 /* let the FW do its magic ... */
5764 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5766 /* Give the FW up to 2 seconds (200*10ms) */
5767 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5769 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5770 cnt*delay, rc, seq);
5772 /* is this a reply to our command? */
5773 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5774 rc &= FW_MSG_CODE_MASK;
5778 BNX2X_ERR("FW failed to respond!\n");
5786 static void bnx2x_free_mem(struct bnx2x *bp)
5789 #define BNX2X_PCI_FREE(x, y, size) \
5792 pci_free_consistent(bp->pdev, size, x, y); \
5798 #define BNX2X_FREE(x) \
5809 for_each_queue(bp, i) {
5812 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5813 bnx2x_fp(bp, i, status_blk_mapping),
5814 sizeof(struct host_status_block) +
5815 sizeof(struct eth_tx_db_data));
5817 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5818 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5819 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5820 bnx2x_fp(bp, i, tx_desc_mapping),
5821 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5823 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5824 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5825 bnx2x_fp(bp, i, rx_desc_mapping),
5826 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5828 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5829 bnx2x_fp(bp, i, rx_comp_mapping),
5830 sizeof(struct eth_fast_path_rx_cqe) *
5834 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5835 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5836 bnx2x_fp(bp, i, rx_sge_mapping),
5837 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5839 /* end of fastpath */
5841 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5842 sizeof(struct host_def_status_block));
5844 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5845 sizeof(struct bnx2x_slowpath));
5848 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5849 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5850 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5851 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5853 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5855 #undef BNX2X_PCI_FREE
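/* Allocate all the DMA and driver memory used by the device: per queue a
 * status block (with the Tx doorbell data appended to the same coherent
 * allocation) and the Tx/Rx/CQ/SGE rings, plus the default status block,
 * the slowpath buffer, the searcher T1/T2 tables, the timers and QM blocks
 * and the slowpath queue page.
 */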
5859 static int bnx2x_alloc_mem(struct bnx2x *bp)
5862 #define BNX2X_PCI_ALLOC(x, y, size) \
5864 x = pci_alloc_consistent(bp->pdev, size, y); \
5866 goto alloc_mem_err; \
5867 memset(x, 0, size); \
5870 #define BNX2X_ALLOC(x, size) \
5872 x = vmalloc(size); \
5874 goto alloc_mem_err; \
5875 memset(x, 0, size); \
5881 for_each_queue(bp, i) {
5882 bnx2x_fp(bp, i, bp) = bp;
5885 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5886 &bnx2x_fp(bp, i, status_blk_mapping),
5887 sizeof(struct host_status_block) +
5888 sizeof(struct eth_tx_db_data));
5890 bnx2x_fp(bp, i, hw_tx_prods) =
5891 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5893 bnx2x_fp(bp, i, tx_prods_mapping) =
5894 bnx2x_fp(bp, i, status_blk_mapping) +
5895 sizeof(struct host_status_block);
5897 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5898 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5899 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5900 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5901 &bnx2x_fp(bp, i, tx_desc_mapping),
5902 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5904 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5905 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5906 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5907 &bnx2x_fp(bp, i, rx_desc_mapping),
5908 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5910 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5911 &bnx2x_fp(bp, i, rx_comp_mapping),
5912 sizeof(struct eth_fast_path_rx_cqe) *
5916 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5917 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5918 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5919 &bnx2x_fp(bp, i, rx_sge_mapping),
5920 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5922 /* end of fastpath */
5924 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5925 sizeof(struct host_def_status_block));
5927 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5928 sizeof(struct bnx2x_slowpath));
5931 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5934 for (i = 0; i < 64*1024; i += 64) {
5935 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5936 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5939 /* allocate searcher T2 table
5940 we allocate 1/4 of alloc num for T2
5941 (which is not entered into the ILT) */
5942 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5945 for (i = 0; i < 16*1024; i += 64)
5946 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5948 /* now fixup the last line in the block to point to the next block */
5949 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5951 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5952 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5954 /* QM queues (128*MAX_CONN) */
5955 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5958 /* Slow path ring */
5959 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5967 #undef BNX2X_PCI_ALLOC
5971 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5975 for_each_queue(bp, i) {
5976 struct bnx2x_fastpath *fp = &bp->fp[i];
5978 u16 bd_cons = fp->tx_bd_cons;
5979 u16 sw_prod = fp->tx_pkt_prod;
5980 u16 sw_cons = fp->tx_pkt_cons;
5982 while (sw_cons != sw_prod) {
5983 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5989 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5993 for_each_queue(bp, j) {
5994 struct bnx2x_fastpath *fp = &bp->fp[j];
5996 for (i = 0; i < NUM_RX_BD; i++) {
5997 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5998 struct sk_buff *skb = rx_buf->skb;
6003 pci_unmap_single(bp->pdev,
6004 pci_unmap_addr(rx_buf, mapping),
6006 PCI_DMA_FROMDEVICE);
6011 if (!fp->disable_tpa)
6012 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6013 ETH_MAX_AGGREGATION_QUEUES_E1 :
6014 ETH_MAX_AGGREGATION_QUEUES_E1H);
6018 static void bnx2x_free_skbs(struct bnx2x *bp)
6020 bnx2x_free_tx_skbs(bp);
6021 bnx2x_free_rx_skbs(bp);
6024 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6028 free_irq(bp->msix_table[0].vector, bp->dev);
6029 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6030 bp->msix_table[0].vector);
6032 for_each_queue(bp, i) {
6033 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6034 "state %x\n", i, bp->msix_table[i + offset].vector,
6035 bnx2x_fp(bp, i, state));
6037 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6038 BNX2X_ERR("IRQ of fp #%d being freed while "
6039 "state != closed\n", i);
6041 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6045 static void bnx2x_free_irq(struct bnx2x *bp)
6047 if (bp->flags & USING_MSIX_FLAG) {
6048 bnx2x_free_msix_irqs(bp);
6049 pci_disable_msix(bp->pdev);
6050 bp->flags &= ~USING_MSIX_FLAG;
6053 free_irq(bp->pdev->irq, bp->dev);
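/* MSI-X vector table layout: entry 0 is the slowpath interrupt, the
 * following entries are the fastpath queues (their IGU vectors start at the
 * function's base id).  If pci_enable_msix() fails the caller falls back to
 * a single legacy INT#A interrupt.
 */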
6056 static int bnx2x_enable_msix(struct bnx2x *bp)
6060 bp->msix_table[0].entry = 0;
6062 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6064 for_each_queue(bp, i) {
6065 int igu_vec = offset + i + BP_L_ID(bp);
6067 bp->msix_table[i + offset].entry = igu_vec;
6068 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6069 "(fastpath #%u)\n", i + offset, igu_vec, i);
6072 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6073 bp->num_queues + offset);
6075 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6078 bp->flags |= USING_MSIX_FLAG;
6083 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6085 int i, rc, offset = 1;
6087 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6088 bp->dev->name, bp->dev);
6090 BNX2X_ERR("request sp irq failed\n");
6094 for_each_queue(bp, i) {
6095 rc = request_irq(bp->msix_table[i + offset].vector,
6096 bnx2x_msix_fp_int, 0,
6097 bp->dev->name, &bp->fp[i]);
6099 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6101 bnx2x_free_msix_irqs(bp);
6105 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6111 static int bnx2x_req_irq(struct bnx2x *bp)
6115 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6116 bp->dev->name, bp->dev);
6118 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6123 static void bnx2x_napi_enable(struct bnx2x *bp)
6127 for_each_queue(bp, i)
6128 napi_enable(&bnx2x_fp(bp, i, napi));
6131 static void bnx2x_napi_disable(struct bnx2x *bp)
6135 for_each_queue(bp, i)
6136 napi_disable(&bnx2x_fp(bp, i, napi));
6139 static void bnx2x_netif_start(struct bnx2x *bp)
6141 if (atomic_dec_and_test(&bp->intr_sem)) {
6142 if (netif_running(bp->dev)) {
6143 if (bp->state == BNX2X_STATE_OPEN)
6144 netif_wake_queue(bp->dev);
6145 bnx2x_napi_enable(bp);
6146 bnx2x_int_enable(bp);
6151 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6153 bnx2x_int_disable_sync(bp, disable_hw);
6154 bnx2x_napi_disable(bp);
6155 if (netif_running(bp->dev)) {
6156 netif_tx_disable(bp->dev);
6157 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6162 * Init service functions
6165 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6167 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6168 int port = BP_PORT(bp);
6171 * unicasts 0-31:port0 32-63:port1
6172 * multicast 64-127:port0 128-191:port1
6174 config->hdr.length_6b = 2;
6175 config->hdr.offset = port ? 32 : 0;
6176 config->hdr.client_id = BP_CL_ID(bp);
6177 config->hdr.reserved1 = 0;
6180 config->config_table[0].cam_entry.msb_mac_addr =
6181 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6182 config->config_table[0].cam_entry.middle_mac_addr =
6183 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6184 config->config_table[0].cam_entry.lsb_mac_addr =
6185 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6186 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6188 config->config_table[0].target_table_entry.flags = 0;
6190 CAM_INVALIDATE(config->config_table[0]);
6191 config->config_table[0].target_table_entry.client_id = 0;
6192 config->config_table[0].target_table_entry.vlan_id = 0;
6194 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6195 (set ? "setting" : "clearing"),
6196 config->config_table[0].cam_entry.msb_mac_addr,
6197 config->config_table[0].cam_entry.middle_mac_addr,
6198 config->config_table[0].cam_entry.lsb_mac_addr);
6201 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6202 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6203 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6204 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6206 config->config_table[1].target_table_entry.flags =
6207 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6209 CAM_INVALIDATE(config->config_table[1]);
6210 config->config_table[1].target_table_entry.client_id = 0;
6211 config->config_table[1].target_table_entry.vlan_id = 0;
6213 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6214 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6215 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6218 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6220 struct mac_configuration_cmd_e1h *config =
6221 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6223 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6224 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6228 /* CAM allocation for E1H
6229 * unicasts: by func number
6230 * multicast: 20+FUNC*20, 20 each
6232 config->hdr.length_6b = 1;
6233 config->hdr.offset = BP_FUNC(bp);
6234 config->hdr.client_id = BP_CL_ID(bp);
6235 config->hdr.reserved1 = 0;
6238 config->config_table[0].msb_mac_addr =
6239 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6240 config->config_table[0].middle_mac_addr =
6241 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6242 config->config_table[0].lsb_mac_addr =
6243 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6244 config->config_table[0].client_id = BP_L_ID(bp);
6245 config->config_table[0].vlan_id = 0;
6246 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6248 config->config_table[0].flags = BP_PORT(bp);
6250 config->config_table[0].flags =
6251 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6253 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6254 (set ? "setting" : "clearing"),
6255 config->config_table[0].msb_mac_addr,
6256 config->config_table[0].middle_mac_addr,
6257 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6259 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6260 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6261 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
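/* Wait (or, in poll mode, actively poll the Rx completion queue) for a
 * ramrod completion: the completion arrives as a CQE, the slowpath event
 * handler updates *state_p, and this helper spins until the expected state
 * is reached or a timeout expires.
 */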
6264 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6265 int *state_p, int poll)
6267 /* can take a while if any port is running */
6270 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6271 poll ? "polling" : "waiting", state, idx);
6276 bnx2x_rx_int(bp->fp, 10);
6277 /* if the index is different from 0,
6278 * the reply for some commands will
6279 * be on the non-default queue
6282 bnx2x_rx_int(&bp->fp[idx], 10);
6285 mb(); /* state is changed by bnx2x_sp_event() */
6286 if (*state_p == state)
6293 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6294 poll ? "polling" : "waiting", state, idx);
6295 #ifdef BNX2X_STOP_ON_ERROR
6302 static int bnx2x_setup_leading(struct bnx2x *bp)
6306 /* reset IGU state */
6307 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6310 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6312 /* Wait for completion */
6313 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6318 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6320 /* reset IGU state */
6321 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6324 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6325 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6327 /* Wait for completion */
6328 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6329 &(bp->fp[index].state), 0);
6332 static int bnx2x_poll(struct napi_struct *napi, int budget);
6333 static void bnx2x_set_rx_mode(struct net_device *dev);
6335 /* must be called with rtnl_lock */
6336 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6340 #ifdef BNX2X_STOP_ON_ERROR
6341 if (unlikely(bp->panic))
6345 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6351 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6352 /* user requested number */
6353 bp->num_queues = use_multi;
6356 bp->num_queues = min_t(u32, num_online_cpus(),
6362 "set number of queues to %d\n", bp->num_queues);
6364 /* if we can't use MSI-X we only need one fp,
6365 * so try to enable MSI-X with the requested number of fp's
6366 * and fallback to MSI or legacy INTx with one fp
6368 rc = bnx2x_enable_msix(bp);
6370 /* failed to enable MSI-X */
6373 BNX2X_ERR("Multi requested but failed"
6374 " to enable MSI-X\n");
6378 if (bnx2x_alloc_mem(bp))
6381 for_each_queue(bp, i)
6382 bnx2x_fp(bp, i, disable_tpa) =
6383 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6385 for_each_queue(bp, i)
6386 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6389 #ifdef BNX2X_STOP_ON_ERROR
6390 for_each_queue(bp, i) {
6391 struct bnx2x_fastpath *fp = &bp->fp[i];
6393 fp->poll_no_work = 0;
6395 fp->poll_max_calls = 0;
6396 fp->poll_complete = 0;
6400 bnx2x_napi_enable(bp);
6402 if (bp->flags & USING_MSIX_FLAG) {
6403 rc = bnx2x_req_msix_irqs(bp);
6405 pci_disable_msix(bp->pdev);
6408 printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
6411 rc = bnx2x_req_irq(bp);
6413 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6418 /* Send LOAD_REQUEST command to the MCP.
6419 The reply indicates the type of LOAD to perform:
6420 if this is the first port to be initialized,
6421 the common blocks must be initialized as well; otherwise they are skipped
6423 if (!BP_NOMCP(bp)) {
6424 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6426 BNX2X_ERR("MCP response failure, aborting\n");
6430 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6431 rc = -EBUSY; /* other port in diagnostic mode */
6436 int port = BP_PORT(bp);
6438 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6439 load_count[0], load_count[1], load_count[2]);
6441 load_count[1 + port]++;
6442 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6443 load_count[0], load_count[1], load_count[2]);
6444 if (load_count[0] == 1)
6445 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6446 else if (load_count[1 + port] == 1)
6447 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6449 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
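/* Example of the load_count bookkeeping when there is no MCP: on a
 * two-port device the first function to load sees load_count[0] == 1
 * and performs the COMMON (whole chip) initialization; the first
 * function on each port sees load_count[1 + port] == 1 and performs
 * the PORT initialization; every later function only does the
 * FUNCTION-level initialization.
 */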
6452 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6453 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6457 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6460 rc = bnx2x_init_hw(bp, load_code);
6462 BNX2X_ERR("HW init failed, aborting\n");
6466 /* Setup NIC internals and enable interrupts */
6467 bnx2x_nic_init(bp, load_code);
6469 /* Send LOAD_DONE command to MCP */
6470 if (!BP_NOMCP(bp)) {
6471 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6473 BNX2X_ERR("MCP response failure, aborting\n");
6479 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6481 rc = bnx2x_setup_leading(bp);
6483 BNX2X_ERR("Setup leading failed!\n");
6487 if (CHIP_IS_E1H(bp))
6488 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6489 BNX2X_ERR("!!! mf_cfg function disabled\n");
6490 bp->state = BNX2X_STATE_DISABLED;
6493 if (bp->state == BNX2X_STATE_OPEN)
6494 for_each_nondefault_queue(bp, i) {
6495 rc = bnx2x_setup_multi(bp, i);
6501 bnx2x_set_mac_addr_e1(bp, 1);
6503 bnx2x_set_mac_addr_e1h(bp, 1);
6506 bnx2x_initial_phy_init(bp);
6508 /* Start fast path */
6509 switch (load_mode) {
6511 /* Tx queue should be only reenabled */
6512 netif_wake_queue(bp->dev);
6513 /* Initialize the receive filter. */
6514 bnx2x_set_rx_mode(bp->dev);
6518 netif_start_queue(bp->dev);
6519 /* Initialize the receive filter. */
6520 bnx2x_set_rx_mode(bp->dev);
6524 /* Initialize the receive filter. */
6525 bnx2x_set_rx_mode(bp->dev);
6526 bp->state = BNX2X_STATE_DIAG;
6534 bnx2x__link_status_update(bp);
6536 /* start the timer */
6537 mod_timer(&bp->timer, jiffies + bp->current_interval);
6543 bnx2x_int_disable_sync(bp, 1);
6544 if (!BP_NOMCP(bp)) {
6545 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6546 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6549 /* Free SKBs, SGEs, TPA pool and driver internals */
6550 bnx2x_free_skbs(bp);
6551 for_each_queue(bp, i)
6552 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6557 bnx2x_napi_disable(bp);
6558 for_each_queue(bp, i)
6559 netif_napi_del(&bnx2x_fp(bp, i, napi));
6562 /* TBD: we really need to reset the chip
6563 if we want to recover from this */
6567 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6571 /* halt the connection */
6572 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6573 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6575 /* Wait for completion */
6576 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6577 &(bp->fp[index].state), 1);
6578 if (rc) /* timeout */
6581 /* delete cfc entry */
6582 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6584 /* Wait for completion */
6585 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6586 &(bp->fp[index].state), 1);
6590 static int bnx2x_stop_leading(struct bnx2x *bp)
6592 u16 dsb_sp_prod_idx;
6593 /* if the other port is handling traffic,
6594 this can take a lot of time */
6600 /* Send HALT ramrod */
6601 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6602 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6604 /* Wait for completion */
6605 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6606 &(bp->fp[0].state), 1);
6607 if (rc) /* timeout */
6610 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6612 /* Send PORT_DELETE ramrod */
6613 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6615 /* Wait for the completion to arrive on the default status block.
6616 We are going to reset the chip anyway,
6617 so there is not much to do if this times out
6619 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6621 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6622 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6623 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6624 #ifdef BNX2X_STOP_ON_ERROR
6633 rmb(); /* Refresh the dsb_sp_prod */
6635 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6636 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6641 static void bnx2x_reset_func(struct bnx2x *bp)
6643 int port = BP_PORT(bp);
6644 int func = BP_FUNC(bp);
6648 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6649 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6651 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6654 base = FUNC_ILT_BASE(func);
6655 for (i = base; i < base + ILT_PER_FUNC; i++)
6656 bnx2x_ilt_wr(bp, i, 0);
6659 static void bnx2x_reset_port(struct bnx2x *bp)
6661 int port = BP_PORT(bp);
6664 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6666 /* Do not rcv packets to BRB */
6667 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6668 /* Do not direct rcv packets that are not for MCP to the BRB */
6669 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6670 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6673 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6676 /* Check for BRB port occupancy */
6677 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6679 DP(NETIF_MSG_IFDOWN,
6680 "BRB1 is not empty %d blocks are occupied\n", val);
6682 /* TODO: Close Doorbell port? */
6685 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6687 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6688 BP_FUNC(bp), reset_code);
6690 switch (reset_code) {
6691 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6692 bnx2x_reset_port(bp);
6693 bnx2x_reset_func(bp);
6694 bnx2x_reset_common(bp);
6697 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6698 bnx2x_reset_port(bp);
6699 bnx2x_reset_func(bp);
6702 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6703 bnx2x_reset_func(bp);
6707 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
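/* Summary of the unload reset scope: UNLOAD_COMMON resets the port,
 * the function and then the common blocks (the whole chip);
 * UNLOAD_PORT resets the port and the function; UNLOAD_FUNCTION
 * resets only the function's own resources.
 */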
6712 /* must be called with rtnl_lock */
6713 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6715 int port = BP_PORT(bp);
6719 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6721 bp->rx_mode = BNX2X_RX_MODE_NONE;
6722 bnx2x_set_storm_rx_mode(bp);
6724 bnx2x_netif_stop(bp, 1);
6726 del_timer_sync(&bp->timer);
6727 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6728 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6729 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6734 /* Wait until tx fast path tasks complete */
6735 for_each_queue(bp, i) {
6736 struct bnx2x_fastpath *fp = &bp->fp[i];
6740 while (bnx2x_has_tx_work_unload(fp)) {
6742 bnx2x_tx_int(fp, 1000);
6744 BNX2X_ERR("timeout waiting for queue[%d]\n",
6746 #ifdef BNX2X_STOP_ON_ERROR
6758 /* Give HW time to discard old tx messages */
6761 if (CHIP_IS_E1(bp)) {
6762 struct mac_configuration_cmd *config =
6763 bnx2x_sp(bp, mcast_config);
6765 bnx2x_set_mac_addr_e1(bp, 0);
6767 for (i = 0; i < config->hdr.length_6b; i++)
6768 CAM_INVALIDATE(config->config_table[i]);
6770 config->hdr.length_6b = i;
6771 if (CHIP_REV_IS_SLOW(bp))
6772 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6774 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6775 config->hdr.client_id = BP_CL_ID(bp);
6776 config->hdr.reserved1 = 0;
6778 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6779 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6780 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6783 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6785 bnx2x_set_mac_addr_e1h(bp, 0);
6787 for (i = 0; i < MC_HASH_SIZE; i++)
6788 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6791 if (unload_mode == UNLOAD_NORMAL)
6792 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6794 else if (bp->flags & NO_WOL_FLAG) {
6795 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6796 if (CHIP_IS_E1H(bp))
6797 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6799 } else if (bp->wol) {
6800 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6801 u8 *mac_addr = bp->dev->dev_addr;
6803 /* The mac address is written to entries 1-4 to
6804 preserve entry 0 which is used by the PMF */
6805 u8 entry = (BP_E1HVN(bp) + 1)*8;
6807 val = (mac_addr[0] << 8) | mac_addr[1];
6808 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6810 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6811 (mac_addr[4] << 8) | mac_addr[5];
6812 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6814 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6817 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
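/* For illustration, with a MAC address of 00:10:18:ab:cd:ef the WoL
 * path above programs the EMAC MAC-match pair as 0x00000010 (upper
 * two bytes) and 0x18abcdef (lower four bytes).  Entry 0 is left
 * untouched since it is owned by the PMF.
 */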
6819 /* Close the multi and leading connections.
6820 Completions for these ramrods are collected synchronously */
6821 for_each_nondefault_queue(bp, i)
6822 if (bnx2x_stop_multi(bp, i))
6825 rc = bnx2x_stop_leading(bp);
6827 BNX2X_ERR("Stop leading failed!\n");
6828 #ifdef BNX2X_STOP_ON_ERROR
6837 reset_code = bnx2x_fw_command(bp, reset_code);
6839 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6840 load_count[0], load_count[1], load_count[2]);
6842 load_count[1 + port]--;
6843 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6844 load_count[0], load_count[1], load_count[2]);
6845 if (load_count[0] == 0)
6846 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6847 else if (load_count[1 + port] == 0)
6848 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6850 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6853 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6854 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6855 bnx2x__link_reset(bp);
6857 /* Reset the chip */
6858 bnx2x_reset_chip(bp, reset_code);
6860 /* Report UNLOAD_DONE to MCP */
6862 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6865 /* Free SKBs, SGEs, TPA pool and driver internals */
6866 bnx2x_free_skbs(bp);
6867 for_each_queue(bp, i)
6868 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6869 for_each_queue(bp, i)
6870 netif_napi_del(&bnx2x_fp(bp, i, napi));
6873 bp->state = BNX2X_STATE_CLOSED;
6875 netif_carrier_off(bp->dev);
6880 static void bnx2x_reset_task(struct work_struct *work)
6882 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6884 #ifdef BNX2X_STOP_ON_ERROR
6885 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6886 " so reset not done to allow debug dump,\n"
6887 KERN_ERR " you will need to reboot when done\n");
6893 if (!netif_running(bp->dev))
6894 goto reset_task_exit;
6896 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6897 bnx2x_nic_load(bp, LOAD_NORMAL);
6903 /* end of nic load/unload */
6908 * Init service functions
6911 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6915 /* Check if there is any driver already loaded */
6916 val = REG_RD(bp, MISC_REG_UNPREPARED);
6918 /* Check if the UNDI driver is active;
6919 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
6921 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6922 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6924 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6926 int func = BP_FUNC(bp);
6930 /* clear the UNDI indication */
6931 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6933 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6935 /* try unload UNDI on port 0 */
6938 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6939 DRV_MSG_SEQ_NUMBER_MASK);
6940 reset_code = bnx2x_fw_command(bp, reset_code);
6942 /* if UNDI is loaded on the other port */
6943 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6945 /* send "DONE" for previous unload */
6946 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6948 /* unload UNDI on port 1 */
6951 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6952 DRV_MSG_SEQ_NUMBER_MASK);
6953 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6955 bnx2x_fw_command(bp, reset_code);
6958 /* now it's safe to release the lock */
6959 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6961 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6962 HC_REG_CONFIG_0), 0x1000);
6964 /* close input traffic and wait for it */
6965 /* Do not rcv packets to BRB */
6967 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6968 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6969 /* Do not direct rcv packets that are not for MCP to the BRB */
6972 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6973 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6976 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6977 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6980 /* save NIG port swap info */
6981 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6982 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6985 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6988 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6990 /* take the NIG out of reset and restore swap values */
6992 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6993 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6994 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6995 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6997 /* send unload done to the MCP */
6998 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7000 /* restore our func and fw_seq */
7003 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7004 DRV_MSG_SEQ_NUMBER_MASK);
7007 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7011 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7013 u32 val, val2, val3, val4, id;
7016 /* Get the chip revision id and number. */
7017 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7018 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7019 id = ((val & 0xffff) << 16);
7020 val = REG_RD(bp, MISC_REG_CHIP_REV);
7021 id |= ((val & 0xf) << 12);
7022 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7023 id |= ((val & 0xff) << 4);
7024 val = REG_RD(bp, MISC_REG_BOND_ID);
7026 bp->common.chip_id = id;
7027 bp->link_params.chip_id = bp->common.chip_id;
7028 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7030 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7031 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7032 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7033 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7034 bp->common.flash_size, bp->common.flash_size);
7036 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7037 bp->link_params.shmem_base = bp->common.shmem_base;
7038 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7040 if (!bp->common.shmem_base ||
7041 (bp->common.shmem_base < 0xA0000) ||
7042 (bp->common.shmem_base >= 0xC0000)) {
7043 BNX2X_DEV_INFO("MCP not active\n");
7044 bp->flags |= NO_MCP_FLAG;
7048 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7049 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7050 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7051 BNX2X_ERR("BAD MCP validity signature\n");
7053 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7054 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7056 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7057 bp->common.hw_config, bp->common.board);
7059 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7060 SHARED_HW_CFG_LED_MODE_MASK) >>
7061 SHARED_HW_CFG_LED_MODE_SHIFT);
7063 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7064 bp->common.bc_ver = val;
7065 BNX2X_DEV_INFO("bc_ver %X\n", val);
7066 if (val < BNX2X_BC_VER) {
7067 /* for now only warn
7068 * later we might need to enforce this */
7069 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7070 " please upgrade BC\n", BNX2X_BC_VER, val);
7073 if (BP_E1HVN(bp) == 0) {
7074 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7075 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7077 /* no WOL capability for E1HVN != 0 */
7078 bp->flags |= NO_WOL_FLAG;
7080 BNX2X_DEV_INFO("%sWoL capable\n",
7081 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7083 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7084 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7085 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7086 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7088 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7089 val, val2, val3, val4);
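/* For illustration, the chip id read above is assembled as
 * (chip_num << 16) | (rev << 12) | (metal << 4) | bond_id, so e.g. a
 * chip_num of 0x164e with rev/metal/bond of zero yields a chip_id of
 * 0x164e0000.  The flash size is similarly derived as NVRAM_1MB_SIZE
 * shifted left by the size field of NVM_CFG4.
 */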
7092 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7095 int port = BP_PORT(bp);
7098 switch (switch_cfg) {
7100 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7103 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7104 switch (ext_phy_type) {
7105 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7106 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7109 bp->port.supported |= (SUPPORTED_10baseT_Half |
7110 SUPPORTED_10baseT_Full |
7111 SUPPORTED_100baseT_Half |
7112 SUPPORTED_100baseT_Full |
7113 SUPPORTED_1000baseT_Full |
7114 SUPPORTED_2500baseX_Full |
7119 SUPPORTED_Asym_Pause);
7122 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7123 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7126 bp->port.supported |= (SUPPORTED_10baseT_Half |
7127 SUPPORTED_10baseT_Full |
7128 SUPPORTED_100baseT_Half |
7129 SUPPORTED_100baseT_Full |
7130 SUPPORTED_1000baseT_Full |
7135 SUPPORTED_Asym_Pause);
7139 BNX2X_ERR("NVRAM config error. "
7140 "BAD SerDes ext_phy_config 0x%x\n",
7141 bp->link_params.ext_phy_config);
7145 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7147 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7150 case SWITCH_CFG_10G:
7151 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7154 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7155 switch (ext_phy_type) {
7156 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7157 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7160 bp->port.supported |= (SUPPORTED_10baseT_Half |
7161 SUPPORTED_10baseT_Full |
7162 SUPPORTED_100baseT_Half |
7163 SUPPORTED_100baseT_Full |
7164 SUPPORTED_1000baseT_Full |
7165 SUPPORTED_2500baseX_Full |
7166 SUPPORTED_10000baseT_Full |
7171 SUPPORTED_Asym_Pause);
7174 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7175 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7178 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7181 SUPPORTED_Asym_Pause);
7184 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7185 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7188 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7189 SUPPORTED_1000baseT_Full |
7192 SUPPORTED_Asym_Pause);
7195 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7196 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7199 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7200 SUPPORTED_1000baseT_Full |
7204 SUPPORTED_Asym_Pause);
7207 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7208 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7211 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7212 SUPPORTED_2500baseX_Full |
7213 SUPPORTED_1000baseT_Full |
7217 SUPPORTED_Asym_Pause);
7220 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7221 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7224 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7228 SUPPORTED_Asym_Pause);
7231 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7232 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7233 bp->link_params.ext_phy_config);
7237 BNX2X_ERR("NVRAM config error. "
7238 "BAD XGXS ext_phy_config 0x%x\n",
7239 bp->link_params.ext_phy_config);
7243 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7245 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7250 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7251 bp->port.link_config);
7254 bp->link_params.phy_addr = bp->port.phy_addr;
7256 /* mask what we support according to speed_cap_mask */
7257 if (!(bp->link_params.speed_cap_mask &
7258 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7259 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7261 if (!(bp->link_params.speed_cap_mask &
7262 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7263 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7265 if (!(bp->link_params.speed_cap_mask &
7266 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7267 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7269 if (!(bp->link_params.speed_cap_mask &
7270 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7271 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7273 if (!(bp->link_params.speed_cap_mask &
7274 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7275 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7276 SUPPORTED_1000baseT_Full);
7278 if (!(bp->link_params.speed_cap_mask &
7279 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7280 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7282 if (!(bp->link_params.speed_cap_mask &
7283 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7284 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7286 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7289 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7291 bp->link_params.req_duplex = DUPLEX_FULL;
7293 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7294 case PORT_FEATURE_LINK_SPEED_AUTO:
7295 if (bp->port.supported & SUPPORTED_Autoneg) {
7296 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7297 bp->port.advertising = bp->port.supported;
7300 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7302 if ((ext_phy_type ==
7303 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7305 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7306 /* force 10G, no AN */
7307 bp->link_params.req_line_speed = SPEED_10000;
7308 bp->port.advertising =
7309 (ADVERTISED_10000baseT_Full |
7313 BNX2X_ERR("NVRAM config error. "
7314 "Invalid link_config 0x%x"
7315 " Autoneg not supported\n",
7316 bp->port.link_config);
7321 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7322 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7323 bp->link_params.req_line_speed = SPEED_10;
7324 bp->port.advertising = (ADVERTISED_10baseT_Full |
7327 BNX2X_ERR("NVRAM config error. "
7328 "Invalid link_config 0x%x"
7329 " speed_cap_mask 0x%x\n",
7330 bp->port.link_config,
7331 bp->link_params.speed_cap_mask);
7336 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7337 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7338 bp->link_params.req_line_speed = SPEED_10;
7339 bp->link_params.req_duplex = DUPLEX_HALF;
7340 bp->port.advertising = (ADVERTISED_10baseT_Half |
7343 BNX2X_ERR("NVRAM config error. "
7344 "Invalid link_config 0x%x"
7345 " speed_cap_mask 0x%x\n",
7346 bp->port.link_config,
7347 bp->link_params.speed_cap_mask);
7352 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7353 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7354 bp->link_params.req_line_speed = SPEED_100;
7355 bp->port.advertising = (ADVERTISED_100baseT_Full |
7358 BNX2X_ERR("NVRAM config error. "
7359 "Invalid link_config 0x%x"
7360 " speed_cap_mask 0x%x\n",
7361 bp->port.link_config,
7362 bp->link_params.speed_cap_mask);
7367 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7368 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7369 bp->link_params.req_line_speed = SPEED_100;
7370 bp->link_params.req_duplex = DUPLEX_HALF;
7371 bp->port.advertising = (ADVERTISED_100baseT_Half |
7374 BNX2X_ERR("NVRAM config error. "
7375 "Invalid link_config 0x%x"
7376 " speed_cap_mask 0x%x\n",
7377 bp->port.link_config,
7378 bp->link_params.speed_cap_mask);
7383 case PORT_FEATURE_LINK_SPEED_1G:
7384 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7385 bp->link_params.req_line_speed = SPEED_1000;
7386 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7389 BNX2X_ERR("NVRAM config error. "
7390 "Invalid link_config 0x%x"
7391 " speed_cap_mask 0x%x\n",
7392 bp->port.link_config,
7393 bp->link_params.speed_cap_mask);
7398 case PORT_FEATURE_LINK_SPEED_2_5G:
7399 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7400 bp->link_params.req_line_speed = SPEED_2500;
7401 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7404 BNX2X_ERR("NVRAM config error. "
7405 "Invalid link_config 0x%x"
7406 " speed_cap_mask 0x%x\n",
7407 bp->port.link_config,
7408 bp->link_params.speed_cap_mask);
7413 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7414 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7415 case PORT_FEATURE_LINK_SPEED_10G_KR:
7416 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7417 bp->link_params.req_line_speed = SPEED_10000;
7418 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7421 BNX2X_ERR("NVRAM config error. "
7422 "Invalid link_config 0x%x"
7423 " speed_cap_mask 0x%x\n",
7424 bp->port.link_config,
7425 bp->link_params.speed_cap_mask);
7431 BNX2X_ERR("NVRAM config error. "
7432 "BAD link speed link_config 0x%x\n",
7433 bp->port.link_config);
7434 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7435 bp->port.advertising = bp->port.supported;
7439 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7440 PORT_FEATURE_FLOW_CONTROL_MASK);
7441 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7442 !(bp->port.supported & SUPPORTED_Autoneg))
7443 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7445 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7446 " advertising 0x%x\n",
7447 bp->link_params.req_line_speed,
7448 bp->link_params.req_duplex,
7449 bp->link_params.req_flow_ctrl, bp->port.advertising);
7452 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7454 int port = BP_PORT(bp);
7457 bp->link_params.bp = bp;
7458 bp->link_params.port = port;
7460 bp->link_params.serdes_config =
7461 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7462 bp->link_params.lane_config =
7463 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7464 bp->link_params.ext_phy_config =
7466 dev_info.port_hw_config[port].external_phy_config);
7467 bp->link_params.speed_cap_mask =
7469 dev_info.port_hw_config[port].speed_capability_mask);
7471 bp->port.link_config =
7472 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7474 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7475 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7476 " link_config 0x%08x\n",
7477 bp->link_params.serdes_config,
7478 bp->link_params.lane_config,
7479 bp->link_params.ext_phy_config,
7480 bp->link_params.speed_cap_mask, bp->port.link_config);
7482 bp->link_params.switch_cfg = (bp->port.link_config &
7483 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7484 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7486 bnx2x_link_settings_requested(bp);
7488 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7489 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7490 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7491 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7492 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7493 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7494 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7495 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7496 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7497 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
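/* For illustration: with mac_upper == 0x0010 and mac_lower ==
 * 0x18abcdef read from shmem, the bytes above unpack to the station
 * address 00:10:18:ab:cd:ef, which is then copied to both the link
 * parameters and the permanent address.
 */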
7500 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7502 int func = BP_FUNC(bp);
7506 bnx2x_get_common_hwinfo(bp);
7510 if (CHIP_IS_E1H(bp)) {
7512 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7514 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7515 FUNC_MF_CFG_E1HOV_TAG_MASK);
7516 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7520 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7522 func, bp->e1hov, bp->e1hov);
7524 BNX2X_DEV_INFO("Single function mode\n");
7526 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7527 " aborting\n", func);
7533 if (!BP_NOMCP(bp)) {
7534 bnx2x_get_port_hwinfo(bp);
7536 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7537 DRV_MSG_SEQ_NUMBER_MASK);
7538 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7542 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7543 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7544 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7545 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7546 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7547 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7548 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7549 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7550 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7551 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7552 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7554 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7562 /* only supposed to happen on emulation/FPGA */
7563 BNX2X_ERR("warning: random MAC workaround active\n");
7564 random_ether_addr(bp->dev->dev_addr);
7565 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7571 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7573 int func = BP_FUNC(bp);
7576 /* Disable interrupt handling until HW is initialized */
7577 atomic_set(&bp->intr_sem, 1);
7579 mutex_init(&bp->port.phy_mutex);
7581 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7582 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7584 rc = bnx2x_get_hwinfo(bp);
7586 /* need to reset chip if undi was active */
7588 bnx2x_undi_unload(bp);
7590 if (CHIP_REV_IS_FPGA(bp))
7591 printk(KERN_ERR PFX "FPGA detected\n");
7593 if (BP_NOMCP(bp) && (func == 0))
7595 "MCP disabled, must load devices in order!\n");
7599 bp->flags &= ~TPA_ENABLE_FLAG;
7600 bp->dev->features &= ~NETIF_F_LRO;
7602 bp->flags |= TPA_ENABLE_FLAG;
7603 bp->dev->features |= NETIF_F_LRO;
7607 bp->tx_ring_size = MAX_TX_AVAIL;
7608 bp->rx_ring_size = MAX_RX_AVAIL;
7616 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7617 bp->current_interval = (poll ? poll : bp->timer_interval);
7619 init_timer(&bp->timer);
7620 bp->timer.expires = jiffies + bp->current_interval;
7621 bp->timer.data = (unsigned long) bp;
7622 bp->timer.function = bnx2x_timer;
7628 * ethtool service functions
7631 /* All ethtool functions called with rtnl_lock */
7633 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7635 struct bnx2x *bp = netdev_priv(dev);
7637 cmd->supported = bp->port.supported;
7638 cmd->advertising = bp->port.advertising;
7640 if (netif_carrier_ok(dev)) {
7641 cmd->speed = bp->link_vars.line_speed;
7642 cmd->duplex = bp->link_vars.duplex;
7644 cmd->speed = bp->link_params.req_line_speed;
7645 cmd->duplex = bp->link_params.req_duplex;
7650 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7651 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7652 if (vn_max_rate < cmd->speed)
7653 cmd->speed = vn_max_rate;
7656 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7658 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7660 switch (ext_phy_type) {
7661 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7662 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7663 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7664 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7665 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7666 cmd->port = PORT_FIBRE;
7669 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7670 cmd->port = PORT_TP;
7673 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7674 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7675 bp->link_params.ext_phy_config);
7679 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7680 bp->link_params.ext_phy_config);
7684 cmd->port = PORT_TP;
7686 cmd->phy_address = bp->port.phy_addr;
7687 cmd->transceiver = XCVR_INTERNAL;
7689 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7690 cmd->autoneg = AUTONEG_ENABLE;
7692 cmd->autoneg = AUTONEG_DISABLE;
7697 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7698 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7699 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7700 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7701 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7702 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7703 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7708 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7710 struct bnx2x *bp = netdev_priv(dev);
7716 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7717 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7718 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7719 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7720 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7721 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7722 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7724 if (cmd->autoneg == AUTONEG_ENABLE) {
7725 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7726 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7730 /* advertise the requested speed and duplex if supported */
7731 cmd->advertising &= bp->port.supported;
7733 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7734 bp->link_params.req_duplex = DUPLEX_FULL;
7735 bp->port.advertising |= (ADVERTISED_Autoneg |
7738 } else { /* forced speed */
7739 /* advertise the requested speed and duplex if supported */
7740 switch (cmd->speed) {
7742 if (cmd->duplex == DUPLEX_FULL) {
7743 if (!(bp->port.supported &
7744 SUPPORTED_10baseT_Full)) {
7746 "10M full not supported\n");
7750 advertising = (ADVERTISED_10baseT_Full |
7753 if (!(bp->port.supported &
7754 SUPPORTED_10baseT_Half)) {
7756 "10M half not supported\n");
7760 advertising = (ADVERTISED_10baseT_Half |
7766 if (cmd->duplex == DUPLEX_FULL) {
7767 if (!(bp->port.supported &
7768 SUPPORTED_100baseT_Full)) {
7770 "100M full not supported\n");
7774 advertising = (ADVERTISED_100baseT_Full |
7777 if (!(bp->port.supported &
7778 SUPPORTED_100baseT_Half)) {
7780 "100M half not supported\n");
7784 advertising = (ADVERTISED_100baseT_Half |
7790 if (cmd->duplex != DUPLEX_FULL) {
7791 DP(NETIF_MSG_LINK, "1G half not supported\n");
7795 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7796 DP(NETIF_MSG_LINK, "1G full not supported\n");
7800 advertising = (ADVERTISED_1000baseT_Full |
7805 if (cmd->duplex != DUPLEX_FULL) {
7807 "2.5G half not supported\n");
7811 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7813 "2.5G full not supported\n");
7817 advertising = (ADVERTISED_2500baseX_Full |
7822 if (cmd->duplex != DUPLEX_FULL) {
7823 DP(NETIF_MSG_LINK, "10G half not supported\n");
7827 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7828 DP(NETIF_MSG_LINK, "10G full not supported\n");
7832 advertising = (ADVERTISED_10000baseT_Full |
7837 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7841 bp->link_params.req_line_speed = cmd->speed;
7842 bp->link_params.req_duplex = cmd->duplex;
7843 bp->port.advertising = advertising;
7846 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7847 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7848 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7849 bp->port.advertising);
7851 if (netif_running(dev)) {
7852 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7859 #define PHY_FW_VER_LEN 10
7861 static void bnx2x_get_drvinfo(struct net_device *dev,
7862 struct ethtool_drvinfo *info)
7864 struct bnx2x *bp = netdev_priv(dev);
7865 u8 phy_fw_ver[PHY_FW_VER_LEN];
7867 strcpy(info->driver, DRV_MODULE_NAME);
7868 strcpy(info->version, DRV_MODULE_VERSION);
7870 phy_fw_ver[0] = '\0';
7872 bnx2x_acquire_phy_lock(bp);
7873 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7874 (bp->state != BNX2X_STATE_CLOSED),
7875 phy_fw_ver, PHY_FW_VER_LEN);
7876 bnx2x_release_phy_lock(bp);
7879 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7880 (bp->common.bc_ver & 0xff0000) >> 16,
7881 (bp->common.bc_ver & 0xff00) >> 8,
7882 (bp->common.bc_ver & 0xff),
7883 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7884 strcpy(info->bus_info, pci_name(bp->pdev));
7885 info->n_stats = BNX2X_NUM_STATS;
7886 info->testinfo_len = BNX2X_NUM_TESTS;
7887 info->eedump_len = bp->common.flash_size;
7888 info->regdump_len = 0;
7891 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7893 struct bnx2x *bp = netdev_priv(dev);
7895 if (bp->flags & NO_WOL_FLAG) {
7899 wol->supported = WAKE_MAGIC;
7901 wol->wolopts = WAKE_MAGIC;
7905 memset(&wol->sopass, 0, sizeof(wol->sopass));
7908 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7910 struct bnx2x *bp = netdev_priv(dev);
7912 if (wol->wolopts & ~WAKE_MAGIC)
7915 if (wol->wolopts & WAKE_MAGIC) {
7916 if (bp->flags & NO_WOL_FLAG)
7926 static u32 bnx2x_get_msglevel(struct net_device *dev)
7928 struct bnx2x *bp = netdev_priv(dev);
7930 return bp->msglevel;
7933 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7935 struct bnx2x *bp = netdev_priv(dev);
7937 if (capable(CAP_NET_ADMIN))
7938 bp->msglevel = level;
7941 static int bnx2x_nway_reset(struct net_device *dev)
7943 struct bnx2x *bp = netdev_priv(dev);
7948 if (netif_running(dev)) {
7949 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7956 static int bnx2x_get_eeprom_len(struct net_device *dev)
7958 struct bnx2x *bp = netdev_priv(dev);
7960 return bp->common.flash_size;
7963 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7965 int port = BP_PORT(bp);
7969 /* adjust timeout for emulation/FPGA */
7970 count = NVRAM_TIMEOUT_COUNT;
7971 if (CHIP_REV_IS_SLOW(bp))
7974 /* request access to nvram interface */
7975 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7976 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7978 for (i = 0; i < count*10; i++) {
7979 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7980 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7986 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7987 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7994 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7996 int port = BP_PORT(bp);
8000 /* adjust timeout for emulation/FPGA */
8001 count = NVRAM_TIMEOUT_COUNT;
8002 if (CHIP_REV_IS_SLOW(bp))
8005 /* relinquish nvram interface */
8006 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8007 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8009 for (i = 0; i < count*10; i++) {
8010 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8011 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8017 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8018 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8025 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8029 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8031 /* enable both bits, even on read */
8032 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8033 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8034 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8037 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8041 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8043 /* disable both bits, even after read */
8044 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8045 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8046 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
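/* The NVRAM read/write routines below all follow the same sequence
 * using the helpers above: grab the per-port software arbitration
 * (bnx2x_acquire_nvram_lock), enable the access bits, issue dword
 * commands with the FIRST flag on the first dword and the LAST flag
 * on the final one, then disable access and release the arbitration.
 */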
8049 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8055 /* build the command word */
8056 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8058 /* need to clear DONE bit separately */
8059 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8061 /* address of the NVRAM to read from */
8062 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8063 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8065 /* issue a read command */
8066 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8068 /* adjust timeout for emulation/FPGA */
8069 count = NVRAM_TIMEOUT_COUNT;
8070 if (CHIP_REV_IS_SLOW(bp))
8073 /* wait for completion */
8076 for (i = 0; i < count; i++) {
8078 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8080 if (val & MCPR_NVM_COMMAND_DONE) {
8081 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8082 /* we read nvram data in cpu order,
8083 * but ethtool sees it as an array of bytes;
8084 * converting to big-endian does the job */
8085 val = cpu_to_be32(val);
8095 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8102 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8104 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8109 if (offset + buf_size > bp->common.flash_size) {
8110 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8111 " buf_size (0x%x) > flash_size (0x%x)\n",
8112 offset, buf_size, bp->common.flash_size);
8116 /* request access to nvram interface */
8117 rc = bnx2x_acquire_nvram_lock(bp);
8121 /* enable access to nvram interface */
8122 bnx2x_enable_nvram_access(bp);
8124 /* read the first word(s) */
8125 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8126 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8127 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8128 memcpy(ret_buf, &val, 4);
8130 /* advance to the next dword */
8131 offset += sizeof(u32);
8132 ret_buf += sizeof(u32);
8133 buf_size -= sizeof(u32);
8138 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8139 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8140 memcpy(ret_buf, &val, 4);
8143 /* disable access to nvram interface */
8144 bnx2x_disable_nvram_access(bp);
8145 bnx2x_release_nvram_lock(bp);
8150 static int bnx2x_get_eeprom(struct net_device *dev,
8151 struct ethtool_eeprom *eeprom, u8 *eebuf)
8153 struct bnx2x *bp = netdev_priv(dev);
8156 if (!netif_running(dev))
8159 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8160 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8161 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8162 eeprom->len, eeprom->len);
8164 /* parameters already validated in ethtool_get_eeprom */
8166 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8171 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8176 /* build the command word */
8177 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8179 /* need to clear DONE bit separately */
8180 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8182 /* write the data */
8183 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8185 /* address of the NVRAM to write to */
8186 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8187 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8189 /* issue the write command */
8190 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8192 /* adjust timeout for emulation/FPGA */
8193 count = NVRAM_TIMEOUT_COUNT;
8194 if (CHIP_REV_IS_SLOW(bp))
8197 /* wait for completion */
8199 for (i = 0; i < count; i++) {
8201 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8202 if (val & MCPR_NVM_COMMAND_DONE) {
8211 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
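/* For illustration: BYTE_OFFSET(0x102) == 8 * (0x102 & 0x03) == 16,
 * so a single-byte write at offset 0x102 replaces bits 23:16 of the
 * dword at the aligned offset 0x100.
 */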
8213 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8221 if (offset + buf_size > bp->common.flash_size) {
8222 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8223 " buf_size (0x%x) > flash_size (0x%x)\n",
8224 offset, buf_size, bp->common.flash_size);
8228 /* request access to nvram interface */
8229 rc = bnx2x_acquire_nvram_lock(bp);
8233 /* enable access to nvram interface */
8234 bnx2x_enable_nvram_access(bp);
8236 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8237 align_offset = (offset & ~0x03);
8238 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8241 val &= ~(0xff << BYTE_OFFSET(offset));
8242 val |= (*data_buf << BYTE_OFFSET(offset));
8244 /* nvram data is returned as an array of bytes
8245 * convert it back to cpu order */
8246 val = be32_to_cpu(val);
8248 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8252 /* disable access to nvram interface */
8253 bnx2x_disable_nvram_access(bp);
8254 bnx2x_release_nvram_lock(bp);
8259 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8267 if (buf_size == 1) /* ethtool */
8268 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8270 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8272 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8277 if (offset + buf_size > bp->common.flash_size) {
8278 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8279 " buf_size (0x%x) > flash_size (0x%x)\n",
8280 offset, buf_size, bp->common.flash_size);
8284 /* request access to nvram interface */
8285 rc = bnx2x_acquire_nvram_lock(bp);
8289 /* enable access to nvram interface */
8290 bnx2x_enable_nvram_access(bp);
8293 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8294 while ((written_so_far < buf_size) && (rc == 0)) {
8295 if (written_so_far == (buf_size - sizeof(u32)))
8296 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8297 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8298 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8299 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8300 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8302 memcpy(&val, data_buf, 4);
8304 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8306 /* advance to the next dword */
8307 offset += sizeof(u32);
8308 data_buf += sizeof(u32);
8309 written_so_far += sizeof(u32);
8313 /* disable access to nvram interface */
8314 bnx2x_disable_nvram_access(bp);
8315 bnx2x_release_nvram_lock(bp);
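/* The FIRST/LAST command flags above track NVRAM page boundaries:
 * LAST is set on the final dword of the buffer and on the last dword
 * of each page (when (offset + 4) is a multiple of NVRAM_PAGE_SIZE),
 * and FIRST is raised again on the first dword of the next page, so a
 * large write is split into page-sized bursts.
 */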
8320 static int bnx2x_set_eeprom(struct net_device *dev,
8321 struct ethtool_eeprom *eeprom, u8 *eebuf)
8323 struct bnx2x *bp = netdev_priv(dev);
8326 if (!netif_running(dev))
8329 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8330 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8331 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8332 eeprom->len, eeprom->len);
8334 /* parameters already validated in ethtool_set_eeprom */
8336 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8337 if (eeprom->magic == 0x00504859)
8340 bnx2x_acquire_phy_lock(bp);
8341 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8342 bp->link_params.ext_phy_config,
8343 (bp->state != BNX2X_STATE_CLOSED),
8344 eebuf, eeprom->len);
8345 if ((bp->state == BNX2X_STATE_OPEN) ||
8346 (bp->state == BNX2X_STATE_DISABLED)) {
8347 rc |= bnx2x_link_reset(&bp->link_params,
8349 rc |= bnx2x_phy_init(&bp->link_params,
8352 bnx2x_release_phy_lock(bp);
8354 } else /* Only the PMF can access the PHY */
8357 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8362 static int bnx2x_get_coalesce(struct net_device *dev,
8363 struct ethtool_coalesce *coal)
8365 struct bnx2x *bp = netdev_priv(dev);
8367 memset(coal, 0, sizeof(struct ethtool_coalesce));
8369 coal->rx_coalesce_usecs = bp->rx_ticks;
8370 coal->tx_coalesce_usecs = bp->tx_ticks;
8375 static int bnx2x_set_coalesce(struct net_device *dev,
8376 struct ethtool_coalesce *coal)
8378 struct bnx2x *bp = netdev_priv(dev);
8380 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8381 if (bp->rx_ticks > 3000)
8382 bp->rx_ticks = 3000;
8384 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8385 if (bp->tx_ticks > 0x3000)
8386 bp->tx_ticks = 0x3000;
8388 if (netif_running(dev))
8389 bnx2x_update_coalesce(bp);
8394 static void bnx2x_get_ringparam(struct net_device *dev,
8395 struct ethtool_ringparam *ering)
8397 struct bnx2x *bp = netdev_priv(dev);
8399 ering->rx_max_pending = MAX_RX_AVAIL;
8400 ering->rx_mini_max_pending = 0;
8401 ering->rx_jumbo_max_pending = 0;
8403 ering->rx_pending = bp->rx_ring_size;
8404 ering->rx_mini_pending = 0;
8405 ering->rx_jumbo_pending = 0;
8407 ering->tx_max_pending = MAX_TX_AVAIL;
8408 ering->tx_pending = bp->tx_ring_size;
8411 static int bnx2x_set_ringparam(struct net_device *dev,
8412 struct ethtool_ringparam *ering)
8414 struct bnx2x *bp = netdev_priv(dev);
8417 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8418 (ering->tx_pending > MAX_TX_AVAIL) ||
8419 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8422 bp->rx_ring_size = ering->rx_pending;
8423 bp->tx_ring_size = ering->tx_pending;
8425 if (netif_running(dev)) {
8426 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8427 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8433 static void bnx2x_get_pauseparam(struct net_device *dev,
8434 struct ethtool_pauseparam *epause)
8436 struct bnx2x *bp = netdev_priv(dev);
8438 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8439 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8441 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8442 BNX2X_FLOW_CTRL_RX);
8443 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8444 BNX2X_FLOW_CTRL_TX);
8446 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8447 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8448 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8451 static int bnx2x_set_pauseparam(struct net_device *dev,
8452 struct ethtool_pauseparam *epause)
8454 struct bnx2x *bp = netdev_priv(dev);
8459 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8460 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8461 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8463 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8465 if (epause->rx_pause)
8466 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8468 if (epause->tx_pause)
8469 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8471 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8472 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8474 if (epause->autoneg) {
8475 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8476 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8480 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8481 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8485 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8487 if (netif_running(dev)) {
8488 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8495 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8497 struct bnx2x *bp = netdev_priv(dev);
8501 /* TPA requires Rx CSUM offloading */
8502 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8503 if (!(dev->features & NETIF_F_LRO)) {
8504 dev->features |= NETIF_F_LRO;
8505 bp->flags |= TPA_ENABLE_FLAG;
8509 } else if (dev->features & NETIF_F_LRO) {
8510 dev->features &= ~NETIF_F_LRO;
8511 bp->flags &= ~TPA_ENABLE_FLAG;
8515 if (changed && netif_running(dev)) {
8516 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8517 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8523 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8525 struct bnx2x *bp = netdev_priv(dev);
8530 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8532 struct bnx2x *bp = netdev_priv(dev);
8537 /* Disable TPA when Rx CSUM is disabled; otherwise all
8538 TPA'ed packets will be discarded due to a wrong TCP CSUM */
8540 u32 flags = ethtool_op_get_flags(dev);
8542 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8548 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8551 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8552 dev->features |= NETIF_F_TSO6;
8554 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8555 dev->features &= ~NETIF_F_TSO6;
8561 static const struct {
8562 char string[ETH_GSTRING_LEN];
8563 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8564 { "register_test (offline)" },
8565 { "memory_test (offline)" },
8566 { "loopback_test (offline)" },
8567 { "nvram_test (online)" },
8568 { "interrupt_test (online)" },
8569 { "link_test (online)" },
8570 { "idle check (online)" },
8571 { "MC errors (online)" }
8574 static int bnx2x_self_test_count(struct net_device *dev)
8576 return BNX2X_NUM_TESTS;
8579 static int bnx2x_test_registers(struct bnx2x *bp)
8581 int idx, i, rc = -ENODEV;
8583 int port = BP_PORT(bp);
8584 static const struct {
8589 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8590 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8591 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8592 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8593 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8594 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8595 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8596 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8597 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8598 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8599 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8600 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8601 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8602 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8603 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8604 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8605 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8606 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8607 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8608 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8609 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8610 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8611 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8612 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8613 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8614 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8615 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8616 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8617 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8618 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8619 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8620 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8621 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8622 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8623 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8624 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8625 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8626 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8628 { 0xffffffff, 0, 0x00000000 }
8631 if (!netif_running(bp->dev))
8634 /* Repeat the test twice:
8635 first by writing 0x00000000, then by writing 0xffffffff */
8636 for (idx = 0; idx < 2; idx++) {
8643 wr_val = 0xffffffff;
8647 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8648 u32 offset, mask, save_val, val;
8650 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8651 mask = reg_tbl[i].mask;
8653 save_val = REG_RD(bp, offset);
8655 REG_WR(bp, offset, wr_val);
8656 val = REG_RD(bp, offset);
8658 /* Restore the original register's value */
8659 REG_WR(bp, offset, save_val);
8661 /* verify that the value is as expected */
8662 if ((val & mask) != (wr_val & mask))
8673 static int bnx2x_test_memory(struct bnx2x *bp)
8675 int i, j, rc = -ENODEV;
8677 static const struct {
8681 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8682 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8683 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8684 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8685 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8686 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8687 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8691 static const struct {
8697 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8698 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8699 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8700 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8701 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8702 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8704 { NULL, 0xffffffff, 0, 0 }
8707 if (!netif_running(bp->dev))
8710 /* Go through all the memories */
8711 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8712 for (j = 0; j < mem_tbl[i].size; j++)
8713 REG_RD(bp, mem_tbl[i].offset + j*4);
8715 /* Check the parity status */
8716 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8717 val = REG_RD(bp, prty_tbl[i].offset);
8718 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8719 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8721 "%s is 0x%x\n", prty_tbl[i].name, val);
8732 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8737 while (bnx2x_link_test(bp) && cnt--)
8741 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8743 unsigned int pkt_size, num_pkts, i;
8744 struct sk_buff *skb;
8745 unsigned char *packet;
8746 struct bnx2x_fastpath *fp = &bp->fp[0];
8747 u16 tx_start_idx, tx_idx;
8748 u16 rx_start_idx, rx_idx;
8750 struct sw_tx_bd *tx_buf;
8751 struct eth_tx_bd *tx_bd;
8753 union eth_rx_cqe *cqe;
8755 struct sw_rx_bd *rx_buf;
8759 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8760 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8761 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8763 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8765 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8766 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8767 /* wait until link state is restored */
8769 while (cnt-- && bnx2x_test_link(&bp->link_params,
8776 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8779 goto test_loopback_exit;
8781 packet = skb_put(skb, pkt_size);
8782 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8783 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8784 for (i = ETH_HLEN; i < pkt_size; i++)
8785 packet[i] = (unsigned char) (i & 0xff);
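/* annotation: this incrementing (i & 0xff) payload is what the receive side
 * of the loopback test compares against, byte for byte, further down */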
8788 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8789 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8791 pkt_prod = fp->tx_pkt_prod++;
8792 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8793 tx_buf->first_bd = fp->tx_bd_prod;
8796 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8797 mapping = pci_map_single(bp->pdev, skb->data,
8798 skb_headlen(skb), PCI_DMA_TODEVICE);
8799 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8800 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8801 tx_bd->nbd = cpu_to_le16(1);
8802 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8803 tx_bd->vlan = cpu_to_le16(pkt_prod);
8804 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8805 ETH_TX_BD_FLAGS_END_BD);
8806 tx_bd->general_data = ((UNICAST_ADDRESS <<
8807 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8811 fp->hw_tx_prods->bds_prod =
8812 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8813 mb(); /* FW restriction: must not reorder writing nbd and packets */
8814 fp->hw_tx_prods->packets_prod =
8815 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8816 DOORBELL(bp, FP_IDX(fp), 0);
8822 bp->dev->trans_start = jiffies;
8826 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8827 if (tx_idx != tx_start_idx + num_pkts)
8828 goto test_loopback_exit;
8830 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8831 if (rx_idx != rx_start_idx + num_pkts)
8832 goto test_loopback_exit;
8834 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8835 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8836 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8837 goto test_loopback_rx_exit;
8839 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8840 if (len != pkt_size)
8841 goto test_loopback_rx_exit;
8843 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8845 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8846 for (i = ETH_HLEN; i < pkt_size; i++)
8847 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8848 goto test_loopback_rx_exit;
8852 test_loopback_rx_exit:
8854 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8855 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8856 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8857 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8859 /* Update producers */
8860 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8864 bp->link_params.loopback_mode = LOOPBACK_NONE;
8869 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8873 if (!netif_running(bp->dev))
8874 return BNX2X_LOOPBACK_FAILED;
8876 bnx2x_netif_stop(bp, 1);
8877 bnx2x_acquire_phy_lock(bp);
8879 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8880 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8881 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8884 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8885 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8886 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8889 bnx2x_release_phy_lock(bp);
8890 bnx2x_netif_start(bp);
8895 #define CRC32_RESIDUAL 0xdebb20e3
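/* annotation on the nvram test below: each nvram_tbl[] region appears to
 * carry its own little-endian CRC-32, so the test does not need to know
 * where that CRC field sits.  Running ether_crc_le() over the whole region,
 * stored CRC included, collapses to the fixed CRC-32 residual 0xdebb20e3
 * whenever the data is intact -- a minimal sketch of the idea:
 *
 *	csum = ether_crc_le(region_size, region_including_its_crc);
 *	if (csum != CRC32_RESIDUAL)
 *		the region is corrupted
 *
 * which is exactly the comparison made per table entry below */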
8897 static int bnx2x_test_nvram(struct bnx2x *bp)
8899 static const struct {
8903 { 0, 0x14 }, /* bootstrap */
8904 { 0x14, 0xec }, /* dir */
8905 { 0x100, 0x350 }, /* manuf_info */
8906 { 0x450, 0xf0 }, /* feature_info */
8907 { 0x640, 0x64 }, /* upgrade_key_info */
8909 { 0x708, 0x70 }, /* manuf_key_info */
8914 u8 *data = (u8 *)buf;
8918 rc = bnx2x_nvram_read(bp, 0, data, 4);
8920 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8921 goto test_nvram_exit;
8924 magic = be32_to_cpu(buf[0]);
8925 if (magic != 0x669955aa) {
8926 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8928 goto test_nvram_exit;
8931 for (i = 0; nvram_tbl[i].size; i++) {
8933 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8937 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8938 goto test_nvram_exit;
8941 csum = ether_crc_le(nvram_tbl[i].size, data);
8942 if (csum != CRC32_RESIDUAL) {
8944 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8946 goto test_nvram_exit;
8954 static int bnx2x_test_intr(struct bnx2x *bp)
8956 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8959 if (!netif_running(bp->dev))
8962 config->hdr.length_6b = 0;
8964 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8966 config->hdr.offset = BP_FUNC(bp);
8967 config->hdr.client_id = BP_CL_ID(bp);
8968 config->hdr.reserved1 = 0;
8970 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8971 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8972 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8974 bp->set_mac_pending++;
8975 for (i = 0; i < 10; i++) {
8976 if (!bp->set_mac_pending)
8978 msleep_interruptible(10);
8987 static void bnx2x_self_test(struct net_device *dev,
8988 struct ethtool_test *etest, u64 *buf)
8990 struct bnx2x *bp = netdev_priv(dev);
8992 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8994 if (!netif_running(dev))
8997 /* offline tests are not supported in MF mode */
8999 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9001 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9004 link_up = bp->link_vars.link_up;
9005 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9006 bnx2x_nic_load(bp, LOAD_DIAG);
9007 /* wait until link state is restored */
9008 bnx2x_wait_for_link(bp, link_up);
9010 if (bnx2x_test_registers(bp) != 0) {
9012 etest->flags |= ETH_TEST_FL_FAILED;
9014 if (bnx2x_test_memory(bp) != 0) {
9016 etest->flags |= ETH_TEST_FL_FAILED;
9018 buf[2] = bnx2x_test_loopback(bp, link_up);
9020 etest->flags |= ETH_TEST_FL_FAILED;
9022 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9023 bnx2x_nic_load(bp, LOAD_NORMAL);
9024 /* wait until link state is restored */
9025 bnx2x_wait_for_link(bp, link_up);
9027 if (bnx2x_test_nvram(bp) != 0) {
9029 etest->flags |= ETH_TEST_FL_FAILED;
9031 if (bnx2x_test_intr(bp) != 0) {
9033 etest->flags |= ETH_TEST_FL_FAILED;
9036 if (bnx2x_link_test(bp) != 0) {
9038 etest->flags |= ETH_TEST_FL_FAILED;
9040 buf[7] = bnx2x_mc_assert(bp);
9042 etest->flags |= ETH_TEST_FL_FAILED;
9044 #ifdef BNX2X_EXTRA_DEBUG
9045 bnx2x_panic_dump(bp);
9049 static const struct {
9053 #define STATS_FLAGS_PORT 1
9054 #define STATS_FLAGS_FUNC 2
9055 u8 string[ETH_GSTRING_LEN];
9056 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9057 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9058 8, STATS_FLAGS_FUNC, "rx_bytes" },
9059 { STATS_OFFSET32(error_bytes_received_hi),
9060 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9061 { STATS_OFFSET32(total_bytes_transmitted_hi),
9062 8, STATS_FLAGS_FUNC, "tx_bytes" },
9063 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9064 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9065 { STATS_OFFSET32(total_unicast_packets_received_hi),
9066 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9067 { STATS_OFFSET32(total_multicast_packets_received_hi),
9068 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9069 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9070 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9071 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9072 8, STATS_FLAGS_FUNC, "tx_packets" },
9073 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9074 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9075 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9076 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9077 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9078 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9079 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9080 8, STATS_FLAGS_PORT, "rx_align_errors" },
9081 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9082 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9083 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9084 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9085 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9086 8, STATS_FLAGS_PORT, "tx_deferred" },
9087 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9088 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9089 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9090 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9091 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9092 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9093 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9094 8, STATS_FLAGS_PORT, "rx_fragments" },
9095 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9096 8, STATS_FLAGS_PORT, "rx_jabbers" },
9097 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9098 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9099 { STATS_OFFSET32(jabber_packets_received),
9100 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9101 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9102 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9103 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9104 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9105 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9106 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9107 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9108 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9109 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9110 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9111 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9112 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9113 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9114 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9115 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9116 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9117 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9118 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9119 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9120 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9121 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9122 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9123 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9124 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9125 { STATS_OFFSET32(mac_filter_discard),
9126 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9127 { STATS_OFFSET32(no_buff_discard),
9128 4, STATS_FLAGS_FUNC, "rx_discards" },
9129 { STATS_OFFSET32(xxoverflow_discard),
9130 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9131 { STATS_OFFSET32(brb_drop_hi),
9132 8, STATS_FLAGS_PORT, "brb_discard" },
9133 { STATS_OFFSET32(brb_truncate_hi),
9134 8, STATS_FLAGS_PORT, "brb_truncate" },
9135 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9136 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9137 { STATS_OFFSET32(rx_skb_alloc_failed),
9138 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9139 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9140 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9143 #define IS_NOT_E1HMF_STAT(bp, i) \
9144 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
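/* annotation: the PORT-flagged entries above are whole-port MAC counters;
 * in E1H multi-function mode a single function does not own the whole port,
 * so IS_NOT_E1HMF_STAT() is used by the string/count/stats callbacks below
 * to skip those entries in that mode */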
9146 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9148 struct bnx2x *bp = netdev_priv(dev);
9151 switch (stringset) {
9153 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9154 if (IS_NOT_E1HMF_STAT(bp, i))
9156 strcpy(buf + j*ETH_GSTRING_LEN,
9157 bnx2x_stats_arr[i].string);
9163 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9168 static int bnx2x_get_stats_count(struct net_device *dev)
9170 struct bnx2x *bp = netdev_priv(dev);
9171 int i, num_stats = 0;
9173 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9174 if (IS_NOT_E1HMF_STAT(bp, i))
9181 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9182 struct ethtool_stats *stats, u64 *buf)
9184 struct bnx2x *bp = netdev_priv(dev);
9185 u32 *hw_stats = (u32 *)&bp->eth_stats;
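/* annotation: bp->eth_stats is walked here as an array of u32s; each
 * bnx2x_stats_arr[] entry is either a single 32-bit counter (size == 4) or
 * a hi/lo pair (size == 8) that HILO_U64() folds into one u64, roughly
 * ((u64)hi << 32) + lo */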
9188 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9189 if (IS_NOT_E1HMF_STAT(bp, i))
9192 if (bnx2x_stats_arr[i].size == 0) {
9193 /* skip this counter */
9198 if (bnx2x_stats_arr[i].size == 4) {
9199 /* 4-byte counter */
9200 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9204 /* 8-byte counter */
9205 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9206 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9211 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9213 struct bnx2x *bp = netdev_priv(dev);
9214 int port = BP_PORT(bp);
9217 if (!netif_running(dev))
9226 for (i = 0; i < (data * 2); i++) {
9228 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9229 bp->link_params.hw_led_mode,
9230 bp->link_params.chip_id);
9232 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9233 bp->link_params.hw_led_mode,
9234 bp->link_params.chip_id);
9236 msleep_interruptible(500);
9237 if (signal_pending(current))
9241 if (bp->link_vars.link_up)
9242 bnx2x_set_led(bp, port, LED_MODE_OPER,
9243 bp->link_vars.line_speed,
9244 bp->link_params.hw_led_mode,
9245 bp->link_params.chip_id);
9250 static struct ethtool_ops bnx2x_ethtool_ops = {
9251 .get_settings = bnx2x_get_settings,
9252 .set_settings = bnx2x_set_settings,
9253 .get_drvinfo = bnx2x_get_drvinfo,
9254 .get_wol = bnx2x_get_wol,
9255 .set_wol = bnx2x_set_wol,
9256 .get_msglevel = bnx2x_get_msglevel,
9257 .set_msglevel = bnx2x_set_msglevel,
9258 .nway_reset = bnx2x_nway_reset,
9259 .get_link = ethtool_op_get_link,
9260 .get_eeprom_len = bnx2x_get_eeprom_len,
9261 .get_eeprom = bnx2x_get_eeprom,
9262 .set_eeprom = bnx2x_set_eeprom,
9263 .get_coalesce = bnx2x_get_coalesce,
9264 .set_coalesce = bnx2x_set_coalesce,
9265 .get_ringparam = bnx2x_get_ringparam,
9266 .set_ringparam = bnx2x_set_ringparam,
9267 .get_pauseparam = bnx2x_get_pauseparam,
9268 .set_pauseparam = bnx2x_set_pauseparam,
9269 .get_rx_csum = bnx2x_get_rx_csum,
9270 .set_rx_csum = bnx2x_set_rx_csum,
9271 .get_tx_csum = ethtool_op_get_tx_csum,
9272 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9273 .set_flags = bnx2x_set_flags,
9274 .get_flags = ethtool_op_get_flags,
9275 .get_sg = ethtool_op_get_sg,
9276 .set_sg = ethtool_op_set_sg,
9277 .get_tso = ethtool_op_get_tso,
9278 .set_tso = bnx2x_set_tso,
9279 .self_test_count = bnx2x_self_test_count,
9280 .self_test = bnx2x_self_test,
9281 .get_strings = bnx2x_get_strings,
9282 .phys_id = bnx2x_phys_id,
9283 .get_stats_count = bnx2x_get_stats_count,
9284 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9287 /* end of ethtool_ops */
9289 /****************************************************************************
9290 * General service functions
9291 ****************************************************************************/
9293 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9297 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9301 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9302 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9303 PCI_PM_CTRL_PME_STATUS));
9305 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9306 /* delay required during transition out of D3hot */
9311 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9315 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9317 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9320 /* No more memory access after this point until
9321 * device is brought back to D0.
9331 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9335 /* Tell compiler that status block fields can change */
9337 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
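/* annotation: the check below is understood to step over the last index on
 * an RCQ page (MAX_RCQ_DESC_CNT), which is a next-page element rather than
 * a real completion, before comparing against our consumer */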
9338 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9340 return (fp->rx_comp_cons != rx_cons_sb);
9344 * net_device service functions
9347 static int bnx2x_poll(struct napi_struct *napi, int budget)
9349 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9351 struct bnx2x *bp = fp->bp;
9354 #ifdef BNX2X_STOP_ON_ERROR
9355 if (unlikely(bp->panic))
9359 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9360 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9361 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9363 bnx2x_update_fpsb_idx(fp);
9365 if (bnx2x_has_tx_work(fp))
9366 bnx2x_tx_int(fp, budget);
9368 if (bnx2x_has_rx_work(fp))
9369 work_done = bnx2x_rx_int(fp, budget);
9370 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9372 /* must not complete if we consumed full budget */
9373 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9375 #ifdef BNX2X_STOP_ON_ERROR
9378 napi_complete(napi);
9380 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9381 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9382 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9383 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9389 /* we split the first BD into headers and data BDs
9390 * to ease the pain of our fellow microcode engineers
9391 * we use one mapping for both BDs
9392 * So far this has only been observed to happen
9393 * in Other Operating Systems(TM)
9395 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9396 struct bnx2x_fastpath *fp,
9397 struct eth_tx_bd **tx_bd, u16 hlen,
9398 u16 bd_prod, int nbd)
9400 struct eth_tx_bd *h_tx_bd = *tx_bd;
9401 struct eth_tx_bd *d_tx_bd;
9403 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9405 /* first fix first BD */
9406 h_tx_bd->nbd = cpu_to_le16(nbd);
9407 h_tx_bd->nbytes = cpu_to_le16(hlen);
9409 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9410 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9411 h_tx_bd->addr_lo, h_tx_bd->nbd);
9413 /* now get a new data BD
9414 * (after the pbd) and fill it */
9415 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9416 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9418 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9419 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9421 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9422 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9423 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9425 /* this marks the BD as one that has no individual mapping;
9426 * the FW ignores this flag in a BD that is not marked as a start BD
9428 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9429 DP(NETIF_MSG_TX_QUEUED,
9430 "TSO split data size is %d (%x:%x)\n",
9431 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9433 /* update tx_bd for marking the last BD flag */
9439 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9442 csum = (u16) ~csum_fold(csum_sub(csum,
9443 csum_partial(t_header - fix, fix, 0)));
9446 csum = (u16) ~csum_fold(csum_add(csum,
9447 csum_partial(t_header, -fix, 0)));
9449 return swab16(csum);
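/* annotation on bnx2x_csum_fix() above: when the hardware checksum was
 * started 'fix' bytes away from the real transport header, the helper
 * un-folds the value, subtracts (fix > 0) or adds (fix < 0) the partial sum
 * of those bytes via csum_partial(), folds and complements it again, and
 * byte-swaps the result for the parsing BD.  A rough, unverified sketch of
 * how the xmit path further down appears to use it (the exact arguments are
 * elided in this excerpt):
 *
 *	pbd->tcp_pseudo_csum =
 *		bnx2x_csum_fix(skb_transport_header(skb),
 *			       SKB_CS(skb), SKB_CS_OFF(skb));
 */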
9452 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9456 if (skb->ip_summed != CHECKSUM_PARTIAL)
9460 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9462 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9463 rc |= XMIT_CSUM_TCP;
9467 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9468 rc |= XMIT_CSUM_TCP;
9472 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9475 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9481 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9482 /* check if packet requires linearization (packet is too fragmented) */
9483 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9488 int first_bd_sz = 0;
9490 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9491 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9493 if (xmit_type & XMIT_GSO) {
9494 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9495 /* Check if LSO packet needs to be copied:
9496 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9497 int wnd_size = MAX_FETCH_BD - 3;
9498 /* Number of windows to check */
9499 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9504 /* Headers length */
9505 hlen = (int)(skb_transport_header(skb) - skb->data) +
9508 /* Amount of data (w/o headers) on linear part of SKB */
9509 first_bd_sz = skb_headlen(skb) - hlen;
9511 wnd_sum = first_bd_sz;
9513 /* Calculate the first sum - it's special */
9514 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9516 skb_shinfo(skb)->frags[frag_idx].size;
9518 /* If there was data on the linear part of the skb - check it */
9519 if (first_bd_sz > 0) {
9520 if (unlikely(wnd_sum < lso_mss)) {
9525 wnd_sum -= first_bd_sz;
9528 /* Others are easier: run through the frag list and
9529 check all windows */
9530 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9532 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9534 if (unlikely(wnd_sum < lso_mss)) {
9539 skb_shinfo(skb)->frags[wnd_idx].size;
9543 /* in the non-LSO case, a too fragmented packet should always
9550 if (unlikely(to_copy))
9551 DP(NETIF_MSG_TX_QUEUED,
9552 "Linearization IS REQUIRED for %s packet. "
9553 "num_frags %d hlen %d first_bd_sz %d\n",
9554 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9555 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
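/* annotation: the net effect of the window checks above -- if any run of
 * (MAX_FETCH_BD - 3) consecutive frags could carry less than one LSO
 * segment (gso_size), or a non-LSO skb simply has too many frags, to_copy
 * is set and the caller linearizes the skb with skb_linearize() */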
9561 /* called with netif_tx_lock
9562 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9563 * netif_wake_queue()
9565 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9567 struct bnx2x *bp = netdev_priv(dev);
9568 struct bnx2x_fastpath *fp;
9569 struct sw_tx_bd *tx_buf;
9570 struct eth_tx_bd *tx_bd;
9571 struct eth_tx_parse_bd *pbd = NULL;
9572 u16 pkt_prod, bd_prod;
9575 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9576 int vlan_off = (bp->e1hov ? 4 : 0);
9580 #ifdef BNX2X_STOP_ON_ERROR
9581 if (unlikely(bp->panic))
9582 return NETDEV_TX_BUSY;
9585 fp_index = (smp_processor_id() % bp->num_queues);
9586 fp = &bp->fp[fp_index];
9588 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9589 bp->eth_stats.driver_xoff++;
9590 netif_stop_queue(dev);
9591 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9592 return NETDEV_TX_BUSY;
9595 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9596 " gso type %x xmit_type %x\n",
9597 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9598 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9600 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9601 /* First, check if we need to linearize the skb
9602 (due to FW restrictions) */
9603 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9604 /* Statistics of linearization */
9606 if (skb_linearize(skb) != 0) {
9607 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9608 "silently dropping this SKB\n");
9609 dev_kfree_skb_any(skb);
9610 return NETDEV_TX_OK;
9616 Please read carefully. First we use one BD which we mark as start,
9617 then for TSO or xsum we have a parsing info BD,
9618 and only then we have the rest of the TSO BDs.
9619 (don't forget to mark the last one as last,
9620 and to unmap only AFTER you write to the BD ...)
9621 And above all, all pbd sizes are in words - NOT DWORDS!
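A rough picture of the chain built below (illustrative only):
  [BD: START flag, headers/linear data] -> [parse BD, for csum/TSO]
    -> [one BD per fragment] -> [last BD gets the END flag]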
9624 pkt_prod = fp->tx_pkt_prod++;
9625 bd_prod = TX_BD(fp->tx_bd_prod);
9627 /* get a tx_buf and first BD */
9628 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9629 tx_bd = &fp->tx_desc_ring[bd_prod];
9631 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9632 tx_bd->general_data = (UNICAST_ADDRESS <<
9633 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9635 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9637 /* remember the first BD of the packet */
9638 tx_buf->first_bd = fp->tx_bd_prod;
9641 DP(NETIF_MSG_TX_QUEUED,
9642 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9643 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9646 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9647 (bp->flags & HW_VLAN_TX_FLAG)) {
9648 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9649 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9653 tx_bd->vlan = cpu_to_le16(pkt_prod);
9656 /* turn on parsing and get a BD */
9657 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9658 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9660 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9663 if (xmit_type & XMIT_CSUM) {
9664 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9666 /* for now NS flag is not used in Linux */
9667 pbd->global_data = (hlen |
9668 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9669 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9671 pbd->ip_hlen = (skb_transport_header(skb) -
9672 skb_network_header(skb)) / 2;
9674 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9676 pbd->total_hlen = cpu_to_le16(hlen);
9677 hlen = hlen*2 - vlan_off;
9679 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9681 if (xmit_type & XMIT_CSUM_V4)
9682 tx_bd->bd_flags.as_bitfield |=
9683 ETH_TX_BD_FLAGS_IP_CSUM;
9685 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9687 if (xmit_type & XMIT_CSUM_TCP) {
9688 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9691 s8 fix = SKB_CS_OFF(skb); /* signed! */
9693 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9694 pbd->cs_offset = fix / 2;
9696 DP(NETIF_MSG_TX_QUEUED,
9697 "hlen %d offset %d fix %d csum before fix %x\n",
9698 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9701 /* HW bug: fixup the CSUM */
9702 pbd->tcp_pseudo_csum =
9703 bnx2x_csum_fix(skb_transport_header(skb),
9706 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9707 pbd->tcp_pseudo_csum);
9711 mapping = pci_map_single(bp->pdev, skb->data,
9712 skb_headlen(skb), PCI_DMA_TODEVICE);
9714 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9715 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9716 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9717 tx_bd->nbd = cpu_to_le16(nbd);
9718 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9720 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9721 " nbytes %d flags %x vlan %x\n",
9722 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9723 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9724 le16_to_cpu(tx_bd->vlan));
9726 if (xmit_type & XMIT_GSO) {
9728 DP(NETIF_MSG_TX_QUEUED,
9729 "TSO packet len %d hlen %d total len %d tso size %d\n",
9730 skb->len, hlen, skb_headlen(skb),
9731 skb_shinfo(skb)->gso_size);
9733 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9735 if (unlikely(skb_headlen(skb) > hlen))
9736 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9739 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9740 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9741 pbd->tcp_flags = pbd_tcp_flags(skb);
9743 if (xmit_type & XMIT_GSO_V4) {
9744 pbd->ip_id = swab16(ip_hdr(skb)->id);
9745 pbd->tcp_pseudo_csum =
9746 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9748 0, IPPROTO_TCP, 0));
9751 pbd->tcp_pseudo_csum =
9752 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9753 &ipv6_hdr(skb)->daddr,
9754 0, IPPROTO_TCP, 0));
9756 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
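/* annotation: for LSO the pseudo-header checksum above is deliberately
 * computed with a zero length (hence the _WITHOUT_LEN flag set here); the
 * per-segment length is presumably folded in when each segment's TCP
 * checksum is recomputed on transmit */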
9759 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9760 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9762 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9763 tx_bd = &fp->tx_desc_ring[bd_prod];
9765 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9766 frag->size, PCI_DMA_TODEVICE);
9768 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9769 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9770 tx_bd->nbytes = cpu_to_le16(frag->size);
9771 tx_bd->vlan = cpu_to_le16(pkt_prod);
9772 tx_bd->bd_flags.as_bitfield = 0;
9774 DP(NETIF_MSG_TX_QUEUED,
9775 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9776 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9777 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9780 /* now at last mark the BD as the last BD */
9781 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9783 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9784 tx_bd, tx_bd->bd_flags.as_bitfield);
9786 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9788 /* now send a tx doorbell, counting the next BD
9789 * if the packet contains or ends with it
9791 if (TX_BD_POFF(bd_prod) < nbd)
9795 DP(NETIF_MSG_TX_QUEUED,
9796 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9797 " tcp_flags %x xsum %x seq %u hlen %u\n",
9798 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9799 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9800 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9802 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9805 * Make sure that the BD data is updated before updating the producer
9806 * since FW might read the BD right after the producer is updated.
9807 * This is only applicable for weak-ordered memory model archs such
9808 * as IA-64. The following barrier is also mandatory since FW will
9809 * assume packets must have BDs.
9813 fp->hw_tx_prods->bds_prod =
9814 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9815 mb(); /* FW restriction: must not reorder writing nbd and packets */
9816 fp->hw_tx_prods->packets_prod =
9817 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9818 DOORBELL(bp, FP_IDX(fp), 0);
9822 fp->tx_bd_prod += nbd;
9823 dev->trans_start = jiffies;
9825 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9826 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9827 if we put Tx into XOFF state. */
9829 netif_stop_queue(dev);
9830 bp->eth_stats.driver_xoff++;
9831 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9832 netif_wake_queue(dev);
9836 return NETDEV_TX_OK;
9839 /* called with rtnl_lock */
9840 static int bnx2x_open(struct net_device *dev)
9842 struct bnx2x *bp = netdev_priv(dev);
9844 netif_carrier_off(dev);
9846 bnx2x_set_power_state(bp, PCI_D0);
9848 return bnx2x_nic_load(bp, LOAD_OPEN);
9851 /* called with rtnl_lock */
9852 static int bnx2x_close(struct net_device *dev)
9854 struct bnx2x *bp = netdev_priv(dev);
9856 /* Unload the driver, release IRQs */
9857 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9858 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9859 if (!CHIP_REV_IS_SLOW(bp))
9860 bnx2x_set_power_state(bp, PCI_D3hot);
9865 /* called with netif_tx_lock from set_multicast */
9866 static void bnx2x_set_rx_mode(struct net_device *dev)
9868 struct bnx2x *bp = netdev_priv(dev);
9869 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9870 int port = BP_PORT(bp);
9872 if (bp->state != BNX2X_STATE_OPEN) {
9873 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9877 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9879 if (dev->flags & IFF_PROMISC)
9880 rx_mode = BNX2X_RX_MODE_PROMISC;
9882 else if ((dev->flags & IFF_ALLMULTI) ||
9883 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9884 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9886 else { /* some multicasts */
9887 if (CHIP_IS_E1(bp)) {
9889 struct dev_mc_list *mclist;
9890 struct mac_configuration_cmd *config =
9891 bnx2x_sp(bp, mcast_config);
9893 for (i = 0, mclist = dev->mc_list;
9894 mclist && (i < dev->mc_count);
9895 i++, mclist = mclist->next) {
9897 config->config_table[i].
9898 cam_entry.msb_mac_addr =
9899 swab16(*(u16 *)&mclist->dmi_addr[0]);
9900 config->config_table[i].
9901 cam_entry.middle_mac_addr =
9902 swab16(*(u16 *)&mclist->dmi_addr[2]);
9903 config->config_table[i].
9904 cam_entry.lsb_mac_addr =
9905 swab16(*(u16 *)&mclist->dmi_addr[4]);
9906 config->config_table[i].cam_entry.flags =
9908 config->config_table[i].
9909 target_table_entry.flags = 0;
9910 config->config_table[i].
9911 target_table_entry.client_id = 0;
9912 config->config_table[i].
9913 target_table_entry.vlan_id = 0;
9916 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9917 config->config_table[i].
9918 cam_entry.msb_mac_addr,
9919 config->config_table[i].
9920 cam_entry.middle_mac_addr,
9921 config->config_table[i].
9922 cam_entry.lsb_mac_addr);
9924 old = config->hdr.length_6b;
9926 for (; i < old; i++) {
9927 if (CAM_IS_INVALID(config->
9929 /* already invalidated */
9933 CAM_INVALIDATE(config->
9938 if (CHIP_REV_IS_SLOW(bp))
9939 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9941 offset = BNX2X_MAX_MULTICAST*(1 + port);
9943 config->hdr.length_6b = i;
9944 config->hdr.offset = offset;
9945 config->hdr.client_id = BP_CL_ID(bp);
9946 config->hdr.reserved1 = 0;
9948 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9949 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9950 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9953 /* Accept one or more multicasts */
9954 struct dev_mc_list *mclist;
9955 u32 mc_filter[MC_HASH_SIZE];
9956 u32 crc, bit, regidx;
9959 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
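/* annotation: this builds a hash filter of MC_HASH_SIZE 32-bit registers --
 * the top eight bits of a little-endian CRC32C of each multicast MAC select
 * one bit in mc_filter[], and the hardware registers are programmed from it
 * at the end of this block */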
9961 for (i = 0, mclist = dev->mc_list;
9962 mclist && (i < dev->mc_count);
9963 i++, mclist = mclist->next) {
9965 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9968 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9969 bit = (crc >> 24) & 0xff;
9972 mc_filter[regidx] |= (1 << bit);
9975 for (i = 0; i < MC_HASH_SIZE; i++)
9976 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9981 bp->rx_mode = rx_mode;
9982 bnx2x_set_storm_rx_mode(bp);
9985 /* called with rtnl_lock */
9986 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9988 struct sockaddr *addr = p;
9989 struct bnx2x *bp = netdev_priv(dev);
9991 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9994 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9995 if (netif_running(dev)) {
9997 bnx2x_set_mac_addr_e1(bp, 1);
9999 bnx2x_set_mac_addr_e1h(bp, 1);
10005 /* called with rtnl_lock */
10006 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10008 struct mii_ioctl_data *data = if_mii(ifr);
10009 struct bnx2x *bp = netdev_priv(dev);
10010 int port = BP_PORT(bp);
10015 data->phy_id = bp->port.phy_addr;
10019 case SIOCGMIIREG: {
10022 if (!netif_running(dev))
10025 mutex_lock(&bp->port.phy_mutex);
10026 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10027 DEFAULT_PHY_DEV_ADDR,
10028 (data->reg_num & 0x1f), &mii_regval);
10029 data->val_out = mii_regval;
10030 mutex_unlock(&bp->port.phy_mutex);
10035 if (!capable(CAP_NET_ADMIN))
10038 if (!netif_running(dev))
10041 mutex_lock(&bp->port.phy_mutex);
10042 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10043 DEFAULT_PHY_DEV_ADDR,
10044 (data->reg_num & 0x1f), data->val_in);
10045 mutex_unlock(&bp->port.phy_mutex);
10053 return -EOPNOTSUPP;
10056 /* called with rtnl_lock */
10057 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10059 struct bnx2x *bp = netdev_priv(dev);
10062 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10063 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10066 /* This does not race with packet allocation
10067 * because the actual alloc size is
10068 * only updated as part of load
10070 dev->mtu = new_mtu;
10072 if (netif_running(dev)) {
10073 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10074 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10080 static void bnx2x_tx_timeout(struct net_device *dev)
10082 struct bnx2x *bp = netdev_priv(dev);
10084 #ifdef BNX2X_STOP_ON_ERROR
10088 /* This allows the netif to be shut down gracefully before resetting */
10089 schedule_work(&bp->reset_task);
10093 /* called with rtnl_lock */
10094 static void bnx2x_vlan_rx_register(struct net_device *dev,
10095 struct vlan_group *vlgrp)
10097 struct bnx2x *bp = netdev_priv(dev);
10101 /* Set flags according to the required capabilities */
10102 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10104 if (dev->features & NETIF_F_HW_VLAN_TX)
10105 bp->flags |= HW_VLAN_TX_FLAG;
10107 if (dev->features & NETIF_F_HW_VLAN_RX)
10108 bp->flags |= HW_VLAN_RX_FLAG;
10110 if (netif_running(dev))
10111 bnx2x_set_client_config(bp);
10116 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10117 static void poll_bnx2x(struct net_device *dev)
10119 struct bnx2x *bp = netdev_priv(dev);
10121 disable_irq(bp->pdev->irq);
10122 bnx2x_interrupt(bp->pdev->irq, dev);
10123 enable_irq(bp->pdev->irq);
10127 static const struct net_device_ops bnx2x_netdev_ops = {
10128 .ndo_open = bnx2x_open,
10129 .ndo_stop = bnx2x_close,
10130 .ndo_start_xmit = bnx2x_start_xmit,
10131 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10132 .ndo_set_mac_address = bnx2x_change_mac_addr,
10133 .ndo_validate_addr = eth_validate_addr,
10134 .ndo_do_ioctl = bnx2x_ioctl,
10135 .ndo_change_mtu = bnx2x_change_mtu,
10136 .ndo_tx_timeout = bnx2x_tx_timeout,
10138 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10140 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10141 .ndo_poll_controller = poll_bnx2x,
10146 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10147 struct net_device *dev)
10152 SET_NETDEV_DEV(dev, &pdev->dev);
10153 bp = netdev_priv(dev);
10158 bp->func = PCI_FUNC(pdev->devfn);
10160 rc = pci_enable_device(pdev);
10162 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10166 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10167 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10170 goto err_out_disable;
10173 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10174 printk(KERN_ERR PFX "Cannot find second PCI device"
10175 " base address, aborting\n");
10177 goto err_out_disable;
10180 if (atomic_read(&pdev->enable_cnt) == 1) {
10181 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10183 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10185 goto err_out_disable;
10188 pci_set_master(pdev);
10189 pci_save_state(pdev);
10192 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10193 if (bp->pm_cap == 0) {
10194 printk(KERN_ERR PFX "Cannot find power management"
10195 " capability, aborting\n");
10197 goto err_out_release;
10200 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10201 if (bp->pcie_cap == 0) {
10202 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10205 goto err_out_release;
10208 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10209 bp->flags |= USING_DAC_FLAG;
10210 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10211 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10212 " failed, aborting\n");
10214 goto err_out_release;
10217 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10218 printk(KERN_ERR PFX "System does not support DMA,"
10221 goto err_out_release;
10224 dev->mem_start = pci_resource_start(pdev, 0);
10225 dev->base_addr = dev->mem_start;
10226 dev->mem_end = pci_resource_end(pdev, 0);
10228 dev->irq = pdev->irq;
10230 bp->regview = pci_ioremap_bar(pdev, 0);
10231 if (!bp->regview) {
10232 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10234 goto err_out_release;
10237 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10238 min_t(u64, BNX2X_DB_SIZE,
10239 pci_resource_len(pdev, 2)));
10240 if (!bp->doorbells) {
10241 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10243 goto err_out_unmap;
10246 bnx2x_set_power_state(bp, PCI_D0);
10248 /* clean indirect addresses */
10249 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10250 PCICFG_VENDOR_ID_OFFSET);
10251 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10252 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10253 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10254 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10256 dev->watchdog_timeo = TX_TIMEOUT;
10258 dev->netdev_ops = &bnx2x_netdev_ops;
10259 dev->ethtool_ops = &bnx2x_ethtool_ops;
10260 dev->features |= NETIF_F_SG;
10261 dev->features |= NETIF_F_HW_CSUM;
10262 if (bp->flags & USING_DAC_FLAG)
10263 dev->features |= NETIF_F_HIGHDMA;
10265 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10266 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10268 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10269 dev->features |= NETIF_F_TSO6;
10275 iounmap(bp->regview);
10276 bp->regview = NULL;
10278 if (bp->doorbells) {
10279 iounmap(bp->doorbells);
10280 bp->doorbells = NULL;
10284 if (atomic_read(&pdev->enable_cnt) == 1)
10285 pci_release_regions(pdev);
10288 pci_disable_device(pdev);
10289 pci_set_drvdata(pdev, NULL);
10295 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10297 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10299 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10303 /* return value of 1=2.5GHz 2=5GHz */
10304 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10306 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10308 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10312 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10313 const struct pci_device_id *ent)
10315 static int version_printed;
10316 struct net_device *dev = NULL;
10320 if (version_printed++ == 0)
10321 printk(KERN_INFO "%s", version);
10323 /* dev zeroed in init_etherdev */
10324 dev = alloc_etherdev(sizeof(*bp));
10326 printk(KERN_ERR PFX "Cannot allocate net device\n");
10330 bp = netdev_priv(dev);
10331 bp->msglevel = debug;
10333 rc = bnx2x_init_dev(pdev, dev);
10339 pci_set_drvdata(pdev, dev);
10341 rc = bnx2x_init_bp(bp);
10343 goto init_one_exit;
10345 rc = register_netdev(dev);
10347 dev_err(&pdev->dev, "Cannot register net device\n");
10348 goto init_one_exit;
10351 bp->common.name = board_info[ent->driver_data].name;
10352 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10353 " IRQ %d, ", dev->name, bp->common.name,
10354 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10355 bnx2x_get_pcie_width(bp),
10356 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10357 dev->base_addr, bp->pdev->irq);
10358 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10363 iounmap(bp->regview);
10366 iounmap(bp->doorbells);
10370 if (atomic_read(&pdev->enable_cnt) == 1)
10371 pci_release_regions(pdev);
10373 pci_disable_device(pdev);
10374 pci_set_drvdata(pdev, NULL);
10379 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10381 struct net_device *dev = pci_get_drvdata(pdev);
10385 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10388 bp = netdev_priv(dev);
10390 unregister_netdev(dev);
10393 iounmap(bp->regview);
10396 iounmap(bp->doorbells);
10400 if (atomic_read(&pdev->enable_cnt) == 1)
10401 pci_release_regions(pdev);
10403 pci_disable_device(pdev);
10404 pci_set_drvdata(pdev, NULL);
10407 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10409 struct net_device *dev = pci_get_drvdata(pdev);
10413 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10416 bp = netdev_priv(dev);
10420 pci_save_state(pdev);
10422 if (!netif_running(dev)) {
10427 netif_device_detach(dev);
10429 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10431 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10438 static int bnx2x_resume(struct pci_dev *pdev)
10440 struct net_device *dev = pci_get_drvdata(pdev);
10445 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10448 bp = netdev_priv(dev);
10452 pci_restore_state(pdev);
10454 if (!netif_running(dev)) {
10459 bnx2x_set_power_state(bp, PCI_D0);
10460 netif_device_attach(dev);
10462 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10469 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10473 bp->state = BNX2X_STATE_ERROR;
10475 bp->rx_mode = BNX2X_RX_MODE_NONE;
10477 bnx2x_netif_stop(bp, 0);
10479 del_timer_sync(&bp->timer);
10480 bp->stats_state = STATS_STATE_DISABLED;
10481 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10484 bnx2x_free_irq(bp);
10486 if (CHIP_IS_E1(bp)) {
10487 struct mac_configuration_cmd *config =
10488 bnx2x_sp(bp, mcast_config);
10490 for (i = 0; i < config->hdr.length_6b; i++)
10491 CAM_INVALIDATE(config->config_table[i]);
10494 /* Free SKBs, SGEs, TPA pool and driver internals */
10495 bnx2x_free_skbs(bp);
10496 for_each_queue(bp, i)
10497 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10498 for_each_queue(bp, i)
10499 netif_napi_del(&bnx2x_fp(bp, i, napi));
10500 bnx2x_free_mem(bp);
10502 bp->state = BNX2X_STATE_CLOSED;
10504 netif_carrier_off(bp->dev);
10509 static void bnx2x_eeh_recover(struct bnx2x *bp)
10513 mutex_init(&bp->port.phy_mutex);
10515 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10516 bp->link_params.shmem_base = bp->common.shmem_base;
10517 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10519 if (!bp->common.shmem_base ||
10520 (bp->common.shmem_base < 0xA0000) ||
10521 (bp->common.shmem_base >= 0xC0000)) {
10522 BNX2X_DEV_INFO("MCP not active\n");
10523 bp->flags |= NO_MCP_FLAG;
10527 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10528 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10529 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10530 BNX2X_ERR("BAD MCP validity signature\n");
10532 if (!BP_NOMCP(bp)) {
10533 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10534 & DRV_MSG_SEQ_NUMBER_MASK);
10535 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10540 * bnx2x_io_error_detected - called when PCI error is detected
10541 * @pdev: Pointer to PCI device
10542 * @state: The current pci connection state
10544 * This function is called after a PCI bus error affecting
10545 * this device has been detected.
10547 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10548 pci_channel_state_t state)
10550 struct net_device *dev = pci_get_drvdata(pdev);
10551 struct bnx2x *bp = netdev_priv(dev);
10555 netif_device_detach(dev);
10557 if (netif_running(dev))
10558 bnx2x_eeh_nic_unload(bp);
10560 pci_disable_device(pdev);
10564 /* Request a slot reset */
10565 return PCI_ERS_RESULT_NEED_RESET;
10569 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10570 * @pdev: Pointer to PCI device
10572 * Restart the card from scratch, as if from a cold-boot.
10574 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10576 struct net_device *dev = pci_get_drvdata(pdev);
10577 struct bnx2x *bp = netdev_priv(dev);
10581 if (pci_enable_device(pdev)) {
10582 dev_err(&pdev->dev,
10583 "Cannot re-enable PCI device after reset\n");
10585 return PCI_ERS_RESULT_DISCONNECT;
10588 pci_set_master(pdev);
10589 pci_restore_state(pdev);
10591 if (netif_running(dev))
10592 bnx2x_set_power_state(bp, PCI_D0);
10596 return PCI_ERS_RESULT_RECOVERED;
10600 * bnx2x_io_resume - called when traffic can start flowing again
10601 * @pdev: Pointer to PCI device
10603 * This callback is called when the error recovery driver tells us that
10604 * it's OK to resume normal operation.
10606 static void bnx2x_io_resume(struct pci_dev *pdev)
10608 struct net_device *dev = pci_get_drvdata(pdev);
10609 struct bnx2x *bp = netdev_priv(dev);
10613 bnx2x_eeh_recover(bp);
10615 if (netif_running(dev))
10616 bnx2x_nic_load(bp, LOAD_NORMAL);
10618 netif_device_attach(dev);
10623 static struct pci_error_handlers bnx2x_err_handler = {
10624 .error_detected = bnx2x_io_error_detected,
10625 .slot_reset = bnx2x_io_slot_reset,
10626 .resume = bnx2x_io_resume,
10629 static struct pci_driver bnx2x_pci_driver = {
10630 .name = DRV_MODULE_NAME,
10631 .id_table = bnx2x_pci_tbl,
10632 .probe = bnx2x_init_one,
10633 .remove = __devexit_p(bnx2x_remove_one),
10634 .suspend = bnx2x_suspend,
10635 .resume = bnx2x_resume,
10636 .err_handler = &bnx2x_err_handler,
10639 static int __init bnx2x_init(void)
10641 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10642 if (bnx2x_wq == NULL) {
10643 printk(KERN_ERR PFX "Cannot create workqueue\n");
10647 return pci_register_driver(&bnx2x_pci_driver);
10650 static void __exit bnx2x_cleanup(void)
10652 pci_unregister_driver(&bnx2x_pci_driver);
10654 destroy_workqueue(bnx2x_wq);
10657 module_init(bnx2x_init);
10658 module_exit(bnx2x_cleanup);