1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
58 #include "bnx2x_init.h"
60 #define DRV_MODULE_VERSION "1.45.26"
61 #define DRV_MODULE_RELDATE "2009/01/26"
62 #define BNX2X_BC_VER 0x040200
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int disable_tpa;
80 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
83 module_param(disable_tpa, int, 0);
84 module_param(use_inta, int, 0);
85 module_param(poll, int, 0);
86 module_param(debug, int, 0);
87 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
88 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
89 MODULE_PARM_DESC(poll, "use polling (for debug)");
90 MODULE_PARM_DESC(debug, "default debug msglevel");
93 module_param(use_multi, int, 0);
94 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
96 static struct workqueue_struct *bnx2x_wq;
98 enum bnx2x_board_type {
104 /* indexed by board_type, above */
107 } board_info[] __devinitdata = {
108 { "Broadcom NetXtreme II BCM57710 XGb" },
109 { "Broadcom NetXtreme II BCM57711 XGb" },
110 { "Broadcom NetXtreme II BCM57711E XGb" }
114 static const struct pci_device_id bnx2x_pci_tbl[] = {
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
124 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
126 /****************************************************************************
127 * General service functions
128 ****************************************************************************/
131 * locking is done by mcp
133 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138 PCICFG_VENDOR_ID_OFFSET);
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 PCICFG_VENDOR_ID_OFFSET);
153 static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 /* copy command into DMAE command memory and set DMAE command go */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
171 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180 struct dmae_command *dmae = &bp->init_dmae;
181 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
184 if (!bp->dmae_ready) {
185 u32 *data = bnx2x_sp(bp, wb_data[0]);
187 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
188 " using indirect\n", dst_addr, len32);
189 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
193 mutex_lock(&bp->dmae_mutex);
195 memset(dmae, 0, sizeof(struct dmae_command));
197 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
201 DMAE_CMD_ENDIANITY_B_DW_SWAP |
203 DMAE_CMD_ENDIANITY_DW_SWAP |
205 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207 dmae->src_addr_lo = U64_LO(dma_addr);
208 dmae->src_addr_hi = U64_HI(dma_addr);
209 dmae->dst_addr_lo = dst_addr >> 2;
210 dmae->dst_addr_hi = 0;
212 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214 dmae->comp_val = DMAE_COMP_VAL;
216 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
218 "dst_addr [%x:%08x (%08x)]\n"
219 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
220 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
229 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
233 while (*wb_comp != DMAE_COMP_VAL) {
234 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237 BNX2X_ERR("dmae timeout!\n");
241 /* adjust delay for emulation/FPGA */
242 if (CHIP_REV_IS_SLOW(bp))
248 mutex_unlock(&bp->dmae_mutex);
251 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
253 struct dmae_command *dmae = &bp->init_dmae;
254 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
257 if (!bp->dmae_ready) {
258 u32 *data = bnx2x_sp(bp, wb_data[0]);
261 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
262 " using indirect\n", src_addr, len32);
263 for (i = 0; i < len32; i++)
264 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
268 mutex_lock(&bp->dmae_mutex);
270 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271 memset(dmae, 0, sizeof(struct dmae_command));
273 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
277 DMAE_CMD_ENDIANITY_B_DW_SWAP |
279 DMAE_CMD_ENDIANITY_DW_SWAP |
281 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
283 dmae->src_addr_lo = src_addr >> 2;
284 dmae->src_addr_hi = 0;
285 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
288 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
290 dmae->comp_val = DMAE_COMP_VAL;
292 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
293 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
294 "dst_addr [%x:%08x (%08x)]\n"
295 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
296 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
302 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
306 while (*wb_comp != DMAE_COMP_VAL) {
309 BNX2X_ERR("dmae timeout!\n");
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
319 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
320 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
323 mutex_unlock(&bp->dmae_mutex);
326 /* used only for slowpath so not inlined */
327 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
331 wb_write[0] = val_hi;
332 wb_write[1] = val_lo;
333 REG_WR_DMAE(bp, reg, wb_write, 2);
337 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
341 REG_RD_DMAE(bp, reg, wb_data, 2);
343 return HILO_U64(wb_data[0], wb_data[1]);
347 static int bnx2x_mc_assert(struct bnx2x *bp)
351 u32 row0, row1, row2, row3;
354 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355 XSTORM_ASSERT_LIST_INDEX_OFFSET);
357 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
359 /* print the asserts */
360 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
362 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363 XSTORM_ASSERT_LIST_OFFSET(i));
364 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
371 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373 " 0x%08x 0x%08x 0x%08x\n",
374 i, row3, row2, row1, row0);
382 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383 TSTORM_ASSERT_LIST_INDEX_OFFSET);
385 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
387 /* print the asserts */
388 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
390 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391 TSTORM_ASSERT_LIST_OFFSET(i));
392 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
399 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401 " 0x%08x 0x%08x 0x%08x\n",
402 i, row3, row2, row1, row0);
410 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411 CSTORM_ASSERT_LIST_INDEX_OFFSET);
413 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
415 /* print the asserts */
416 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
418 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419 CSTORM_ASSERT_LIST_OFFSET(i));
420 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
427 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429 " 0x%08x 0x%08x 0x%08x\n",
430 i, row3, row2, row1, row0);
438 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439 USTORM_ASSERT_LIST_INDEX_OFFSET);
441 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
443 /* print the asserts */
444 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
446 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447 USTORM_ASSERT_LIST_OFFSET(i));
448 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i) + 4);
450 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 8);
452 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 12);
455 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457 " 0x%08x 0x%08x 0x%08x\n",
458 i, row3, row2, row1, row0);
468 static void bnx2x_fw_dump(struct bnx2x *bp)
474 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
475 mark = ((mark + 0x3) & ~0x3);
476 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
478 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479 for (word = 0; word < 8; word++)
480 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483 printk(KERN_CONT "%s", (char *)data);
485 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486 for (word = 0; word < 8; word++)
487 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490 printk(KERN_CONT "%s", (char *)data);
492 printk("\n" KERN_ERR PFX "end of fw dump\n");
495 static void bnx2x_panic_dump(struct bnx2x *bp)
500 bp->stats_state = STATS_STATE_DISABLED;
501 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
503 BNX2X_ERR("begin crash dump -----------------\n");
505 for_each_queue(bp, i) {
506 struct bnx2x_fastpath *fp = &bp->fp[i];
507 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
509 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
510 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
511 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
512 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
513 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
514 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
515 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
516 fp->rx_bd_prod, fp->rx_bd_cons,
517 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
520 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
521 " *sb_u_idx(%x) bd data(%x,%x)\n",
522 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523 fp->status_blk->c_status_block.status_block_index,
525 fp->status_blk->u_status_block.status_block_index,
526 hw_prods->packets_prod, hw_prods->bds_prod);
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 for (j = start; j < end; j++) {
531 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
533 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 sw_bd->skb, sw_bd->first_bd);
537 start = TX_BD(fp->tx_bd_cons - 10);
538 end = TX_BD(fp->tx_bd_cons + 254);
539 for (j = start; j < end; j++) {
540 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
542 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
546 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 for (j = start; j < end; j++) {
549 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
552 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
553 j, rx_bd[1], rx_bd[0], sw_bd->skb);
556 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE(fp->last_max_sge);
558 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
562 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
563 j, rx_sge[1], rx_sge[0], sw_page->page);
566 start = RCQ_BD(fp->rx_comp_cons - 10);
567 end = RCQ_BD(fp->rx_comp_cons + 503);
568 for (j = start; j < end; j++) {
569 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
571 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 j, cqe[0], cqe[1], cqe[2], cqe[3]);
576 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
577 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
578 " spq_prod_idx(%u)\n",
579 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
580 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
584 BNX2X_ERR("end crash dump -----------------\n");
587 static void bnx2x_int_enable(struct bnx2x *bp)
589 int port = BP_PORT(bp);
590 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591 u32 val = REG_RD(bp, addr);
592 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
595 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
596 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
597 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
599 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
600 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
601 HC_CONFIG_0_REG_INT_LINE_EN_0 |
602 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
604 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
605 val, port, addr, msix);
607 REG_WR(bp, addr, val);
609 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
612 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
613 val, port, addr, msix);
615 REG_WR(bp, addr, val);
617 if (CHIP_IS_E1H(bp)) {
618 /* init leading/trailing edge */
620 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
622 /* enable nig attention */
627 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
628 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
632 static void bnx2x_int_disable(struct bnx2x *bp)
634 int port = BP_PORT(bp);
635 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
636 u32 val = REG_RD(bp, addr);
638 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
639 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
640 HC_CONFIG_0_REG_INT_LINE_EN_0 |
641 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
646 REG_WR(bp, addr, val);
647 if (REG_RD(bp, addr) != val)
648 BNX2X_ERR("BUG! proper val not read from IGU!\n");
651 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
653 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
656 /* disable interrupt handling */
657 atomic_inc(&bp->intr_sem);
659 /* prevent the HW from sending interrupts */
660 bnx2x_int_disable(bp);
662 /* make sure all ISRs are done */
664 for_each_queue(bp, i)
665 synchronize_irq(bp->msix_table[i].vector);
667 /* one more for the Slow Path IRQ */
668 synchronize_irq(bp->msix_table[i].vector);
670 synchronize_irq(bp->pdev->irq);
672 /* make sure sp_task is not running */
673 cancel_delayed_work(&bp->sp_task);
674 flush_workqueue(bnx2x_wq);
680 * General service functions
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
684 u8 storm, u16 index, u8 op, u8 update)
686 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687 COMMAND_REG_INT_ACK);
688 struct igu_ack_register igu_ack;
690 igu_ack.status_block_index = index;
691 igu_ack.sb_id_and_flags =
692 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
693 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
697 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698 (*(u32 *)&igu_ack), hc_addr);
699 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
702 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
704 struct host_status_block *fpsb = fp->status_blk;
707 barrier(); /* status block is written to by the chip */
708 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
712 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
719 static u16 bnx2x_ack_int(struct bnx2x *bp)
721 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722 COMMAND_REG_SIMD_MASK);
723 u32 result = REG_RD(bp, hc_addr);
725 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
733 * fast path service functions
736 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
740 /* Tell compiler that status block fields can change */
742 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
743 return (fp->tx_pkt_cons != tx_cons_sb);
746 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
748 /* Tell compiler that consumer and producer can change */
750 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
754 /* free skb in the packet ring at pos idx
755 * return idx of last bd freed
757 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
760 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
761 struct eth_tx_bd *tx_bd;
762 struct sk_buff *skb = tx_buf->skb;
763 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
766 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
770 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
771 tx_bd = &fp->tx_desc_ring[bd_idx];
772 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
773 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
775 nbd = le16_to_cpu(tx_bd->nbd) - 1;
776 new_cons = nbd + tx_buf->first_bd;
777 #ifdef BNX2X_STOP_ON_ERROR
778 if (nbd > (MAX_SKB_FRAGS + 2)) {
779 BNX2X_ERR("BAD nbd!\n");
784 /* Skip a parse bd and the TSO split header bd
785 since they have no mapping */
787 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
789 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
790 ETH_TX_BD_FLAGS_TCP_CSUM |
791 ETH_TX_BD_FLAGS_SW_LSO)) {
793 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
794 tx_bd = &fp->tx_desc_ring[bd_idx];
795 /* is this a TSO split header bd? */
796 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
798 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
805 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
806 tx_bd = &fp->tx_desc_ring[bd_idx];
807 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
808 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
810 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
816 tx_buf->first_bd = 0;
822 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
828 barrier(); /* Tell compiler that prod and cons can change */
829 prod = fp->tx_bd_prod;
830 cons = fp->tx_bd_cons;
832 /* NUM_TX_RINGS = number of "next-page" entries
833 It will be used as a threshold */
834 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
836 #ifdef BNX2X_STOP_ON_ERROR
838 WARN_ON(used > fp->bp->tx_ring_size);
839 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
842 return (s16)(fp->bp->tx_ring_size) - used;
845 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
847 struct bnx2x *bp = fp->bp;
848 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
851 #ifdef BNX2X_STOP_ON_ERROR
852 if (unlikely(bp->panic))
856 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
857 sw_cons = fp->tx_pkt_cons;
859 while (sw_cons != hw_cons) {
862 pkt_cons = TX_BD(sw_cons);
864 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
866 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
867 hw_cons, sw_cons, pkt_cons);
869 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
871 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
874 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
882 fp->tx_pkt_cons = sw_cons;
883 fp->tx_bd_cons = bd_cons;
885 /* Need to make the tx_cons update visible to start_xmit()
886 * before checking for netif_queue_stopped(). Without the
887 * memory barrier, there is a small possibility that start_xmit()
888 * will miss it and cause the queue to be stopped forever.
892 /* TBD need a thresh? */
893 if (unlikely(netif_queue_stopped(bp->dev))) {
895 netif_tx_lock(bp->dev);
897 if (netif_queue_stopped(bp->dev) &&
898 (bp->state == BNX2X_STATE_OPEN) &&
899 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
900 netif_wake_queue(bp->dev);
902 netif_tx_unlock(bp->dev);
907 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
908 union eth_rx_cqe *rr_cqe)
910 struct bnx2x *bp = fp->bp;
911 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
912 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
915 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
916 FP_IDX(fp), cid, command, bp->state,
917 rr_cqe->ramrod_cqe.ramrod_type);
922 switch (command | fp->state) {
923 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
924 BNX2X_FP_STATE_OPENING):
925 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
927 fp->state = BNX2X_FP_STATE_OPEN;
930 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
931 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
933 fp->state = BNX2X_FP_STATE_HALTED;
937 BNX2X_ERR("unexpected MC reply (%d) "
938 "fp->state is %x\n", command, fp->state);
941 mb(); /* force bnx2x_wait_ramrod() to see the change */
945 switch (command | bp->state) {
946 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
947 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
948 bp->state = BNX2X_STATE_OPEN;
951 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
952 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
953 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
954 fp->state = BNX2X_FP_STATE_HALTED;
957 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
958 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
959 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
963 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
964 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
965 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
966 bp->set_mac_pending = 0;
969 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
970 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
974 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
978 mb(); /* force bnx2x_wait_ramrod() to see the change */
981 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
982 struct bnx2x_fastpath *fp, u16 index)
984 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
985 struct page *page = sw_buf->page;
986 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
988 /* Skip "next page" elements */
992 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
993 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
994 __free_pages(page, PAGES_PER_SGE_SHIFT);
1001 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1002 struct bnx2x_fastpath *fp, int last)
1006 for (i = 0; i < last; i++)
1007 bnx2x_free_rx_sge(bp, fp, i);
1010 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1011 struct bnx2x_fastpath *fp, u16 index)
1013 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1014 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1015 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1018 if (unlikely(page == NULL))
1021 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1022 PCI_DMA_FROMDEVICE);
1023 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1024 __free_pages(page, PAGES_PER_SGE_SHIFT);
1028 sw_buf->page = page;
1029 pci_unmap_addr_set(sw_buf, mapping, mapping);
1031 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1032 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1037 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1038 struct bnx2x_fastpath *fp, u16 index)
1040 struct sk_buff *skb;
1041 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1042 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1045 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1046 if (unlikely(skb == NULL))
1049 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1050 PCI_DMA_FROMDEVICE);
1051 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1057 pci_unmap_addr_set(rx_buf, mapping, mapping);
1059 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1060 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1065 /* note that we are not allocating a new skb,
1066 * we are just moving one from cons to prod
1067 * we are not creating a new mapping,
1068 * so there is no need to check for dma_mapping_error().
1070 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1071 struct sk_buff *skb, u16 cons, u16 prod)
1073 struct bnx2x *bp = fp->bp;
1074 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1075 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1076 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1077 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1079 pci_dma_sync_single_for_device(bp->pdev,
1080 pci_unmap_addr(cons_rx_buf, mapping),
1081 bp->rx_offset + RX_COPY_THRESH,
1082 PCI_DMA_FROMDEVICE);
1084 prod_rx_buf->skb = cons_rx_buf->skb;
1085 pci_unmap_addr_set(prod_rx_buf, mapping,
1086 pci_unmap_addr(cons_rx_buf, mapping));
1087 *prod_bd = *cons_bd;
1090 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1093 u16 last_max = fp->last_max_sge;
1095 if (SUB_S16(idx, last_max) > 0)
1096 fp->last_max_sge = idx;
1099 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1103 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1104 int idx = RX_SGE_CNT * i - 1;
1106 for (j = 0; j < 2; j++) {
1107 SGE_MASK_CLEAR_BIT(fp, idx);
1113 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1114 struct eth_fast_path_rx_cqe *fp_cqe)
1116 struct bnx2x *bp = fp->bp;
1117 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1118 le16_to_cpu(fp_cqe->len_on_bd)) >>
1120 u16 last_max, last_elem, first_elem;
1127 /* First mark all used pages */
1128 for (i = 0; i < sge_len; i++)
1129 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1131 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1132 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1134 /* Here we assume that the last SGE index is the biggest */
1135 prefetch((void *)(fp->sge_mask));
1136 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1138 last_max = RX_SGE(fp->last_max_sge);
1139 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1140 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1142 /* If ring is not full */
1143 if (last_elem + 1 != first_elem)
1146 /* Now update the prod */
1147 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1148 if (likely(fp->sge_mask[i]))
1151 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1152 delta += RX_SGE_MASK_ELEM_SZ;
1156 fp->rx_sge_prod += delta;
1157 /* clear page-end entries */
1158 bnx2x_clear_sge_mask_next_elems(fp);
1161 DP(NETIF_MSG_RX_STATUS,
1162 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1163 fp->last_max_sge, fp->rx_sge_prod);
1166 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1168 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1169 memset(fp->sge_mask, 0xff,
1170 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1172 /* Clear the two last indices in the page to 1:
1173 these are the indices that correspond to the "next" element,
1174 hence will never be indicated and should be removed from
1175 the calculations. */
1176 bnx2x_clear_sge_mask_next_elems(fp);
1179 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1180 struct sk_buff *skb, u16 cons, u16 prod)
1182 struct bnx2x *bp = fp->bp;
1183 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1184 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1185 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1188 /* move empty skb from pool to prod and map it */
1189 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1190 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1191 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1192 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1194 /* move partial skb from cons to pool (don't unmap yet) */
1195 fp->tpa_pool[queue] = *cons_rx_buf;
1197 /* mark bin state as start - print error if current state != stop */
1198 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1199 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1201 fp->tpa_state[queue] = BNX2X_TPA_START;
1203 /* point prod_bd to new skb */
1204 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1205 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1207 #ifdef BNX2X_STOP_ON_ERROR
1208 fp->tpa_queue_used |= (1 << queue);
1209 #ifdef __powerpc64__
1210 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1212 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1214 fp->tpa_queue_used);
1218 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1219 struct sk_buff *skb,
1220 struct eth_fast_path_rx_cqe *fp_cqe,
1223 struct sw_rx_page *rx_pg, old_rx_pg;
1224 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1225 u32 i, frag_len, frag_size, pages;
1229 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1230 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1232 /* This is needed in order to enable forwarding support */
1234 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1235 max(frag_size, (u32)len_on_bd));
1237 #ifdef BNX2X_STOP_ON_ERROR
1239 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1240 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1242 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1243 fp_cqe->pkt_len, len_on_bd);
1249 /* Run through the SGL and compose the fragmented skb */
1250 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1251 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1253 /* FW gives the indices of the SGE as if the ring is an array
1254 (meaning that "next" element will consume 2 indices) */
1255 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1256 rx_pg = &fp->rx_page_ring[sge_idx];
1259 /* If we fail to allocate a substitute page, we simply stop
1260 where we are and drop the whole packet */
1261 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1262 if (unlikely(err)) {
1263 bp->eth_stats.rx_skb_alloc_failed++;
1267 /* Unmap the page as we r going to pass it to the stack */
1268 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1269 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1271 /* Add one frag and update the appropriate fields in the skb */
1272 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1274 skb->data_len += frag_len;
1275 skb->truesize += frag_len;
1276 skb->len += frag_len;
1278 frag_size -= frag_len;
1284 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1285 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1288 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1289 struct sk_buff *skb = rx_buf->skb;
1291 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1293 /* Unmap skb in the pool anyway, as we are going to change
1294 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1296 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1297 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1299 if (likely(new_skb)) {
1300 /* fix ip xsum and give it to the stack */
1301 /* (no need to map the new skb) */
1304 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1305 PARSING_FLAGS_VLAN);
1306 int is_not_hwaccel_vlan_cqe =
1307 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1311 prefetch(((char *)(skb)) + 128);
1313 #ifdef BNX2X_STOP_ON_ERROR
1314 if (pad + len > bp->rx_buf_size) {
1315 BNX2X_ERR("skb_put is about to fail... "
1316 "pad %d len %d rx_buf_size %d\n",
1317 pad, len, bp->rx_buf_size);
1323 skb_reserve(skb, pad);
1326 skb->protocol = eth_type_trans(skb, bp->dev);
1327 skb->ip_summed = CHECKSUM_UNNECESSARY;
1332 iph = (struct iphdr *)skb->data;
1334 /* If there is no Rx VLAN offloading -
1335 take VLAN tag into an account */
1336 if (unlikely(is_not_hwaccel_vlan_cqe))
1337 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1340 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1343 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1344 &cqe->fast_path_cqe, cqe_idx)) {
1346 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1347 (!is_not_hwaccel_vlan_cqe))
1348 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1349 le16_to_cpu(cqe->fast_path_cqe.
1353 netif_receive_skb(skb);
1355 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1356 " - dropping packet!\n");
1361 /* put new skb in bin */
1362 fp->tpa_pool[queue].skb = new_skb;
1365 /* else drop the packet and keep the buffer in the bin */
1366 DP(NETIF_MSG_RX_STATUS,
1367 "Failed to allocate new skb - dropping packet!\n");
1368 bp->eth_stats.rx_skb_alloc_failed++;
1371 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1374 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1375 struct bnx2x_fastpath *fp,
1376 u16 bd_prod, u16 rx_comp_prod,
1379 struct tstorm_eth_rx_producers rx_prods = {0};
1382 /* Update producers */
1383 rx_prods.bd_prod = bd_prod;
1384 rx_prods.cqe_prod = rx_comp_prod;
1385 rx_prods.sge_prod = rx_sge_prod;
1388 * Make sure that the BD and SGE data is updated before updating the
1389 * producers since FW might read the BD/SGE right after the producer
1391 * This is only applicable for weak-ordered memory model archs such
1392 * as IA-64. The following barrier is also mandatory since FW will
1393 * assumes BDs must have buffers.
1397 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1398 REG_WR(bp, BAR_TSTRORM_INTMEM +
1399 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1400 ((u32 *)&rx_prods)[i]);
1402 mmiowb(); /* keep prod updates ordered */
1404 DP(NETIF_MSG_RX_STATUS,
1405 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1406 bd_prod, rx_comp_prod, rx_sge_prod);
1409 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1411 struct bnx2x *bp = fp->bp;
1412 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1413 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1416 #ifdef BNX2X_STOP_ON_ERROR
1417 if (unlikely(bp->panic))
1421 /* CQ "next element" is of the size of the regular element,
1422 that's why it's ok here */
1423 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1424 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1427 bd_cons = fp->rx_bd_cons;
1428 bd_prod = fp->rx_bd_prod;
1429 bd_prod_fw = bd_prod;
1430 sw_comp_cons = fp->rx_comp_cons;
1431 sw_comp_prod = fp->rx_comp_prod;
1433 /* Memory barrier necessary as speculative reads of the rx
1434 * buffer can be ahead of the index in the status block
1438 DP(NETIF_MSG_RX_STATUS,
1439 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1440 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1442 while (sw_comp_cons != hw_comp_cons) {
1443 struct sw_rx_bd *rx_buf = NULL;
1444 struct sk_buff *skb;
1445 union eth_rx_cqe *cqe;
1449 comp_ring_cons = RCQ_BD(sw_comp_cons);
1450 bd_prod = RX_BD(bd_prod);
1451 bd_cons = RX_BD(bd_cons);
1453 cqe = &fp->rx_comp_ring[comp_ring_cons];
1454 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1456 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1457 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1458 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1459 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1460 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1461 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1463 /* is this a slowpath msg? */
1464 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1465 bnx2x_sp_event(fp, cqe);
1468 /* this is an rx packet */
1470 rx_buf = &fp->rx_buf_ring[bd_cons];
1472 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1473 pad = cqe->fast_path_cqe.placement_offset;
1475 /* If CQE is marked both TPA_START and TPA_END
1476 it is a non-TPA CQE */
1477 if ((!fp->disable_tpa) &&
1478 (TPA_TYPE(cqe_fp_flags) !=
1479 (TPA_TYPE_START | TPA_TYPE_END))) {
1480 u16 queue = cqe->fast_path_cqe.queue_index;
1482 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1483 DP(NETIF_MSG_RX_STATUS,
1484 "calling tpa_start on queue %d\n",
1487 bnx2x_tpa_start(fp, queue, skb,
1492 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1493 DP(NETIF_MSG_RX_STATUS,
1494 "calling tpa_stop on queue %d\n",
1497 if (!BNX2X_RX_SUM_FIX(cqe))
1498 BNX2X_ERR("STOP on none TCP "
1501 /* This is a size of the linear data
1503 len = le16_to_cpu(cqe->fast_path_cqe.
1505 bnx2x_tpa_stop(bp, fp, queue, pad,
1506 len, cqe, comp_ring_cons);
1507 #ifdef BNX2X_STOP_ON_ERROR
1512 bnx2x_update_sge_prod(fp,
1513 &cqe->fast_path_cqe);
1518 pci_dma_sync_single_for_device(bp->pdev,
1519 pci_unmap_addr(rx_buf, mapping),
1520 pad + RX_COPY_THRESH,
1521 PCI_DMA_FROMDEVICE);
1523 prefetch(((char *)(skb)) + 128);
1525 /* is this an error packet? */
1526 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1527 DP(NETIF_MSG_RX_ERR,
1528 "ERROR flags %x rx packet %u\n",
1529 cqe_fp_flags, sw_comp_cons);
1530 bp->eth_stats.rx_err_discard_pkt++;
1534 /* Since we don't have a jumbo ring
1535 * copy small packets if mtu > 1500
1537 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1538 (len <= RX_COPY_THRESH)) {
1539 struct sk_buff *new_skb;
1541 new_skb = netdev_alloc_skb(bp->dev,
1543 if (new_skb == NULL) {
1544 DP(NETIF_MSG_RX_ERR,
1545 "ERROR packet dropped "
1546 "because of alloc failure\n");
1547 bp->eth_stats.rx_skb_alloc_failed++;
1552 skb_copy_from_linear_data_offset(skb, pad,
1553 new_skb->data + pad, len);
1554 skb_reserve(new_skb, pad);
1555 skb_put(new_skb, len);
1557 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1561 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1562 pci_unmap_single(bp->pdev,
1563 pci_unmap_addr(rx_buf, mapping),
1565 PCI_DMA_FROMDEVICE);
1566 skb_reserve(skb, pad);
1570 DP(NETIF_MSG_RX_ERR,
1571 "ERROR packet dropped because "
1572 "of alloc failure\n");
1573 bp->eth_stats.rx_skb_alloc_failed++;
1575 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1579 skb->protocol = eth_type_trans(skb, bp->dev);
1581 skb->ip_summed = CHECKSUM_NONE;
1583 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1584 skb->ip_summed = CHECKSUM_UNNECESSARY;
1586 bp->eth_stats.hw_csum_err++;
1591 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1592 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1593 PARSING_FLAGS_VLAN))
1594 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1595 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1598 netif_receive_skb(skb);
1604 bd_cons = NEXT_RX_IDX(bd_cons);
1605 bd_prod = NEXT_RX_IDX(bd_prod);
1606 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1609 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1610 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1612 if (rx_pkt == budget)
1616 fp->rx_bd_cons = bd_cons;
1617 fp->rx_bd_prod = bd_prod_fw;
1618 fp->rx_comp_cons = sw_comp_cons;
1619 fp->rx_comp_prod = sw_comp_prod;
1621 /* Update producers */
1622 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1625 fp->rx_pkt += rx_pkt;
1631 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1633 struct bnx2x_fastpath *fp = fp_cookie;
1634 struct bnx2x *bp = fp->bp;
1635 int index = FP_IDX(fp);
1637 /* Return here if interrupt is disabled */
1638 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1639 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1643 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1644 index, FP_SB_ID(fp));
1645 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1647 #ifdef BNX2X_STOP_ON_ERROR
1648 if (unlikely(bp->panic))
1652 prefetch(fp->rx_cons_sb);
1653 prefetch(fp->tx_cons_sb);
1654 prefetch(&fp->status_blk->c_status_block.status_block_index);
1655 prefetch(&fp->status_blk->u_status_block.status_block_index);
1657 netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1662 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1664 struct net_device *dev = dev_instance;
1665 struct bnx2x *bp = netdev_priv(dev);
1666 u16 status = bnx2x_ack_int(bp);
1669 /* Return here if interrupt is shared and it's not for us */
1670 if (unlikely(status == 0)) {
1671 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1674 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1676 /* Return here if interrupt is disabled */
1677 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1678 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1682 #ifdef BNX2X_STOP_ON_ERROR
1683 if (unlikely(bp->panic))
1687 mask = 0x2 << bp->fp[0].sb_id;
1688 if (status & mask) {
1689 struct bnx2x_fastpath *fp = &bp->fp[0];
1691 prefetch(fp->rx_cons_sb);
1692 prefetch(fp->tx_cons_sb);
1693 prefetch(&fp->status_blk->c_status_block.status_block_index);
1694 prefetch(&fp->status_blk->u_status_block.status_block_index);
1696 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1702 if (unlikely(status & 0x1)) {
1703 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1711 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1717 /* end of fast path */
1719 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1724 * General service functions
1727 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1730 u32 resource_bit = (1 << resource);
1731 int func = BP_FUNC(bp);
1732 u32 hw_lock_control_reg;
1735 /* Validating that the resource is within range */
1736 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1738 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1739 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1744 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1746 hw_lock_control_reg =
1747 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1750 /* Validating that the resource is not already taken */
1751 lock_status = REG_RD(bp, hw_lock_control_reg);
1752 if (lock_status & resource_bit) {
1753 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1754 lock_status, resource_bit);
1758 /* Try for 5 second every 5ms */
1759 for (cnt = 0; cnt < 1000; cnt++) {
1760 /* Try to acquire the lock */
1761 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1762 lock_status = REG_RD(bp, hw_lock_control_reg);
1763 if (lock_status & resource_bit)
1768 DP(NETIF_MSG_HW, "Timeout\n");
1772 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1775 u32 resource_bit = (1 << resource);
1776 int func = BP_FUNC(bp);
1777 u32 hw_lock_control_reg;
1779 /* Validating that the resource is within range */
1780 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1782 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1783 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1788 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1790 hw_lock_control_reg =
1791 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1794 /* Validating that the resource is currently taken */
1795 lock_status = REG_RD(bp, hw_lock_control_reg);
1796 if (!(lock_status & resource_bit)) {
1797 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1798 lock_status, resource_bit);
1802 REG_WR(bp, hw_lock_control_reg, resource_bit);
1806 /* HW Lock for shared dual port PHYs */
1807 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1809 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1811 mutex_lock(&bp->port.phy_mutex);
1813 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1814 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1815 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1818 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1820 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1822 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1823 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1824 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1826 mutex_unlock(&bp->port.phy_mutex);
1829 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1831 /* The GPIO should be swapped if swap register is set and active */
1832 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1833 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1834 int gpio_shift = gpio_num +
1835 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1836 u32 gpio_mask = (1 << gpio_shift);
1839 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1840 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1844 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1845 /* read GPIO and mask except the float bits */
1846 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1849 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1850 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1851 gpio_num, gpio_shift);
1852 /* clear FLOAT and set CLR */
1853 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1854 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1857 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1858 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1859 gpio_num, gpio_shift);
1860 /* clear FLOAT and set SET */
1861 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1862 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1865 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1866 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1867 gpio_num, gpio_shift);
1869 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1876 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1877 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1882 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1884 u32 spio_mask = (1 << spio_num);
1887 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1888 (spio_num > MISC_REGISTERS_SPIO_7)) {
1889 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1893 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1894 /* read SPIO and mask except the float bits */
1895 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1898 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1899 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1900 /* clear FLOAT and set CLR */
1901 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1902 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1905 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1906 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1907 /* clear FLOAT and set SET */
1908 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1909 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1912 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1913 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1915 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1922 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1923 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1928 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1930 switch (bp->link_vars.ieee_fc &
1931 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1932 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1933 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1936 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1937 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1940 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1941 bp->port.advertising |= ADVERTISED_Asym_Pause;
1944 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1950 static void bnx2x_link_report(struct bnx2x *bp)
1952 if (bp->link_vars.link_up) {
1953 if (bp->state == BNX2X_STATE_OPEN)
1954 netif_carrier_on(bp->dev);
1955 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1957 printk("%d Mbps ", bp->link_vars.line_speed);
1959 if (bp->link_vars.duplex == DUPLEX_FULL)
1960 printk("full duplex");
1962 printk("half duplex");
1964 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1965 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1966 printk(", receive ");
1967 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1968 printk("& transmit ");
1970 printk(", transmit ");
1972 printk("flow control ON");
1976 } else { /* link_down */
1977 netif_carrier_off(bp->dev);
1978 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1982 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1984 if (!BP_NOMCP(bp)) {
1987 /* Initialize link parameters structure variables */
1988 /* It is recommended to turn off RX FC for jumbo frames
1989 for better performance */
1991 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1992 else if (bp->dev->mtu > 5000)
1993 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1995 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1997 bnx2x_acquire_phy_lock(bp);
1998 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1999 bnx2x_release_phy_lock(bp);
2001 bnx2x_calc_fc_adv(bp);
2003 if (bp->link_vars.link_up)
2004 bnx2x_link_report(bp);
2009 BNX2X_ERR("Bootcode is missing -not initializing link\n");
2013 static void bnx2x_link_set(struct bnx2x *bp)
2015 if (!BP_NOMCP(bp)) {
2016 bnx2x_acquire_phy_lock(bp);
2017 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2018 bnx2x_release_phy_lock(bp);
2020 bnx2x_calc_fc_adv(bp);
2022 BNX2X_ERR("Bootcode is missing -not setting link\n");
2025 static void bnx2x__link_reset(struct bnx2x *bp)
2027 if (!BP_NOMCP(bp)) {
2028 bnx2x_acquire_phy_lock(bp);
2029 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2030 bnx2x_release_phy_lock(bp);
2032 BNX2X_ERR("Bootcode is missing -not resetting link\n");
2035 static u8 bnx2x_link_test(struct bnx2x *bp)
2039 bnx2x_acquire_phy_lock(bp);
2040 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2041 bnx2x_release_phy_lock(bp);
2046 /* Calculates the sum of vn_min_rates.
2047 It's needed for further normalizing of the min_rates.
2052 0 - if all the min_rates are 0.
2053 In the later case fairness algorithm should be deactivated.
2054 If not all min_rates are zero then those that are zeroes will
2057 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2059 int i, port = BP_PORT(bp);
2063 for (i = 0; i < E1HVN_MAX; i++) {
2065 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2066 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2067 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2068 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2069 /* If min rate is zero - set it to 1 */
2071 vn_min_rate = DEF_MIN_RATE;
2075 wsum += vn_min_rate;
2079 /* ... only if all min rates are zeros - disable FAIRNESS */
2086 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2089 struct cmng_struct_per_port *m_cmng_port)
2091 u32 r_param = port_rate / 8;
2092 int port = BP_PORT(bp);
2095 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2097 /* Enable minmax only if we are in e1hmf mode */
2099 u32 fair_periodic_timeout_usec;
2102 /* Enable rate shaping and fairness */
2103 m_cmng_port->flags.cmng_vn_enable = 1;
2104 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2105 m_cmng_port->flags.rate_shaping_enable = 1;
2108 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2109 " fairness will be disabled\n");
2111 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2112 m_cmng_port->rs_vars.rs_periodic_timeout =
2113 RS_PERIODIC_TIMEOUT_USEC / 4;
2115 /* this is the threshold below which no timer arming will occur
2116 1.25 coefficient is for the threshold to be a little bigger
2117 than the real time, to compensate for timer in-accuracy */
2118 m_cmng_port->rs_vars.rs_threshold =
2119 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2121 /* resolution of fairness timer */
2122 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2123 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2124 t_fair = T_FAIR_COEF / port_rate;
2126 /* this is the threshold below which we won't arm
2127 the timer anymore */
2128 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2130 /* we multiply by 1e3/8 to get bytes/msec.
2131 We don't want the credits to pass a credit
2132 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2133 m_cmng_port->fair_vars.upper_bound =
2134 r_param * t_fair * FAIR_MEM;
2135 /* since each tick is 4 usec */
2136 m_cmng_port->fair_vars.fairness_timeout =
2137 fair_periodic_timeout_usec / 4;
2140 /* Disable rate shaping and fairness */
2141 m_cmng_port->flags.cmng_vn_enable = 0;
2142 m_cmng_port->flags.fairness_enable = 0;
2143 m_cmng_port->flags.rate_shaping_enable = 0;
2146 "Single function mode minmax will be disabled\n");
2149 /* Store it to internal memory */
2150 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2151 REG_WR(bp, BAR_XSTRORM_INTMEM +
2152 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2153 ((u32 *)(m_cmng_port))[i]);
2156 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2157 u32 wsum, u16 port_rate,
2158 struct cmng_struct_per_port *m_cmng_port)
2160 struct rate_shaping_vars_per_vn m_rs_vn;
2161 struct fairness_vars_per_vn m_fair_vn;
2162 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2163 u16 vn_min_rate, vn_max_rate;
2166 /* If function is hidden - set min and max to zeroes */
2167 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2172 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2173 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2174 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2175 if current min rate is zero - set it to 1.
2176 This is a requirement of the algorithm. */
2177 if ((vn_min_rate == 0) && wsum)
2178 vn_min_rate = DEF_MIN_RATE;
2179 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2180 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2183 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2184 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2186 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2187 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2189 /* global vn counter - maximal Mbps for this vn */
2190 m_rs_vn.vn_counter.rate = vn_max_rate;
2192 /* quota - number of bytes transmitted in this period */
2193 m_rs_vn.vn_counter.quota =
2194 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2196 #ifdef BNX2X_PER_PROT_QOS
2197 /* per protocol counter */
2198 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2199 /* maximal Mbps for this protocol */
2200 m_rs_vn.protocol_counters[protocol].rate =
2201 protocol_max_rate[protocol];
2202 /* the quota in each timer period -
2203 number of bytes transmitted in this period */
2204 m_rs_vn.protocol_counters[protocol].quota =
2205 (u32)(rs_periodic_timeout_usec *
2207 protocol_counters[protocol].rate/8));
2212 /* credit for each period of the fairness algorithm:
2213 number of bytes in T_FAIR (the vn share the port rate).
2214 wsum should not be larger than 10000, thus
2215 T_FAIR_COEF / (8 * wsum) will always be grater than zero */
2216 m_fair_vn.vn_credit_delta =
2217 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2218 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2219 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2220 m_fair_vn.vn_credit_delta);
2223 #ifdef BNX2X_PER_PROT_QOS
2225 u32 protocolWeightSum = 0;
2227 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2228 protocolWeightSum +=
2229 drvInit.protocol_min_rate[protocol];
2230 /* per protocol counter -
2231 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2232 if (protocolWeightSum > 0) {
2234 protocol < NUM_OF_PROTOCOLS; protocol++)
2235 /* credit for each period of the
2236 fairness algorithm - number of bytes in
2237 T_FAIR (the protocol share the vn rate) */
2238 m_fair_vn.protocol_credit_delta[protocol] =
2239 (u32)((vn_min_rate / 8) * t_fair *
2240 protocol_min_rate / protocolWeightSum);
2245 /* Store it to internal memory */
2246 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2247 REG_WR(bp, BAR_XSTRORM_INTMEM +
2248 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2249 ((u32 *)(&m_rs_vn))[i]);
2251 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2252 REG_WR(bp, BAR_XSTRORM_INTMEM +
2253 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2254 ((u32 *)(&m_fair_vn))[i]);
2257 /* This function is called upon link interrupt */
2258 static void bnx2x_link_attn(struct bnx2x *bp)
2262 /* Make sure that we are synced with the current statistics */
2263 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2265 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2267 if (bp->link_vars.link_up) {
2269 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2270 struct host_port_stats *pstats;
2272 pstats = bnx2x_sp(bp, port_stats);
2273 /* reset old bmac stats */
2274 memset(&(pstats->mac_stx[0]), 0,
2275 sizeof(struct mac_stx));
2277 if ((bp->state == BNX2X_STATE_OPEN) ||
2278 (bp->state == BNX2X_STATE_DISABLED))
2279 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2282 /* indicate link status */
2283 bnx2x_link_report(bp);
2288 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2289 if (vn == BP_E1HVN(bp))
2292 func = ((vn << 1) | BP_PORT(bp));
2294 /* Set the attention towards other drivers
2296 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2297 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2301 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2302 struct cmng_struct_per_port m_cmng_port;
2304 int port = BP_PORT(bp);
2306 /* Init RATE SHAPING and FAIRNESS contexts */
2307 wsum = bnx2x_calc_vn_wsum(bp);
2308 bnx2x_init_port_minmax(bp, (int)wsum,
2309 bp->link_vars.line_speed,
2312 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2313 bnx2x_init_vn_minmax(bp, 2*vn + port,
2314 wsum, bp->link_vars.line_speed,
2319 static void bnx2x__link_status_update(struct bnx2x *bp)
2321 if (bp->state != BNX2X_STATE_OPEN)
2324 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2326 if (bp->link_vars.link_up)
2327 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2329 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2331 /* indicate link status */
2332 bnx2x_link_report(bp);
2335 static void bnx2x_pmf_update(struct bnx2x *bp)
2337 int port = BP_PORT(bp);
2341 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2343 /* enable nig attention */
2344 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2345 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2346 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2348 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2356 * General service functions
2359 /* the slow path queue is odd since completions arrive on the fastpath ring */
2360 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2361 u32 data_hi, u32 data_lo, int common)
2363 int func = BP_FUNC(bp);
2365 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2366 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2367 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2368 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2369 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2371 #ifdef BNX2X_STOP_ON_ERROR
2372 if (unlikely(bp->panic))
2376 spin_lock_bh(&bp->spq_lock);
2378 if (!bp->spq_left) {
2379 BNX2X_ERR("BUG! SPQ ring full!\n");
2380 spin_unlock_bh(&bp->spq_lock);
2385 /* CID needs port number to be encoded int it */
2386 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2387 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2389 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2391 bp->spq_prod_bd->hdr.type |=
2392 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2394 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2395 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2399 if (bp->spq_prod_bd == bp->spq_last_bd) {
2400 bp->spq_prod_bd = bp->spq;
2401 bp->spq_prod_idx = 0;
2402 DP(NETIF_MSG_TIMER, "end of spq\n");
2409 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2412 spin_unlock_bh(&bp->spq_lock);
2416 /* acquire split MCP access lock register */
2417 static int bnx2x_acquire_alr(struct bnx2x *bp)
2424 for (j = 0; j < i*10; j++) {
2426 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2427 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2428 if (val & (1L << 31))
2433 if (!(val & (1L << 31))) {
2434 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2441 /* release split MCP access lock register */
2442 static void bnx2x_release_alr(struct bnx2x *bp)
2446 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2449 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2451 struct host_def_status_block *def_sb = bp->def_status_blk;
2454 barrier(); /* status block is written to by the chip */
2455 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2456 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2459 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2460 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2463 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2464 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2467 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2468 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2471 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2472 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2479 * slow path service functions
2482 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2484 int port = BP_PORT(bp);
2485 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2486 COMMAND_REG_ATTN_BITS_SET);
2487 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2488 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2489 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2490 NIG_REG_MASK_INTERRUPT_PORT0;
2493 if (bp->attn_state & asserted)
2494 BNX2X_ERR("IGU ERROR\n");
2496 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2497 aeu_mask = REG_RD(bp, aeu_addr);
2499 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2500 aeu_mask, asserted);
2501 aeu_mask &= ~(asserted & 0xff);
2502 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2504 REG_WR(bp, aeu_addr, aeu_mask);
2505 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2507 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2508 bp->attn_state |= asserted;
2509 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2511 if (asserted & ATTN_HARD_WIRED_MASK) {
2512 if (asserted & ATTN_NIG_FOR_FUNC) {
2514 bnx2x_acquire_phy_lock(bp);
2516 /* save nig interrupt mask */
2517 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2518 REG_WR(bp, nig_int_mask_addr, 0);
2520 bnx2x_link_attn(bp);
2522 /* handle unicore attn? */
2524 if (asserted & ATTN_SW_TIMER_4_FUNC)
2525 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2527 if (asserted & GPIO_2_FUNC)
2528 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2530 if (asserted & GPIO_3_FUNC)
2531 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2533 if (asserted & GPIO_4_FUNC)
2534 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2537 if (asserted & ATTN_GENERAL_ATTN_1) {
2538 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2539 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2541 if (asserted & ATTN_GENERAL_ATTN_2) {
2542 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2543 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2545 if (asserted & ATTN_GENERAL_ATTN_3) {
2546 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2547 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2550 if (asserted & ATTN_GENERAL_ATTN_4) {
2551 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2552 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2554 if (asserted & ATTN_GENERAL_ATTN_5) {
2555 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2556 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2558 if (asserted & ATTN_GENERAL_ATTN_6) {
2559 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2560 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2564 } /* if hardwired */
2566 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2568 REG_WR(bp, hc_addr, asserted);
2570 /* now set back the mask */
2571 if (asserted & ATTN_NIG_FOR_FUNC) {
2572 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2573 bnx2x_release_phy_lock(bp);
2577 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2579 int port = BP_PORT(bp);
2583 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2584 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2586 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2588 val = REG_RD(bp, reg_offset);
2589 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2590 REG_WR(bp, reg_offset, val);
2592 BNX2X_ERR("SPIO5 hw attention\n");
2594 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2595 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2596 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2597 /* Fan failure attention */
2599 /* The PHY reset is controlled by GPIO 1 */
2600 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2601 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2602 /* Low power mode is controlled by GPIO 2 */
2603 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2604 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2605 /* mark the failure */
2606 bp->link_params.ext_phy_config &=
2607 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2608 bp->link_params.ext_phy_config |=
2609 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2611 dev_info.port_hw_config[port].
2612 external_phy_config,
2613 bp->link_params.ext_phy_config);
2614 /* log the failure */
2615 printk(KERN_ERR PFX "Fan Failure on Network"
2616 " Controller %s has caused the driver to"
2617 " shutdown the card to prevent permanent"
2618 " damage. Please contact Dell Support for"
2619 " assistance\n", bp->dev->name);
2627 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2629 val = REG_RD(bp, reg_offset);
2630 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2631 REG_WR(bp, reg_offset, val);
2633 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2634 (attn & HW_INTERRUT_ASSERT_SET_0));
2639 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2643 if (attn & BNX2X_DOORQ_ASSERT) {
2645 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2646 BNX2X_ERR("DB hw attention 0x%x\n", val);
2647 /* DORQ discard attention */
2649 BNX2X_ERR("FATAL error from DORQ\n");
2652 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2654 int port = BP_PORT(bp);
2657 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2658 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2660 val = REG_RD(bp, reg_offset);
2661 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2662 REG_WR(bp, reg_offset, val);
2664 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2665 (attn & HW_INTERRUT_ASSERT_SET_1));
2670 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2674 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2676 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2677 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2678 /* CFC error attention */
2680 BNX2X_ERR("FATAL error from CFC\n");
2683 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2685 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2686 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2687 /* RQ_USDMDP_FIFO_OVERFLOW */
2689 BNX2X_ERR("FATAL error from PXP\n");
2692 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2694 int port = BP_PORT(bp);
2697 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2698 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2700 val = REG_RD(bp, reg_offset);
2701 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2702 REG_WR(bp, reg_offset, val);
2704 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2705 (attn & HW_INTERRUT_ASSERT_SET_2));
2710 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2714 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2716 if (attn & BNX2X_PMF_LINK_ASSERT) {
2717 int func = BP_FUNC(bp);
2719 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2720 bnx2x__link_status_update(bp);
2721 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2723 bnx2x_pmf_update(bp);
2725 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2727 BNX2X_ERR("MC assert!\n");
2728 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2729 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2731 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2734 } else if (attn & BNX2X_MCP_ASSERT) {
2736 BNX2X_ERR("MCP assert!\n");
2737 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2741 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2744 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2745 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2746 if (attn & BNX2X_GRC_TIMEOUT) {
2747 val = CHIP_IS_E1H(bp) ?
2748 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2749 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2751 if (attn & BNX2X_GRC_RSV) {
2752 val = CHIP_IS_E1H(bp) ?
2753 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2754 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2756 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2760 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2762 struct attn_route attn;
2763 struct attn_route group_mask;
2764 int port = BP_PORT(bp);
2770 /* need to take HW lock because MCP or other port might also
2771 try to handle this event */
2772 bnx2x_acquire_alr(bp);
2774 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2775 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2776 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2777 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2778 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2779 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2781 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2782 if (deasserted & (1 << index)) {
2783 group_mask = bp->attn_group[index];
2785 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2786 index, group_mask.sig[0], group_mask.sig[1],
2787 group_mask.sig[2], group_mask.sig[3]);
2789 bnx2x_attn_int_deasserted3(bp,
2790 attn.sig[3] & group_mask.sig[3]);
2791 bnx2x_attn_int_deasserted1(bp,
2792 attn.sig[1] & group_mask.sig[1]);
2793 bnx2x_attn_int_deasserted2(bp,
2794 attn.sig[2] & group_mask.sig[2]);
2795 bnx2x_attn_int_deasserted0(bp,
2796 attn.sig[0] & group_mask.sig[0]);
2798 if ((attn.sig[0] & group_mask.sig[0] &
2799 HW_PRTY_ASSERT_SET_0) ||
2800 (attn.sig[1] & group_mask.sig[1] &
2801 HW_PRTY_ASSERT_SET_1) ||
2802 (attn.sig[2] & group_mask.sig[2] &
2803 HW_PRTY_ASSERT_SET_2))
2804 BNX2X_ERR("FATAL HW block parity attention\n");
2808 bnx2x_release_alr(bp);
2810 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2813 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2815 REG_WR(bp, reg_addr, val);
2817 if (~bp->attn_state & deasserted)
2818 BNX2X_ERR("IGU ERROR\n");
2820 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2821 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2823 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2824 aeu_mask = REG_RD(bp, reg_addr);
2826 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2827 aeu_mask, deasserted);
2828 aeu_mask |= (deasserted & 0xff);
2829 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2831 REG_WR(bp, reg_addr, aeu_mask);
2832 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2834 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2835 bp->attn_state &= ~deasserted;
2836 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2839 static void bnx2x_attn_int(struct bnx2x *bp)
2841 /* read local copy of bits */
2842 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2844 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2846 u32 attn_state = bp->attn_state;
2848 /* look for changed bits */
2849 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2850 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2853 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2854 attn_bits, attn_ack, asserted, deasserted);
2856 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2857 BNX2X_ERR("BAD attention state\n");
2859 /* handle bits that were raised */
2861 bnx2x_attn_int_asserted(bp, asserted);
2864 bnx2x_attn_int_deasserted(bp, deasserted);
2867 static void bnx2x_sp_task(struct work_struct *work)
2869 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2873 /* Return here if interrupt is disabled */
2874 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2875 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2879 status = bnx2x_update_dsb_idx(bp);
2880 /* if (status == 0) */
2881 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2883 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2889 /* CStorm events: query_stats, port delete ramrod */
2891 bp->stats_pending = 0;
2893 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2895 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2897 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2899 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2901 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2906 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2908 struct net_device *dev = dev_instance;
2909 struct bnx2x *bp = netdev_priv(dev);
2911 /* Return here if interrupt is disabled */
2912 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2913 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2917 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2919 #ifdef BNX2X_STOP_ON_ERROR
2920 if (unlikely(bp->panic))
2924 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2929 /* end of slow path */
2933 /****************************************************************************
2935 ****************************************************************************/
2937 /* sum[hi:lo] += add[hi:lo] */
2938 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2941 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2944 /* difference = minuend - subtrahend */
2945 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2947 if (m_lo < s_lo) { \
2949 d_hi = m_hi - s_hi; \
2951 /* we can 'loan' 1 */ \
2953 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2955 /* m_hi <= s_hi */ \
2960 /* m_lo >= s_lo */ \
2961 if (m_hi < s_hi) { \
2965 /* m_hi >= s_hi */ \
2966 d_hi = m_hi - s_hi; \
2967 d_lo = m_lo - s_lo; \
2972 #define UPDATE_STAT64(s, t) \
2974 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2975 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2976 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2977 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2978 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2979 pstats->mac_stx[1].t##_lo, diff.lo); \
2982 #define UPDATE_STAT64_NIG(s, t) \
2984 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2985 diff.lo, new->s##_lo, old->s##_lo); \
2986 ADD_64(estats->t##_hi, diff.hi, \
2987 estats->t##_lo, diff.lo); \
2990 /* sum[hi:lo] += add */
2991 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2994 s_hi += (s_lo < a) ? 1 : 0; \
2997 #define UPDATE_EXTEND_STAT(s) \
2999 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3000 pstats->mac_stx[1].s##_lo, \
3004 #define UPDATE_EXTEND_TSTAT(s, t) \
3006 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3007 old_tclient->s = le32_to_cpu(tclient->s); \
3008 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3011 #define UPDATE_EXTEND_XSTAT(s, t) \
3013 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3014 old_xclient->s = le32_to_cpu(xclient->s); \
3015 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3019 * General service functions
3022 static inline long bnx2x_hilo(u32 *hiref)
3024 u32 lo = *(hiref + 1);
3025 #if (BITS_PER_LONG == 64)
3028 return HILO_U64(hi, lo);
3035 * Init service functions
3038 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3040 if (!bp->stats_pending) {
3041 struct eth_query_ramrod_data ramrod_data = {0};
3044 ramrod_data.drv_counter = bp->stats_counter++;
3045 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3046 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3048 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3049 ((u32 *)&ramrod_data)[1],
3050 ((u32 *)&ramrod_data)[0], 0);
3052 /* stats ramrod has it's own slot on the spq */
3054 bp->stats_pending = 1;
3059 static void bnx2x_stats_init(struct bnx2x *bp)
3061 int port = BP_PORT(bp);
3063 bp->executer_idx = 0;
3064 bp->stats_counter = 0;
3068 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3070 bp->port.port_stx = 0;
3071 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3073 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3074 bp->port.old_nig_stats.brb_discard =
3075 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3076 bp->port.old_nig_stats.brb_truncate =
3077 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3078 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3079 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3080 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3081 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3083 /* function stats */
3084 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3085 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3086 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3087 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3089 bp->stats_state = STATS_STATE_DISABLED;
3090 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3091 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3094 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3096 struct dmae_command *dmae = &bp->stats_dmae;
3097 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3099 *stats_comp = DMAE_COMP_VAL;
3102 if (bp->executer_idx) {
3103 int loader_idx = PMF_DMAE_C(bp);
3105 memset(dmae, 0, sizeof(struct dmae_command));
3107 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3108 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3109 DMAE_CMD_DST_RESET |
3111 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3113 DMAE_CMD_ENDIANITY_DW_SWAP |
3115 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3117 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3118 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3119 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3120 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3121 sizeof(struct dmae_command) *
3122 (loader_idx + 1)) >> 2;
3123 dmae->dst_addr_hi = 0;
3124 dmae->len = sizeof(struct dmae_command) >> 2;
3127 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3128 dmae->comp_addr_hi = 0;
3132 bnx2x_post_dmae(bp, dmae, loader_idx);
3134 } else if (bp->func_stx) {
3136 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3140 static int bnx2x_stats_comp(struct bnx2x *bp)
3142 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3146 while (*stats_comp != DMAE_COMP_VAL) {
3148 BNX2X_ERR("timeout waiting for stats finished\n");
3158 * Statistics service functions
3161 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3163 struct dmae_command *dmae;
3165 int loader_idx = PMF_DMAE_C(bp);
3166 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3169 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3170 BNX2X_ERR("BUG!\n");
3174 bp->executer_idx = 0;
3176 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3178 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3180 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3182 DMAE_CMD_ENDIANITY_DW_SWAP |
3184 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3185 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3187 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3188 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3189 dmae->src_addr_lo = bp->port.port_stx >> 2;
3190 dmae->src_addr_hi = 0;
3191 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3192 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3193 dmae->len = DMAE_LEN32_RD_MAX;
3194 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3195 dmae->comp_addr_hi = 0;
3198 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3199 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3200 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3201 dmae->src_addr_hi = 0;
3202 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3203 DMAE_LEN32_RD_MAX * 4);
3204 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3205 DMAE_LEN32_RD_MAX * 4);
3206 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3207 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3208 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3209 dmae->comp_val = DMAE_COMP_VAL;
3212 bnx2x_hw_stats_post(bp);
3213 bnx2x_stats_comp(bp);
3216 static void bnx2x_port_stats_init(struct bnx2x *bp)
3218 struct dmae_command *dmae;
3219 int port = BP_PORT(bp);
3220 int vn = BP_E1HVN(bp);
3222 int loader_idx = PMF_DMAE_C(bp);
3224 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3227 if (!bp->link_vars.link_up || !bp->port.pmf) {
3228 BNX2X_ERR("BUG!\n");
3232 bp->executer_idx = 0;
3235 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3236 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3237 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3239 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3241 DMAE_CMD_ENDIANITY_DW_SWAP |
3243 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3244 (vn << DMAE_CMD_E1HVN_SHIFT));
3246 if (bp->port.port_stx) {
3248 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3249 dmae->opcode = opcode;
3250 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3251 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3252 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3253 dmae->dst_addr_hi = 0;
3254 dmae->len = sizeof(struct host_port_stats) >> 2;
3255 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3256 dmae->comp_addr_hi = 0;
3262 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3263 dmae->opcode = opcode;
3264 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3265 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3266 dmae->dst_addr_lo = bp->func_stx >> 2;
3267 dmae->dst_addr_hi = 0;
3268 dmae->len = sizeof(struct host_func_stats) >> 2;
3269 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3270 dmae->comp_addr_hi = 0;
3275 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3276 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3277 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3279 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3281 DMAE_CMD_ENDIANITY_DW_SWAP |
3283 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3284 (vn << DMAE_CMD_E1HVN_SHIFT));
3286 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3288 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3289 NIG_REG_INGRESS_BMAC0_MEM);
3291 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3292 BIGMAC_REGISTER_TX_STAT_GTBYT */
3293 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3294 dmae->opcode = opcode;
3295 dmae->src_addr_lo = (mac_addr +
3296 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3297 dmae->src_addr_hi = 0;
3298 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3299 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3300 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3301 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3302 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3303 dmae->comp_addr_hi = 0;
3306 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3307 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3308 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3309 dmae->opcode = opcode;
3310 dmae->src_addr_lo = (mac_addr +
3311 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3312 dmae->src_addr_hi = 0;
3313 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3314 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3315 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3316 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3317 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3318 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3319 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3320 dmae->comp_addr_hi = 0;
3323 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3325 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3327 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3328 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329 dmae->opcode = opcode;
3330 dmae->src_addr_lo = (mac_addr +
3331 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3332 dmae->src_addr_hi = 0;
3333 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3334 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3335 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3336 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337 dmae->comp_addr_hi = 0;
3340 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3341 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3342 dmae->opcode = opcode;
3343 dmae->src_addr_lo = (mac_addr +
3344 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3345 dmae->src_addr_hi = 0;
3346 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3347 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3348 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3349 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3351 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3352 dmae->comp_addr_hi = 0;
3355 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3356 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3357 dmae->opcode = opcode;
3358 dmae->src_addr_lo = (mac_addr +
3359 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3360 dmae->src_addr_hi = 0;
3361 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3362 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3363 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3364 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3365 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3366 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3367 dmae->comp_addr_hi = 0;
3372 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3373 dmae->opcode = opcode;
3374 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3375 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3376 dmae->src_addr_hi = 0;
3377 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3378 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3379 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3380 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3381 dmae->comp_addr_hi = 0;
3384 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3385 dmae->opcode = opcode;
3386 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3387 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3388 dmae->src_addr_hi = 0;
3389 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3390 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3391 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3392 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3393 dmae->len = (2*sizeof(u32)) >> 2;
3394 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3395 dmae->comp_addr_hi = 0;
3398 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3399 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3400 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3401 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3403 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3405 DMAE_CMD_ENDIANITY_DW_SWAP |
3407 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3408 (vn << DMAE_CMD_E1HVN_SHIFT));
3409 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3410 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3411 dmae->src_addr_hi = 0;
3412 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3413 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3414 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3415 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3416 dmae->len = (2*sizeof(u32)) >> 2;
3417 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3418 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3419 dmae->comp_val = DMAE_COMP_VAL;
3424 static void bnx2x_func_stats_init(struct bnx2x *bp)
3426 struct dmae_command *dmae = &bp->stats_dmae;
3427 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3430 if (!bp->func_stx) {
3431 BNX2X_ERR("BUG!\n");
3435 bp->executer_idx = 0;
3436 memset(dmae, 0, sizeof(struct dmae_command));
3438 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3439 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3440 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3442 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3444 DMAE_CMD_ENDIANITY_DW_SWAP |
3446 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3447 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3448 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3449 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3450 dmae->dst_addr_lo = bp->func_stx >> 2;
3451 dmae->dst_addr_hi = 0;
3452 dmae->len = sizeof(struct host_func_stats) >> 2;
3453 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3454 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3455 dmae->comp_val = DMAE_COMP_VAL;
3460 static void bnx2x_stats_start(struct bnx2x *bp)
3463 bnx2x_port_stats_init(bp);
3465 else if (bp->func_stx)
3466 bnx2x_func_stats_init(bp);
3468 bnx2x_hw_stats_post(bp);
3469 bnx2x_storm_stats_post(bp);
3472 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3474 bnx2x_stats_comp(bp);
3475 bnx2x_stats_pmf_update(bp);
3476 bnx2x_stats_start(bp);
3479 static void bnx2x_stats_restart(struct bnx2x *bp)
3481 bnx2x_stats_comp(bp);
3482 bnx2x_stats_start(bp);
3485 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3487 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3488 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3489 struct regpair diff;
3491 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3492 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3493 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3494 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3495 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3496 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3497 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3498 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3499 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3500 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3501 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3502 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3503 UPDATE_STAT64(tx_stat_gt127,
3504 tx_stat_etherstatspkts65octetsto127octets);
3505 UPDATE_STAT64(tx_stat_gt255,
3506 tx_stat_etherstatspkts128octetsto255octets);
3507 UPDATE_STAT64(tx_stat_gt511,
3508 tx_stat_etherstatspkts256octetsto511octets);
3509 UPDATE_STAT64(tx_stat_gt1023,
3510 tx_stat_etherstatspkts512octetsto1023octets);
3511 UPDATE_STAT64(tx_stat_gt1518,
3512 tx_stat_etherstatspkts1024octetsto1522octets);
3513 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3514 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3515 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3516 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3517 UPDATE_STAT64(tx_stat_gterr,
3518 tx_stat_dot3statsinternalmactransmiterrors);
3519 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3522 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3524 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3525 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3527 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3528 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3529 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3530 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3531 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3532 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3533 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3534 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3535 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3536 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3537 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3538 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3539 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3540 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3541 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3542 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3543 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3544 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3545 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3546 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3547 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3548 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3549 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3550 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3551 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3552 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3553 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3554 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3555 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3556 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3557 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3560 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3562 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3563 struct nig_stats *old = &(bp->port.old_nig_stats);
3564 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3565 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3566 struct regpair diff;
3568 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3569 bnx2x_bmac_stats_update(bp);
3571 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3572 bnx2x_emac_stats_update(bp);
3574 else { /* unreached */
3575 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3579 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3580 new->brb_discard - old->brb_discard);
3581 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3582 new->brb_truncate - old->brb_truncate);
3584 UPDATE_STAT64_NIG(egress_mac_pkt0,
3585 etherstatspkts1024octetsto1522octets);
3586 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3588 memcpy(old, new, sizeof(struct nig_stats));
3590 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3591 sizeof(struct mac_stx));
3592 estats->brb_drop_hi = pstats->brb_drop_hi;
3593 estats->brb_drop_lo = pstats->brb_drop_lo;
3595 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3600 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3602 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3603 int cl_id = BP_CL_ID(bp);
3604 struct tstorm_per_port_stats *tport =
3605 &stats->tstorm_common.port_statistics;
3606 struct tstorm_per_client_stats *tclient =
3607 &stats->tstorm_common.client_statistics[cl_id];
3608 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3609 struct xstorm_per_client_stats *xclient =
3610 &stats->xstorm_common.client_statistics[cl_id];
3611 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3612 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3613 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3616 /* are storm stats valid? */
3617 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3618 bp->stats_counter) {
3619 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3620 " tstorm counter (%d) != stats_counter (%d)\n",
3621 tclient->stats_counter, bp->stats_counter);
3624 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3625 bp->stats_counter) {
3626 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3627 " xstorm counter (%d) != stats_counter (%d)\n",
3628 xclient->stats_counter, bp->stats_counter);
3632 fstats->total_bytes_received_hi =
3633 fstats->valid_bytes_received_hi =
3634 le32_to_cpu(tclient->total_rcv_bytes.hi);
3635 fstats->total_bytes_received_lo =
3636 fstats->valid_bytes_received_lo =
3637 le32_to_cpu(tclient->total_rcv_bytes.lo);
3639 estats->error_bytes_received_hi =
3640 le32_to_cpu(tclient->rcv_error_bytes.hi);
3641 estats->error_bytes_received_lo =
3642 le32_to_cpu(tclient->rcv_error_bytes.lo);
3643 ADD_64(estats->error_bytes_received_hi,
3644 estats->rx_stat_ifhcinbadoctets_hi,
3645 estats->error_bytes_received_lo,
3646 estats->rx_stat_ifhcinbadoctets_lo);
3648 ADD_64(fstats->total_bytes_received_hi,
3649 estats->error_bytes_received_hi,
3650 fstats->total_bytes_received_lo,
3651 estats->error_bytes_received_lo);
3653 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3654 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3655 total_multicast_packets_received);
3656 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3657 total_broadcast_packets_received);
3659 fstats->total_bytes_transmitted_hi =
3660 le32_to_cpu(xclient->total_sent_bytes.hi);
3661 fstats->total_bytes_transmitted_lo =
3662 le32_to_cpu(xclient->total_sent_bytes.lo);
3664 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3665 total_unicast_packets_transmitted);
3666 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3667 total_multicast_packets_transmitted);
3668 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3669 total_broadcast_packets_transmitted);
3671 memcpy(estats, &(fstats->total_bytes_received_hi),
3672 sizeof(struct host_func_stats) - 2*sizeof(u32));
3674 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3675 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3676 estats->brb_truncate_discard =
3677 le32_to_cpu(tport->brb_truncate_discard);
3678 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3680 old_tclient->rcv_unicast_bytes.hi =
3681 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3682 old_tclient->rcv_unicast_bytes.lo =
3683 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3684 old_tclient->rcv_broadcast_bytes.hi =
3685 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3686 old_tclient->rcv_broadcast_bytes.lo =
3687 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3688 old_tclient->rcv_multicast_bytes.hi =
3689 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3690 old_tclient->rcv_multicast_bytes.lo =
3691 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3692 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3694 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3695 old_tclient->packets_too_big_discard =
3696 le32_to_cpu(tclient->packets_too_big_discard);
3697 estats->no_buff_discard =
3698 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3699 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3701 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3702 old_xclient->unicast_bytes_sent.hi =
3703 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3704 old_xclient->unicast_bytes_sent.lo =
3705 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3706 old_xclient->multicast_bytes_sent.hi =
3707 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3708 old_xclient->multicast_bytes_sent.lo =
3709 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3710 old_xclient->broadcast_bytes_sent.hi =
3711 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3712 old_xclient->broadcast_bytes_sent.lo =
3713 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3715 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3720 static void bnx2x_net_stats_update(struct bnx2x *bp)
3722 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3723 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3724 struct net_device_stats *nstats = &bp->dev->stats;
3726 nstats->rx_packets =
3727 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3728 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3729 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3731 nstats->tx_packets =
3732 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3733 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3734 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3736 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3738 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3740 nstats->rx_dropped = old_tclient->checksum_discard +
3741 estats->mac_discard;
3742 nstats->tx_dropped = 0;
3745 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3747 nstats->collisions =
3748 estats->tx_stat_dot3statssinglecollisionframes_lo +
3749 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3750 estats->tx_stat_dot3statslatecollisions_lo +
3751 estats->tx_stat_dot3statsexcessivecollisions_lo;
3753 estats->jabber_packets_received =
3754 old_tclient->packets_too_big_discard +
3755 estats->rx_stat_dot3statsframestoolong_lo;
3757 nstats->rx_length_errors =
3758 estats->rx_stat_etherstatsundersizepkts_lo +
3759 estats->jabber_packets_received;
3760 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3761 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3762 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3763 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3764 nstats->rx_missed_errors = estats->xxoverflow_discard;
3766 nstats->rx_errors = nstats->rx_length_errors +
3767 nstats->rx_over_errors +
3768 nstats->rx_crc_errors +
3769 nstats->rx_frame_errors +
3770 nstats->rx_fifo_errors +
3771 nstats->rx_missed_errors;
3773 nstats->tx_aborted_errors =
3774 estats->tx_stat_dot3statslatecollisions_lo +
3775 estats->tx_stat_dot3statsexcessivecollisions_lo;
3776 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3777 nstats->tx_fifo_errors = 0;
3778 nstats->tx_heartbeat_errors = 0;
3779 nstats->tx_window_errors = 0;
3781 nstats->tx_errors = nstats->tx_aborted_errors +
3782 nstats->tx_carrier_errors;
3785 static void bnx2x_stats_update(struct bnx2x *bp)
3787 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3790 if (*stats_comp != DMAE_COMP_VAL)
3794 update = (bnx2x_hw_stats_update(bp) == 0);
3796 update |= (bnx2x_storm_stats_update(bp) == 0);
3799 bnx2x_net_stats_update(bp);
3802 if (bp->stats_pending) {
3803 bp->stats_pending++;
3804 if (bp->stats_pending == 3) {
3805 BNX2X_ERR("stats not updated for 3 times\n");
3812 if (bp->msglevel & NETIF_MSG_TIMER) {
3813 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3814 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3815 struct net_device_stats *nstats = &bp->dev->stats;
3818 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3819 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3821 bnx2x_tx_avail(bp->fp),
3822 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3823 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3825 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3826 bp->fp->rx_comp_cons),
3827 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3828 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3829 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3830 estats->driver_xoff, estats->brb_drop_lo);
3831 printk(KERN_DEBUG "tstats: checksum_discard %u "
3832 "packets_too_big_discard %u no_buff_discard %u "
3833 "mac_discard %u mac_filter_discard %u "
3834 "xxovrflow_discard %u brb_truncate_discard %u "
3835 "ttl0_discard %u\n",
3836 old_tclient->checksum_discard,
3837 old_tclient->packets_too_big_discard,
3838 old_tclient->no_buff_discard, estats->mac_discard,
3839 estats->mac_filter_discard, estats->xxoverflow_discard,
3840 estats->brb_truncate_discard,
3841 old_tclient->ttl0_discard);
3843 for_each_queue(bp, i) {
3844 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3845 bnx2x_fp(bp, i, tx_pkt),
3846 bnx2x_fp(bp, i, rx_pkt),
3847 bnx2x_fp(bp, i, rx_calls));
3851 bnx2x_hw_stats_post(bp);
3852 bnx2x_storm_stats_post(bp);
3855 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3857 struct dmae_command *dmae;
3859 int loader_idx = PMF_DMAE_C(bp);
3860 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3862 bp->executer_idx = 0;
3864 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3866 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3868 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3870 DMAE_CMD_ENDIANITY_DW_SWAP |
3872 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3873 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3875 if (bp->port.port_stx) {
3877 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3879 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3881 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3882 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3883 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3884 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3885 dmae->dst_addr_hi = 0;
3886 dmae->len = sizeof(struct host_port_stats) >> 2;
3888 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3889 dmae->comp_addr_hi = 0;
3892 dmae->comp_addr_lo =
3893 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3894 dmae->comp_addr_hi =
3895 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3896 dmae->comp_val = DMAE_COMP_VAL;
3904 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3905 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3906 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3907 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3908 dmae->dst_addr_lo = bp->func_stx >> 2;
3909 dmae->dst_addr_hi = 0;
3910 dmae->len = sizeof(struct host_func_stats) >> 2;
3911 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3912 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3913 dmae->comp_val = DMAE_COMP_VAL;
3919 static void bnx2x_stats_stop(struct bnx2x *bp)
3923 bnx2x_stats_comp(bp);
3926 update = (bnx2x_hw_stats_update(bp) == 0);
3928 update |= (bnx2x_storm_stats_update(bp) == 0);
3931 bnx2x_net_stats_update(bp);
3934 bnx2x_port_stats_stop(bp);
3936 bnx2x_hw_stats_post(bp);
3937 bnx2x_stats_comp(bp);
3941 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3945 static const struct {
3946 void (*action)(struct bnx2x *bp);
3947 enum bnx2x_stats_state next_state;
3948 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3951 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3952 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3953 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3954 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3957 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3958 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3959 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3960 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3964 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3966 enum bnx2x_stats_state state = bp->stats_state;
3968 bnx2x_stats_stm[state][event].action(bp);
3969 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3971 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3972 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3973 state, event, bp->stats_state);
3976 static void bnx2x_timer(unsigned long data)
3978 struct bnx2x *bp = (struct bnx2x *) data;
3980 if (!netif_running(bp->dev))
3983 if (atomic_read(&bp->intr_sem) != 0)
3987 struct bnx2x_fastpath *fp = &bp->fp[0];
3990 bnx2x_tx_int(fp, 1000);
3991 rc = bnx2x_rx_int(fp, 1000);
3994 if (!BP_NOMCP(bp)) {
3995 int func = BP_FUNC(bp);
3999 ++bp->fw_drv_pulse_wr_seq;
4000 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4001 /* TBD - add SYSTEM_TIME */
4002 drv_pulse = bp->fw_drv_pulse_wr_seq;
4003 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4005 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4006 MCP_PULSE_SEQ_MASK);
4007 /* The delta between driver pulse and mcp response
4008 * should be 1 (before mcp response) or 0 (after mcp response)
4010 if ((drv_pulse != mcp_pulse) &&
4011 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4012 /* someone lost a heartbeat... */
4013 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4014 drv_pulse, mcp_pulse);
4018 if ((bp->state == BNX2X_STATE_OPEN) ||
4019 (bp->state == BNX2X_STATE_DISABLED))
4020 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4023 mod_timer(&bp->timer, jiffies + bp->current_interval);
4026 /* end of Statistics */
4031 * nic init service functions
4034 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4036 int port = BP_PORT(bp);
4038 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4039 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4040 sizeof(struct ustorm_status_block)/4);
4041 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4042 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4043 sizeof(struct cstorm_status_block)/4);
4046 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4047 dma_addr_t mapping, int sb_id)
4049 int port = BP_PORT(bp);
4050 int func = BP_FUNC(bp);
4055 section = ((u64)mapping) + offsetof(struct host_status_block,
4057 sb->u_status_block.status_block_id = sb_id;
4059 REG_WR(bp, BAR_USTRORM_INTMEM +
4060 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4061 REG_WR(bp, BAR_USTRORM_INTMEM +
4062 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4064 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4065 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4067 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4068 REG_WR16(bp, BAR_USTRORM_INTMEM +
4069 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4072 section = ((u64)mapping) + offsetof(struct host_status_block,
4074 sb->c_status_block.status_block_id = sb_id;
4076 REG_WR(bp, BAR_CSTRORM_INTMEM +
4077 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4078 REG_WR(bp, BAR_CSTRORM_INTMEM +
4079 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4081 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4082 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4084 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4085 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4086 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4088 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4091 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4093 int func = BP_FUNC(bp);
4095 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4096 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4097 sizeof(struct ustorm_def_status_block)/4);
4098 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4099 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4100 sizeof(struct cstorm_def_status_block)/4);
4101 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4102 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4103 sizeof(struct xstorm_def_status_block)/4);
4104 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4105 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4106 sizeof(struct tstorm_def_status_block)/4);
4109 static void bnx2x_init_def_sb(struct bnx2x *bp,
4110 struct host_def_status_block *def_sb,
4111 dma_addr_t mapping, int sb_id)
4113 int port = BP_PORT(bp);
4114 int func = BP_FUNC(bp);
4115 int index, val, reg_offset;
4119 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4120 atten_status_block);
4121 def_sb->atten_status_block.status_block_id = sb_id;
4125 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4126 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4128 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4129 bp->attn_group[index].sig[0] = REG_RD(bp,
4130 reg_offset + 0x10*index);
4131 bp->attn_group[index].sig[1] = REG_RD(bp,
4132 reg_offset + 0x4 + 0x10*index);
4133 bp->attn_group[index].sig[2] = REG_RD(bp,
4134 reg_offset + 0x8 + 0x10*index);
4135 bp->attn_group[index].sig[3] = REG_RD(bp,
4136 reg_offset + 0xc + 0x10*index);
4139 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4140 HC_REG_ATTN_MSG0_ADDR_L);
4142 REG_WR(bp, reg_offset, U64_LO(section));
4143 REG_WR(bp, reg_offset + 4, U64_HI(section));
4145 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4147 val = REG_RD(bp, reg_offset);
4149 REG_WR(bp, reg_offset, val);
4152 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4153 u_def_status_block);
4154 def_sb->u_def_status_block.status_block_id = sb_id;
4156 REG_WR(bp, BAR_USTRORM_INTMEM +
4157 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4158 REG_WR(bp, BAR_USTRORM_INTMEM +
4159 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4161 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4162 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4164 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4165 REG_WR16(bp, BAR_USTRORM_INTMEM +
4166 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4169 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4170 c_def_status_block);
4171 def_sb->c_def_status_block.status_block_id = sb_id;
4173 REG_WR(bp, BAR_CSTRORM_INTMEM +
4174 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4175 REG_WR(bp, BAR_CSTRORM_INTMEM +
4176 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4178 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4179 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4181 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4182 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4183 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4186 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4187 t_def_status_block);
4188 def_sb->t_def_status_block.status_block_id = sb_id;
4190 REG_WR(bp, BAR_TSTRORM_INTMEM +
4191 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4192 REG_WR(bp, BAR_TSTRORM_INTMEM +
4193 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4195 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4196 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4198 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4199 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4200 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4203 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4204 x_def_status_block);
4205 def_sb->x_def_status_block.status_block_id = sb_id;
4207 REG_WR(bp, BAR_XSTRORM_INTMEM +
4208 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4209 REG_WR(bp, BAR_XSTRORM_INTMEM +
4210 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4212 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4213 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4215 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4216 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4217 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4219 bp->stats_pending = 0;
4220 bp->set_mac_pending = 0;
4222 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4225 static void bnx2x_update_coalesce(struct bnx2x *bp)
4227 int port = BP_PORT(bp);
4230 for_each_queue(bp, i) {
4231 int sb_id = bp->fp[i].sb_id;
4233 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4234 REG_WR8(bp, BAR_USTRORM_INTMEM +
4235 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4236 U_SB_ETH_RX_CQ_INDEX),
4238 REG_WR16(bp, BAR_USTRORM_INTMEM +
4239 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4240 U_SB_ETH_RX_CQ_INDEX),
4241 bp->rx_ticks ? 0 : 1);
4242 REG_WR16(bp, BAR_USTRORM_INTMEM +
4243 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4244 U_SB_ETH_RX_BD_INDEX),
4245 bp->rx_ticks ? 0 : 1);
4247 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4248 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4249 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4250 C_SB_ETH_TX_CQ_INDEX),
4252 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4253 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4254 C_SB_ETH_TX_CQ_INDEX),
4255 bp->tx_ticks ? 0 : 1);
4259 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4260 struct bnx2x_fastpath *fp, int last)
4264 for (i = 0; i < last; i++) {
4265 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4266 struct sk_buff *skb = rx_buf->skb;
4269 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4273 if (fp->tpa_state[i] == BNX2X_TPA_START)
4274 pci_unmap_single(bp->pdev,
4275 pci_unmap_addr(rx_buf, mapping),
4277 PCI_DMA_FROMDEVICE);
4284 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4286 int func = BP_FUNC(bp);
4287 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4288 ETH_MAX_AGGREGATION_QUEUES_E1H;
4289 u16 ring_prod, cqe_ring_prod;
4292 bp->rx_buf_size = bp->dev->mtu;
4293 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4294 BCM_RX_ETH_PAYLOAD_ALIGN;
4296 if (bp->flags & TPA_ENABLE_FLAG) {
4298 "rx_buf_size %d effective_mtu %d\n",
4299 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4301 for_each_queue(bp, j) {
4302 struct bnx2x_fastpath *fp = &bp->fp[j];
4304 for (i = 0; i < max_agg_queues; i++) {
4305 fp->tpa_pool[i].skb =
4306 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4307 if (!fp->tpa_pool[i].skb) {
4308 BNX2X_ERR("Failed to allocate TPA "
4309 "skb pool for queue[%d] - "
4310 "disabling TPA on this "
4312 bnx2x_free_tpa_pool(bp, fp, i);
4313 fp->disable_tpa = 1;
4316 pci_unmap_addr_set((struct sw_rx_bd *)
4317 &bp->fp->tpa_pool[i],
4319 fp->tpa_state[i] = BNX2X_TPA_STOP;
4324 for_each_queue(bp, j) {
4325 struct bnx2x_fastpath *fp = &bp->fp[j];
4328 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4329 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4331 /* "next page" elements initialization */
4333 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4334 struct eth_rx_sge *sge;
4336 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4338 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4339 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4341 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4342 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4345 bnx2x_init_sge_ring_bit_mask(fp);
4348 for (i = 1; i <= NUM_RX_RINGS; i++) {
4349 struct eth_rx_bd *rx_bd;
4351 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4353 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4354 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4356 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4357 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4361 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4362 struct eth_rx_cqe_next_page *nextpg;
4364 nextpg = (struct eth_rx_cqe_next_page *)
4365 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4367 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4368 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4370 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4371 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4374 /* Allocate SGEs and initialize the ring elements */
4375 for (i = 0, ring_prod = 0;
4376 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4378 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4379 BNX2X_ERR("was only able to allocate "
4381 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4382 /* Cleanup already allocated elements */
4383 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4384 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4385 fp->disable_tpa = 1;
4389 ring_prod = NEXT_SGE_IDX(ring_prod);
4391 fp->rx_sge_prod = ring_prod;
4393 /* Allocate BDs and initialize BD ring */
4394 fp->rx_comp_cons = 0;
4395 cqe_ring_prod = ring_prod = 0;
4396 for (i = 0; i < bp->rx_ring_size; i++) {
4397 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4398 BNX2X_ERR("was only able to allocate "
4400 bp->eth_stats.rx_skb_alloc_failed++;
4403 ring_prod = NEXT_RX_IDX(ring_prod);
4404 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4405 WARN_ON(ring_prod <= i);
4408 fp->rx_bd_prod = ring_prod;
4409 /* must not have more available CQEs than BDs */
4410 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4412 fp->rx_pkt = fp->rx_calls = 0;
4415 * this will generate an interrupt (to the TSTORM)
4416 * must only be done after chip is initialized
4418 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4423 REG_WR(bp, BAR_USTRORM_INTMEM +
4424 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4425 U64_LO(fp->rx_comp_mapping));
4426 REG_WR(bp, BAR_USTRORM_INTMEM +
4427 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4428 U64_HI(fp->rx_comp_mapping));
4432 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4436 for_each_queue(bp, j) {
4437 struct bnx2x_fastpath *fp = &bp->fp[j];
4439 for (i = 1; i <= NUM_TX_RINGS; i++) {
4440 struct eth_tx_bd *tx_bd =
4441 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4444 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4445 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4447 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4448 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4451 fp->tx_pkt_prod = 0;
4452 fp->tx_pkt_cons = 0;
4455 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4460 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4462 int func = BP_FUNC(bp);
4464 spin_lock_init(&bp->spq_lock);
4466 bp->spq_left = MAX_SPQ_PENDING;
4467 bp->spq_prod_idx = 0;
4468 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4469 bp->spq_prod_bd = bp->spq;
4470 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4472 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4473 U64_LO(bp->spq_mapping));
4475 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4476 U64_HI(bp->spq_mapping));
4478 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4482 static void bnx2x_init_context(struct bnx2x *bp)
4486 for_each_queue(bp, i) {
4487 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4488 struct bnx2x_fastpath *fp = &bp->fp[i];
4489 u8 sb_id = FP_SB_ID(fp);
4491 context->xstorm_st_context.tx_bd_page_base_hi =
4492 U64_HI(fp->tx_desc_mapping);
4493 context->xstorm_st_context.tx_bd_page_base_lo =
4494 U64_LO(fp->tx_desc_mapping);
4495 context->xstorm_st_context.db_data_addr_hi =
4496 U64_HI(fp->tx_prods_mapping);
4497 context->xstorm_st_context.db_data_addr_lo =
4498 U64_LO(fp->tx_prods_mapping);
4499 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4500 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4502 context->ustorm_st_context.common.sb_index_numbers =
4503 BNX2X_RX_SB_INDEX_NUM;
4504 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4505 context->ustorm_st_context.common.status_block_id = sb_id;
4506 context->ustorm_st_context.common.flags =
4507 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4508 context->ustorm_st_context.common.mc_alignment_size =
4509 BCM_RX_ETH_PAYLOAD_ALIGN;
4510 context->ustorm_st_context.common.bd_buff_size =
4512 context->ustorm_st_context.common.bd_page_base_hi =
4513 U64_HI(fp->rx_desc_mapping);
4514 context->ustorm_st_context.common.bd_page_base_lo =
4515 U64_LO(fp->rx_desc_mapping);
4516 if (!fp->disable_tpa) {
4517 context->ustorm_st_context.common.flags |=
4518 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4519 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4520 context->ustorm_st_context.common.sge_buff_size =
4521 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4522 context->ustorm_st_context.common.sge_page_base_hi =
4523 U64_HI(fp->rx_sge_mapping);
4524 context->ustorm_st_context.common.sge_page_base_lo =
4525 U64_LO(fp->rx_sge_mapping);
4528 context->cstorm_st_context.sb_index_number =
4529 C_SB_ETH_TX_CQ_INDEX;
4530 context->cstorm_st_context.status_block_id = sb_id;
4532 context->xstorm_ag_context.cdu_reserved =
4533 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4534 CDU_REGION_NUMBER_XCM_AG,
4535 ETH_CONNECTION_TYPE);
4536 context->ustorm_ag_context.cdu_usage =
4537 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4538 CDU_REGION_NUMBER_UCM_AG,
4539 ETH_CONNECTION_TYPE);
4543 static void bnx2x_init_ind_table(struct bnx2x *bp)
4545 int func = BP_FUNC(bp);
4551 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4552 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4553 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4554 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4555 BP_CL_ID(bp) + (i % bp->num_queues));
4558 static void bnx2x_set_client_config(struct bnx2x *bp)
4560 struct tstorm_eth_client_config tstorm_client = {0};
4561 int port = BP_PORT(bp);
4564 tstorm_client.mtu = bp->dev->mtu;
4565 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4566 tstorm_client.config_flags =
4567 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4569 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4570 tstorm_client.config_flags |=
4571 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4572 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4576 if (bp->flags & TPA_ENABLE_FLAG) {
4577 tstorm_client.max_sges_for_packet =
4578 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4579 tstorm_client.max_sges_for_packet =
4580 ((tstorm_client.max_sges_for_packet +
4581 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4582 PAGES_PER_SGE_SHIFT;
4584 tstorm_client.config_flags |=
4585 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4588 for_each_queue(bp, i) {
4589 REG_WR(bp, BAR_TSTRORM_INTMEM +
4590 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4591 ((u32 *)&tstorm_client)[0]);
4592 REG_WR(bp, BAR_TSTRORM_INTMEM +
4593 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4594 ((u32 *)&tstorm_client)[1]);
4597 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4598 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4601 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4603 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4604 int mode = bp->rx_mode;
4605 int mask = (1 << BP_L_ID(bp));
4606 int func = BP_FUNC(bp);
4609 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4612 case BNX2X_RX_MODE_NONE: /* no Rx */
4613 tstorm_mac_filter.ucast_drop_all = mask;
4614 tstorm_mac_filter.mcast_drop_all = mask;
4615 tstorm_mac_filter.bcast_drop_all = mask;
4617 case BNX2X_RX_MODE_NORMAL:
4618 tstorm_mac_filter.bcast_accept_all = mask;
4620 case BNX2X_RX_MODE_ALLMULTI:
4621 tstorm_mac_filter.mcast_accept_all = mask;
4622 tstorm_mac_filter.bcast_accept_all = mask;
4624 case BNX2X_RX_MODE_PROMISC:
4625 tstorm_mac_filter.ucast_accept_all = mask;
4626 tstorm_mac_filter.mcast_accept_all = mask;
4627 tstorm_mac_filter.bcast_accept_all = mask;
4630 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4634 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4635 REG_WR(bp, BAR_TSTRORM_INTMEM +
4636 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4637 ((u32 *)&tstorm_mac_filter)[i]);
4639 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4640 ((u32 *)&tstorm_mac_filter)[i]); */
4643 if (mode != BNX2X_RX_MODE_NONE)
4644 bnx2x_set_client_config(bp);
4647 static void bnx2x_init_internal_common(struct bnx2x *bp)
4651 if (bp->flags & TPA_ENABLE_FLAG) {
4652 struct tstorm_eth_tpa_exist tpa = {0};
4656 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4658 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4662 /* Zero this manually as its initialization is
4663 currently missing in the initTool */
4664 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4665 REG_WR(bp, BAR_USTRORM_INTMEM +
4666 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4669 static void bnx2x_init_internal_port(struct bnx2x *bp)
4671 int port = BP_PORT(bp);
4673 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4674 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4675 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4679 static void bnx2x_init_internal_func(struct bnx2x *bp)
4681 struct tstorm_eth_function_common_config tstorm_config = {0};
4682 struct stats_indication_flags stats_flags = {0};
4683 int port = BP_PORT(bp);
4684 int func = BP_FUNC(bp);
4689 tstorm_config.config_flags = MULTI_FLAGS;
4690 tstorm_config.rss_result_mask = MULTI_MASK;
4693 tstorm_config.leading_client_id = BP_L_ID(bp);
4695 REG_WR(bp, BAR_TSTRORM_INTMEM +
4696 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4697 (*(u32 *)&tstorm_config));
4699 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4700 bnx2x_set_storm_rx_mode(bp);
4702 /* reset xstorm per client statistics */
4703 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4704 REG_WR(bp, BAR_XSTRORM_INTMEM +
4705 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4708 /* reset tstorm per client statistics */
4709 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4710 REG_WR(bp, BAR_TSTRORM_INTMEM +
4711 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4715 /* Init statistics related context */
4716 stats_flags.collect_eth = 1;
4718 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4719 ((u32 *)&stats_flags)[0]);
4720 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4721 ((u32 *)&stats_flags)[1]);
4723 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4724 ((u32 *)&stats_flags)[0]);
4725 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4726 ((u32 *)&stats_flags)[1]);
4728 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4729 ((u32 *)&stats_flags)[0]);
4730 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4731 ((u32 *)&stats_flags)[1]);
4733 REG_WR(bp, BAR_XSTRORM_INTMEM +
4734 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4735 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4736 REG_WR(bp, BAR_XSTRORM_INTMEM +
4737 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4738 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4740 REG_WR(bp, BAR_TSTRORM_INTMEM +
4741 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4742 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4743 REG_WR(bp, BAR_TSTRORM_INTMEM +
4744 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4745 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4747 if (CHIP_IS_E1H(bp)) {
4748 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4750 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4752 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4754 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4757 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4761 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4763 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4764 SGE_PAGE_SIZE * PAGES_PER_SGE),
4766 for_each_queue(bp, i) {
4767 struct bnx2x_fastpath *fp = &bp->fp[i];
4769 REG_WR(bp, BAR_USTRORM_INTMEM +
4770 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4771 U64_LO(fp->rx_comp_mapping));
4772 REG_WR(bp, BAR_USTRORM_INTMEM +
4773 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4774 U64_HI(fp->rx_comp_mapping));
4776 REG_WR16(bp, BAR_USTRORM_INTMEM +
4777 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4782 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4784 switch (load_code) {
4785 case FW_MSG_CODE_DRV_LOAD_COMMON:
4786 bnx2x_init_internal_common(bp);
4789 case FW_MSG_CODE_DRV_LOAD_PORT:
4790 bnx2x_init_internal_port(bp);
4793 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4794 bnx2x_init_internal_func(bp);
4798 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4803 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4807 for_each_queue(bp, i) {
4808 struct bnx2x_fastpath *fp = &bp->fp[i];
4811 fp->state = BNX2X_FP_STATE_CLOSED;
4813 fp->cl_id = BP_L_ID(bp) + i;
4814 fp->sb_id = fp->cl_id;
4816 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4817 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4818 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4820 bnx2x_update_fpsb_idx(fp);
4823 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4825 bnx2x_update_dsb_idx(bp);
4826 bnx2x_update_coalesce(bp);
4827 bnx2x_init_rx_rings(bp);
4828 bnx2x_init_tx_ring(bp);
4829 bnx2x_init_sp_ring(bp);
4830 bnx2x_init_context(bp);
4831 bnx2x_init_internal(bp, load_code);
4832 bnx2x_init_ind_table(bp);
4833 bnx2x_stats_init(bp);
4835 /* At this point, we are ready for interrupts */
4836 atomic_set(&bp->intr_sem, 0);
4838 /* flush all before enabling interrupts */
4842 bnx2x_int_enable(bp);
4845 /* end of nic init */
4848 * gzip service functions
4851 static int bnx2x_gunzip_init(struct bnx2x *bp)
4853 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4854 &bp->gunzip_mapping);
4855 if (bp->gunzip_buf == NULL)
4858 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4859 if (bp->strm == NULL)
4862 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4864 if (bp->strm->workspace == NULL)
4874 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4875 bp->gunzip_mapping);
4876 bp->gunzip_buf = NULL;
4879 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4880 " un-compression\n", bp->dev->name);
4884 static void bnx2x_gunzip_end(struct bnx2x *bp)
4886 kfree(bp->strm->workspace);
4891 if (bp->gunzip_buf) {
4892 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4893 bp->gunzip_mapping);
4894 bp->gunzip_buf = NULL;
4898 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4902 /* check gzip header */
4903 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4910 if (zbuf[3] & FNAME)
4911 while ((zbuf[n++] != 0) && (n < len));
4913 bp->strm->next_in = zbuf + n;
4914 bp->strm->avail_in = len - n;
4915 bp->strm->next_out = bp->gunzip_buf;
4916 bp->strm->avail_out = FW_BUF_SIZE;
4918 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4922 rc = zlib_inflate(bp->strm, Z_FINISH);
4923 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4924 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4925 bp->dev->name, bp->strm->msg);
4927 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4928 if (bp->gunzip_outlen & 0x3)
4929 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4930 " gunzip_outlen (%d) not aligned\n",
4931 bp->dev->name, bp->gunzip_outlen);
4932 bp->gunzip_outlen >>= 2;
4934 zlib_inflateEnd(bp->strm);
4936 if (rc == Z_STREAM_END)
4942 /* nic load/unload */
4945 * General service functions
4948 /* send a NIG loopback debug packet */
4949 static void bnx2x_lb_pckt(struct bnx2x *bp)
4953 /* Ethernet source and destination addresses */
4954 wb_write[0] = 0x55555555;
4955 wb_write[1] = 0x55555555;
4956 wb_write[2] = 0x20; /* SOP */
4957 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4959 /* NON-IP protocol */
4960 wb_write[0] = 0x09000000;
4961 wb_write[1] = 0x55555555;
4962 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4963 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4966 /* some of the internal memories
4967 * are not directly readable from the driver
4968 * to test them we send debug packets
4970 static int bnx2x_int_mem_test(struct bnx2x *bp)
4976 if (CHIP_REV_IS_FPGA(bp))
4978 else if (CHIP_REV_IS_EMUL(bp))
4983 DP(NETIF_MSG_HW, "start part1\n");
4985 /* Disable inputs of parser neighbor blocks */
4986 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4987 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4988 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4989 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4991 /* Write 0 to parser credits for CFC search request */
4992 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4994 /* send Ethernet packet */
4997 /* TODO do i reset NIG statistic? */
4998 /* Wait until NIG register shows 1 packet of size 0x10 */
4999 count = 1000 * factor;
5002 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5003 val = *bnx2x_sp(bp, wb_data[0]);
5011 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5015 /* Wait until PRS register shows 1 packet */
5016 count = 1000 * factor;
5018 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5026 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5030 /* Reset and init BRB, PRS */
5031 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5033 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5035 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5036 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5038 DP(NETIF_MSG_HW, "part2\n");
5040 /* Disable inputs of parser neighbor blocks */
5041 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5042 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5043 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5044 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5046 /* Write 0 to parser credits for CFC search request */
5047 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5049 /* send 10 Ethernet packets */
5050 for (i = 0; i < 10; i++)
5053 /* Wait until NIG register shows 10 + 1
5054 packets of size 11*0x10 = 0xb0 */
5055 count = 1000 * factor;
5058 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5059 val = *bnx2x_sp(bp, wb_data[0]);
5067 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5071 /* Wait until PRS register shows 2 packets */
5072 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5074 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5076 /* Write 1 to parser credits for CFC search request */
5077 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5079 /* Wait until PRS register shows 3 packets */
5080 msleep(10 * factor);
5081 /* Wait until NIG register shows 1 packet of size 0x10 */
5082 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5084 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5086 /* clear NIG EOP FIFO */
5087 for (i = 0; i < 11; i++)
5088 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5089 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5091 BNX2X_ERR("clear of NIG failed\n");
5095 /* Reset and init BRB, PRS, NIG */
5096 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5098 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5100 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5101 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5104 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5107 /* Enable inputs of parser neighbor blocks */
5108 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5109 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5110 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5111 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5113 DP(NETIF_MSG_HW, "done\n");
5118 static void enable_blocks_attention(struct bnx2x *bp)
5120 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5121 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5122 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5123 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5124 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5125 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5126 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5127 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5128 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5129 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5130 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5131 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5132 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5133 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5134 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5135 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5136 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5137 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5138 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5139 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5140 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5141 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5142 if (CHIP_REV_IS_FPGA(bp))
5143 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5145 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5146 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5147 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5148 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5149 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5150 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5151 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5152 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5153 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5154 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5158 static void bnx2x_reset_common(struct bnx2x *bp)
5161 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5163 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5166 static int bnx2x_init_common(struct bnx2x *bp)
5170 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5172 bnx2x_reset_common(bp);
5173 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5174 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5176 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5177 if (CHIP_IS_E1H(bp))
5178 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5180 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5182 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5184 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5185 if (CHIP_IS_E1(bp)) {
5186 /* enable HW interrupt from PXP on USDM overflow
5187 bit 16 on INT_MASK_0 */
5188 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5191 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5195 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5196 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5197 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5198 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5199 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5201 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5202 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5203 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5204 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5205 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5208 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5210 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5211 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5212 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5215 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5216 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5218 /* let the HW do it's magic ... */
5220 /* finish PXP init */
5221 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5223 BNX2X_ERR("PXP2 CFG failed\n");
5226 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5228 BNX2X_ERR("PXP2 RD_INIT failed\n");
5232 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5233 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5235 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5237 /* clean the DMAE memory */
5239 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5241 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5242 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5243 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5244 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5246 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5247 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5248 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5249 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5251 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5252 /* soft reset pulse */
5253 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5254 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5257 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5260 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5261 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5262 if (!CHIP_REV_IS_SLOW(bp)) {
5263 /* enable hw interrupt from doorbell Q */
5264 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5267 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5268 if (CHIP_REV_IS_SLOW(bp)) {
5269 /* fix for emulation and FPGA for no pause */
5270 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5271 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5272 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5273 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5276 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5277 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5279 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5280 if (CHIP_IS_E1H(bp))
5281 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5283 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5284 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5285 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5286 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5288 if (CHIP_IS_E1H(bp)) {
5289 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5290 STORM_INTMEM_SIZE_E1H/2);
5292 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5293 0, STORM_INTMEM_SIZE_E1H/2);
5294 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5295 STORM_INTMEM_SIZE_E1H/2);
5297 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5298 0, STORM_INTMEM_SIZE_E1H/2);
5299 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5300 STORM_INTMEM_SIZE_E1H/2);
5302 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5303 0, STORM_INTMEM_SIZE_E1H/2);
5304 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5305 STORM_INTMEM_SIZE_E1H/2);
5307 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5308 0, STORM_INTMEM_SIZE_E1H/2);
5310 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5311 STORM_INTMEM_SIZE_E1);
5312 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5313 STORM_INTMEM_SIZE_E1);
5314 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5315 STORM_INTMEM_SIZE_E1);
5316 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5317 STORM_INTMEM_SIZE_E1);
5320 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5321 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5322 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5323 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5326 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5328 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5331 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5332 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5333 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5335 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5336 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5337 REG_WR(bp, i, 0xc0cac01a);
5338 /* TODO: replace with something meaningful */
5340 if (CHIP_IS_E1H(bp))
5341 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5342 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5344 if (sizeof(union cdu_context) != 1024)
5345 /* we currently assume that a context is 1024 bytes */
5346 printk(KERN_ALERT PFX "please adjust the size of"
5347 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5349 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5350 val = (4 << 24) + (0 << 12) + 1024;
5351 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5352 if (CHIP_IS_E1(bp)) {
5353 /* !!! fix pxp client crdit until excel update */
5354 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5355 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5358 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5359 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5361 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5362 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5364 /* PXPCS COMMON comes here */
5365 /* Reset PCIE errors for debug */
5366 REG_WR(bp, 0x2814, 0xffffffff);
5367 REG_WR(bp, 0x3820, 0xffffffff);
5369 /* EMAC0 COMMON comes here */
5370 /* EMAC1 COMMON comes here */
5371 /* DBU COMMON comes here */
5372 /* DBG COMMON comes here */
5374 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5375 if (CHIP_IS_E1H(bp)) {
5376 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5377 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5380 if (CHIP_REV_IS_SLOW(bp))
5383 /* finish CFC init */
5384 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5386 BNX2X_ERR("CFC LL_INIT failed\n");
5389 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5391 BNX2X_ERR("CFC AC_INIT failed\n");
5394 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5396 BNX2X_ERR("CFC CAM_INIT failed\n");
5399 REG_WR(bp, CFC_REG_DEBUG0, 0);
5401 /* read NIG statistic
5402 to see if this is our first up since powerup */
5403 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5404 val = *bnx2x_sp(bp, wb_data[0]);
5406 /* do internal memory self test */
5407 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5408 BNX2X_ERR("internal mem self test failed\n");
5412 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5413 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5414 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5415 /* Fan failure is indicated by SPIO 5 */
5416 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5417 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5419 /* set to active low mode */
5420 val = REG_RD(bp, MISC_REG_SPIO_INT);
5421 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5422 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5423 REG_WR(bp, MISC_REG_SPIO_INT, val);
5425 /* enable interrupt to signal the IGU */
5426 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5427 val |= (1 << MISC_REGISTERS_SPIO_5);
5428 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5435 /* clear PXP2 attentions */
5436 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5438 enable_blocks_attention(bp);
5440 if (!BP_NOMCP(bp)) {
5441 bnx2x_acquire_phy_lock(bp);
5442 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5443 bnx2x_release_phy_lock(bp);
5445 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5450 static int bnx2x_init_port(struct bnx2x *bp)
5452 int port = BP_PORT(bp);
5455 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5457 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5459 /* Port PXP comes here */
5460 /* Port PXP2 comes here */
5465 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5466 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5467 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5468 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5473 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5474 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5475 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5476 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5481 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5482 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5483 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5484 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5486 /* Port CMs come here */
5488 /* Port QM comes here */
5490 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5491 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5493 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5494 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5496 /* Port DQ comes here */
5497 /* Port BRB1 comes here */
5498 /* Port PRS comes here */
5499 /* Port TSDM comes here */
5500 /* Port CSDM comes here */
5501 /* Port USDM comes here */
5502 /* Port XSDM comes here */
5503 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5504 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5505 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5506 port ? USEM_PORT1_END : USEM_PORT0_END);
5507 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5508 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5509 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5510 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5511 /* Port UPB comes here */
5512 /* Port XPB comes here */
5514 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5515 port ? PBF_PORT1_END : PBF_PORT0_END);
5517 /* configure PBF to work without PAUSE mtu 9000 */
5518 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5520 /* update threshold */
5521 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5522 /* update init credit */
5523 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5526 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5528 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5531 /* tell the searcher where the T2 table is */
5532 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5534 wb_write[0] = U64_LO(bp->t2_mapping);
5535 wb_write[1] = U64_HI(bp->t2_mapping);
5536 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5537 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5538 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5539 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5541 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5542 /* Port SRCH comes here */
5544 /* Port CDU comes here */
5545 /* Port CFC comes here */
5547 if (CHIP_IS_E1(bp)) {
5548 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5549 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5551 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5552 port ? HC_PORT1_END : HC_PORT0_END);
5554 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5555 MISC_AEU_PORT0_START,
5556 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5557 /* init aeu_mask_attn_func_0/1:
5558 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5559 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5560 * bits 4-7 are used for "per vn group attention" */
5561 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5562 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5564 /* Port PXPCS comes here */
5565 /* Port EMAC0 comes here */
5566 /* Port EMAC1 comes here */
5567 /* Port DBU comes here */
5568 /* Port DBG comes here */
5569 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5570 port ? NIG_PORT1_END : NIG_PORT0_END);
5572 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5574 if (CHIP_IS_E1H(bp)) {
5576 struct cmng_struct_per_port m_cmng_port;
5579 /* 0x2 disable e1hov, 0x1 enable */
5580 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5581 (IS_E1HMF(bp) ? 0x1 : 0x2));
5583 /* Init RATE SHAPING and FAIRNESS contexts.
5584 Initialize as if there is 10G link. */
5585 wsum = bnx2x_calc_vn_wsum(bp);
5586 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5588 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5589 bnx2x_init_vn_minmax(bp, 2*vn + port,
5590 wsum, 10000, &m_cmng_port);
5593 /* Port MCP comes here */
5594 /* Port DMAE comes here */
5596 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5597 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5598 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5599 /* add SPIO 5 to group 0 */
5600 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5601 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5602 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5609 bnx2x__link_reset(bp);
5614 #define ILT_PER_FUNC (768/2)
5615 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5616 /* the phys address is shifted right 12 bits and has an added
5617 1=valid bit added to the 53rd bit
5618 then since this is a wide register(TM)
5619 we split it into two 32 bit writes
5621 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5622 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5623 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5624 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5626 #define CNIC_ILT_LINES 0
5628 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5632 if (CHIP_IS_E1H(bp))
5633 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5635 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5637 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5640 static int bnx2x_init_func(struct bnx2x *bp)
5642 int port = BP_PORT(bp);
5643 int func = BP_FUNC(bp);
5646 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5648 i = FUNC_ILT_BASE(func);
5650 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5651 if (CHIP_IS_E1H(bp)) {
5652 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5653 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5655 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5656 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5659 if (CHIP_IS_E1H(bp)) {
5660 for (i = 0; i < 9; i++)
5661 bnx2x_init_block(bp,
5662 cm_start[func][i], cm_end[func][i]);
5664 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5665 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5668 /* HC init per function */
5669 if (CHIP_IS_E1H(bp)) {
5670 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5672 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5673 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5675 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5677 if (CHIP_IS_E1H(bp))
5678 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5680 /* Reset PCIE errors for debug */
5681 REG_WR(bp, 0x2114, 0xffffffff);
5682 REG_WR(bp, 0x2120, 0xffffffff);
5687 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5691 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5692 BP_FUNC(bp), load_code);
5695 mutex_init(&bp->dmae_mutex);
5696 bnx2x_gunzip_init(bp);
5698 switch (load_code) {
5699 case FW_MSG_CODE_DRV_LOAD_COMMON:
5700 rc = bnx2x_init_common(bp);
5705 case FW_MSG_CODE_DRV_LOAD_PORT:
5707 rc = bnx2x_init_port(bp);
5712 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5714 rc = bnx2x_init_func(bp);
5720 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5724 if (!BP_NOMCP(bp)) {
5725 int func = BP_FUNC(bp);
5727 bp->fw_drv_pulse_wr_seq =
5728 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5729 DRV_PULSE_SEQ_MASK);
5730 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5731 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5732 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5736 /* this needs to be done before gunzip end */
5737 bnx2x_zero_def_sb(bp);
5738 for_each_queue(bp, i)
5739 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5742 bnx2x_gunzip_end(bp);
5747 /* send the MCP a request, block until there is a reply */
5748 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5750 int func = BP_FUNC(bp);
5751 u32 seq = ++bp->fw_seq;
5754 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5756 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5757 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5760 /* let the FW do it's magic ... */
5763 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5765 /* Give the FW up to 2 second (200*10ms) */
5766 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5768 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5769 cnt*delay, rc, seq);
5771 /* is this a reply to our command? */
5772 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5773 rc &= FW_MSG_CODE_MASK;
5777 BNX2X_ERR("FW failed to respond!\n");
5785 static void bnx2x_free_mem(struct bnx2x *bp)
5788 #define BNX2X_PCI_FREE(x, y, size) \
5791 pci_free_consistent(bp->pdev, size, x, y); \
5797 #define BNX2X_FREE(x) \
5808 for_each_queue(bp, i) {
5811 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5812 bnx2x_fp(bp, i, status_blk_mapping),
5813 sizeof(struct host_status_block) +
5814 sizeof(struct eth_tx_db_data));
5816 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5817 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5818 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5819 bnx2x_fp(bp, i, tx_desc_mapping),
5820 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5822 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5823 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5824 bnx2x_fp(bp, i, rx_desc_mapping),
5825 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5827 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5828 bnx2x_fp(bp, i, rx_comp_mapping),
5829 sizeof(struct eth_fast_path_rx_cqe) *
5833 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5834 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5835 bnx2x_fp(bp, i, rx_sge_mapping),
5836 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5838 /* end of fastpath */
5840 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5841 sizeof(struct host_def_status_block));
5843 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5844 sizeof(struct bnx2x_slowpath));
5847 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5848 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5849 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5850 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5852 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5854 #undef BNX2X_PCI_FREE
5858 static int bnx2x_alloc_mem(struct bnx2x *bp)
5861 #define BNX2X_PCI_ALLOC(x, y, size) \
5863 x = pci_alloc_consistent(bp->pdev, size, y); \
5865 goto alloc_mem_err; \
5866 memset(x, 0, size); \
5869 #define BNX2X_ALLOC(x, size) \
5871 x = vmalloc(size); \
5873 goto alloc_mem_err; \
5874 memset(x, 0, size); \
5880 for_each_queue(bp, i) {
5881 bnx2x_fp(bp, i, bp) = bp;
5884 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5885 &bnx2x_fp(bp, i, status_blk_mapping),
5886 sizeof(struct host_status_block) +
5887 sizeof(struct eth_tx_db_data));
5889 bnx2x_fp(bp, i, hw_tx_prods) =
5890 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5892 bnx2x_fp(bp, i, tx_prods_mapping) =
5893 bnx2x_fp(bp, i, status_blk_mapping) +
5894 sizeof(struct host_status_block);
5896 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5897 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5898 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5899 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5900 &bnx2x_fp(bp, i, tx_desc_mapping),
5901 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5903 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5904 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5905 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5906 &bnx2x_fp(bp, i, rx_desc_mapping),
5907 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5909 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5910 &bnx2x_fp(bp, i, rx_comp_mapping),
5911 sizeof(struct eth_fast_path_rx_cqe) *
5915 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5916 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5917 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5918 &bnx2x_fp(bp, i, rx_sge_mapping),
5919 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5921 /* end of fastpath */
5923 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5924 sizeof(struct host_def_status_block));
5926 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5927 sizeof(struct bnx2x_slowpath));
5930 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5933 for (i = 0; i < 64*1024; i += 64) {
5934 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5935 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5938 /* allocate searcher T2 table
5939 we allocate 1/4 of alloc num for T2
5940 (which is not entered into the ILT) */
5941 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5944 for (i = 0; i < 16*1024; i += 64)
5945 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5947 /* now fixup the last line in the block to point to the next block */
5948 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5950 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5951 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5953 /* QM queues (128*MAX_CONN) */
5954 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5957 /* Slow path ring */
5958 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5966 #undef BNX2X_PCI_ALLOC
5970 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5974 for_each_queue(bp, i) {
5975 struct bnx2x_fastpath *fp = &bp->fp[i];
5977 u16 bd_cons = fp->tx_bd_cons;
5978 u16 sw_prod = fp->tx_pkt_prod;
5979 u16 sw_cons = fp->tx_pkt_cons;
5981 while (sw_cons != sw_prod) {
5982 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5988 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5992 for_each_queue(bp, j) {
5993 struct bnx2x_fastpath *fp = &bp->fp[j];
5995 for (i = 0; i < NUM_RX_BD; i++) {
5996 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5997 struct sk_buff *skb = rx_buf->skb;
6002 pci_unmap_single(bp->pdev,
6003 pci_unmap_addr(rx_buf, mapping),
6005 PCI_DMA_FROMDEVICE);
6010 if (!fp->disable_tpa)
6011 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6012 ETH_MAX_AGGREGATION_QUEUES_E1 :
6013 ETH_MAX_AGGREGATION_QUEUES_E1H);
6017 static void bnx2x_free_skbs(struct bnx2x *bp)
6019 bnx2x_free_tx_skbs(bp);
6020 bnx2x_free_rx_skbs(bp);
6023 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6027 free_irq(bp->msix_table[0].vector, bp->dev);
6028 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6029 bp->msix_table[0].vector);
6031 for_each_queue(bp, i) {
6032 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6033 "state %x\n", i, bp->msix_table[i + offset].vector,
6034 bnx2x_fp(bp, i, state));
6036 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6037 BNX2X_ERR("IRQ of fp #%d being freed while "
6038 "state != closed\n", i);
6040 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6044 static void bnx2x_free_irq(struct bnx2x *bp)
6046 if (bp->flags & USING_MSIX_FLAG) {
6047 bnx2x_free_msix_irqs(bp);
6048 pci_disable_msix(bp->pdev);
6049 bp->flags &= ~USING_MSIX_FLAG;
6052 free_irq(bp->pdev->irq, bp->dev);
6055 static int bnx2x_enable_msix(struct bnx2x *bp)
6059 bp->msix_table[0].entry = 0;
6061 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6063 for_each_queue(bp, i) {
6064 int igu_vec = offset + i + BP_L_ID(bp);
6066 bp->msix_table[i + offset].entry = igu_vec;
6067 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6068 "(fastpath #%u)\n", i + offset, igu_vec, i);
6071 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6072 bp->num_queues + offset);
6074 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6077 bp->flags |= USING_MSIX_FLAG;
6082 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6084 int i, rc, offset = 1;
6086 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6087 bp->dev->name, bp->dev);
6089 BNX2X_ERR("request sp irq failed\n");
6093 for_each_queue(bp, i) {
6094 rc = request_irq(bp->msix_table[i + offset].vector,
6095 bnx2x_msix_fp_int, 0,
6096 bp->dev->name, &bp->fp[i]);
6098 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6100 bnx2x_free_msix_irqs(bp);
6104 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6110 static int bnx2x_req_irq(struct bnx2x *bp)
6114 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6115 bp->dev->name, bp->dev);
6117 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6122 static void bnx2x_napi_enable(struct bnx2x *bp)
6126 for_each_queue(bp, i)
6127 napi_enable(&bnx2x_fp(bp, i, napi));
6130 static void bnx2x_napi_disable(struct bnx2x *bp)
6134 for_each_queue(bp, i)
6135 napi_disable(&bnx2x_fp(bp, i, napi));
6138 static void bnx2x_netif_start(struct bnx2x *bp)
6140 if (atomic_dec_and_test(&bp->intr_sem)) {
6141 if (netif_running(bp->dev)) {
6142 if (bp->state == BNX2X_STATE_OPEN)
6143 netif_wake_queue(bp->dev);
6144 bnx2x_napi_enable(bp);
6145 bnx2x_int_enable(bp);
6150 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6152 bnx2x_int_disable_sync(bp, disable_hw);
6153 bnx2x_napi_disable(bp);
6154 if (netif_running(bp->dev)) {
6155 netif_tx_disable(bp->dev);
6156 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6161 * Init service functions
6164 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6166 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6167 int port = BP_PORT(bp);
6170 * unicasts 0-31:port0 32-63:port1
6171 * multicast 64-127:port0 128-191:port1
6173 config->hdr.length_6b = 2;
6174 config->hdr.offset = port ? 32 : 0;
6175 config->hdr.client_id = BP_CL_ID(bp);
6176 config->hdr.reserved1 = 0;
6179 config->config_table[0].cam_entry.msb_mac_addr =
6180 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6181 config->config_table[0].cam_entry.middle_mac_addr =
6182 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6183 config->config_table[0].cam_entry.lsb_mac_addr =
6184 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6185 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6187 config->config_table[0].target_table_entry.flags = 0;
6189 CAM_INVALIDATE(config->config_table[0]);
6190 config->config_table[0].target_table_entry.client_id = 0;
6191 config->config_table[0].target_table_entry.vlan_id = 0;
6193 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6194 (set ? "setting" : "clearing"),
6195 config->config_table[0].cam_entry.msb_mac_addr,
6196 config->config_table[0].cam_entry.middle_mac_addr,
6197 config->config_table[0].cam_entry.lsb_mac_addr);
6200 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6201 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6202 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6203 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6205 config->config_table[1].target_table_entry.flags =
6206 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6208 CAM_INVALIDATE(config->config_table[1]);
6209 config->config_table[1].target_table_entry.client_id = 0;
6210 config->config_table[1].target_table_entry.vlan_id = 0;
6212 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6213 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6214 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6217 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6219 struct mac_configuration_cmd_e1h *config =
6220 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6222 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6223 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6227 /* CAM allocation for E1H
6228 * unicasts: by func number
6229 * multicast: 20+FUNC*20, 20 each
6231 config->hdr.length_6b = 1;
6232 config->hdr.offset = BP_FUNC(bp);
6233 config->hdr.client_id = BP_CL_ID(bp);
6234 config->hdr.reserved1 = 0;
6237 config->config_table[0].msb_mac_addr =
6238 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6239 config->config_table[0].middle_mac_addr =
6240 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6241 config->config_table[0].lsb_mac_addr =
6242 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6243 config->config_table[0].client_id = BP_L_ID(bp);
6244 config->config_table[0].vlan_id = 0;
6245 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6247 config->config_table[0].flags = BP_PORT(bp);
6249 config->config_table[0].flags =
6250 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6252 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6253 (set ? "setting" : "clearing"),
6254 config->config_table[0].msb_mac_addr,
6255 config->config_table[0].middle_mac_addr,
6256 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6258 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6259 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6260 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6263 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6264 int *state_p, int poll)
6266 /* can take a while if any port is running */
6269 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6270 poll ? "polling" : "waiting", state, idx);
6275 bnx2x_rx_int(bp->fp, 10);
6276 /* if index is different from 0
6277 * the reply for some commands will
6278 * be on the non default queue
6281 bnx2x_rx_int(&bp->fp[idx], 10);
6284 mb(); /* state is changed by bnx2x_sp_event() */
6285 if (*state_p == state)
6292 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6293 poll ? "polling" : "waiting", state, idx);
6294 #ifdef BNX2X_STOP_ON_ERROR
6301 static int bnx2x_setup_leading(struct bnx2x *bp)
6305 /* reset IGU state */
6306 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6309 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6311 /* Wait for completion */
6312 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6317 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6319 /* reset IGU state */
6320 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6323 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6324 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6326 /* Wait for completion */
6327 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6328 &(bp->fp[index].state), 0);
6331 static int bnx2x_poll(struct napi_struct *napi, int budget);
6332 static void bnx2x_set_rx_mode(struct net_device *dev);
6334 /* must be called with rtnl_lock */
6335 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6339 #ifdef BNX2X_STOP_ON_ERROR
6340 if (unlikely(bp->panic))
6344 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6350 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6351 /* user requested number */
6352 bp->num_queues = use_multi;
6355 bp->num_queues = min_t(u32, num_online_cpus(),
6361 "set number of queues to %d\n", bp->num_queues);
6363 /* if we can't use MSI-X we only need one fp,
6364 * so try to enable MSI-X with the requested number of fp's
6365 * and fallback to MSI or legacy INTx with one fp
6367 rc = bnx2x_enable_msix(bp);
6369 /* failed to enable MSI-X */
6372 BNX2X_ERR("Multi requested but failed"
6373 " to enable MSI-X\n");
6377 if (bnx2x_alloc_mem(bp))
6380 for_each_queue(bp, i)
6381 bnx2x_fp(bp, i, disable_tpa) =
6382 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6384 for_each_queue(bp, i)
6385 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6388 #ifdef BNX2X_STOP_ON_ERROR
6389 for_each_queue(bp, i) {
6390 struct bnx2x_fastpath *fp = &bp->fp[i];
6392 fp->poll_no_work = 0;
6394 fp->poll_max_calls = 0;
6395 fp->poll_complete = 0;
6399 bnx2x_napi_enable(bp);
6401 if (bp->flags & USING_MSIX_FLAG) {
6402 rc = bnx2x_req_msix_irqs(bp);
6404 pci_disable_msix(bp->pdev);
6407 printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
6410 rc = bnx2x_req_irq(bp);
6412 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6417 /* Send LOAD_REQUEST command to MCP
6418 Returns the type of LOAD command:
6419 if it is the first port to be initialized
6420 common blocks should be initialized, otherwise - not
6422 if (!BP_NOMCP(bp)) {
6423 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6425 BNX2X_ERR("MCP response failure, aborting\n");
6429 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6430 rc = -EBUSY; /* other port in diagnostic mode */
6435 int port = BP_PORT(bp);
6437 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6438 load_count[0], load_count[1], load_count[2]);
6440 load_count[1 + port]++;
6441 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6442 load_count[0], load_count[1], load_count[2]);
6443 if (load_count[0] == 1)
6444 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6445 else if (load_count[1 + port] == 1)
6446 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6448 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6451 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6452 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6456 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6459 rc = bnx2x_init_hw(bp, load_code);
6461 BNX2X_ERR("HW init failed, aborting\n");
6465 /* Setup NIC internals and enable interrupts */
6466 bnx2x_nic_init(bp, load_code);
6468 /* Send LOAD_DONE command to MCP */
6469 if (!BP_NOMCP(bp)) {
6470 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6472 BNX2X_ERR("MCP response failure, aborting\n");
6478 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6480 rc = bnx2x_setup_leading(bp);
6482 BNX2X_ERR("Setup leading failed!\n");
6486 if (CHIP_IS_E1H(bp))
6487 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6488 BNX2X_ERR("!!! mf_cfg function disabled\n");
6489 bp->state = BNX2X_STATE_DISABLED;
6492 if (bp->state == BNX2X_STATE_OPEN)
6493 for_each_nondefault_queue(bp, i) {
6494 rc = bnx2x_setup_multi(bp, i);
6500 bnx2x_set_mac_addr_e1(bp, 1);
6502 bnx2x_set_mac_addr_e1h(bp, 1);
6505 bnx2x_initial_phy_init(bp);
6507 /* Start fast path */
6508 switch (load_mode) {
6510 /* Tx queue should be only reenabled */
6511 netif_wake_queue(bp->dev);
6512 /* Initialize the receive filter. */
6513 bnx2x_set_rx_mode(bp->dev);
6517 netif_start_queue(bp->dev);
6518 /* Initialize the receive filter. */
6519 bnx2x_set_rx_mode(bp->dev);
6523 /* Initialize the receive filter. */
6524 bnx2x_set_rx_mode(bp->dev);
6525 bp->state = BNX2X_STATE_DIAG;
6533 bnx2x__link_status_update(bp);
6535 /* start the timer */
6536 mod_timer(&bp->timer, jiffies + bp->current_interval);
6542 bnx2x_int_disable_sync(bp, 1);
6543 if (!BP_NOMCP(bp)) {
6544 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6545 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6548 /* Free SKBs, SGEs, TPA pool and driver internals */
6549 bnx2x_free_skbs(bp);
6550 for_each_queue(bp, i)
6551 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6556 bnx2x_napi_disable(bp);
6557 for_each_queue(bp, i)
6558 netif_napi_del(&bnx2x_fp(bp, i, napi));
6561 /* TBD we really need to reset the chip
6562 if we want to recover from this */
6566 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6570 /* halt the connection */
6571 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6572 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6574 /* Wait for completion */
6575 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6576 &(bp->fp[index].state), 1);
6577 if (rc) /* timeout */
6580 /* delete cfc entry */
6581 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6583 /* Wait for completion */
6584 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6585 &(bp->fp[index].state), 1);
6589 static int bnx2x_stop_leading(struct bnx2x *bp)
6591 u16 dsb_sp_prod_idx;
6592 /* if the other port is handling traffic,
6593 this can take a lot of time */
6599 /* Send HALT ramrod */
6600 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6601 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6603 /* Wait for completion */
6604 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6605 &(bp->fp[0].state), 1);
6606 if (rc) /* timeout */
6609 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6611 /* Send PORT_DELETE ramrod */
6612 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6614 /* Wait for completion to arrive on default status block
6615 we are going to reset the chip anyway
6616 so there is not much to do if this times out
6618 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6620 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6621 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6622 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6623 #ifdef BNX2X_STOP_ON_ERROR
6632 rmb(); /* Refresh the dsb_sp_prod */
6634 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6635 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6640 static void bnx2x_reset_func(struct bnx2x *bp)
6642 int port = BP_PORT(bp);
6643 int func = BP_FUNC(bp);
6647 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6648 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6650 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6653 base = FUNC_ILT_BASE(func);
6654 for (i = base; i < base + ILT_PER_FUNC; i++)
6655 bnx2x_ilt_wr(bp, i, 0);
6658 static void bnx2x_reset_port(struct bnx2x *bp)
6660 int port = BP_PORT(bp);
6663 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6665 /* Do not rcv packets to BRB */
6666 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6667 /* Do not direct rcv packets that are not for MCP to the BRB */
6668 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6669 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6672 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6675 /* Check for BRB port occupancy */
6676 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6678 DP(NETIF_MSG_IFDOWN,
6679 "BRB1 is not empty %d blocks are occupied\n", val);
6681 /* TODO: Close Doorbell port? */
6684 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6686 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6687 BP_FUNC(bp), reset_code);
6689 switch (reset_code) {
6690 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6691 bnx2x_reset_port(bp);
6692 bnx2x_reset_func(bp);
6693 bnx2x_reset_common(bp);
6696 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6697 bnx2x_reset_port(bp);
6698 bnx2x_reset_func(bp);
6701 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6702 bnx2x_reset_func(bp);
6706 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6711 /* must be called with rtnl_lock */
6712 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6714 int port = BP_PORT(bp);
6718 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6720 bp->rx_mode = BNX2X_RX_MODE_NONE;
6721 bnx2x_set_storm_rx_mode(bp);
6723 bnx2x_netif_stop(bp, 1);
6725 del_timer_sync(&bp->timer);
6726 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6727 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6728 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6733 /* Wait until tx fast path tasks complete */
6734 for_each_queue(bp, i) {
6735 struct bnx2x_fastpath *fp = &bp->fp[i];
6739 while (bnx2x_has_tx_work_unload(fp)) {
6741 bnx2x_tx_int(fp, 1000);
6743 BNX2X_ERR("timeout waiting for queue[%d]\n",
6745 #ifdef BNX2X_STOP_ON_ERROR
6757 /* Give HW time to discard old tx messages */
6760 if (CHIP_IS_E1(bp)) {
6761 struct mac_configuration_cmd *config =
6762 bnx2x_sp(bp, mcast_config);
6764 bnx2x_set_mac_addr_e1(bp, 0);
6766 for (i = 0; i < config->hdr.length_6b; i++)
6767 CAM_INVALIDATE(config->config_table[i]);
6769 config->hdr.length_6b = i;
6770 if (CHIP_REV_IS_SLOW(bp))
6771 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6773 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6774 config->hdr.client_id = BP_CL_ID(bp);
6775 config->hdr.reserved1 = 0;
6777 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6778 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6779 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6782 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6784 bnx2x_set_mac_addr_e1h(bp, 0);
6786 for (i = 0; i < MC_HASH_SIZE; i++)
6787 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6790 if (unload_mode == UNLOAD_NORMAL)
6791 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6793 else if (bp->flags & NO_WOL_FLAG) {
6794 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6795 if (CHIP_IS_E1H(bp))
6796 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6798 } else if (bp->wol) {
6799 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6800 u8 *mac_addr = bp->dev->dev_addr;
6802 /* The mac address is written to entries 1-4 to
6803 preserve entry 0 which is used by the PMF */
6804 u8 entry = (BP_E1HVN(bp) + 1)*8;
6806 val = (mac_addr[0] << 8) | mac_addr[1];
6807 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6809 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6810 (mac_addr[4] << 8) | mac_addr[5];
6811 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6813 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6816 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6818 /* Close multi and leading connections
6819 Completions for ramrods are collected in a synchronous way */
6820 for_each_nondefault_queue(bp, i)
6821 if (bnx2x_stop_multi(bp, i))
6824 rc = bnx2x_stop_leading(bp);
6826 BNX2X_ERR("Stop leading failed!\n");
6827 #ifdef BNX2X_STOP_ON_ERROR
6836 reset_code = bnx2x_fw_command(bp, reset_code);
6838 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6839 load_count[0], load_count[1], load_count[2]);
6841 load_count[1 + port]--;
6842 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6843 load_count[0], load_count[1], load_count[2]);
6844 if (load_count[0] == 0)
6845 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6846 else if (load_count[1 + port] == 0)
6847 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6849 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6852 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6853 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6854 bnx2x__link_reset(bp);
6856 /* Reset the chip */
6857 bnx2x_reset_chip(bp, reset_code);
6859 /* Report UNLOAD_DONE to MCP */
6861 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6864 /* Free SKBs, SGEs, TPA pool and driver internals */
6865 bnx2x_free_skbs(bp);
6866 for_each_queue(bp, i)
6867 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6868 for_each_queue(bp, i)
6869 netif_napi_del(&bnx2x_fp(bp, i, napi));
6872 bp->state = BNX2X_STATE_CLOSED;
6874 netif_carrier_off(bp->dev);
6879 static void bnx2x_reset_task(struct work_struct *work)
6881 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6883 #ifdef BNX2X_STOP_ON_ERROR
6884 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6885 " so reset not done to allow debug dump,\n"
6886 KERN_ERR " you will need to reboot when done\n");
6892 if (!netif_running(bp->dev))
6893 goto reset_task_exit;
6895 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6896 bnx2x_nic_load(bp, LOAD_NORMAL);
6902 /* end of nic load/unload */
6907 * Init service functions
6910 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6914 /* Check if there is any driver already loaded */
6915 val = REG_RD(bp, MISC_REG_UNPREPARED);
6917 /* Check if it is the UNDI driver
6918 * UNDI driver initializes CID offset for normal bell to 0x7
6920 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6921 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6923 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6925 int func = BP_FUNC(bp);
6929 /* clear the UNDI indication */
6930 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6932 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6934 /* try unload UNDI on port 0 */
6937 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6938 DRV_MSG_SEQ_NUMBER_MASK);
6939 reset_code = bnx2x_fw_command(bp, reset_code);
6941 /* if UNDI is loaded on the other port */
6942 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6944 /* send "DONE" for previous unload */
6945 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6947 /* unload UNDI on port 1 */
6950 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6951 DRV_MSG_SEQ_NUMBER_MASK);
6952 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6954 bnx2x_fw_command(bp, reset_code);
6957 /* now it's safe to release the lock */
6958 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6960 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6961 HC_REG_CONFIG_0), 0x1000);
6963 /* close input traffic and wait for it */
6964 /* Do not rcv packets to BRB */
6966 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6967 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6968 /* Do not direct rcv packets that are not for MCP to
6971 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6972 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6975 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6976 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6979 /* save NIG port swap info */
6980 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6981 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6984 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6987 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6989 /* take the NIG out of reset and restore swap values */
6991 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6992 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6993 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6994 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6996 /* send unload done to the MCP */
6997 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6999 /* restore our func and fw_seq */
7002 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7003 DRV_MSG_SEQ_NUMBER_MASK);
7006 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7010 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7012 u32 val, val2, val3, val4, id;
7015 /* Get the chip revision id and number. */
7016 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7017 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7018 id = ((val & 0xffff) << 16);
7019 val = REG_RD(bp, MISC_REG_CHIP_REV);
7020 id |= ((val & 0xf) << 12);
7021 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7022 id |= ((val & 0xff) << 4);
7023 val = REG_RD(bp, MISC_REG_BOND_ID);
7025 bp->common.chip_id = id;
7026 bp->link_params.chip_id = bp->common.chip_id;
7027 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7029 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7030 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7031 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7032 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7033 bp->common.flash_size, bp->common.flash_size);
7035 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7036 bp->link_params.shmem_base = bp->common.shmem_base;
7037 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7039 if (!bp->common.shmem_base ||
7040 (bp->common.shmem_base < 0xA0000) ||
7041 (bp->common.shmem_base >= 0xC0000)) {
7042 BNX2X_DEV_INFO("MCP not active\n");
7043 bp->flags |= NO_MCP_FLAG;
7047 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7048 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7049 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7050 BNX2X_ERR("BAD MCP validity signature\n");
7052 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7053 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7055 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7056 bp->common.hw_config, bp->common.board);
7058 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7059 SHARED_HW_CFG_LED_MODE_MASK) >>
7060 SHARED_HW_CFG_LED_MODE_SHIFT);
7062 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7063 bp->common.bc_ver = val;
7064 BNX2X_DEV_INFO("bc_ver %X\n", val);
7065 if (val < BNX2X_BC_VER) {
7066 /* for now only warn
7067 * later we might need to enforce this */
7068 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7069 " please upgrade BC\n", BNX2X_BC_VER, val);
7072 if (BP_E1HVN(bp) == 0) {
7073 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7074 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7076 /* no WOL capability for E1HVN != 0 */
7077 bp->flags |= NO_WOL_FLAG;
7079 BNX2X_DEV_INFO("%sWoL capable\n",
7080 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7082 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7083 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7084 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7085 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7087 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7088 val, val2, val3, val4);
7091 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7094 int port = BP_PORT(bp);
7097 switch (switch_cfg) {
7099 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7102 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7103 switch (ext_phy_type) {
7104 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7105 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7108 bp->port.supported |= (SUPPORTED_10baseT_Half |
7109 SUPPORTED_10baseT_Full |
7110 SUPPORTED_100baseT_Half |
7111 SUPPORTED_100baseT_Full |
7112 SUPPORTED_1000baseT_Full |
7113 SUPPORTED_2500baseX_Full |
7118 SUPPORTED_Asym_Pause);
7121 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7122 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7125 bp->port.supported |= (SUPPORTED_10baseT_Half |
7126 SUPPORTED_10baseT_Full |
7127 SUPPORTED_100baseT_Half |
7128 SUPPORTED_100baseT_Full |
7129 SUPPORTED_1000baseT_Full |
7134 SUPPORTED_Asym_Pause);
7138 BNX2X_ERR("NVRAM config error. "
7139 "BAD SerDes ext_phy_config 0x%x\n",
7140 bp->link_params.ext_phy_config);
7144 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7146 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7149 case SWITCH_CFG_10G:
7150 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7153 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7154 switch (ext_phy_type) {
7155 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7156 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7159 bp->port.supported |= (SUPPORTED_10baseT_Half |
7160 SUPPORTED_10baseT_Full |
7161 SUPPORTED_100baseT_Half |
7162 SUPPORTED_100baseT_Full |
7163 SUPPORTED_1000baseT_Full |
7164 SUPPORTED_2500baseX_Full |
7165 SUPPORTED_10000baseT_Full |
7170 SUPPORTED_Asym_Pause);
7173 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7174 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7177 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7180 SUPPORTED_Asym_Pause);
7183 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7184 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7187 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7188 SUPPORTED_1000baseT_Full |
7191 SUPPORTED_Asym_Pause);
7194 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7195 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7198 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7199 SUPPORTED_1000baseT_Full |
7203 SUPPORTED_Asym_Pause);
7206 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7207 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7210 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7211 SUPPORTED_2500baseX_Full |
7212 SUPPORTED_1000baseT_Full |
7216 SUPPORTED_Asym_Pause);
7219 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7220 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7223 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7227 SUPPORTED_Asym_Pause);
7230 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7231 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7232 bp->link_params.ext_phy_config);
7236 BNX2X_ERR("NVRAM config error. "
7237 "BAD XGXS ext_phy_config 0x%x\n",
7238 bp->link_params.ext_phy_config);
7242 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7244 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7249 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7250 bp->port.link_config);
7253 bp->link_params.phy_addr = bp->port.phy_addr;
7255 /* mask what we support according to speed_cap_mask */
7256 if (!(bp->link_params.speed_cap_mask &
7257 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7258 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7260 if (!(bp->link_params.speed_cap_mask &
7261 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7262 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7264 if (!(bp->link_params.speed_cap_mask &
7265 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7266 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7268 if (!(bp->link_params.speed_cap_mask &
7269 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7270 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7272 if (!(bp->link_params.speed_cap_mask &
7273 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7274 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7275 SUPPORTED_1000baseT_Full);
7277 if (!(bp->link_params.speed_cap_mask &
7278 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7279 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7281 if (!(bp->link_params.speed_cap_mask &
7282 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7283 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7285 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7288 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7290 bp->link_params.req_duplex = DUPLEX_FULL;
7292 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7293 case PORT_FEATURE_LINK_SPEED_AUTO:
7294 if (bp->port.supported & SUPPORTED_Autoneg) {
7295 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7296 bp->port.advertising = bp->port.supported;
7299 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7301 if ((ext_phy_type ==
7302 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7304 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7305 /* force 10G, no AN */
7306 bp->link_params.req_line_speed = SPEED_10000;
7307 bp->port.advertising =
7308 (ADVERTISED_10000baseT_Full |
7312 BNX2X_ERR("NVRAM config error. "
7313 "Invalid link_config 0x%x"
7314 " Autoneg not supported\n",
7315 bp->port.link_config);
7320 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7321 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7322 bp->link_params.req_line_speed = SPEED_10;
7323 bp->port.advertising = (ADVERTISED_10baseT_Full |
7326 BNX2X_ERR("NVRAM config error. "
7327 "Invalid link_config 0x%x"
7328 " speed_cap_mask 0x%x\n",
7329 bp->port.link_config,
7330 bp->link_params.speed_cap_mask);
7335 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7336 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7337 bp->link_params.req_line_speed = SPEED_10;
7338 bp->link_params.req_duplex = DUPLEX_HALF;
7339 bp->port.advertising = (ADVERTISED_10baseT_Half |
7342 BNX2X_ERR("NVRAM config error. "
7343 "Invalid link_config 0x%x"
7344 " speed_cap_mask 0x%x\n",
7345 bp->port.link_config,
7346 bp->link_params.speed_cap_mask);
7351 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7352 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7353 bp->link_params.req_line_speed = SPEED_100;
7354 bp->port.advertising = (ADVERTISED_100baseT_Full |
7357 BNX2X_ERR("NVRAM config error. "
7358 "Invalid link_config 0x%x"
7359 " speed_cap_mask 0x%x\n",
7360 bp->port.link_config,
7361 bp->link_params.speed_cap_mask);
7366 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7367 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7368 bp->link_params.req_line_speed = SPEED_100;
7369 bp->link_params.req_duplex = DUPLEX_HALF;
7370 bp->port.advertising = (ADVERTISED_100baseT_Half |
7373 BNX2X_ERR("NVRAM config error. "
7374 "Invalid link_config 0x%x"
7375 " speed_cap_mask 0x%x\n",
7376 bp->port.link_config,
7377 bp->link_params.speed_cap_mask);
7382 case PORT_FEATURE_LINK_SPEED_1G:
7383 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7384 bp->link_params.req_line_speed = SPEED_1000;
7385 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7388 BNX2X_ERR("NVRAM config error. "
7389 "Invalid link_config 0x%x"
7390 " speed_cap_mask 0x%x\n",
7391 bp->port.link_config,
7392 bp->link_params.speed_cap_mask);
7397 case PORT_FEATURE_LINK_SPEED_2_5G:
7398 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7399 bp->link_params.req_line_speed = SPEED_2500;
7400 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7403 BNX2X_ERR("NVRAM config error. "
7404 "Invalid link_config 0x%x"
7405 " speed_cap_mask 0x%x\n",
7406 bp->port.link_config,
7407 bp->link_params.speed_cap_mask);
7412 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7413 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7414 case PORT_FEATURE_LINK_SPEED_10G_KR:
7415 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7416 bp->link_params.req_line_speed = SPEED_10000;
7417 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7420 BNX2X_ERR("NVRAM config error. "
7421 "Invalid link_config 0x%x"
7422 " speed_cap_mask 0x%x\n",
7423 bp->port.link_config,
7424 bp->link_params.speed_cap_mask);
7430 BNX2X_ERR("NVRAM config error. "
7431 "BAD link speed link_config 0x%x\n",
7432 bp->port.link_config);
7433 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7434 bp->port.advertising = bp->port.supported;
7438 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7439 PORT_FEATURE_FLOW_CONTROL_MASK);
7440 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7441 !(bp->port.supported & SUPPORTED_Autoneg))
7442 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7444 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7445 " advertising 0x%x\n",
7446 bp->link_params.req_line_speed,
7447 bp->link_params.req_duplex,
7448 bp->link_params.req_flow_ctrl, bp->port.advertising);
7451 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7453 int port = BP_PORT(bp);
7456 bp->link_params.bp = bp;
7457 bp->link_params.port = port;
7459 bp->link_params.serdes_config =
7460 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7461 bp->link_params.lane_config =
7462 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7463 bp->link_params.ext_phy_config =
7465 dev_info.port_hw_config[port].external_phy_config);
7466 bp->link_params.speed_cap_mask =
7468 dev_info.port_hw_config[port].speed_capability_mask);
7470 bp->port.link_config =
7471 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7473 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7474 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7475 " link_config 0x%08x\n",
7476 bp->link_params.serdes_config,
7477 bp->link_params.lane_config,
7478 bp->link_params.ext_phy_config,
7479 bp->link_params.speed_cap_mask, bp->port.link_config);
7481 bp->link_params.switch_cfg = (bp->port.link_config &
7482 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7483 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7485 bnx2x_link_settings_requested(bp);
7487 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7488 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7489 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7490 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7491 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7492 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7493 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7494 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7495 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7496 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7499 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7501 int func = BP_FUNC(bp);
7505 bnx2x_get_common_hwinfo(bp);
7509 if (CHIP_IS_E1H(bp)) {
7511 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7513 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7514 FUNC_MF_CFG_E1HOV_TAG_MASK);
7515 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7519 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7521 func, bp->e1hov, bp->e1hov);
7523 BNX2X_DEV_INFO("Single function mode\n");
7525 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7526 " aborting\n", func);
7532 if (!BP_NOMCP(bp)) {
7533 bnx2x_get_port_hwinfo(bp);
7535 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7536 DRV_MSG_SEQ_NUMBER_MASK);
7537 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7541 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7542 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7543 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7544 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7545 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7546 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7547 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7548 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7549 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7550 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7551 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7553 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7561 /* only supposed to happen on emulation/FPGA */
7562 BNX2X_ERR("warning random MAC workaround active\n");
7563 random_ether_addr(bp->dev->dev_addr);
7564 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7570 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7572 int func = BP_FUNC(bp);
7575 /* Disable interrupt handling until HW is initialized */
7576 atomic_set(&bp->intr_sem, 1);
7578 mutex_init(&bp->port.phy_mutex);
7580 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7581 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7583 rc = bnx2x_get_hwinfo(bp);
7585 /* need to reset chip if undi was active */
7587 bnx2x_undi_unload(bp);
7589 if (CHIP_REV_IS_FPGA(bp))
7590 printk(KERN_ERR PFX "FPGA detected\n");
7592 if (BP_NOMCP(bp) && (func == 0))
7594 "MCP disabled, must load devices in order!\n");
7598 bp->flags &= ~TPA_ENABLE_FLAG;
7599 bp->dev->features &= ~NETIF_F_LRO;
7601 bp->flags |= TPA_ENABLE_FLAG;
7602 bp->dev->features |= NETIF_F_LRO;
7606 bp->tx_ring_size = MAX_TX_AVAIL;
7607 bp->rx_ring_size = MAX_RX_AVAIL;
7615 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7616 bp->current_interval = (poll ? poll : bp->timer_interval);
7618 init_timer(&bp->timer);
7619 bp->timer.expires = jiffies + bp->current_interval;
7620 bp->timer.data = (unsigned long) bp;
7621 bp->timer.function = bnx2x_timer;
7627 * ethtool service functions
7630 /* All ethtool functions called with rtnl_lock */
7632 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7634 struct bnx2x *bp = netdev_priv(dev);
7636 cmd->supported = bp->port.supported;
7637 cmd->advertising = bp->port.advertising;
7639 if (netif_carrier_ok(dev)) {
7640 cmd->speed = bp->link_vars.line_speed;
7641 cmd->duplex = bp->link_vars.duplex;
7643 cmd->speed = bp->link_params.req_line_speed;
7644 cmd->duplex = bp->link_params.req_duplex;
7649 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7650 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7651 if (vn_max_rate < cmd->speed)
7652 cmd->speed = vn_max_rate;
7655 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7657 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7659 switch (ext_phy_type) {
7660 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7661 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7662 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7663 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7664 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7665 cmd->port = PORT_FIBRE;
7668 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7669 cmd->port = PORT_TP;
7672 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7673 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7674 bp->link_params.ext_phy_config);
7678 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7679 bp->link_params.ext_phy_config);
7683 cmd->port = PORT_TP;
7685 cmd->phy_address = bp->port.phy_addr;
7686 cmd->transceiver = XCVR_INTERNAL;
7688 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7689 cmd->autoneg = AUTONEG_ENABLE;
7691 cmd->autoneg = AUTONEG_DISABLE;
7696 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7697 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7698 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7699 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7700 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7701 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7702 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7707 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7709 struct bnx2x *bp = netdev_priv(dev);
7715 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7716 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7717 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7718 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7719 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7720 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7721 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7723 if (cmd->autoneg == AUTONEG_ENABLE) {
7724 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7725 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7729 /* advertise the requested speed and duplex if supported */
7730 cmd->advertising &= bp->port.supported;
7732 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7733 bp->link_params.req_duplex = DUPLEX_FULL;
7734 bp->port.advertising |= (ADVERTISED_Autoneg |
7737 } else { /* forced speed */
7738 /* advertise the requested speed and duplex if supported */
7739 switch (cmd->speed) {
7741 if (cmd->duplex == DUPLEX_FULL) {
7742 if (!(bp->port.supported &
7743 SUPPORTED_10baseT_Full)) {
7745 "10M full not supported\n");
7749 advertising = (ADVERTISED_10baseT_Full |
7752 if (!(bp->port.supported &
7753 SUPPORTED_10baseT_Half)) {
7755 "10M half not supported\n");
7759 advertising = (ADVERTISED_10baseT_Half |
7765 if (cmd->duplex == DUPLEX_FULL) {
7766 if (!(bp->port.supported &
7767 SUPPORTED_100baseT_Full)) {
7769 "100M full not supported\n");
7773 advertising = (ADVERTISED_100baseT_Full |
7776 if (!(bp->port.supported &
7777 SUPPORTED_100baseT_Half)) {
7779 "100M half not supported\n");
7783 advertising = (ADVERTISED_100baseT_Half |
7789 if (cmd->duplex != DUPLEX_FULL) {
7790 DP(NETIF_MSG_LINK, "1G half not supported\n");
7794 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7795 DP(NETIF_MSG_LINK, "1G full not supported\n");
7799 advertising = (ADVERTISED_1000baseT_Full |
7804 if (cmd->duplex != DUPLEX_FULL) {
7806 "2.5G half not supported\n");
7810 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7812 "2.5G full not supported\n");
7816 advertising = (ADVERTISED_2500baseX_Full |
7821 if (cmd->duplex != DUPLEX_FULL) {
7822 DP(NETIF_MSG_LINK, "10G half not supported\n");
7826 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7827 DP(NETIF_MSG_LINK, "10G full not supported\n");
7831 advertising = (ADVERTISED_10000baseT_Full |
7836 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7840 bp->link_params.req_line_speed = cmd->speed;
7841 bp->link_params.req_duplex = cmd->duplex;
7842 bp->port.advertising = advertising;
7845 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7846 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7847 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7848 bp->port.advertising);
7850 if (netif_running(dev)) {
7851 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7858 #define PHY_FW_VER_LEN 10
7860 static void bnx2x_get_drvinfo(struct net_device *dev,
7861 struct ethtool_drvinfo *info)
7863 struct bnx2x *bp = netdev_priv(dev);
7864 u8 phy_fw_ver[PHY_FW_VER_LEN];
7866 strcpy(info->driver, DRV_MODULE_NAME);
7867 strcpy(info->version, DRV_MODULE_VERSION);
7869 phy_fw_ver[0] = '\0';
7871 bnx2x_acquire_phy_lock(bp);
7872 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7873 (bp->state != BNX2X_STATE_CLOSED),
7874 phy_fw_ver, PHY_FW_VER_LEN);
7875 bnx2x_release_phy_lock(bp);
7878 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7879 (bp->common.bc_ver & 0xff0000) >> 16,
7880 (bp->common.bc_ver & 0xff00) >> 8,
7881 (bp->common.bc_ver & 0xff),
7882 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7883 strcpy(info->bus_info, pci_name(bp->pdev));
7884 info->n_stats = BNX2X_NUM_STATS;
7885 info->testinfo_len = BNX2X_NUM_TESTS;
7886 info->eedump_len = bp->common.flash_size;
7887 info->regdump_len = 0;
7890 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7892 struct bnx2x *bp = netdev_priv(dev);
7894 if (bp->flags & NO_WOL_FLAG) {
7898 wol->supported = WAKE_MAGIC;
7900 wol->wolopts = WAKE_MAGIC;
7904 memset(&wol->sopass, 0, sizeof(wol->sopass));
7907 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7909 struct bnx2x *bp = netdev_priv(dev);
7911 if (wol->wolopts & ~WAKE_MAGIC)
7914 if (wol->wolopts & WAKE_MAGIC) {
7915 if (bp->flags & NO_WOL_FLAG)
7925 static u32 bnx2x_get_msglevel(struct net_device *dev)
7927 struct bnx2x *bp = netdev_priv(dev);
7929 return bp->msglevel;
7932 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7934 struct bnx2x *bp = netdev_priv(dev);
7936 if (capable(CAP_NET_ADMIN))
7937 bp->msglevel = level;
7940 static int bnx2x_nway_reset(struct net_device *dev)
7942 struct bnx2x *bp = netdev_priv(dev);
7947 if (netif_running(dev)) {
7948 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7955 static int bnx2x_get_eeprom_len(struct net_device *dev)
7957 struct bnx2x *bp = netdev_priv(dev);
7959 return bp->common.flash_size;
7962 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7964 int port = BP_PORT(bp);
7968 /* adjust timeout for emulation/FPGA */
7969 count = NVRAM_TIMEOUT_COUNT;
7970 if (CHIP_REV_IS_SLOW(bp))
7973 /* request access to nvram interface */
7974 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7975 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7977 for (i = 0; i < count*10; i++) {
7978 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7979 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7985 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7986 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7993 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7995 int port = BP_PORT(bp);
7999 /* adjust timeout for emulation/FPGA */
8000 count = NVRAM_TIMEOUT_COUNT;
8001 if (CHIP_REV_IS_SLOW(bp))
8004 /* relinquish nvram interface */
8005 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8006 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8008 for (i = 0; i < count*10; i++) {
8009 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8010 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8016 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8017 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8024 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8028 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8030 /* enable both bits, even on read */
8031 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8032 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8033 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8036 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8040 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8042 /* disable both bits, even after read */
8043 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8044 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8045 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8048 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8054 /* build the command word */
8055 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8057 /* need to clear DONE bit separately */
8058 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8060 /* address of the NVRAM to read from */
8061 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8062 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8064 /* issue a read command */
8065 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8067 /* adjust timeout for emulation/FPGA */
8068 count = NVRAM_TIMEOUT_COUNT;
8069 if (CHIP_REV_IS_SLOW(bp))
8072 /* wait for completion */
8075 for (i = 0; i < count; i++) {
8077 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8079 if (val & MCPR_NVM_COMMAND_DONE) {
8080 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8081 /* we read nvram data in cpu order
8082 * but ethtool sees it as an array of bytes
8083 * converting to big-endian will do the work */
8084 val = cpu_to_be32(val);
8094 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8101 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8103 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8108 if (offset + buf_size > bp->common.flash_size) {
8109 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8110 " buf_size (0x%x) > flash_size (0x%x)\n",
8111 offset, buf_size, bp->common.flash_size);
8115 /* request access to nvram interface */
8116 rc = bnx2x_acquire_nvram_lock(bp);
8120 /* enable access to nvram interface */
8121 bnx2x_enable_nvram_access(bp);
8123 /* read the first word(s) */
8124 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8125 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8126 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8127 memcpy(ret_buf, &val, 4);
8129 /* advance to the next dword */
8130 offset += sizeof(u32);
8131 ret_buf += sizeof(u32);
8132 buf_size -= sizeof(u32);
8137 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8138 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8139 memcpy(ret_buf, &val, 4);
8142 /* disable access to nvram interface */
8143 bnx2x_disable_nvram_access(bp);
8144 bnx2x_release_nvram_lock(bp);
8149 static int bnx2x_get_eeprom(struct net_device *dev,
8150 struct ethtool_eeprom *eeprom, u8 *eebuf)
8152 struct bnx2x *bp = netdev_priv(dev);
8155 if (!netif_running(dev))
8158 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8159 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8160 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8161 eeprom->len, eeprom->len);
8163 /* parameters already validated in ethtool_get_eeprom */
8165 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8170 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8175 /* build the command word */
8176 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8178 /* need to clear DONE bit separately */
8179 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8181 /* write the data */
8182 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8184 /* address of the NVRAM to write to */
8185 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8186 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8188 /* issue the write command */
8189 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8191 /* adjust timeout for emulation/FPGA */
8192 count = NVRAM_TIMEOUT_COUNT;
8193 if (CHIP_REV_IS_SLOW(bp))
8196 /* wait for completion */
8198 for (i = 0; i < count; i++) {
8200 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8201 if (val & MCPR_NVM_COMMAND_DONE) {
8210 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8212 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8220 if (offset + buf_size > bp->common.flash_size) {
8221 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8222 " buf_size (0x%x) > flash_size (0x%x)\n",
8223 offset, buf_size, bp->common.flash_size);
8227 /* request access to nvram interface */
8228 rc = bnx2x_acquire_nvram_lock(bp);
8232 /* enable access to nvram interface */
8233 bnx2x_enable_nvram_access(bp);
8235 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8236 align_offset = (offset & ~0x03);
8237 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8240 val &= ~(0xff << BYTE_OFFSET(offset));
8241 val |= (*data_buf << BYTE_OFFSET(offset));
8243 /* nvram data is returned as an array of bytes
8244 * convert it back to cpu order */
8245 val = be32_to_cpu(val);
8247 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8251 /* disable access to nvram interface */
8252 bnx2x_disable_nvram_access(bp);
8253 bnx2x_release_nvram_lock(bp);
8258 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8266 if (buf_size == 1) /* ethtool */
8267 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8269 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8271 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8276 if (offset + buf_size > bp->common.flash_size) {
8277 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8278 " buf_size (0x%x) > flash_size (0x%x)\n",
8279 offset, buf_size, bp->common.flash_size);
8283 /* request access to nvram interface */
8284 rc = bnx2x_acquire_nvram_lock(bp);
8288 /* enable access to nvram interface */
8289 bnx2x_enable_nvram_access(bp);
8292 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8293 while ((written_so_far < buf_size) && (rc == 0)) {
8294 if (written_so_far == (buf_size - sizeof(u32)))
8295 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8296 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8297 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8298 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8299 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8301 memcpy(&val, data_buf, 4);
8303 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8305 /* advance to the next dword */
8306 offset += sizeof(u32);
8307 data_buf += sizeof(u32);
8308 written_so_far += sizeof(u32);
8312 /* disable access to nvram interface */
8313 bnx2x_disable_nvram_access(bp);
8314 bnx2x_release_nvram_lock(bp);
8319 static int bnx2x_set_eeprom(struct net_device *dev,
8320 struct ethtool_eeprom *eeprom, u8 *eebuf)
8322 struct bnx2x *bp = netdev_priv(dev);
8325 if (!netif_running(dev))
8328 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8329 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8330 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8331 eeprom->len, eeprom->len);
8333 /* parameters already validated in ethtool_set_eeprom */
8335 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8336 if (eeprom->magic == 0x00504859)
8339 bnx2x_acquire_phy_lock(bp);
8340 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8341 bp->link_params.ext_phy_config,
8342 (bp->state != BNX2X_STATE_CLOSED),
8343 eebuf, eeprom->len);
8344 if ((bp->state == BNX2X_STATE_OPEN) ||
8345 (bp->state == BNX2X_STATE_DISABLED)) {
8346 rc |= bnx2x_link_reset(&bp->link_params,
8348 rc |= bnx2x_phy_init(&bp->link_params,
8351 bnx2x_release_phy_lock(bp);
8353 } else /* Only the PMF can access the PHY */
8356 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8361 static int bnx2x_get_coalesce(struct net_device *dev,
8362 struct ethtool_coalesce *coal)
8364 struct bnx2x *bp = netdev_priv(dev);
8366 memset(coal, 0, sizeof(struct ethtool_coalesce));
8368 coal->rx_coalesce_usecs = bp->rx_ticks;
8369 coal->tx_coalesce_usecs = bp->tx_ticks;
8374 static int bnx2x_set_coalesce(struct net_device *dev,
8375 struct ethtool_coalesce *coal)
8377 struct bnx2x *bp = netdev_priv(dev);
8379 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8380 if (bp->rx_ticks > 3000)
8381 bp->rx_ticks = 3000;
8383 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8384 if (bp->tx_ticks > 0x3000)
8385 bp->tx_ticks = 0x3000;
8387 if (netif_running(dev))
8388 bnx2x_update_coalesce(bp);
8393 static void bnx2x_get_ringparam(struct net_device *dev,
8394 struct ethtool_ringparam *ering)
8396 struct bnx2x *bp = netdev_priv(dev);
8398 ering->rx_max_pending = MAX_RX_AVAIL;
8399 ering->rx_mini_max_pending = 0;
8400 ering->rx_jumbo_max_pending = 0;
8402 ering->rx_pending = bp->rx_ring_size;
8403 ering->rx_mini_pending = 0;
8404 ering->rx_jumbo_pending = 0;
8406 ering->tx_max_pending = MAX_TX_AVAIL;
8407 ering->tx_pending = bp->tx_ring_size;
8410 static int bnx2x_set_ringparam(struct net_device *dev,
8411 struct ethtool_ringparam *ering)
8413 struct bnx2x *bp = netdev_priv(dev);
8416 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8417 (ering->tx_pending > MAX_TX_AVAIL) ||
8418 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8421 bp->rx_ring_size = ering->rx_pending;
8422 bp->tx_ring_size = ering->tx_pending;
8424 if (netif_running(dev)) {
8425 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8426 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8432 static void bnx2x_get_pauseparam(struct net_device *dev,
8433 struct ethtool_pauseparam *epause)
8435 struct bnx2x *bp = netdev_priv(dev);
8437 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8438 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8440 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8441 BNX2X_FLOW_CTRL_RX);
8442 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8443 BNX2X_FLOW_CTRL_TX);
8445 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8446 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8447 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8450 static int bnx2x_set_pauseparam(struct net_device *dev,
8451 struct ethtool_pauseparam *epause)
8453 struct bnx2x *bp = netdev_priv(dev);
8458 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8459 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8460 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8462 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8464 if (epause->rx_pause)
8465 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8467 if (epause->tx_pause)
8468 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8470 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8471 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8473 if (epause->autoneg) {
8474 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8475 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8479 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8480 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8484 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8486 if (netif_running(dev)) {
8487 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8494 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8496 struct bnx2x *bp = netdev_priv(dev);
8500 /* TPA requires Rx CSUM offloading */
8501 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8502 if (!(dev->features & NETIF_F_LRO)) {
8503 dev->features |= NETIF_F_LRO;
8504 bp->flags |= TPA_ENABLE_FLAG;
8508 } else if (dev->features & NETIF_F_LRO) {
8509 dev->features &= ~NETIF_F_LRO;
8510 bp->flags &= ~TPA_ENABLE_FLAG;
8514 if (changed && netif_running(dev)) {
8515 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8516 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8522 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8524 struct bnx2x *bp = netdev_priv(dev);
8529 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8531 struct bnx2x *bp = netdev_priv(dev);
8536 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8537 TPA'ed packets will be discarded due to wrong TCP CSUM */
8539 u32 flags = ethtool_op_get_flags(dev);
8541 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8547 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8550 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8551 dev->features |= NETIF_F_TSO6;
8553 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8554 dev->features &= ~NETIF_F_TSO6;
8560 static const struct {
8561 char string[ETH_GSTRING_LEN];
8562 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8563 { "register_test (offline)" },
8564 { "memory_test (offline)" },
8565 { "loopback_test (offline)" },
8566 { "nvram_test (online)" },
8567 { "interrupt_test (online)" },
8568 { "link_test (online)" },
8569 { "idle check (online)" },
8570 { "MC errors (online)" }
8573 static int bnx2x_self_test_count(struct net_device *dev)
8575 return BNX2X_NUM_TESTS;
8578 static int bnx2x_test_registers(struct bnx2x *bp)
8580 int idx, i, rc = -ENODEV;
8582 int port = BP_PORT(bp);
8583 static const struct {
8588 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8589 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8590 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8591 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8592 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8593 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8594 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8595 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8596 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8597 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8598 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8599 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8600 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8601 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8602 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8603 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8604 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8605 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8606 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8607 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8608 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8609 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8610 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8611 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8612 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8613 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8614 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8615 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8616 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8617 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8618 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8619 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8620 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8621 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8622 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8623 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8624 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8625 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8627 { 0xffffffff, 0, 0x00000000 }
8630 if (!netif_running(bp->dev))
8633 /* Repeat the test twice:
8634 First by writing 0x00000000, second by writing 0xffffffff */
8635 for (idx = 0; idx < 2; idx++) {
8642 wr_val = 0xffffffff;
8646 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8647 u32 offset, mask, save_val, val;
8649 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8650 mask = reg_tbl[i].mask;
8652 save_val = REG_RD(bp, offset);
8654 REG_WR(bp, offset, wr_val);
8655 val = REG_RD(bp, offset);
8657 /* Restore the original register's value */
8658 REG_WR(bp, offset, save_val);
8660 /* verify that value is as expected value */
8661 if ((val & mask) != (wr_val & mask))
8672 static int bnx2x_test_memory(struct bnx2x *bp)
8674 int i, j, rc = -ENODEV;
8676 static const struct {
8680 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8681 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8682 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8683 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8684 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8685 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8686 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8690 static const struct {
8696 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8697 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8698 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8699 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8700 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8701 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8703 { NULL, 0xffffffff, 0, 0 }
8706 if (!netif_running(bp->dev))
8709 /* Go through all the memories */
8710 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8711 for (j = 0; j < mem_tbl[i].size; j++)
8712 REG_RD(bp, mem_tbl[i].offset + j*4);
8714 /* Check the parity status */
8715 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8716 val = REG_RD(bp, prty_tbl[i].offset);
8717 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8718 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8720 "%s is 0x%x\n", prty_tbl[i].name, val);
8731 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8736 while (bnx2x_link_test(bp) && cnt--)
8740 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8742 unsigned int pkt_size, num_pkts, i;
8743 struct sk_buff *skb;
8744 unsigned char *packet;
8745 struct bnx2x_fastpath *fp = &bp->fp[0];
8746 u16 tx_start_idx, tx_idx;
8747 u16 rx_start_idx, rx_idx;
8749 struct sw_tx_bd *tx_buf;
8750 struct eth_tx_bd *tx_bd;
8752 union eth_rx_cqe *cqe;
8754 struct sw_rx_bd *rx_buf;
8758 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8759 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8760 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8762 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8764 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8765 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8766 /* wait until link state is restored */
8768 while (cnt-- && bnx2x_test_link(&bp->link_params,
8775 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8778 goto test_loopback_exit;
8780 packet = skb_put(skb, pkt_size);
8781 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8782 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8783 for (i = ETH_HLEN; i < pkt_size; i++)
8784 packet[i] = (unsigned char) (i & 0xff);
8787 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8788 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8790 pkt_prod = fp->tx_pkt_prod++;
8791 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8792 tx_buf->first_bd = fp->tx_bd_prod;
8795 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8796 mapping = pci_map_single(bp->pdev, skb->data,
8797 skb_headlen(skb), PCI_DMA_TODEVICE);
8798 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8799 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8800 tx_bd->nbd = cpu_to_le16(1);
8801 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8802 tx_bd->vlan = cpu_to_le16(pkt_prod);
8803 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8804 ETH_TX_BD_FLAGS_END_BD);
8805 tx_bd->general_data = ((UNICAST_ADDRESS <<
8806 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8810 fp->hw_tx_prods->bds_prod =
8811 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8812 mb(); /* FW restriction: must not reorder writing nbd and packets */
8813 fp->hw_tx_prods->packets_prod =
8814 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8815 DOORBELL(bp, FP_IDX(fp), 0);
8821 bp->dev->trans_start = jiffies;
8825 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8826 if (tx_idx != tx_start_idx + num_pkts)
8827 goto test_loopback_exit;
8829 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8830 if (rx_idx != rx_start_idx + num_pkts)
8831 goto test_loopback_exit;
8833 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8834 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8835 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8836 goto test_loopback_rx_exit;
8838 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8839 if (len != pkt_size)
8840 goto test_loopback_rx_exit;
8842 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8844 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8845 for (i = ETH_HLEN; i < pkt_size; i++)
8846 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8847 goto test_loopback_rx_exit;
8851 test_loopback_rx_exit:
8853 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8854 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8855 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8856 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8858 /* Update producers */
8859 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8863 bp->link_params.loopback_mode = LOOPBACK_NONE;
8868 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8872 if (!netif_running(bp->dev))
8873 return BNX2X_LOOPBACK_FAILED;
8875 bnx2x_netif_stop(bp, 1);
8876 bnx2x_acquire_phy_lock(bp);
8878 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8879 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8880 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8883 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8884 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8885 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8888 bnx2x_release_phy_lock(bp);
8889 bnx2x_netif_start(bp);
8894 #define CRC32_RESIDUAL 0xdebb20e3
8896 static int bnx2x_test_nvram(struct bnx2x *bp)
8898 static const struct {
8902 { 0, 0x14 }, /* bootstrap */
8903 { 0x14, 0xec }, /* dir */
8904 { 0x100, 0x350 }, /* manuf_info */
8905 { 0x450, 0xf0 }, /* feature_info */
8906 { 0x640, 0x64 }, /* upgrade_key_info */
8908 { 0x708, 0x70 }, /* manuf_key_info */
8913 u8 *data = (u8 *)buf;
8917 rc = bnx2x_nvram_read(bp, 0, data, 4);
8919 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8920 goto test_nvram_exit;
8923 magic = be32_to_cpu(buf[0]);
8924 if (magic != 0x669955aa) {
8925 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8927 goto test_nvram_exit;
8930 for (i = 0; nvram_tbl[i].size; i++) {
8932 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8936 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8937 goto test_nvram_exit;
8940 csum = ether_crc_le(nvram_tbl[i].size, data);
8941 if (csum != CRC32_RESIDUAL) {
8943 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8945 goto test_nvram_exit;
8953 static int bnx2x_test_intr(struct bnx2x *bp)
8955 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8958 if (!netif_running(bp->dev))
8961 config->hdr.length_6b = 0;
8963 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8965 config->hdr.offset = BP_FUNC(bp);
8966 config->hdr.client_id = BP_CL_ID(bp);
8967 config->hdr.reserved1 = 0;
8969 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8970 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8971 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8973 bp->set_mac_pending++;
8974 for (i = 0; i < 10; i++) {
8975 if (!bp->set_mac_pending)
8977 msleep_interruptible(10);
8986 static void bnx2x_self_test(struct net_device *dev,
8987 struct ethtool_test *etest, u64 *buf)
8989 struct bnx2x *bp = netdev_priv(dev);
8991 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8993 if (!netif_running(dev))
8996 /* offline tests are not supported in MF mode */
8998 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9000 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9003 link_up = bp->link_vars.link_up;
9004 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9005 bnx2x_nic_load(bp, LOAD_DIAG);
9006 /* wait until link state is restored */
9007 bnx2x_wait_for_link(bp, link_up);
9009 if (bnx2x_test_registers(bp) != 0) {
9011 etest->flags |= ETH_TEST_FL_FAILED;
9013 if (bnx2x_test_memory(bp) != 0) {
9015 etest->flags |= ETH_TEST_FL_FAILED;
9017 buf[2] = bnx2x_test_loopback(bp, link_up);
9019 etest->flags |= ETH_TEST_FL_FAILED;
9021 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9022 bnx2x_nic_load(bp, LOAD_NORMAL);
9023 /* wait until link state is restored */
9024 bnx2x_wait_for_link(bp, link_up);
9026 if (bnx2x_test_nvram(bp) != 0) {
9028 etest->flags |= ETH_TEST_FL_FAILED;
9030 if (bnx2x_test_intr(bp) != 0) {
9032 etest->flags |= ETH_TEST_FL_FAILED;
9035 if (bnx2x_link_test(bp) != 0) {
9037 etest->flags |= ETH_TEST_FL_FAILED;
9039 buf[7] = bnx2x_mc_assert(bp);
9041 etest->flags |= ETH_TEST_FL_FAILED;
9043 #ifdef BNX2X_EXTRA_DEBUG
9044 bnx2x_panic_dump(bp);
9048 static const struct {
9052 #define STATS_FLAGS_PORT 1
9053 #define STATS_FLAGS_FUNC 2
9054 u8 string[ETH_GSTRING_LEN];
9055 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9056 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9057 8, STATS_FLAGS_FUNC, "rx_bytes" },
9058 { STATS_OFFSET32(error_bytes_received_hi),
9059 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9060 { STATS_OFFSET32(total_bytes_transmitted_hi),
9061 8, STATS_FLAGS_FUNC, "tx_bytes" },
9062 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9063 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9064 { STATS_OFFSET32(total_unicast_packets_received_hi),
9065 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9066 { STATS_OFFSET32(total_multicast_packets_received_hi),
9067 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9068 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9069 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9070 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9071 8, STATS_FLAGS_FUNC, "tx_packets" },
9072 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9073 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9074 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9075 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9076 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9077 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9078 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9079 8, STATS_FLAGS_PORT, "rx_align_errors" },
9080 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9081 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9082 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9083 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9084 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9085 8, STATS_FLAGS_PORT, "tx_deferred" },
9086 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9087 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9088 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9089 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9090 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9091 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9092 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9093 8, STATS_FLAGS_PORT, "rx_fragments" },
9094 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9095 8, STATS_FLAGS_PORT, "rx_jabbers" },
9096 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9097 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9098 { STATS_OFFSET32(jabber_packets_received),
9099 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9100 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9101 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9102 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9103 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9104 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9105 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9106 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9107 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9108 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9109 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9110 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9111 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9112 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9113 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9114 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9115 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9116 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9117 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9118 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9119 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9120 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9121 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9122 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9123 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9124 { STATS_OFFSET32(mac_filter_discard),
9125 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9126 { STATS_OFFSET32(no_buff_discard),
9127 4, STATS_FLAGS_FUNC, "rx_discards" },
9128 { STATS_OFFSET32(xxoverflow_discard),
9129 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9130 { STATS_OFFSET32(brb_drop_hi),
9131 8, STATS_FLAGS_PORT, "brb_discard" },
9132 { STATS_OFFSET32(brb_truncate_hi),
9133 8, STATS_FLAGS_PORT, "brb_truncate" },
9134 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9135 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9136 { STATS_OFFSET32(rx_skb_alloc_failed),
9137 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9138 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9139 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9142 #define IS_NOT_E1HMF_STAT(bp, i) \
9143 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9145 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9147 struct bnx2x *bp = netdev_priv(dev);
9150 switch (stringset) {
9152 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9153 if (IS_NOT_E1HMF_STAT(bp, i))
9155 strcpy(buf + j*ETH_GSTRING_LEN,
9156 bnx2x_stats_arr[i].string);
9162 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9167 static int bnx2x_get_stats_count(struct net_device *dev)
9169 struct bnx2x *bp = netdev_priv(dev);
9170 int i, num_stats = 0;
9172 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9173 if (IS_NOT_E1HMF_STAT(bp, i))
9180 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9181 struct ethtool_stats *stats, u64 *buf)
9183 struct bnx2x *bp = netdev_priv(dev);
9184 u32 *hw_stats = (u32 *)&bp->eth_stats;
9187 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9188 if (IS_NOT_E1HMF_STAT(bp, i))
9191 if (bnx2x_stats_arr[i].size == 0) {
9192 /* skip this counter */
9197 if (bnx2x_stats_arr[i].size == 4) {
9198 /* 4-byte counter */
9199 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9203 /* 8-byte counter */
9204 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9205 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9210 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9212 struct bnx2x *bp = netdev_priv(dev);
9213 int port = BP_PORT(bp);
9216 if (!netif_running(dev))
9225 for (i = 0; i < (data * 2); i++) {
9227 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9228 bp->link_params.hw_led_mode,
9229 bp->link_params.chip_id);
9231 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9232 bp->link_params.hw_led_mode,
9233 bp->link_params.chip_id);
9235 msleep_interruptible(500);
9236 if (signal_pending(current))
9240 if (bp->link_vars.link_up)
9241 bnx2x_set_led(bp, port, LED_MODE_OPER,
9242 bp->link_vars.line_speed,
9243 bp->link_params.hw_led_mode,
9244 bp->link_params.chip_id);
9249 static struct ethtool_ops bnx2x_ethtool_ops = {
9250 .get_settings = bnx2x_get_settings,
9251 .set_settings = bnx2x_set_settings,
9252 .get_drvinfo = bnx2x_get_drvinfo,
9253 .get_wol = bnx2x_get_wol,
9254 .set_wol = bnx2x_set_wol,
9255 .get_msglevel = bnx2x_get_msglevel,
9256 .set_msglevel = bnx2x_set_msglevel,
9257 .nway_reset = bnx2x_nway_reset,
9258 .get_link = ethtool_op_get_link,
9259 .get_eeprom_len = bnx2x_get_eeprom_len,
9260 .get_eeprom = bnx2x_get_eeprom,
9261 .set_eeprom = bnx2x_set_eeprom,
9262 .get_coalesce = bnx2x_get_coalesce,
9263 .set_coalesce = bnx2x_set_coalesce,
9264 .get_ringparam = bnx2x_get_ringparam,
9265 .set_ringparam = bnx2x_set_ringparam,
9266 .get_pauseparam = bnx2x_get_pauseparam,
9267 .set_pauseparam = bnx2x_set_pauseparam,
9268 .get_rx_csum = bnx2x_get_rx_csum,
9269 .set_rx_csum = bnx2x_set_rx_csum,
9270 .get_tx_csum = ethtool_op_get_tx_csum,
9271 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9272 .set_flags = bnx2x_set_flags,
9273 .get_flags = ethtool_op_get_flags,
9274 .get_sg = ethtool_op_get_sg,
9275 .set_sg = ethtool_op_set_sg,
9276 .get_tso = ethtool_op_get_tso,
9277 .set_tso = bnx2x_set_tso,
9278 .self_test_count = bnx2x_self_test_count,
9279 .self_test = bnx2x_self_test,
9280 .get_strings = bnx2x_get_strings,
9281 .phys_id = bnx2x_phys_id,
9282 .get_stats_count = bnx2x_get_stats_count,
9283 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9286 /* end of ethtool_ops */
9288 /****************************************************************************
9289 * General service functions
9290 ****************************************************************************/
9292 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9296 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9300 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9301 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9302 PCI_PM_CTRL_PME_STATUS));
9304 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9305 /* delay required during transition out of D3hot */
9310 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9314 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9316 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9319 /* No more memory access after this point until
9320 * device is brought back to D0.
9330 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9334 /* Tell compiler that status block fields can change */
9336 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9337 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9339 return (fp->rx_comp_cons != rx_cons_sb);
9343 * net_device service functions
9346 static int bnx2x_poll(struct napi_struct *napi, int budget)
9348 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9350 struct bnx2x *bp = fp->bp;
9353 #ifdef BNX2X_STOP_ON_ERROR
9354 if (unlikely(bp->panic))
9358 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9359 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9360 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9362 bnx2x_update_fpsb_idx(fp);
9364 if (bnx2x_has_tx_work(fp))
9365 bnx2x_tx_int(fp, budget);
9367 if (bnx2x_has_rx_work(fp))
9368 work_done = bnx2x_rx_int(fp, budget);
9369 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9371 /* must not complete if we consumed full budget */
9372 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9374 #ifdef BNX2X_STOP_ON_ERROR
9377 netif_rx_complete(napi);
9379 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9380 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9381 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9382 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9388 /* we split the first BD into headers and data BDs
9389 * to ease the pain of our fellow microcode engineers
9390 * we use one mapping for both BDs
9391 * So far this has only been observed to happen
9392 * in Other Operating Systems(TM)
9394 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9395 struct bnx2x_fastpath *fp,
9396 struct eth_tx_bd **tx_bd, u16 hlen,
9397 u16 bd_prod, int nbd)
9399 struct eth_tx_bd *h_tx_bd = *tx_bd;
9400 struct eth_tx_bd *d_tx_bd;
9402 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9404 /* first fix first BD */
9405 h_tx_bd->nbd = cpu_to_le16(nbd);
9406 h_tx_bd->nbytes = cpu_to_le16(hlen);
9408 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9409 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9410 h_tx_bd->addr_lo, h_tx_bd->nbd);
9412 /* now get a new data BD
9413 * (after the pbd) and fill it */
9414 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9415 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9417 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9418 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9420 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9421 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9422 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9424 /* this marks the BD as one that has no individual mapping
9425 * the FW ignores this flag in a BD not marked start
9427 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9428 DP(NETIF_MSG_TX_QUEUED,
9429 "TSO split data size is %d (%x:%x)\n",
9430 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9432 /* update tx_bd for marking the last BD flag */
9438 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9441 csum = (u16) ~csum_fold(csum_sub(csum,
9442 csum_partial(t_header - fix, fix, 0)));
9445 csum = (u16) ~csum_fold(csum_add(csum,
9446 csum_partial(t_header, -fix, 0)));
9448 return swab16(csum);
9451 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9455 if (skb->ip_summed != CHECKSUM_PARTIAL)
9459 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9461 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9462 rc |= XMIT_CSUM_TCP;
9466 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9467 rc |= XMIT_CSUM_TCP;
9471 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9474 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9480 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9481 /* check if packet requires linearization (packet is too fragmented) */
9482 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9487 int first_bd_sz = 0;
9489 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9490 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9492 if (xmit_type & XMIT_GSO) {
9493 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9494 /* Check if LSO packet needs to be copied:
9495 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9496 int wnd_size = MAX_FETCH_BD - 3;
9497 /* Number of windows to check */
9498 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9503 /* Headers length */
9504 hlen = (int)(skb_transport_header(skb) - skb->data) +
9507 /* Amount of data (w/o headers) on linear part of SKB*/
9508 first_bd_sz = skb_headlen(skb) - hlen;
9510 wnd_sum = first_bd_sz;
9512 /* Calculate the first sum - it's special */
9513 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9515 skb_shinfo(skb)->frags[frag_idx].size;
9517 /* If there was data on linear skb data - check it */
9518 if (first_bd_sz > 0) {
9519 if (unlikely(wnd_sum < lso_mss)) {
9524 wnd_sum -= first_bd_sz;
9527 /* Others are easier: run through the frag list and
9528 check all windows */
9529 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9531 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9533 if (unlikely(wnd_sum < lso_mss)) {
9538 skb_shinfo(skb)->frags[wnd_idx].size;
9542 /* in non-LSO too fragmented packet should always
9549 if (unlikely(to_copy))
9550 DP(NETIF_MSG_TX_QUEUED,
9551 "Linearization IS REQUIRED for %s packet. "
9552 "num_frags %d hlen %d first_bd_sz %d\n",
9553 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9554 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9560 /* called with netif_tx_lock
9561 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9562 * netif_wake_queue()
9564 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9566 struct bnx2x *bp = netdev_priv(dev);
9567 struct bnx2x_fastpath *fp;
9568 struct sw_tx_bd *tx_buf;
9569 struct eth_tx_bd *tx_bd;
9570 struct eth_tx_parse_bd *pbd = NULL;
9571 u16 pkt_prod, bd_prod;
9574 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9575 int vlan_off = (bp->e1hov ? 4 : 0);
9579 #ifdef BNX2X_STOP_ON_ERROR
9580 if (unlikely(bp->panic))
9581 return NETDEV_TX_BUSY;
9584 fp_index = (smp_processor_id() % bp->num_queues);
9585 fp = &bp->fp[fp_index];
9587 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9588 bp->eth_stats.driver_xoff++,
9589 netif_stop_queue(dev);
9590 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9591 return NETDEV_TX_BUSY;
9594 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9595 " gso type %x xmit_type %x\n",
9596 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9597 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9599 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9600 /* First, check if we need to linearize the skb
9601 (due to FW restrictions) */
9602 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9603 /* Statistics of linearization */
9605 if (skb_linearize(skb) != 0) {
9606 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9607 "silently dropping this SKB\n");
9608 dev_kfree_skb_any(skb);
9609 return NETDEV_TX_OK;
9615 Please read carefully. First we use one BD which we mark as start,
9616 then for TSO or xsum we have a parsing info BD,
9617 and only then we have the rest of the TSO BDs.
9618 (don't forget to mark the last one as last,
9619 and to unmap only AFTER you write to the BD ...)
9620 And above all, all pdb sizes are in words - NOT DWORDS!
9623 pkt_prod = fp->tx_pkt_prod++;
9624 bd_prod = TX_BD(fp->tx_bd_prod);
9626 /* get a tx_buf and first BD */
9627 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9628 tx_bd = &fp->tx_desc_ring[bd_prod];
9630 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9631 tx_bd->general_data = (UNICAST_ADDRESS <<
9632 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9634 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9636 /* remember the first BD of the packet */
9637 tx_buf->first_bd = fp->tx_bd_prod;
9640 DP(NETIF_MSG_TX_QUEUED,
9641 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9642 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9645 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9646 (bp->flags & HW_VLAN_TX_FLAG)) {
9647 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9648 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9652 tx_bd->vlan = cpu_to_le16(pkt_prod);
9655 /* turn on parsing and get a BD */
9656 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9657 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9659 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9662 if (xmit_type & XMIT_CSUM) {
9663 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9665 /* for now NS flag is not used in Linux */
9666 pbd->global_data = (hlen |
9667 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9668 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9670 pbd->ip_hlen = (skb_transport_header(skb) -
9671 skb_network_header(skb)) / 2;
9673 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9675 pbd->total_hlen = cpu_to_le16(hlen);
9676 hlen = hlen*2 - vlan_off;
9678 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9680 if (xmit_type & XMIT_CSUM_V4)
9681 tx_bd->bd_flags.as_bitfield |=
9682 ETH_TX_BD_FLAGS_IP_CSUM;
9684 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9686 if (xmit_type & XMIT_CSUM_TCP) {
9687 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9690 s8 fix = SKB_CS_OFF(skb); /* signed! */
9692 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9693 pbd->cs_offset = fix / 2;
9695 DP(NETIF_MSG_TX_QUEUED,
9696 "hlen %d offset %d fix %d csum before fix %x\n",
9697 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9700 /* HW bug: fixup the CSUM */
9701 pbd->tcp_pseudo_csum =
9702 bnx2x_csum_fix(skb_transport_header(skb),
9705 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9706 pbd->tcp_pseudo_csum);
9710 mapping = pci_map_single(bp->pdev, skb->data,
9711 skb_headlen(skb), PCI_DMA_TODEVICE);
9713 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9714 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9715 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9716 tx_bd->nbd = cpu_to_le16(nbd);
9717 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9719 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9720 " nbytes %d flags %x vlan %x\n",
9721 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9722 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9723 le16_to_cpu(tx_bd->vlan));
9725 if (xmit_type & XMIT_GSO) {
9727 DP(NETIF_MSG_TX_QUEUED,
9728 "TSO packet len %d hlen %d total len %d tso size %d\n",
9729 skb->len, hlen, skb_headlen(skb),
9730 skb_shinfo(skb)->gso_size);
9732 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9734 if (unlikely(skb_headlen(skb) > hlen))
9735 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9738 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9739 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9740 pbd->tcp_flags = pbd_tcp_flags(skb);
9742 if (xmit_type & XMIT_GSO_V4) {
9743 pbd->ip_id = swab16(ip_hdr(skb)->id);
9744 pbd->tcp_pseudo_csum =
9745 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9747 0, IPPROTO_TCP, 0));
9750 pbd->tcp_pseudo_csum =
9751 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9752 &ipv6_hdr(skb)->daddr,
9753 0, IPPROTO_TCP, 0));
9755 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9758 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9759 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9761 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9762 tx_bd = &fp->tx_desc_ring[bd_prod];
9764 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9765 frag->size, PCI_DMA_TODEVICE);
9767 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9768 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9769 tx_bd->nbytes = cpu_to_le16(frag->size);
9770 tx_bd->vlan = cpu_to_le16(pkt_prod);
9771 tx_bd->bd_flags.as_bitfield = 0;
9773 DP(NETIF_MSG_TX_QUEUED,
9774 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9775 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9776 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9779 /* now at last mark the BD as the last BD */
9780 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9782 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9783 tx_bd, tx_bd->bd_flags.as_bitfield);
9785 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9787 /* now send a tx doorbell, counting the next BD
9788 * if the packet contains or ends with it
9790 if (TX_BD_POFF(bd_prod) < nbd)
9794 DP(NETIF_MSG_TX_QUEUED,
9795 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9796 " tcp_flags %x xsum %x seq %u hlen %u\n",
9797 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9798 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9799 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9801 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9804 * Make sure that the BD data is updated before updating the producer
9805 * since FW might read the BD right after the producer is updated.
9806 * This is only applicable for weak-ordered memory model archs such
9807 * as IA-64. The following barrier is also mandatory since FW will
9808 * assumes packets must have BDs.
9812 fp->hw_tx_prods->bds_prod =
9813 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9814 mb(); /* FW restriction: must not reorder writing nbd and packets */
9815 fp->hw_tx_prods->packets_prod =
9816 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9817 DOORBELL(bp, FP_IDX(fp), 0);
9821 fp->tx_bd_prod += nbd;
9822 dev->trans_start = jiffies;
9824 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9825 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9826 if we put Tx into XOFF state. */
9828 netif_stop_queue(dev);
9829 bp->eth_stats.driver_xoff++;
9830 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9831 netif_wake_queue(dev);
9835 return NETDEV_TX_OK;
9838 /* called with rtnl_lock */
9839 static int bnx2x_open(struct net_device *dev)
9841 struct bnx2x *bp = netdev_priv(dev);
9843 netif_carrier_off(dev);
9845 bnx2x_set_power_state(bp, PCI_D0);
9847 return bnx2x_nic_load(bp, LOAD_OPEN);
9850 /* called with rtnl_lock */
9851 static int bnx2x_close(struct net_device *dev)
9853 struct bnx2x *bp = netdev_priv(dev);
9855 /* Unload the driver, release IRQs */
9856 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9857 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9858 if (!CHIP_REV_IS_SLOW(bp))
9859 bnx2x_set_power_state(bp, PCI_D3hot);
9864 /* called with netif_tx_lock from set_multicast */
9865 static void bnx2x_set_rx_mode(struct net_device *dev)
9867 struct bnx2x *bp = netdev_priv(dev);
9868 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9869 int port = BP_PORT(bp);
9871 if (bp->state != BNX2X_STATE_OPEN) {
9872 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9876 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9878 if (dev->flags & IFF_PROMISC)
9879 rx_mode = BNX2X_RX_MODE_PROMISC;
9881 else if ((dev->flags & IFF_ALLMULTI) ||
9882 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9883 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9885 else { /* some multicasts */
9886 if (CHIP_IS_E1(bp)) {
9888 struct dev_mc_list *mclist;
9889 struct mac_configuration_cmd *config =
9890 bnx2x_sp(bp, mcast_config);
9892 for (i = 0, mclist = dev->mc_list;
9893 mclist && (i < dev->mc_count);
9894 i++, mclist = mclist->next) {
9896 config->config_table[i].
9897 cam_entry.msb_mac_addr =
9898 swab16(*(u16 *)&mclist->dmi_addr[0]);
9899 config->config_table[i].
9900 cam_entry.middle_mac_addr =
9901 swab16(*(u16 *)&mclist->dmi_addr[2]);
9902 config->config_table[i].
9903 cam_entry.lsb_mac_addr =
9904 swab16(*(u16 *)&mclist->dmi_addr[4]);
9905 config->config_table[i].cam_entry.flags =
9907 config->config_table[i].
9908 target_table_entry.flags = 0;
9909 config->config_table[i].
9910 target_table_entry.client_id = 0;
9911 config->config_table[i].
9912 target_table_entry.vlan_id = 0;
9915 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9916 config->config_table[i].
9917 cam_entry.msb_mac_addr,
9918 config->config_table[i].
9919 cam_entry.middle_mac_addr,
9920 config->config_table[i].
9921 cam_entry.lsb_mac_addr);
9923 old = config->hdr.length_6b;
9925 for (; i < old; i++) {
9926 if (CAM_IS_INVALID(config->
9928 /* already invalidated */
9932 CAM_INVALIDATE(config->
9937 if (CHIP_REV_IS_SLOW(bp))
9938 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9940 offset = BNX2X_MAX_MULTICAST*(1 + port);
9942 config->hdr.length_6b = i;
9943 config->hdr.offset = offset;
9944 config->hdr.client_id = BP_CL_ID(bp);
9945 config->hdr.reserved1 = 0;
9947 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9948 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9949 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9952 /* Accept one or more multicasts */
9953 struct dev_mc_list *mclist;
9954 u32 mc_filter[MC_HASH_SIZE];
9955 u32 crc, bit, regidx;
9958 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9960 for (i = 0, mclist = dev->mc_list;
9961 mclist && (i < dev->mc_count);
9962 i++, mclist = mclist->next) {
9964 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9967 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9968 bit = (crc >> 24) & 0xff;
9971 mc_filter[regidx] |= (1 << bit);
9974 for (i = 0; i < MC_HASH_SIZE; i++)
9975 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9980 bp->rx_mode = rx_mode;
9981 bnx2x_set_storm_rx_mode(bp);
9984 /* called with rtnl_lock */
9985 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9987 struct sockaddr *addr = p;
9988 struct bnx2x *bp = netdev_priv(dev);
9990 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9993 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9994 if (netif_running(dev)) {
9996 bnx2x_set_mac_addr_e1(bp, 1);
9998 bnx2x_set_mac_addr_e1h(bp, 1);
10004 /* called with rtnl_lock */
10005 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10007 struct mii_ioctl_data *data = if_mii(ifr);
10008 struct bnx2x *bp = netdev_priv(dev);
10009 int port = BP_PORT(bp);
10014 data->phy_id = bp->port.phy_addr;
10018 case SIOCGMIIREG: {
10021 if (!netif_running(dev))
10024 mutex_lock(&bp->port.phy_mutex);
10025 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10026 DEFAULT_PHY_DEV_ADDR,
10027 (data->reg_num & 0x1f), &mii_regval);
10028 data->val_out = mii_regval;
10029 mutex_unlock(&bp->port.phy_mutex);
10034 if (!capable(CAP_NET_ADMIN))
10037 if (!netif_running(dev))
10040 mutex_lock(&bp->port.phy_mutex);
10041 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10042 DEFAULT_PHY_DEV_ADDR,
10043 (data->reg_num & 0x1f), data->val_in);
10044 mutex_unlock(&bp->port.phy_mutex);
10052 return -EOPNOTSUPP;
10055 /* called with rtnl_lock */
10056 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10058 struct bnx2x *bp = netdev_priv(dev);
10061 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10062 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10065 /* This does not race with packet allocation
10066 * because the actual alloc size is
10067 * only updated as part of load
10069 dev->mtu = new_mtu;
10071 if (netif_running(dev)) {
10072 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10073 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10079 static void bnx2x_tx_timeout(struct net_device *dev)
10081 struct bnx2x *bp = netdev_priv(dev);
10083 #ifdef BNX2X_STOP_ON_ERROR
10087 /* This allows the netif to be shutdown gracefully before resetting */
10088 schedule_work(&bp->reset_task);
10092 /* called with rtnl_lock */
10093 static void bnx2x_vlan_rx_register(struct net_device *dev,
10094 struct vlan_group *vlgrp)
10096 struct bnx2x *bp = netdev_priv(dev);
10100 /* Set flags according to the required capabilities */
10101 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10103 if (dev->features & NETIF_F_HW_VLAN_TX)
10104 bp->flags |= HW_VLAN_TX_FLAG;
10106 if (dev->features & NETIF_F_HW_VLAN_RX)
10107 bp->flags |= HW_VLAN_RX_FLAG;
10109 if (netif_running(dev))
10110 bnx2x_set_client_config(bp);
10115 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10116 static void poll_bnx2x(struct net_device *dev)
10118 struct bnx2x *bp = netdev_priv(dev);
10120 disable_irq(bp->pdev->irq);
10121 bnx2x_interrupt(bp->pdev->irq, dev);
10122 enable_irq(bp->pdev->irq);
10126 static const struct net_device_ops bnx2x_netdev_ops = {
10127 .ndo_open = bnx2x_open,
10128 .ndo_stop = bnx2x_close,
10129 .ndo_start_xmit = bnx2x_start_xmit,
10130 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10131 .ndo_set_mac_address = bnx2x_change_mac_addr,
10132 .ndo_validate_addr = eth_validate_addr,
10133 .ndo_do_ioctl = bnx2x_ioctl,
10134 .ndo_change_mtu = bnx2x_change_mtu,
10135 .ndo_tx_timeout = bnx2x_tx_timeout,
10137 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10139 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10140 .ndo_poll_controller = poll_bnx2x,
10145 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10146 struct net_device *dev)
10151 SET_NETDEV_DEV(dev, &pdev->dev);
10152 bp = netdev_priv(dev);
10157 bp->func = PCI_FUNC(pdev->devfn);
10159 rc = pci_enable_device(pdev);
10161 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10165 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10166 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10169 goto err_out_disable;
10172 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10173 printk(KERN_ERR PFX "Cannot find second PCI device"
10174 " base address, aborting\n");
10176 goto err_out_disable;
10179 if (atomic_read(&pdev->enable_cnt) == 1) {
10180 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10182 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10184 goto err_out_disable;
10187 pci_set_master(pdev);
10188 pci_save_state(pdev);
10191 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10192 if (bp->pm_cap == 0) {
10193 printk(KERN_ERR PFX "Cannot find power management"
10194 " capability, aborting\n");
10196 goto err_out_release;
10199 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10200 if (bp->pcie_cap == 0) {
10201 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10204 goto err_out_release;
10207 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10208 bp->flags |= USING_DAC_FLAG;
10209 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10210 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10211 " failed, aborting\n");
10213 goto err_out_release;
10216 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10217 printk(KERN_ERR PFX "System does not support DMA,"
10220 goto err_out_release;
10223 dev->mem_start = pci_resource_start(pdev, 0);
10224 dev->base_addr = dev->mem_start;
10225 dev->mem_end = pci_resource_end(pdev, 0);
10227 dev->irq = pdev->irq;
10229 bp->regview = pci_ioremap_bar(pdev, 0);
10230 if (!bp->regview) {
10231 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10233 goto err_out_release;
10236 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10237 min_t(u64, BNX2X_DB_SIZE,
10238 pci_resource_len(pdev, 2)));
10239 if (!bp->doorbells) {
10240 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10242 goto err_out_unmap;
10245 bnx2x_set_power_state(bp, PCI_D0);
10247 /* clean indirect addresses */
10248 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10249 PCICFG_VENDOR_ID_OFFSET);
10250 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10251 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10252 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10253 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10255 dev->watchdog_timeo = TX_TIMEOUT;
10257 dev->netdev_ops = &bnx2x_netdev_ops;
10258 dev->ethtool_ops = &bnx2x_ethtool_ops;
10259 dev->features |= NETIF_F_SG;
10260 dev->features |= NETIF_F_HW_CSUM;
10261 if (bp->flags & USING_DAC_FLAG)
10262 dev->features |= NETIF_F_HIGHDMA;
10264 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10265 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10267 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10268 dev->features |= NETIF_F_TSO6;
10274 iounmap(bp->regview);
10275 bp->regview = NULL;
10277 if (bp->doorbells) {
10278 iounmap(bp->doorbells);
10279 bp->doorbells = NULL;
10283 if (atomic_read(&pdev->enable_cnt) == 1)
10284 pci_release_regions(pdev);
10287 pci_disable_device(pdev);
10288 pci_set_drvdata(pdev, NULL);
10294 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10296 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10298 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10302 /* return value of 1=2.5GHz 2=5GHz */
10303 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10305 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10307 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10311 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10312 const struct pci_device_id *ent)
10314 static int version_printed;
10315 struct net_device *dev = NULL;
10319 if (version_printed++ == 0)
10320 printk(KERN_INFO "%s", version);
10322 /* dev zeroed in init_etherdev */
10323 dev = alloc_etherdev(sizeof(*bp));
10325 printk(KERN_ERR PFX "Cannot allocate net device\n");
10329 bp = netdev_priv(dev);
10330 bp->msglevel = debug;
10332 rc = bnx2x_init_dev(pdev, dev);
10338 pci_set_drvdata(pdev, dev);
10340 rc = bnx2x_init_bp(bp);
10342 goto init_one_exit;
10344 rc = register_netdev(dev);
10346 dev_err(&pdev->dev, "Cannot register net device\n");
10347 goto init_one_exit;
10350 bp->common.name = board_info[ent->driver_data].name;
10351 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10352 " IRQ %d, ", dev->name, bp->common.name,
10353 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10354 bnx2x_get_pcie_width(bp),
10355 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10356 dev->base_addr, bp->pdev->irq);
10357 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10362 iounmap(bp->regview);
10365 iounmap(bp->doorbells);
10369 if (atomic_read(&pdev->enable_cnt) == 1)
10370 pci_release_regions(pdev);
10372 pci_disable_device(pdev);
10373 pci_set_drvdata(pdev, NULL);
10378 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10380 struct net_device *dev = pci_get_drvdata(pdev);
10384 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10387 bp = netdev_priv(dev);
10389 unregister_netdev(dev);
10392 iounmap(bp->regview);
10395 iounmap(bp->doorbells);
10399 if (atomic_read(&pdev->enable_cnt) == 1)
10400 pci_release_regions(pdev);
10402 pci_disable_device(pdev);
10403 pci_set_drvdata(pdev, NULL);
10406 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10408 struct net_device *dev = pci_get_drvdata(pdev);
10412 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10415 bp = netdev_priv(dev);
10419 pci_save_state(pdev);
10421 if (!netif_running(dev)) {
10426 netif_device_detach(dev);
10428 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10430 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10437 static int bnx2x_resume(struct pci_dev *pdev)
10439 struct net_device *dev = pci_get_drvdata(pdev);
10444 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10447 bp = netdev_priv(dev);
10451 pci_restore_state(pdev);
10453 if (!netif_running(dev)) {
10458 bnx2x_set_power_state(bp, PCI_D0);
10459 netif_device_attach(dev);
10461 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10468 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10472 bp->state = BNX2X_STATE_ERROR;
10474 bp->rx_mode = BNX2X_RX_MODE_NONE;
10476 bnx2x_netif_stop(bp, 0);
10478 del_timer_sync(&bp->timer);
10479 bp->stats_state = STATS_STATE_DISABLED;
10480 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10483 bnx2x_free_irq(bp);
10485 if (CHIP_IS_E1(bp)) {
10486 struct mac_configuration_cmd *config =
10487 bnx2x_sp(bp, mcast_config);
10489 for (i = 0; i < config->hdr.length_6b; i++)
10490 CAM_INVALIDATE(config->config_table[i]);
10493 /* Free SKBs, SGEs, TPA pool and driver internals */
10494 bnx2x_free_skbs(bp);
10495 for_each_queue(bp, i)
10496 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10497 for_each_queue(bp, i)
10498 netif_napi_del(&bnx2x_fp(bp, i, napi));
10499 bnx2x_free_mem(bp);
10501 bp->state = BNX2X_STATE_CLOSED;
10503 netif_carrier_off(bp->dev);
10508 static void bnx2x_eeh_recover(struct bnx2x *bp)
10512 mutex_init(&bp->port.phy_mutex);
10514 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10515 bp->link_params.shmem_base = bp->common.shmem_base;
10516 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10518 if (!bp->common.shmem_base ||
10519 (bp->common.shmem_base < 0xA0000) ||
10520 (bp->common.shmem_base >= 0xC0000)) {
10521 BNX2X_DEV_INFO("MCP not active\n");
10522 bp->flags |= NO_MCP_FLAG;
10526 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10527 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10528 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10529 BNX2X_ERR("BAD MCP validity signature\n");
10531 if (!BP_NOMCP(bp)) {
10532 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10533 & DRV_MSG_SEQ_NUMBER_MASK);
10534 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10539 * bnx2x_io_error_detected - called when PCI error is detected
10540 * @pdev: Pointer to PCI device
10541 * @state: The current pci connection state
10543 * This function is called after a PCI bus error affecting
10544 * this device has been detected.
10546 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10547 pci_channel_state_t state)
10549 struct net_device *dev = pci_get_drvdata(pdev);
10550 struct bnx2x *bp = netdev_priv(dev);
10554 netif_device_detach(dev);
10556 if (netif_running(dev))
10557 bnx2x_eeh_nic_unload(bp);
10559 pci_disable_device(pdev);
10563 /* Request a slot reset */
10564 return PCI_ERS_RESULT_NEED_RESET;
10568 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10569 * @pdev: Pointer to PCI device
10571 * Restart the card from scratch, as if from a cold-boot.
10573 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10575 struct net_device *dev = pci_get_drvdata(pdev);
10576 struct bnx2x *bp = netdev_priv(dev);
10580 if (pci_enable_device(pdev)) {
10581 dev_err(&pdev->dev,
10582 "Cannot re-enable PCI device after reset\n");
10584 return PCI_ERS_RESULT_DISCONNECT;
10587 pci_set_master(pdev);
10588 pci_restore_state(pdev);
10590 if (netif_running(dev))
10591 bnx2x_set_power_state(bp, PCI_D0);
10595 return PCI_ERS_RESULT_RECOVERED;
10599 * bnx2x_io_resume - called when traffic can start flowing again
10600 * @pdev: Pointer to PCI device
10602 * This callback is called when the error recovery driver tells us that
10603 * its OK to resume normal operation.
10605 static void bnx2x_io_resume(struct pci_dev *pdev)
10607 struct net_device *dev = pci_get_drvdata(pdev);
10608 struct bnx2x *bp = netdev_priv(dev);
10612 bnx2x_eeh_recover(bp);
10614 if (netif_running(dev))
10615 bnx2x_nic_load(bp, LOAD_NORMAL);
10617 netif_device_attach(dev);
10622 static struct pci_error_handlers bnx2x_err_handler = {
10623 .error_detected = bnx2x_io_error_detected,
10624 .slot_reset = bnx2x_io_slot_reset,
10625 .resume = bnx2x_io_resume,
10628 static struct pci_driver bnx2x_pci_driver = {
10629 .name = DRV_MODULE_NAME,
10630 .id_table = bnx2x_pci_tbl,
10631 .probe = bnx2x_init_one,
10632 .remove = __devexit_p(bnx2x_remove_one),
10633 .suspend = bnx2x_suspend,
10634 .resume = bnx2x_resume,
10635 .err_handler = &bnx2x_err_handler,
10638 static int __init bnx2x_init(void)
10640 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10641 if (bnx2x_wq == NULL) {
10642 printk(KERN_ERR PFX "Cannot create workqueue\n");
10646 return pci_register_driver(&bnx2x_pci_driver);
10649 static void __exit bnx2x_cleanup(void)
10651 pci_unregister_driver(&bnx2x_pci_driver);
10653 destroy_workqueue(bnx2x_wq);
10656 module_init(bnx2x_init);
10657 module_exit(bnx2x_cleanup);