1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
58 #include "bnx2x_init.h"
60 #define DRV_MODULE_VERSION "1.45.26"
61 #define DRV_MODULE_RELDATE "2009/01/26"
62 #define BNX2X_BC_VER 0x040200
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int multi_mode = 1;
77 module_param(multi_mode, int, 0);
79 static int disable_tpa;
82 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
84 module_param(disable_tpa, int, 0);
87 module_param(int_mode, int, 0);
88 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
90 module_param(poll, int, 0);
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
93 MODULE_PARM_DESC(poll, "use polling (for debug)");
94 MODULE_PARM_DESC(debug, "default debug msglevel");
96 static struct workqueue_struct *bnx2x_wq;
98 enum bnx2x_board_type {
104 /* indexed by board_type, above */
107 } board_info[] __devinitdata = {
108 { "Broadcom NetXtreme II BCM57710 XGb" },
109 { "Broadcom NetXtreme II BCM57711 XGb" },
110 { "Broadcom NetXtreme II BCM57711E XGb" }
114 static const struct pci_device_id bnx2x_pci_tbl[] = {
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
124 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
126 /****************************************************************************
127 * General service functions
128 ****************************************************************************/
131 * locking is done by mcp
133 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138 PCICFG_VENDOR_ID_OFFSET);
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 PCICFG_VENDOR_ID_OFFSET);
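/* Both indirect accessors above tunnel a register access through the
 * PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window in PCI config space, then park
 * the window back at PCICFG_VENDOR_ID_OFFSET, presumably so a later config
 * access through the window cannot hit a live register by accident.
 */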
153 static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 /* copy command into DMAE command memory and set DMAE command go */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
171 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180 struct dmae_command *dmae = &bp->init_dmae;
181 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
184 if (!bp->dmae_ready) {
185 u32 *data = bnx2x_sp(bp, wb_data[0]);
187 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
188 " using indirect\n", dst_addr, len32);
189 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
193 mutex_lock(&bp->dmae_mutex);
195 memset(dmae, 0, sizeof(struct dmae_command));
197 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
200 #ifdef __BIG_ENDIAN
201 DMAE_CMD_ENDIANITY_B_DW_SWAP |
202 #else
203 DMAE_CMD_ENDIANITY_DW_SWAP |
204 #endif
205 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207 dmae->src_addr_lo = U64_LO(dma_addr);
208 dmae->src_addr_hi = U64_HI(dma_addr);
209 dmae->dst_addr_lo = dst_addr >> 2;
210 dmae->dst_addr_hi = 0;
212 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214 dmae->comp_val = DMAE_COMP_VAL;
216 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
218 "dst_addr [%x:%08x (%08x)]\n"
219 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
220 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
229 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
233 while (*wb_comp != DMAE_COMP_VAL) {
234 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237 BNX2X_ERR("dmae timeout!\n");
241 /* adjust delay for emulation/FPGA */
242 if (CHIP_REV_IS_SLOW(bp))
248 mutex_unlock(&bp->dmae_mutex);
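/* bnx2x_write_dmae() above: the DMAE engine copies len32 dwords from host
 * memory (dma_addr) into GRC space (dst_addr) and then writes DMAE_COMP_VAL
 * to the wb_comp word in the slowpath area; the function polls that word
 * (still under dmae_mutex) until the completion value appears, reporting a
 * timeout error if it never does.
 */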
251 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
253 struct dmae_command *dmae = &bp->init_dmae;
254 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
257 if (!bp->dmae_ready) {
258 u32 *data = bnx2x_sp(bp, wb_data[0]);
261 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
262 " using indirect\n", src_addr, len32);
263 for (i = 0; i < len32; i++)
264 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
268 mutex_lock(&bp->dmae_mutex);
270 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271 memset(dmae, 0, sizeof(struct dmae_command));
273 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
276 #ifdef __BIG_ENDIAN
277 DMAE_CMD_ENDIANITY_B_DW_SWAP |
278 #else
279 DMAE_CMD_ENDIANITY_DW_SWAP |
280 #endif
281 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
283 dmae->src_addr_lo = src_addr >> 2;
284 dmae->src_addr_hi = 0;
285 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
288 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
290 dmae->comp_val = DMAE_COMP_VAL;
292 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
293 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
294 "dst_addr [%x:%08x (%08x)]\n"
295 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
296 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
302 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
306 while (*wb_comp != DMAE_COMP_VAL) {
309 BNX2X_ERR("dmae timeout!\n");
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
319 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
320 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
323 mutex_unlock(&bp->dmae_mutex);
326 /* used only for slowpath so not inlined */
327 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
331 wb_write[0] = val_hi;
332 wb_write[1] = val_lo;
333 REG_WR_DMAE(bp, reg, wb_write, 2);
337 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
341 REG_RD_DMAE(bp, reg, wb_data, 2);
343 return HILO_U64(wb_data[0], wb_data[1]);
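/* The two "wide bus" helpers above move a 64-bit register value as two
 * dwords (hi, lo) via the DMAE engine; HILO_U64() recombines the pair on
 * the read side.
 */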
347 static int bnx2x_mc_assert(struct bnx2x *bp)
351 u32 row0, row1, row2, row3;
354 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355 XSTORM_ASSERT_LIST_INDEX_OFFSET);
357 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
359 /* print the asserts */
360 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
362 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363 XSTORM_ASSERT_LIST_OFFSET(i));
364 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
371 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373 " 0x%08x 0x%08x 0x%08x\n",
374 i, row3, row2, row1, row0);
382 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383 TSTORM_ASSERT_LIST_INDEX_OFFSET);
385 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
387 /* print the asserts */
388 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
390 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391 TSTORM_ASSERT_LIST_OFFSET(i));
392 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
399 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401 " 0x%08x 0x%08x 0x%08x\n",
402 i, row3, row2, row1, row0);
410 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411 CSTORM_ASSERT_LIST_INDEX_OFFSET);
413 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
415 /* print the asserts */
416 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
418 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419 CSTORM_ASSERT_LIST_OFFSET(i));
420 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
427 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429 " 0x%08x 0x%08x 0x%08x\n",
430 i, row3, row2, row1, row0);
438 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439 USTORM_ASSERT_LIST_INDEX_OFFSET);
441 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
443 /* print the asserts */
444 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
446 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447 USTORM_ASSERT_LIST_OFFSET(i));
448 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i) + 4);
450 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 8);
452 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 12);
455 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457 " 0x%08x 0x%08x 0x%08x\n",
458 i, row3, row2, row1, row0);
468 static void bnx2x_fw_dump(struct bnx2x *bp)
474 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
475 mark = ((mark + 0x3) & ~0x3);
476 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
478 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479 for (word = 0; word < 8; word++)
480 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483 printk(KERN_CONT "%s", (char *)data);
485 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486 for (word = 0; word < 8; word++)
487 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490 printk(KERN_CONT "%s", (char *)data);
492 printk("\n" KERN_ERR PFX "end of fw dump\n");
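/* A hedged reading of bnx2x_fw_dump(): the word at scratchpad offset 0xf104
 * holds a write mark (given as a GRC address, hence the 0x08000000
 * adjustment); the two loops print the scratchpad contents as text from the
 * mark up to 0xF900 and then from 0xF108 back up to the mark, i.e. they
 * appear to unwrap a circular firmware log buffer.
 */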
495 static void bnx2x_panic_dump(struct bnx2x *bp)
500 bp->stats_state = STATS_STATE_DISABLED;
501 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
503 BNX2X_ERR("begin crash dump -----------------\n");
505 for_each_queue(bp, i) {
506 struct bnx2x_fastpath *fp = &bp->fp[i];
507 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
509 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
510 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
511 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
512 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
513 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
514 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
515 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
516 fp->rx_bd_prod, fp->rx_bd_cons,
517 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
520 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
521 " *sb_u_idx(%x) bd data(%x,%x)\n",
522 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523 fp->status_blk->c_status_block.status_block_index,
525 fp->status_blk->u_status_block.status_block_index,
526 hw_prods->packets_prod, hw_prods->bds_prod);
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 for (j = start; j < end; j++) {
531 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
533 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 sw_bd->skb, sw_bd->first_bd);
537 start = TX_BD(fp->tx_bd_cons - 10);
538 end = TX_BD(fp->tx_bd_cons + 254);
539 for (j = start; j < end; j++) {
540 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
542 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
546 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 for (j = start; j < end; j++) {
549 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
552 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
553 j, rx_bd[1], rx_bd[0], sw_bd->skb);
556 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE(fp->last_max_sge);
558 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
562 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
563 j, rx_sge[1], rx_sge[0], sw_page->page);
566 start = RCQ_BD(fp->rx_comp_cons - 10);
567 end = RCQ_BD(fp->rx_comp_cons + 503);
568 for (j = start; j < end; j++) {
569 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
571 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 j, cqe[0], cqe[1], cqe[2], cqe[3]);
576 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
577 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
578 " spq_prod_idx(%u)\n",
579 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
580 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
584 BNX2X_ERR("end crash dump -----------------\n");
587 static void bnx2x_int_enable(struct bnx2x *bp)
589 int port = BP_PORT(bp);
590 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591 u32 val = REG_RD(bp, addr);
592 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
593 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
596 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
597 HC_CONFIG_0_REG_INT_LINE_EN_0);
598 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
599 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
601 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
602 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
603 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
604 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
606 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
607 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
608 HC_CONFIG_0_REG_INT_LINE_EN_0 |
609 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
611 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
614 REG_WR(bp, addr, val);
616 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
619 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
620 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
622 REG_WR(bp, addr, val);
624 if (CHIP_IS_E1H(bp)) {
625 /* init leading/trailing edge */
627 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
629 /* enable nig and gpio3 attention */
634 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
635 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
639 static void bnx2x_int_disable(struct bnx2x *bp)
641 int port = BP_PORT(bp);
642 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
643 u32 val = REG_RD(bp, addr);
645 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
647 HC_CONFIG_0_REG_INT_LINE_EN_0 |
648 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
650 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
653 /* flush all outstanding writes */
656 REG_WR(bp, addr, val);
657 if (REG_RD(bp, addr) != val)
658 BNX2X_ERR("BUG! proper val not read from IGU!\n");
661 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
663 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
666 /* disable interrupt handling */
667 atomic_inc(&bp->intr_sem);
669 /* prevent the HW from sending interrupts */
670 bnx2x_int_disable(bp);
672 /* make sure all ISRs are done */
674 synchronize_irq(bp->msix_table[0].vector);
676 for_each_queue(bp, i)
677 synchronize_irq(bp->msix_table[i + offset].vector);
679 synchronize_irq(bp->pdev->irq);
681 /* make sure sp_task is not running */
682 cancel_delayed_work(&bp->sp_task);
683 flush_workqueue(bnx2x_wq);
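/* Quiesce sequence above: bump intr_sem so any ISR that still fires returns
 * early, mask the interrupt sources in the HC, synchronize_irq() on every
 * vector in use (the MSI-X table or the legacy pdev->irq), then cancel and
 * flush the slowpath work so sp_task cannot still be running.
 */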
689 * General service functions
692 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
693 u8 storm, u16 index, u8 op, u8 update)
695 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
696 COMMAND_REG_INT_ACK);
697 struct igu_ack_register igu_ack;
699 igu_ack.status_block_index = index;
700 igu_ack.sb_id_and_flags =
701 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
702 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
703 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
704 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
706 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
707 (*(u32 *)&igu_ack), hc_addr);
708 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
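/* The IGU ack above packs the status block id, storm id, "update index"
 * flag and interrupt mode into a single igu_ack_register word and writes it
 * to the per-port HC command register; this both reports the new SB index
 * and selects the interrupt mode (e.g. IGU_INT_DISABLE, as used by the
 * MSI-X fastpath handler further below).
 */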
711 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
713 struct host_status_block *fpsb = fp->status_blk;
716 barrier(); /* status block is written to by the chip */
717 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
718 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
721 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
722 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
728 static u16 bnx2x_ack_int(struct bnx2x *bp)
730 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
731 COMMAND_REG_SIMD_MASK);
732 u32 result = REG_RD(bp, hc_addr);
734 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
742 * fast path service functions
745 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
749 /* Tell compiler that status block fields can change */
751 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
752 return (fp->tx_pkt_cons != tx_cons_sb);
755 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
757 /* Tell compiler that consumer and producer can change */
759 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
763 /* free skb in the packet ring at pos idx
764 * return idx of last bd freed
766 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
769 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
770 struct eth_tx_bd *tx_bd;
771 struct sk_buff *skb = tx_buf->skb;
772 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
775 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
779 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
780 tx_bd = &fp->tx_desc_ring[bd_idx];
781 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
782 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
784 nbd = le16_to_cpu(tx_bd->nbd) - 1;
785 new_cons = nbd + tx_buf->first_bd;
786 #ifdef BNX2X_STOP_ON_ERROR
787 if (nbd > (MAX_SKB_FRAGS + 2)) {
788 BNX2X_ERR("BAD nbd!\n");
793 /* Skip a parse bd and the TSO split header bd
794 since they have no mapping */
796 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
798 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
799 ETH_TX_BD_FLAGS_TCP_CSUM |
800 ETH_TX_BD_FLAGS_SW_LSO)) {
802 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
803 tx_bd = &fp->tx_desc_ring[bd_idx];
804 /* is this a TSO split header bd? */
805 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
807 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
814 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
815 tx_bd = &fp->tx_desc_ring[bd_idx];
816 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
817 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
819 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
825 tx_buf->first_bd = 0;
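/* Note on the BD walk above: the first BD of a packet carries nbd (the
 * total number of BDs used); the optional parse BD and the TSO split-header
 * BD are skipped because they carry no DMA mapping, and each remaining
 * fragment BD is unmapped with pci_unmap_page().
 */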
831 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
837 barrier(); /* Tell compiler that prod and cons can change */
838 prod = fp->tx_bd_prod;
839 cons = fp->tx_bd_cons;
841 /* NUM_TX_RINGS = number of "next-page" entries;
842 it will be used as a threshold */
843 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
845 #ifdef BNX2X_STOP_ON_ERROR
847 WARN_ON(used > fp->bp->tx_ring_size);
848 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
851 return (s16)(fp->bp->tx_ring_size) - used;
854 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
856 struct bnx2x *bp = fp->bp;
857 struct netdev_queue *txq;
858 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
861 #ifdef BNX2X_STOP_ON_ERROR
862 if (unlikely(bp->panic))
866 txq = netdev_get_tx_queue(bp->dev, fp->index);
867 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
868 sw_cons = fp->tx_pkt_cons;
870 while (sw_cons != hw_cons) {
873 pkt_cons = TX_BD(sw_cons);
875 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
877 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
878 hw_cons, sw_cons, pkt_cons);
880 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
882 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
885 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
893 fp->tx_pkt_cons = sw_cons;
894 fp->tx_bd_cons = bd_cons;
896 /* Need to make the tx_bd_cons update visible to start_xmit()
897 * before checking for netif_tx_queue_stopped(). Without the
898 * memory barrier, there is a small possibility that start_xmit()
899 * will miss it and cause the queue to be stopped forever.
903 /* TBD need a thresh? */
904 if (unlikely(netif_tx_queue_stopped(txq))) {
906 __netif_tx_lock(txq, smp_processor_id());
908 if ((netif_tx_queue_stopped(txq)) &&
909 (bp->state == BNX2X_STATE_OPEN) &&
910 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
911 netif_tx_wake_queue(txq);
913 __netif_tx_unlock(txq);
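/* The stopped-queue test is repeated under __netif_tx_lock() on purpose:
 * start_xmit() may be stopping the queue concurrently, and only the locked
 * re-check (state still OPEN, enough free BDs) makes the wake decision safe.
 */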
918 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
919 union eth_rx_cqe *rr_cqe)
921 struct bnx2x *bp = fp->bp;
922 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
923 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
926 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
927 FP_IDX(fp), cid, command, bp->state,
928 rr_cqe->ramrod_cqe.ramrod_type);
933 switch (command | fp->state) {
934 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
935 BNX2X_FP_STATE_OPENING):
936 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
938 fp->state = BNX2X_FP_STATE_OPEN;
941 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
942 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
944 fp->state = BNX2X_FP_STATE_HALTED;
948 BNX2X_ERR("unexpected MC reply (%d) "
949 "fp->state is %x\n", command, fp->state);
952 mb(); /* force bnx2x_wait_ramrod() to see the change */
956 switch (command | bp->state) {
957 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
958 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
959 bp->state = BNX2X_STATE_OPEN;
962 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
963 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
964 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
965 fp->state = BNX2X_FP_STATE_HALTED;
968 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
969 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
970 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
974 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
975 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
976 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
977 bp->set_mac_pending = 0;
980 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
981 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
985 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
989 mb(); /* force bnx2x_wait_ramrod() to see the change */
992 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
993 struct bnx2x_fastpath *fp, u16 index)
995 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
996 struct page *page = sw_buf->page;
997 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
999 /* Skip "next page" elements */
1003 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1004 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1005 __free_pages(page, PAGES_PER_SGE_SHIFT);
1007 sw_buf->page = NULL;
1012 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1013 struct bnx2x_fastpath *fp, int last)
1017 for (i = 0; i < last; i++)
1018 bnx2x_free_rx_sge(bp, fp, i);
1021 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1022 struct bnx2x_fastpath *fp, u16 index)
1024 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1025 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1026 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1029 if (unlikely(page == NULL))
1032 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1033 PCI_DMA_FROMDEVICE);
1034 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1035 __free_pages(page, PAGES_PER_SGE_SHIFT);
1039 sw_buf->page = page;
1040 pci_unmap_addr_set(sw_buf, mapping, mapping);
1042 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1043 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1048 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1049 struct bnx2x_fastpath *fp, u16 index)
1051 struct sk_buff *skb;
1052 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1053 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1056 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1057 if (unlikely(skb == NULL))
1060 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1061 PCI_DMA_FROMDEVICE);
1062 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1068 pci_unmap_addr_set(rx_buf, mapping, mapping);
1070 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1071 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1076 /* note that we are not allocating a new skb,
1077 * we are just moving one from cons to prod;
1078 * we are not creating a new mapping,
1079 * so there is no need to check for dma_mapping_error().
1081 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1082 struct sk_buff *skb, u16 cons, u16 prod)
1084 struct bnx2x *bp = fp->bp;
1085 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1086 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1087 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1088 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1090 pci_dma_sync_single_for_device(bp->pdev,
1091 pci_unmap_addr(cons_rx_buf, mapping),
1092 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1094 prod_rx_buf->skb = cons_rx_buf->skb;
1095 pci_unmap_addr_set(prod_rx_buf, mapping,
1096 pci_unmap_addr(cons_rx_buf, mapping));
1097 *prod_bd = *cons_bd;
1100 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1103 u16 last_max = fp->last_max_sge;
1105 if (SUB_S16(idx, last_max) > 0)
1106 fp->last_max_sge = idx;
1109 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1113 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1114 int idx = RX_SGE_CNT * i - 1;
1116 for (j = 0; j < 2; j++) {
1117 SGE_MASK_CLEAR_BIT(fp, idx);
1123 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1124 struct eth_fast_path_rx_cqe *fp_cqe)
1126 struct bnx2x *bp = fp->bp;
1127 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1128 le16_to_cpu(fp_cqe->len_on_bd)) >>
1130 u16 last_max, last_elem, first_elem;
1137 /* First mark all used pages */
1138 for (i = 0; i < sge_len; i++)
1139 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1141 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1142 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1144 /* Here we assume that the last SGE index is the biggest */
1145 prefetch((void *)(fp->sge_mask));
1146 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1148 last_max = RX_SGE(fp->last_max_sge);
1149 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1150 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1152 /* If ring is not full */
1153 if (last_elem + 1 != first_elem)
1156 /* Now update the prod */
1157 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1158 if (likely(fp->sge_mask[i]))
1161 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1162 delta += RX_SGE_MASK_ELEM_SZ;
1166 fp->rx_sge_prod += delta;
1167 /* clear page-end entries */
1168 bnx2x_clear_sge_mask_next_elems(fp);
1171 DP(NETIF_MSG_RX_STATUS,
1172 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1173 fp->last_max_sge, fp->rx_sge_prod);
1176 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1178 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1179 memset(fp->sge_mask, 0xff,
1180 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1182 /* Clear the two last indices in the page to 1:
1183 these are the indices that correspond to the "next" element,
1184 hence will never be indicated and should be removed from
1185 the calculations. */
1186 bnx2x_clear_sge_mask_next_elems(fp);
1189 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1190 struct sk_buff *skb, u16 cons, u16 prod)
1192 struct bnx2x *bp = fp->bp;
1193 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1194 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1195 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1198 /* move empty skb from pool to prod and map it */
1199 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1200 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1201 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1202 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1204 /* move partial skb from cons to pool (don't unmap yet) */
1205 fp->tpa_pool[queue] = *cons_rx_buf;
1207 /* mark bin state as start - print error if current state != stop */
1208 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1209 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1211 fp->tpa_state[queue] = BNX2X_TPA_START;
1213 /* point prod_bd to new skb */
1214 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1215 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1217 #ifdef BNX2X_STOP_ON_ERROR
1218 fp->tpa_queue_used |= (1 << queue);
1219 #ifdef __powerpc64__
1220 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1221 #else
1222 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1223 #endif
1224 fp->tpa_queue_used);
1228 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1229 struct sk_buff *skb,
1230 struct eth_fast_path_rx_cqe *fp_cqe,
1233 struct sw_rx_page *rx_pg, old_rx_pg;
1234 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1235 u32 i, frag_len, frag_size, pages;
1239 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1240 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1242 /* This is needed in order to enable forwarding support */
1244 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1245 max(frag_size, (u32)len_on_bd));
1247 #ifdef BNX2X_STOP_ON_ERROR
1249 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1250 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1252 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1253 fp_cqe->pkt_len, len_on_bd);
1259 /* Run through the SGL and compose the fragmented skb */
1260 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1261 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1263 /* FW gives the indices of the SGE as if the ring is an array
1264 (meaning that "next" element will consume 2 indices) */
1265 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1266 rx_pg = &fp->rx_page_ring[sge_idx];
1269 /* If we fail to allocate a substitute page, we simply stop
1270 where we are and drop the whole packet */
1271 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1272 if (unlikely(err)) {
1273 fp->eth_q_stats.rx_skb_alloc_failed++;
1277 /* Unmap the page as we are going to pass it to the stack */
1278 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1279 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1281 /* Add one frag and update the appropriate fields in the skb */
1282 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1284 skb->data_len += frag_len;
1285 skb->truesize += frag_len;
1286 skb->len += frag_len;
1288 frag_size -= frag_len;
1294 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1295 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1298 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1299 struct sk_buff *skb = rx_buf->skb;
1301 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1303 /* Unmap skb in the pool anyway, as we are going to change
1304 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1306 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1307 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1309 if (likely(new_skb)) {
1310 /* fix ip xsum and give it to the stack */
1311 /* (no need to map the new skb) */
1314 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1315 PARSING_FLAGS_VLAN);
1316 int is_not_hwaccel_vlan_cqe =
1317 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1321 prefetch(((char *)(skb)) + 128);
1323 #ifdef BNX2X_STOP_ON_ERROR
1324 if (pad + len > bp->rx_buf_size) {
1325 BNX2X_ERR("skb_put is about to fail... "
1326 "pad %d len %d rx_buf_size %d\n",
1327 pad, len, bp->rx_buf_size);
1333 skb_reserve(skb, pad);
1336 skb->protocol = eth_type_trans(skb, bp->dev);
1337 skb->ip_summed = CHECKSUM_UNNECESSARY;
1342 iph = (struct iphdr *)skb->data;
1344 /* If there is no Rx VLAN offloading -
1345 take the VLAN tag into account */
1346 if (unlikely(is_not_hwaccel_vlan_cqe))
1347 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1350 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1353 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1354 &cqe->fast_path_cqe, cqe_idx)) {
1356 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1357 (!is_not_hwaccel_vlan_cqe))
1358 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1359 le16_to_cpu(cqe->fast_path_cqe.
1363 netif_receive_skb(skb);
1365 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1366 " - dropping packet!\n");
1371 /* put new skb in bin */
1372 fp->tpa_pool[queue].skb = new_skb;
1375 /* else drop the packet and keep the buffer in the bin */
1376 DP(NETIF_MSG_RX_STATUS,
1377 "Failed to allocate new skb - dropping packet!\n");
1378 fp->eth_q_stats.rx_skb_alloc_failed++;
1381 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1384 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1385 struct bnx2x_fastpath *fp,
1386 u16 bd_prod, u16 rx_comp_prod,
1389 struct ustorm_eth_rx_producers rx_prods = {0};
1392 /* Update producers */
1393 rx_prods.bd_prod = bd_prod;
1394 rx_prods.cqe_prod = rx_comp_prod;
1395 rx_prods.sge_prod = rx_sge_prod;
1398 * Make sure that the BD and SGE data is updated before updating the
1399 * producers since FW might read the BD/SGE right after the producer
1401 * This is only applicable for weak-ordered memory model archs such
1402 * as IA-64. The following barrier is also mandatory since FW
1403 * assumes BDs must have buffers.
1407 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1408 REG_WR(bp, BAR_USTRORM_INTMEM +
1409 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1410 ((u32 *)&rx_prods)[i]);
1412 mmiowb(); /* keep prod updates ordered */
1414 DP(NETIF_MSG_RX_STATUS,
1415 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1416 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1419 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1421 struct bnx2x *bp = fp->bp;
1422 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1423 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1426 #ifdef BNX2X_STOP_ON_ERROR
1427 if (unlikely(bp->panic))
1431 /* CQ "next element" is of the size of the regular element,
1432 that's why it's ok here */
1433 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1434 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1437 bd_cons = fp->rx_bd_cons;
1438 bd_prod = fp->rx_bd_prod;
1439 bd_prod_fw = bd_prod;
1440 sw_comp_cons = fp->rx_comp_cons;
1441 sw_comp_prod = fp->rx_comp_prod;
1443 /* Memory barrier necessary as speculative reads of the rx
1444 * buffer can be ahead of the index in the status block
1448 DP(NETIF_MSG_RX_STATUS,
1449 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1450 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1452 while (sw_comp_cons != hw_comp_cons) {
1453 struct sw_rx_bd *rx_buf = NULL;
1454 struct sk_buff *skb;
1455 union eth_rx_cqe *cqe;
1459 comp_ring_cons = RCQ_BD(sw_comp_cons);
1460 bd_prod = RX_BD(bd_prod);
1461 bd_cons = RX_BD(bd_cons);
1463 cqe = &fp->rx_comp_ring[comp_ring_cons];
1464 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1466 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1467 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1468 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1469 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1470 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1471 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1473 /* is this a slowpath msg? */
1474 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1475 bnx2x_sp_event(fp, cqe);
1478 /* this is an rx packet */
1480 rx_buf = &fp->rx_buf_ring[bd_cons];
1482 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1483 pad = cqe->fast_path_cqe.placement_offset;
1485 /* If CQE is marked both TPA_START and TPA_END
1486 it is a non-TPA CQE */
1487 if ((!fp->disable_tpa) &&
1488 (TPA_TYPE(cqe_fp_flags) !=
1489 (TPA_TYPE_START | TPA_TYPE_END))) {
1490 u16 queue = cqe->fast_path_cqe.queue_index;
1492 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1493 DP(NETIF_MSG_RX_STATUS,
1494 "calling tpa_start on queue %d\n",
1497 bnx2x_tpa_start(fp, queue, skb,
1502 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1503 DP(NETIF_MSG_RX_STATUS,
1504 "calling tpa_stop on queue %d\n",
1507 if (!BNX2X_RX_SUM_FIX(cqe))
1508 BNX2X_ERR("STOP on none TCP "
1511 /* This is the size of the linear data
1513 len = le16_to_cpu(cqe->fast_path_cqe.
1515 bnx2x_tpa_stop(bp, fp, queue, pad,
1516 len, cqe, comp_ring_cons);
1517 #ifdef BNX2X_STOP_ON_ERROR
1522 bnx2x_update_sge_prod(fp,
1523 &cqe->fast_path_cqe);
1528 pci_dma_sync_single_for_device(bp->pdev,
1529 pci_unmap_addr(rx_buf, mapping),
1530 pad + RX_COPY_THRESH,
1531 PCI_DMA_FROMDEVICE);
1533 prefetch(((char *)(skb)) + 128);
1535 /* is this an error packet? */
1536 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1537 DP(NETIF_MSG_RX_ERR,
1538 "ERROR flags %x rx packet %u\n",
1539 cqe_fp_flags, sw_comp_cons);
1540 fp->eth_q_stats.rx_err_discard_pkt++;
1544 /* Since we don't have a jumbo ring
1545 * copy small packets if mtu > 1500
1547 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1548 (len <= RX_COPY_THRESH)) {
1549 struct sk_buff *new_skb;
1551 new_skb = netdev_alloc_skb(bp->dev,
1553 if (new_skb == NULL) {
1554 DP(NETIF_MSG_RX_ERR,
1555 "ERROR packet dropped "
1556 "because of alloc failure\n");
1557 fp->eth_q_stats.rx_skb_alloc_failed++;
1562 skb_copy_from_linear_data_offset(skb, pad,
1563 new_skb->data + pad, len);
1564 skb_reserve(new_skb, pad);
1565 skb_put(new_skb, len);
1567 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1571 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1572 pci_unmap_single(bp->pdev,
1573 pci_unmap_addr(rx_buf, mapping),
1575 PCI_DMA_FROMDEVICE);
1576 skb_reserve(skb, pad);
1580 DP(NETIF_MSG_RX_ERR,
1581 "ERROR packet dropped because "
1582 "of alloc failure\n");
1583 fp->eth_q_stats.rx_skb_alloc_failed++;
1585 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1589 skb->protocol = eth_type_trans(skb, bp->dev);
1591 skb->ip_summed = CHECKSUM_NONE;
1593 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1594 skb->ip_summed = CHECKSUM_UNNECESSARY;
1596 fp->eth_q_stats.hw_csum_err++;
1600 skb_record_rx_queue(skb, fp->index);
1602 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1603 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1604 PARSING_FLAGS_VLAN))
1605 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1606 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1609 netif_receive_skb(skb);
1615 bd_cons = NEXT_RX_IDX(bd_cons);
1616 bd_prod = NEXT_RX_IDX(bd_prod);
1617 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1620 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1621 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1623 if (rx_pkt == budget)
1627 fp->rx_bd_cons = bd_cons;
1628 fp->rx_bd_prod = bd_prod_fw;
1629 fp->rx_comp_cons = sw_comp_cons;
1630 fp->rx_comp_prod = sw_comp_prod;
1632 /* Update producers */
1633 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1636 fp->rx_pkt += rx_pkt;
1642 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1644 struct bnx2x_fastpath *fp = fp_cookie;
1645 struct bnx2x *bp = fp->bp;
1646 int index = FP_IDX(fp);
1648 /* Return here if interrupt is disabled */
1649 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1650 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1654 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1655 index, FP_SB_ID(fp));
1656 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1658 #ifdef BNX2X_STOP_ON_ERROR
1659 if (unlikely(bp->panic))
1663 prefetch(fp->rx_cons_sb);
1664 prefetch(fp->tx_cons_sb);
1665 prefetch(&fp->status_blk->c_status_block.status_block_index);
1666 prefetch(&fp->status_blk->u_status_block.status_block_index);
1668 napi_schedule(&bnx2x_fp(bp, index, napi));
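/* The MSI-X fastpath ISR does almost nothing in hard-irq context: it acks
 * the status block with IGU_INT_DISABLE (so no further interrupt fires for
 * this SB), prefetches the hot status block fields, and defers all rx/tx
 * processing to NAPI via napi_schedule().
 */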
1673 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1675 struct bnx2x *bp = netdev_priv(dev_instance);
1676 u16 status = bnx2x_ack_int(bp);
1679 /* Return here if interrupt is shared and it's not for us */
1680 if (unlikely(status == 0)) {
1681 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1684 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1686 /* Return here if interrupt is disabled */
1687 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1688 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1692 #ifdef BNX2X_STOP_ON_ERROR
1693 if (unlikely(bp->panic))
1697 mask = 0x2 << bp->fp[0].sb_id;
1698 if (status & mask) {
1699 struct bnx2x_fastpath *fp = &bp->fp[0];
1701 prefetch(fp->rx_cons_sb);
1702 prefetch(fp->tx_cons_sb);
1703 prefetch(&fp->status_blk->c_status_block.status_block_index);
1704 prefetch(&fp->status_blk->u_status_block.status_block_index);
1706 napi_schedule(&bnx2x_fp(bp, 0, napi));
1712 if (unlikely(status & 0x1)) {
1713 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
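/* In the legacy INTx/MSI handler, bit (0x2 << sb_id) of the SIMD mask read
 * by bnx2x_ack_int() selects the fastpath status block (handled by NAPI
 * above), while bit 0x1 flags a slowpath/default SB event, which is punted
 * to sp_task on bnx2x_wq.
 */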
1721 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1727 /* end of fast path */
1729 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1734 * General service functions
1737 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1740 u32 resource_bit = (1 << resource);
1741 int func = BP_FUNC(bp);
1742 u32 hw_lock_control_reg;
1745 /* Validating that the resource is within range */
1746 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1748 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1749 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1754 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1756 hw_lock_control_reg =
1757 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1760 /* Validating that the resource is not already taken */
1761 lock_status = REG_RD(bp, hw_lock_control_reg);
1762 if (lock_status & resource_bit) {
1763 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1764 lock_status, resource_bit);
1768 /* Try for 5 seconds, polling every 5ms */
1769 for (cnt = 0; cnt < 1000; cnt++) {
1770 /* Try to acquire the lock */
1771 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1772 lock_status = REG_RD(bp, hw_lock_control_reg);
1773 if (lock_status & resource_bit)
1778 DP(NETIF_MSG_HW, "Timeout\n");
1782 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1785 u32 resource_bit = (1 << resource);
1786 int func = BP_FUNC(bp);
1787 u32 hw_lock_control_reg;
1789 /* Validating that the resource is within range */
1790 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1792 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1798 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1800 hw_lock_control_reg =
1801 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1804 /* Validating that the resource is currently taken */
1805 lock_status = REG_RD(bp, hw_lock_control_reg);
1806 if (!(lock_status & resource_bit)) {
1807 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1808 lock_status, resource_bit);
1812 REG_WR(bp, hw_lock_control_reg, resource_bit);
1816 /* HW Lock for shared dual port PHYs */
1817 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1819 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1821 mutex_lock(&bp->port.phy_mutex);
1823 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1824 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1825 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1828 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1830 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1832 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1833 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1834 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1836 mutex_unlock(&bp->port.phy_mutex);
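/* phy_mutex serializes PHY access within this function; the additional HW
 * lock (HW_LOCK_RESOURCE_8072_MDIO) is only taken for the BCM8072/8073
 * external PHYs, whose MDIO bus is shared between the two ports.
 */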
1839 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1841 /* The GPIO should be swapped if swap register is set and active */
1842 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1843 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1844 int gpio_shift = gpio_num +
1845 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1846 u32 gpio_mask = (1 << gpio_shift);
1850 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1851 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1855 /* read GPIO value */
1856 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1858 /* get the requested pin value */
1859 if ((gpio_reg & gpio_mask) == gpio_mask)
1864 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1869 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1871 /* The GPIO should be swapped if swap register is set and active */
1872 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1873 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1874 int gpio_shift = gpio_num +
1875 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1876 u32 gpio_mask = (1 << gpio_shift);
1879 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1880 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1884 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1885 /* read GPIO and mask except the float bits */
1886 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1889 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1890 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1891 gpio_num, gpio_shift);
1892 /* clear FLOAT and set CLR */
1893 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1894 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1897 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1898 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1899 gpio_num, gpio_shift);
1900 /* clear FLOAT and set SET */
1901 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1902 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1905 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1906 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1907 gpio_num, gpio_shift);
1909 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1916 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1917 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1922 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1924 /* The GPIO should be swapped if swap register is set and active */
1925 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1926 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1927 int gpio_shift = gpio_num +
1928 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1929 u32 gpio_mask = (1 << gpio_shift);
1932 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1933 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1937 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1939 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1942 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1943 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1944 "output low\n", gpio_num, gpio_shift);
1945 /* clear SET and set CLR */
1946 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1947 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1950 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1951 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1952 "output high\n", gpio_num, gpio_shift);
1953 /* clear CLR and set SET */
1954 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1955 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1962 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1963 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1968 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1970 u32 spio_mask = (1 << spio_num);
1973 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1974 (spio_num > MISC_REGISTERS_SPIO_7)) {
1975 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1979 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1980 /* read SPIO and mask except the float bits */
1981 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1984 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1985 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1986 /* clear FLOAT and set CLR */
1987 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1988 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1991 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1992 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1993 /* clear FLOAT and set SET */
1994 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1995 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1998 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1999 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2001 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2008 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2009 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2014 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2016 switch (bp->link_vars.ieee_fc &
2017 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2018 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2019 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2022 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2023 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2026 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2027 bp->port.advertising |= ADVERTISED_Asym_Pause;
2030 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2036 static void bnx2x_link_report(struct bnx2x *bp)
2038 if (bp->link_vars.link_up) {
2039 if (bp->state == BNX2X_STATE_OPEN)
2040 netif_carrier_on(bp->dev);
2041 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2043 printk("%d Mbps ", bp->link_vars.line_speed);
2045 if (bp->link_vars.duplex == DUPLEX_FULL)
2046 printk("full duplex");
2048 printk("half duplex");
2050 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2051 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2052 printk(", receive ");
2053 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2054 printk("& transmit ");
2056 printk(", transmit ");
2058 printk("flow control ON");
2062 } else { /* link_down */
2063 netif_carrier_off(bp->dev);
2064 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2068 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
2070 if (!BP_NOMCP(bp)) {
2073 /* Initialize link parameters structure variables */
2074 /* It is recommended to turn off RX FC for jumbo frames
2075 for better performance */
2077 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2078 else if (bp->dev->mtu > 5000)
2079 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2081 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2083 bnx2x_acquire_phy_lock(bp);
2084 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2085 bnx2x_release_phy_lock(bp);
2087 bnx2x_calc_fc_adv(bp);
2089 if (bp->link_vars.link_up)
2090 bnx2x_link_report(bp);
2095 BNX2X_ERR("Bootcode is missing -not initializing link\n");
2099 static void bnx2x_link_set(struct bnx2x *bp)
2101 if (!BP_NOMCP(bp)) {
2102 bnx2x_acquire_phy_lock(bp);
2103 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2104 bnx2x_release_phy_lock(bp);
2106 bnx2x_calc_fc_adv(bp);
2108 BNX2X_ERR("Bootcode is missing -not setting link\n");
2111 static void bnx2x__link_reset(struct bnx2x *bp)
2113 if (!BP_NOMCP(bp)) {
2114 bnx2x_acquire_phy_lock(bp);
2115 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2116 bnx2x_release_phy_lock(bp);
2118 BNX2X_ERR("Bootcode is missing -not resetting link\n");
2121 static u8 bnx2x_link_test(struct bnx2x *bp)
2125 bnx2x_acquire_phy_lock(bp);
2126 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2127 bnx2x_release_phy_lock(bp);
2132 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2134 u32 r_param = bp->link_vars.line_speed / 8;
2135 u32 fair_periodic_timeout_usec;
2138 memset(&(bp->cmng.rs_vars), 0,
2139 sizeof(struct rate_shaping_vars_per_port));
2140 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2142 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2143 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2145 /* this is the threshold below which no timer arming will occur;
2146 the 1.25 coefficient makes the threshold a little bigger
2147 than the real time, to compensate for timer inaccuracy */
2148 bp->cmng.rs_vars.rs_threshold =
2149 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2151 /* resolution of fairness timer */
2152 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2153 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2154 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2156 /* this is the threshold below which we won't arm the timer anymore */
2157 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2159 /* we multiply by 1e3/8 to get bytes/msec.
2160 We don't want the credits to exceed the
2161 t_fair*FAIR_MEM credit (the algorithm resolution) */
2162 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2163 /* since each tick is 4 usec */
2164 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
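/* Rough worked example (assuming line_speed is in Mbps, as elsewhere in
 * this driver): at 10G line_speed = 10000, so r_param = 10000/8 = 1250
 * bytes/usec and, per the comment above, t_fair works out to 1000 usec;
 * all timeouts are then converted to 4-usec SDM ticks by the final "/ 4".
 */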
2167 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2169 struct rate_shaping_vars_per_vn m_rs_vn;
2170 struct fairness_vars_per_vn m_fair_vn;
2171 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2172 u16 vn_min_rate, vn_max_rate;
2175 /* If function is hidden - set min and max to zeroes */
2176 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2181 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2182 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2183 /* If fairness is enabled (not all min rates are zeroes) and
2184 if current min rate is zero - set it to 1.
2185 This is a requirement of the algorithm. */
2186 if (bp->vn_weight_sum && (vn_min_rate == 0))
2187 vn_min_rate = DEF_MIN_RATE;
2188 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2189 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2193 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2194 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2196 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2197 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2199 /* global vn counter - maximal Mbps for this vn */
2200 m_rs_vn.vn_counter.rate = vn_max_rate;
2202 /* quota - number of bytes transmitted in this period */
2203 m_rs_vn.vn_counter.quota =
2204 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2206 if (bp->vn_weight_sum) {
2207 /* credit for each period of the fairness algorithm:
2208 number of bytes in T_FAIR (the vns share the port rate).
2209 vn_weight_sum should not be larger than 10000, thus
2210 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2211 than zero */
2212 m_fair_vn.vn_credit_delta =
2213 max((u32)(vn_min_rate * (T_FAIR_COEF /
2214 (8 * bp->vn_weight_sum))),
2215 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2216 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2217 m_fair_vn.vn_credit_delta);
2220 /* Store it to internal memory */
2221 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2222 REG_WR(bp, BAR_XSTRORM_INTMEM +
2223 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2224 ((u32 *)(&m_rs_vn))[i]);
2226 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2227 REG_WR(bp, BAR_XSTRORM_INTMEM +
2228 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2229 ((u32 *)(&m_fair_vn))[i]);
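/*
 * Editor's note: the two loops above copy the per-VN structures into XSTORM
 * internal memory one 32-bit word at a time.  A hypothetical helper (not in
 * the driver) expressing the same pattern would look like:
 *
 *	static void bnx2x_wr_storm_blk(struct bnx2x *bp, u32 addr,
 *				       const void *src, size_t size)
 *	{
 *		int i;
 *
 *		for (i = 0; i < size / 4; i++)
 *			REG_WR(bp, addr + i * 4, ((const u32 *)src)[i]);
 *	}
 */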
2233 /* This function is called upon link interrupt */
2234 static void bnx2x_link_attn(struct bnx2x *bp)
2236 /* Make sure that we are synced with the current statistics */
2237 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2239 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2241 if (bp->link_vars.link_up) {
2243 /* dropless flow control */
2244 if (CHIP_IS_E1H(bp)) {
2245 int port = BP_PORT(bp);
2246 u32 pause_enabled = 0;
2248 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2251 REG_WR(bp, BAR_USTRORM_INTMEM +
2252 USTORM_PAUSE_ENABLED_OFFSET(port),
2256 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2257 struct host_port_stats *pstats;
2259 pstats = bnx2x_sp(bp, port_stats);
2260 /* reset old bmac stats */
2261 memset(&(pstats->mac_stx[0]), 0,
2262 sizeof(struct mac_stx));
2264 if ((bp->state == BNX2X_STATE_OPEN) ||
2265 (bp->state == BNX2X_STATE_DISABLED))
2266 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2269 /* indicate link status */
2270 bnx2x_link_report(bp);
2273 int port = BP_PORT(bp);
2277 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2278 if (vn == BP_E1HVN(bp))
2281 func = ((vn << 1) | port);
2283 /* Set the attention towards other drivers
2284 concerning that function */
2285 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2286 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2289 if (bp->link_vars.link_up) {
2292 /* Init rate shaping and fairness contexts */
2293 bnx2x_init_port_minmax(bp);
2295 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2296 bnx2x_init_vn_minmax(bp, 2*vn + port);
2298 /* Store it to internal memory */
2300 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2301 REG_WR(bp, BAR_XSTRORM_INTMEM +
2302 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2303 ((u32 *)(&bp->cmng))[i]);
2308 static void bnx2x__link_status_update(struct bnx2x *bp)
2310 if (bp->state != BNX2X_STATE_OPEN)
2313 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2315 if (bp->link_vars.link_up)
2316 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2318 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2320 /* indicate link status */
2321 bnx2x_link_report(bp);
2324 static void bnx2x_pmf_update(struct bnx2x *bp)
2326 int port = BP_PORT(bp);
2330 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2332 /* enable nig attention */
2333 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2334 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2335 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2337 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2345 * General service functions
2348 /* the slow path queue is odd since completions arrive on the fastpath ring */
2349 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2350 u32 data_hi, u32 data_lo, int common)
2352 int func = BP_FUNC(bp);
2354 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2355 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2356 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2357 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2358 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2360 #ifdef BNX2X_STOP_ON_ERROR
2361 if (unlikely(bp->panic))
2365 spin_lock_bh(&bp->spq_lock);
2367 if (!bp->spq_left) {
2368 BNX2X_ERR("BUG! SPQ ring full!\n");
2369 spin_unlock_bh(&bp->spq_lock);
2374 /* CID needs port number to be encoded in it */
2375 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2376 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2378 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2380 bp->spq_prod_bd->hdr.type |=
2381 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2383 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2384 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2388 if (bp->spq_prod_bd == bp->spq_last_bd) {
2389 bp->spq_prod_bd = bp->spq;
2390 bp->spq_prod_idx = 0;
2391 DP(NETIF_MSG_TIMER, "end of spq\n");
2398 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2401 spin_unlock_bh(&bp->spq_lock);
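/*
 * Editor's note: a typical caller splits its 64-bit ramrod data into hi/lo
 * dwords; e.g. the statistics query later in this file is posted roughly as
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *		      ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);
 *
 * The 'common' argument sets the COMMON_RAMROD bit in the header for ramrods
 * that are not tied to a particular connection.
 */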
2405 /* acquire split MCP access lock register */
2406 static int bnx2x_acquire_alr(struct bnx2x *bp)
2413 for (j = 0; j < i*10; j++) {
2415 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2416 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2417 if (val & (1L << 31))
2422 if (!(val & (1L << 31))) {
2423 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2430 /* release split MCP access lock register */
2431 static void bnx2x_release_alr(struct bnx2x *bp)
2435 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
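/*
 * Editor's note: both helpers use the MCP scratch register at
 * GRCBASE_MCP + 0x9c as a software lock.  Bit 31 reading back as set means
 * the driver owns the lock (acquire polls for it above); release presumably
 * clears the register so the MCP or the other port's driver can take it.
 */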
2438 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2440 struct host_def_status_block *def_sb = bp->def_status_blk;
2443 barrier(); /* status block is written to by the chip */
2444 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2445 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2448 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2449 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2452 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2453 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2456 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2457 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2460 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2461 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2468 * slow path service functions
2471 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2473 int port = BP_PORT(bp);
2474 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2475 COMMAND_REG_ATTN_BITS_SET);
2476 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2477 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2478 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2479 NIG_REG_MASK_INTERRUPT_PORT0;
2483 if (bp->attn_state & asserted)
2484 BNX2X_ERR("IGU ERROR\n");
2486 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2487 aeu_mask = REG_RD(bp, aeu_addr);
2489 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2490 aeu_mask, asserted);
2491 aeu_mask &= ~(asserted & 0xff);
2492 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2494 REG_WR(bp, aeu_addr, aeu_mask);
2495 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2497 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2498 bp->attn_state |= asserted;
2499 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2501 if (asserted & ATTN_HARD_WIRED_MASK) {
2502 if (asserted & ATTN_NIG_FOR_FUNC) {
2504 bnx2x_acquire_phy_lock(bp);
2506 /* save nig interrupt mask */
2507 nig_mask = REG_RD(bp, nig_int_mask_addr);
2508 REG_WR(bp, nig_int_mask_addr, 0);
2510 bnx2x_link_attn(bp);
2512 /* handle unicore attn? */
2514 if (asserted & ATTN_SW_TIMER_4_FUNC)
2515 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2517 if (asserted & GPIO_2_FUNC)
2518 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2520 if (asserted & GPIO_3_FUNC)
2521 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2523 if (asserted & GPIO_4_FUNC)
2524 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2527 if (asserted & ATTN_GENERAL_ATTN_1) {
2528 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2529 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2531 if (asserted & ATTN_GENERAL_ATTN_2) {
2532 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2533 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2535 if (asserted & ATTN_GENERAL_ATTN_3) {
2536 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2537 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2540 if (asserted & ATTN_GENERAL_ATTN_4) {
2541 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2542 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2544 if (asserted & ATTN_GENERAL_ATTN_5) {
2545 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2546 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2548 if (asserted & ATTN_GENERAL_ATTN_6) {
2549 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2550 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2554 } /* if hardwired */
2556 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2558 REG_WR(bp, hc_addr, asserted);
2560 /* now set back the mask */
2561 if (asserted & ATTN_NIG_FOR_FUNC) {
2562 REG_WR(bp, nig_int_mask_addr, nig_mask);
2563 bnx2x_release_phy_lock(bp);
2567 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2569 int port = BP_PORT(bp);
2573 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2574 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2576 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2578 val = REG_RD(bp, reg_offset);
2579 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2580 REG_WR(bp, reg_offset, val);
2582 BNX2X_ERR("SPIO5 hw attention\n");
2584 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2585 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2586 /* Fan failure attention */
2588 /* The PHY reset is controlled by GPIO 1 */
2589 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2590 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2591 /* Low power mode is controlled by GPIO 2 */
2592 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2593 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2594 /* mark the failure */
2595 bp->link_params.ext_phy_config &=
2596 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2597 bp->link_params.ext_phy_config |=
2598 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2600 dev_info.port_hw_config[port].
2601 external_phy_config,
2602 bp->link_params.ext_phy_config);
2603 /* log the failure */
2604 printk(KERN_ERR PFX "Fan Failure on Network"
2605 " Controller %s has caused the driver to"
2606 " shutdown the card to prevent permanent"
2607 " damage. Please contact Dell Support for"
2608 " assistance\n", bp->dev->name);
2616 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2618 val = REG_RD(bp, reg_offset);
2619 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2620 REG_WR(bp, reg_offset, val);
2622 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2623 (attn & HW_INTERRUT_ASSERT_SET_0));
2628 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2632 if (attn & BNX2X_DOORQ_ASSERT) {
2634 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2635 BNX2X_ERR("DB hw attention 0x%x\n", val);
2636 /* DORQ discard attention */
2638 BNX2X_ERR("FATAL error from DORQ\n");
2641 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2643 int port = BP_PORT(bp);
2646 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2647 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2649 val = REG_RD(bp, reg_offset);
2650 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2651 REG_WR(bp, reg_offset, val);
2653 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2654 (attn & HW_INTERRUT_ASSERT_SET_1));
2659 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2663 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2665 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2666 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2667 /* CFC error attention */
2669 BNX2X_ERR("FATAL error from CFC\n");
2672 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2674 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2675 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2676 /* RQ_USDMDP_FIFO_OVERFLOW */
2678 BNX2X_ERR("FATAL error from PXP\n");
2681 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2683 int port = BP_PORT(bp);
2686 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2687 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2689 val = REG_RD(bp, reg_offset);
2690 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2691 REG_WR(bp, reg_offset, val);
2693 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2694 (attn & HW_INTERRUT_ASSERT_SET_2));
2699 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2703 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2705 if (attn & BNX2X_PMF_LINK_ASSERT) {
2706 int func = BP_FUNC(bp);
2708 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2709 bnx2x__link_status_update(bp);
2710 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2712 bnx2x_pmf_update(bp);
2714 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2716 BNX2X_ERR("MC assert!\n");
2717 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2718 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2719 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2720 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2723 } else if (attn & BNX2X_MCP_ASSERT) {
2725 BNX2X_ERR("MCP assert!\n");
2726 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2730 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2733 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2734 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2735 if (attn & BNX2X_GRC_TIMEOUT) {
2736 val = CHIP_IS_E1H(bp) ?
2737 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2738 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2740 if (attn & BNX2X_GRC_RSV) {
2741 val = CHIP_IS_E1H(bp) ?
2742 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2743 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2745 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2749 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2751 struct attn_route attn;
2752 struct attn_route group_mask;
2753 int port = BP_PORT(bp);
2759 /* need to take HW lock because MCP or other port might also
2760 try to handle this event */
2761 bnx2x_acquire_alr(bp);
2763 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2764 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2765 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2766 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2767 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2768 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2770 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2771 if (deasserted & (1 << index)) {
2772 group_mask = bp->attn_group[index];
2774 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2775 index, group_mask.sig[0], group_mask.sig[1],
2776 group_mask.sig[2], group_mask.sig[3]);
2778 bnx2x_attn_int_deasserted3(bp,
2779 attn.sig[3] & group_mask.sig[3]);
2780 bnx2x_attn_int_deasserted1(bp,
2781 attn.sig[1] & group_mask.sig[1]);
2782 bnx2x_attn_int_deasserted2(bp,
2783 attn.sig[2] & group_mask.sig[2]);
2784 bnx2x_attn_int_deasserted0(bp,
2785 attn.sig[0] & group_mask.sig[0]);
2787 if ((attn.sig[0] & group_mask.sig[0] &
2788 HW_PRTY_ASSERT_SET_0) ||
2789 (attn.sig[1] & group_mask.sig[1] &
2790 HW_PRTY_ASSERT_SET_1) ||
2791 (attn.sig[2] & group_mask.sig[2] &
2792 HW_PRTY_ASSERT_SET_2))
2793 BNX2X_ERR("FATAL HW block parity attention\n");
2797 bnx2x_release_alr(bp);
2799 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2802 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2804 REG_WR(bp, reg_addr, val);
2806 if (~bp->attn_state & deasserted)
2807 BNX2X_ERR("IGU ERROR\n");
2809 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2810 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2812 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2813 aeu_mask = REG_RD(bp, reg_addr);
2815 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2816 aeu_mask, deasserted);
2817 aeu_mask |= (deasserted & 0xff);
2818 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2820 REG_WR(bp, reg_addr, aeu_mask);
2821 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2823 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2824 bp->attn_state &= ~deasserted;
2825 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2828 static void bnx2x_attn_int(struct bnx2x *bp)
2830 /* read local copy of bits */
2831 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2833 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2835 u32 attn_state = bp->attn_state;
2837 /* look for changed bits */
2838 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2839 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2842 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2843 attn_bits, attn_ack, asserted, deasserted);
2845 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2846 BNX2X_ERR("BAD attention state\n");
2848 /* handle bits that were raised */
2850 bnx2x_attn_int_asserted(bp, asserted);
2853 bnx2x_attn_int_deasserted(bp, deasserted);
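/*
 * Editor's note -- a small worked example of the masks above: with
 * attn_bits = 0101b, attn_ack = 0011b and attn_state = 0011b,
 * asserted = 0100b (a line that was just raised and not yet acknowledged)
 * and deasserted = 0010b (a line that was acknowledged but has now dropped).
 */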
2856 static void bnx2x_sp_task(struct work_struct *work)
2858 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2862 /* Return here if interrupt is disabled */
2863 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2864 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2868 status = bnx2x_update_dsb_idx(bp);
2869 /* if (status == 0) */
2870 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2872 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2878 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2880 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2882 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2884 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2886 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2891 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2893 struct net_device *dev = dev_instance;
2894 struct bnx2x *bp = netdev_priv(dev);
2896 /* Return here if interrupt is disabled */
2897 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2898 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2902 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2904 #ifdef BNX2X_STOP_ON_ERROR
2905 if (unlikely(bp->panic))
2909 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2914 /* end of slow path */
2918 /****************************************************************************
2920 ****************************************************************************/
2922 /* sum[hi:lo] += add[hi:lo] */
2923 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2925 s_lo += a_lo; \
2926 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2929 /* difference = minuend - subtrahend */
2930 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2932 if (m_lo < s_lo) { \
2934 d_hi = m_hi - s_hi; \
2936 /* we can 'loan' 1 */ \
2938 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2940 /* m_hi <= s_hi */ \
2945 /* m_lo >= s_lo */ \
2946 if (m_hi < s_hi) { \
2950 /* m_hi >= s_hi */ \
2951 d_hi = m_hi - s_hi; \
2952 d_lo = m_lo - s_lo; \
2957 #define UPDATE_STAT64(s, t) \
2959 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2960 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2961 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2962 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2963 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2964 pstats->mac_stx[1].t##_lo, diff.lo); \
2967 #define UPDATE_STAT64_NIG(s, t) \
2969 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2970 diff.lo, new->s##_lo, old->s##_lo); \
2971 ADD_64(estats->t##_hi, diff.hi, \
2972 estats->t##_lo, diff.lo); \
2975 /* sum[hi:lo] += add */
2976 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2978 s_lo += a; \
2979 s_hi += (s_lo < a) ? 1 : 0; \
2982 #define UPDATE_EXTEND_STAT(s) \
2984 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2985 pstats->mac_stx[1].s##_lo, \
2989 #define UPDATE_EXTEND_TSTAT(s, t) \
2991 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2992 old_tclient->s = le32_to_cpu(tclient->s); \
2993 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2996 #define UPDATE_EXTEND_USTAT(s, t) \
2998 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
2999 old_uclient->s = uclient->s; \
3000 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3003 #define UPDATE_EXTEND_XSTAT(s, t) \
3005 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3006 old_xclient->s = le32_to_cpu(xclient->s); \
3007 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3010 /* minuend -= subtrahend */
3011 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3013 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3016 /* minuend[hi:lo] -= subtrahend */
3017 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3019 SUB_64(m_hi, 0, m_lo, s); \
3022 #define SUB_EXTEND_USTAT(s, t) \
3024 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3025 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
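/*
 * Editor's note -- worked example of the split 64-bit arithmetic above
 * (illustrative only, not part of the driver).  A statistic whose value is
 * 0x00000001fffffff0 is kept as hi = 0x00000001, lo = 0xfffffff0.  Adding
 * 0x20 with ADD_64 wraps lo to 0x00000010; because the new lo is smaller
 * than the addend, a carry of 1 is added to hi, giving hi = 0x00000002 and
 * lo = 0x00000010.  DIFF_64 does the inverse, borrowing from hi when the
 * low dwords would underflow, and the UPDATE_EXTEND_* macros use these
 * helpers to fold the firmware's wrapping 32-bit counters into 64-bit
 * host totals.
 */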
3029 * General service functions
3032 static inline long bnx2x_hilo(u32 *hiref)
3034 u32 lo = *(hiref + 1);
3035 #if (BITS_PER_LONG == 64)
3038 return HILO_U64(hi, lo);
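/*
 * Editor's note: every 64-bit statistic in this driver is stored as a pair
 * of u32s, the _hi dword immediately followed by the _lo dword, which is why
 * bnx2x_hilo() reads the low half from hiref + 1.  On 64-bit builds the two
 * halves are combined via HILO_U64(); in the full source the 32-bit branch
 * simply returns the low half.
 */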
3045 * Init service functions
3048 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3050 if (!bp->stats_pending) {
3051 struct eth_query_ramrod_data ramrod_data = {0};
3054 ramrod_data.drv_counter = bp->stats_counter++;
3055 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3056 for_each_queue(bp, i)
3057 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3059 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3060 ((u32 *)&ramrod_data)[1],
3061 ((u32 *)&ramrod_data)[0], 0);
3063 /* stats ramrod has its own slot on the spq */
3065 bp->stats_pending = 1;
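/*
 * Editor's note: each bit set in ctr_id_vector above selects one client
 * (one queue, identified by its cl_id) whose statistics the firmware should
 * report in response to this query ramrod.
 */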
3070 static void bnx2x_stats_init(struct bnx2x *bp)
3072 int port = BP_PORT(bp);
3075 bp->stats_pending = 0;
3076 bp->executer_idx = 0;
3077 bp->stats_counter = 0;
3081 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3083 bp->port.port_stx = 0;
3084 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3086 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3087 bp->port.old_nig_stats.brb_discard =
3088 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3089 bp->port.old_nig_stats.brb_truncate =
3090 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3091 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3092 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3093 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3094 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3096 /* function stats */
3097 for_each_queue(bp, i) {
3098 struct bnx2x_fastpath *fp = &bp->fp[i];
3100 memset(&fp->old_tclient, 0,
3101 sizeof(struct tstorm_per_client_stats));
3102 memset(&fp->old_uclient, 0,
3103 sizeof(struct ustorm_per_client_stats));
3104 memset(&fp->old_xclient, 0,
3105 sizeof(struct xstorm_per_client_stats));
3106 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3109 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3110 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3112 bp->stats_state = STATS_STATE_DISABLED;
3113 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3114 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3117 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3119 struct dmae_command *dmae = &bp->stats_dmae;
3120 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3122 *stats_comp = DMAE_COMP_VAL;
3123 if (CHIP_REV_IS_SLOW(bp))
3127 if (bp->executer_idx) {
3128 int loader_idx = PMF_DMAE_C(bp);
3130 memset(dmae, 0, sizeof(struct dmae_command));
3132 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3133 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3134 DMAE_CMD_DST_RESET |
3136 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3138 DMAE_CMD_ENDIANITY_DW_SWAP |
3140 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3142 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3143 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3144 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3145 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3146 sizeof(struct dmae_command) *
3147 (loader_idx + 1)) >> 2;
3148 dmae->dst_addr_hi = 0;
3149 dmae->len = sizeof(struct dmae_command) >> 2;
3152 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3153 dmae->comp_addr_hi = 0;
3157 bnx2x_post_dmae(bp, dmae, loader_idx);
3159 } else if (bp->func_stx) {
3161 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3165 static int bnx2x_stats_comp(struct bnx2x *bp)
3167 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3171 while (*stats_comp != DMAE_COMP_VAL) {
3173 BNX2X_ERR("timeout waiting for stats finished\n");
3183 * Statistics service functions
3186 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3188 struct dmae_command *dmae;
3190 int loader_idx = PMF_DMAE_C(bp);
3191 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3194 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3195 BNX2X_ERR("BUG!\n");
3199 bp->executer_idx = 0;
3201 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3203 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3205 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3207 DMAE_CMD_ENDIANITY_DW_SWAP |
3209 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3210 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3212 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3213 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3214 dmae->src_addr_lo = bp->port.port_stx >> 2;
3215 dmae->src_addr_hi = 0;
3216 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3217 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3218 dmae->len = DMAE_LEN32_RD_MAX;
3219 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3220 dmae->comp_addr_hi = 0;
3223 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3224 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3225 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3226 dmae->src_addr_hi = 0;
3227 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3228 DMAE_LEN32_RD_MAX * 4);
3229 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3230 DMAE_LEN32_RD_MAX * 4);
3231 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3232 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3233 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3234 dmae->comp_val = DMAE_COMP_VAL;
3237 bnx2x_hw_stats_post(bp);
3238 bnx2x_stats_comp(bp);
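/*
 * Editor's note: the shared-memory port statistics block is larger than a
 * single DMAE read is allowed to move, so the transfer above is split into
 * two commands -- the first copies DMAE_LEN32_RD_MAX dwords, the second
 * resumes at that offset and copies the remainder, completing to the
 * stats_comp word that bnx2x_stats_comp() waits on.
 */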
3241 static void bnx2x_port_stats_init(struct bnx2x *bp)
3243 struct dmae_command *dmae;
3244 int port = BP_PORT(bp);
3245 int vn = BP_E1HVN(bp);
3247 int loader_idx = PMF_DMAE_C(bp);
3249 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3252 if (!bp->link_vars.link_up || !bp->port.pmf) {
3253 BNX2X_ERR("BUG!\n");
3257 bp->executer_idx = 0;
3260 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3261 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3262 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3264 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3266 DMAE_CMD_ENDIANITY_DW_SWAP |
3268 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3269 (vn << DMAE_CMD_E1HVN_SHIFT));
3271 if (bp->port.port_stx) {
3273 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3274 dmae->opcode = opcode;
3275 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3276 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3277 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3278 dmae->dst_addr_hi = 0;
3279 dmae->len = sizeof(struct host_port_stats) >> 2;
3280 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3281 dmae->comp_addr_hi = 0;
3287 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3288 dmae->opcode = opcode;
3289 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3290 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3291 dmae->dst_addr_lo = bp->func_stx >> 2;
3292 dmae->dst_addr_hi = 0;
3293 dmae->len = sizeof(struct host_func_stats) >> 2;
3294 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3295 dmae->comp_addr_hi = 0;
3300 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3301 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3302 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3304 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3306 DMAE_CMD_ENDIANITY_DW_SWAP |
3308 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3309 (vn << DMAE_CMD_E1HVN_SHIFT));
3311 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3313 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3314 NIG_REG_INGRESS_BMAC0_MEM);
3316 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3317 BIGMAC_REGISTER_TX_STAT_GTBYT */
3318 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3319 dmae->opcode = opcode;
3320 dmae->src_addr_lo = (mac_addr +
3321 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3322 dmae->src_addr_hi = 0;
3323 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3324 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3325 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3326 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3327 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3328 dmae->comp_addr_hi = 0;
3331 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3332 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3333 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3334 dmae->opcode = opcode;
3335 dmae->src_addr_lo = (mac_addr +
3336 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3337 dmae->src_addr_hi = 0;
3338 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3339 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3340 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3341 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3342 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3343 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3344 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3345 dmae->comp_addr_hi = 0;
3348 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3350 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3352 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3353 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3354 dmae->opcode = opcode;
3355 dmae->src_addr_lo = (mac_addr +
3356 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3357 dmae->src_addr_hi = 0;
3358 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3359 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3360 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3361 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3362 dmae->comp_addr_hi = 0;
3365 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3366 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367 dmae->opcode = opcode;
3368 dmae->src_addr_lo = (mac_addr +
3369 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3370 dmae->src_addr_hi = 0;
3371 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3372 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3373 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3374 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3376 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3377 dmae->comp_addr_hi = 0;
3380 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3381 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3382 dmae->opcode = opcode;
3383 dmae->src_addr_lo = (mac_addr +
3384 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3385 dmae->src_addr_hi = 0;
3386 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3387 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3388 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3389 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3390 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3391 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3392 dmae->comp_addr_hi = 0;
3397 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3398 dmae->opcode = opcode;
3399 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3400 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3401 dmae->src_addr_hi = 0;
3402 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3403 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3404 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3405 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3406 dmae->comp_addr_hi = 0;
3409 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3410 dmae->opcode = opcode;
3411 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3412 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3413 dmae->src_addr_hi = 0;
3414 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3415 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3416 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3417 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3418 dmae->len = (2*sizeof(u32)) >> 2;
3419 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3420 dmae->comp_addr_hi = 0;
3423 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3424 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3425 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3426 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3428 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3430 DMAE_CMD_ENDIANITY_DW_SWAP |
3432 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3433 (vn << DMAE_CMD_E1HVN_SHIFT));
3434 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3435 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3436 dmae->src_addr_hi = 0;
3437 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3438 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3439 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3440 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3441 dmae->len = (2*sizeof(u32)) >> 2;
3442 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3443 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3444 dmae->comp_val = DMAE_COMP_VAL;
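/*
 * Editor's note: all but the last command built above complete to a GRC
 * address (dmae_reg_go_c[loader_idx]) rather than to host memory; only the
 * final command writes DMAE_COMP_VAL to the stats_comp word in host memory,
 * which is what bnx2x_stats_comp() polls after bnx2x_hw_stats_post() kicks
 * off the chain.
 */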
3449 static void bnx2x_func_stats_init(struct bnx2x *bp)
3451 struct dmae_command *dmae = &bp->stats_dmae;
3452 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3455 if (!bp->func_stx) {
3456 BNX2X_ERR("BUG!\n");
3460 bp->executer_idx = 0;
3461 memset(dmae, 0, sizeof(struct dmae_command));
3463 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3464 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3465 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3467 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3469 DMAE_CMD_ENDIANITY_DW_SWAP |
3471 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3472 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3473 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3474 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3475 dmae->dst_addr_lo = bp->func_stx >> 2;
3476 dmae->dst_addr_hi = 0;
3477 dmae->len = sizeof(struct host_func_stats) >> 2;
3478 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3479 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3480 dmae->comp_val = DMAE_COMP_VAL;
3485 static void bnx2x_stats_start(struct bnx2x *bp)
3488 bnx2x_port_stats_init(bp);
3490 else if (bp->func_stx)
3491 bnx2x_func_stats_init(bp);
3493 bnx2x_hw_stats_post(bp);
3494 bnx2x_storm_stats_post(bp);
3497 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3499 bnx2x_stats_comp(bp);
3500 bnx2x_stats_pmf_update(bp);
3501 bnx2x_stats_start(bp);
3504 static void bnx2x_stats_restart(struct bnx2x *bp)
3506 bnx2x_stats_comp(bp);
3507 bnx2x_stats_start(bp);
3510 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3512 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3513 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3514 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3515 struct regpair diff;
3517 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3518 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3519 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3520 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3521 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3522 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3523 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3524 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3525 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3526 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3527 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3528 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3529 UPDATE_STAT64(tx_stat_gt127,
3530 tx_stat_etherstatspkts65octetsto127octets);
3531 UPDATE_STAT64(tx_stat_gt255,
3532 tx_stat_etherstatspkts128octetsto255octets);
3533 UPDATE_STAT64(tx_stat_gt511,
3534 tx_stat_etherstatspkts256octetsto511octets);
3535 UPDATE_STAT64(tx_stat_gt1023,
3536 tx_stat_etherstatspkts512octetsto1023octets);
3537 UPDATE_STAT64(tx_stat_gt1518,
3538 tx_stat_etherstatspkts1024octetsto1522octets);
3539 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3540 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3541 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3542 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3543 UPDATE_STAT64(tx_stat_gterr,
3544 tx_stat_dot3statsinternalmactransmiterrors);
3545 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3547 estats->pause_frames_received_hi =
3548 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3549 estats->pause_frames_received_lo =
3550 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3552 estats->pause_frames_sent_hi =
3553 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3554 estats->pause_frames_sent_lo =
3555 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3558 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3560 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3561 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3562 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3564 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3565 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3566 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3567 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3568 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3569 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3570 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3571 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3572 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3573 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3574 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3575 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3576 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3577 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3578 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3579 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3580 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3581 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3582 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3583 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3584 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3585 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3586 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3587 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3588 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3589 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3590 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3591 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3592 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3593 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3594 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3596 estats->pause_frames_received_hi =
3597 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3598 estats->pause_frames_received_lo =
3599 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3600 ADD_64(estats->pause_frames_received_hi,
3601 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3602 estats->pause_frames_received_lo,
3603 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3605 estats->pause_frames_sent_hi =
3606 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3607 estats->pause_frames_sent_lo =
3608 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3609 ADD_64(estats->pause_frames_sent_hi,
3610 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3611 estats->pause_frames_sent_lo,
3612 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3615 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3617 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3618 struct nig_stats *old = &(bp->port.old_nig_stats);
3619 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3620 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3621 struct regpair diff;
3624 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3625 bnx2x_bmac_stats_update(bp);
3627 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3628 bnx2x_emac_stats_update(bp);
3630 else { /* unreached */
3631 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3635 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3636 new->brb_discard - old->brb_discard);
3637 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3638 new->brb_truncate - old->brb_truncate);
3640 UPDATE_STAT64_NIG(egress_mac_pkt0,
3641 etherstatspkts1024octetsto1522octets);
3642 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3644 memcpy(old, new, sizeof(struct nig_stats));
3646 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3647 sizeof(struct mac_stx));
3648 estats->brb_drop_hi = pstats->brb_drop_hi;
3649 estats->brb_drop_lo = pstats->brb_drop_lo;
3651 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3653 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3654 if (nig_timer_max != estats->nig_timer_max) {
3655 estats->nig_timer_max = nig_timer_max;
3656 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3662 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3664 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3665 struct tstorm_per_port_stats *tport =
3666 &stats->tstorm_common.port_statistics;
3667 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3668 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3671 memset(&(fstats->total_bytes_received_hi), 0,
3672 sizeof(struct host_func_stats) - 2*sizeof(u32));
3673 estats->error_bytes_received_hi = 0;
3674 estats->error_bytes_received_lo = 0;
3675 estats->etherstatsoverrsizepkts_hi = 0;
3676 estats->etherstatsoverrsizepkts_lo = 0;
3677 estats->no_buff_discard_hi = 0;
3678 estats->no_buff_discard_lo = 0;
3680 for_each_queue(bp, i) {
3681 struct bnx2x_fastpath *fp = &bp->fp[i];
3682 int cl_id = fp->cl_id;
3683 struct tstorm_per_client_stats *tclient =
3684 &stats->tstorm_common.client_statistics[cl_id];
3685 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3686 struct ustorm_per_client_stats *uclient =
3687 &stats->ustorm_common.client_statistics[cl_id];
3688 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3689 struct xstorm_per_client_stats *xclient =
3690 &stats->xstorm_common.client_statistics[cl_id];
3691 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3692 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3695 /* are storm stats valid? */
3696 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3697 bp->stats_counter) {
3698 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3699 " xstorm counter (%d) != stats_counter (%d)\n",
3700 i, xclient->stats_counter, bp->stats_counter);
3703 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3704 bp->stats_counter) {
3705 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3706 " tstorm counter (%d) != stats_counter (%d)\n",
3707 i, tclient->stats_counter, bp->stats_counter);
3710 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3711 bp->stats_counter) {
3712 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3713 " ustorm counter (%d) != stats_counter (%d)\n",
3714 i, uclient->stats_counter, bp->stats_counter);
3718 qstats->total_bytes_received_hi =
3719 qstats->valid_bytes_received_hi =
3720 le32_to_cpu(tclient->total_rcv_bytes.hi);
3721 qstats->total_bytes_received_lo =
3722 qstats->valid_bytes_received_lo =
3723 le32_to_cpu(tclient->total_rcv_bytes.lo);
3725 qstats->error_bytes_received_hi =
3726 le32_to_cpu(tclient->rcv_error_bytes.hi);
3727 qstats->error_bytes_received_lo =
3728 le32_to_cpu(tclient->rcv_error_bytes.lo);
3730 ADD_64(qstats->total_bytes_received_hi,
3731 qstats->error_bytes_received_hi,
3732 qstats->total_bytes_received_lo,
3733 qstats->error_bytes_received_lo);
3735 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3736 total_unicast_packets_received);
3737 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3738 total_multicast_packets_received);
3739 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3740 total_broadcast_packets_received);
3741 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3742 etherstatsoverrsizepkts);
3743 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3745 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3746 total_unicast_packets_received);
3747 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3748 total_multicast_packets_received);
3749 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3750 total_broadcast_packets_received);
3751 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3752 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3753 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3755 qstats->total_bytes_transmitted_hi =
3756 le32_to_cpu(xclient->total_sent_bytes.hi);
3757 qstats->total_bytes_transmitted_lo =
3758 le32_to_cpu(xclient->total_sent_bytes.lo);
3760 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3761 total_unicast_packets_transmitted);
3762 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3763 total_multicast_packets_transmitted);
3764 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3765 total_broadcast_packets_transmitted);
3767 old_tclient->checksum_discard = tclient->checksum_discard;
3768 old_tclient->ttl0_discard = tclient->ttl0_discard;
3770 ADD_64(fstats->total_bytes_received_hi,
3771 qstats->total_bytes_received_hi,
3772 fstats->total_bytes_received_lo,
3773 qstats->total_bytes_received_lo);
3774 ADD_64(fstats->total_bytes_transmitted_hi,
3775 qstats->total_bytes_transmitted_hi,
3776 fstats->total_bytes_transmitted_lo,
3777 qstats->total_bytes_transmitted_lo);
3778 ADD_64(fstats->total_unicast_packets_received_hi,
3779 qstats->total_unicast_packets_received_hi,
3780 fstats->total_unicast_packets_received_lo,
3781 qstats->total_unicast_packets_received_lo);
3782 ADD_64(fstats->total_multicast_packets_received_hi,
3783 qstats->total_multicast_packets_received_hi,
3784 fstats->total_multicast_packets_received_lo,
3785 qstats->total_multicast_packets_received_lo);
3786 ADD_64(fstats->total_broadcast_packets_received_hi,
3787 qstats->total_broadcast_packets_received_hi,
3788 fstats->total_broadcast_packets_received_lo,
3789 qstats->total_broadcast_packets_received_lo);
3790 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3791 qstats->total_unicast_packets_transmitted_hi,
3792 fstats->total_unicast_packets_transmitted_lo,
3793 qstats->total_unicast_packets_transmitted_lo);
3794 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3795 qstats->total_multicast_packets_transmitted_hi,
3796 fstats->total_multicast_packets_transmitted_lo,
3797 qstats->total_multicast_packets_transmitted_lo);
3798 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3799 qstats->total_broadcast_packets_transmitted_hi,
3800 fstats->total_broadcast_packets_transmitted_lo,
3801 qstats->total_broadcast_packets_transmitted_lo);
3802 ADD_64(fstats->valid_bytes_received_hi,
3803 qstats->valid_bytes_received_hi,
3804 fstats->valid_bytes_received_lo,
3805 qstats->valid_bytes_received_lo);
3807 ADD_64(estats->error_bytes_received_hi,
3808 qstats->error_bytes_received_hi,
3809 estats->error_bytes_received_lo,
3810 qstats->error_bytes_received_lo);
3811 ADD_64(estats->etherstatsoverrsizepkts_hi,
3812 qstats->etherstatsoverrsizepkts_hi,
3813 estats->etherstatsoverrsizepkts_lo,
3814 qstats->etherstatsoverrsizepkts_lo);
3815 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3816 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3819 ADD_64(fstats->total_bytes_received_hi,
3820 estats->rx_stat_ifhcinbadoctets_hi,
3821 fstats->total_bytes_received_lo,
3822 estats->rx_stat_ifhcinbadoctets_lo);
3824 memcpy(estats, &(fstats->total_bytes_received_hi),
3825 sizeof(struct host_func_stats) - 2*sizeof(u32));
3827 ADD_64(estats->etherstatsoverrsizepkts_hi,
3828 estats->rx_stat_dot3statsframestoolong_hi,
3829 estats->etherstatsoverrsizepkts_lo,
3830 estats->rx_stat_dot3statsframestoolong_lo);
3831 ADD_64(estats->error_bytes_received_hi,
3832 estats->rx_stat_ifhcinbadoctets_hi,
3833 estats->error_bytes_received_lo,
3834 estats->rx_stat_ifhcinbadoctets_lo);
3837 estats->mac_filter_discard =
3838 le32_to_cpu(tport->mac_filter_discard);
3839 estats->xxoverflow_discard =
3840 le32_to_cpu(tport->xxoverflow_discard);
3841 estats->brb_truncate_discard =
3842 le32_to_cpu(tport->brb_truncate_discard);
3843 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3846 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3848 bp->stats_pending = 0;
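/*
 * Editor's note: bp->stats_counter is incremented every time a statistics
 * query ramrod is posted (see bnx2x_storm_stats_post), so the checks above
 * treat a client's data as fresh only when the counter echoed back by the
 * storm is exactly one behind the driver's value, i.e. the firmware has
 * answered the most recently posted query.
 */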
3853 static void bnx2x_net_stats_update(struct bnx2x *bp)
3855 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3856 struct net_device_stats *nstats = &bp->dev->stats;
3859 nstats->rx_packets =
3860 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3861 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3862 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3864 nstats->tx_packets =
3865 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3866 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3867 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3869 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3871 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3873 nstats->rx_dropped = estats->mac_discard;
3874 for_each_queue(bp, i)
3875 nstats->rx_dropped +=
3876 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3878 nstats->tx_dropped = 0;
3881 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3883 nstats->collisions =
3884 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3886 nstats->rx_length_errors =
3887 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3888 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3889 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3890 bnx2x_hilo(&estats->brb_truncate_hi);
3891 nstats->rx_crc_errors =
3892 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3893 nstats->rx_frame_errors =
3894 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3895 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3896 nstats->rx_missed_errors = estats->xxoverflow_discard;
3898 nstats->rx_errors = nstats->rx_length_errors +
3899 nstats->rx_over_errors +
3900 nstats->rx_crc_errors +
3901 nstats->rx_frame_errors +
3902 nstats->rx_fifo_errors +
3903 nstats->rx_missed_errors;
3905 nstats->tx_aborted_errors =
3906 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3907 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3908 nstats->tx_carrier_errors =
3909 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3910 nstats->tx_fifo_errors = 0;
3911 nstats->tx_heartbeat_errors = 0;
3912 nstats->tx_window_errors = 0;
3914 nstats->tx_errors = nstats->tx_aborted_errors +
3915 nstats->tx_carrier_errors +
3916 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3919 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3921 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3924 estats->driver_xoff = 0;
3925 estats->rx_err_discard_pkt = 0;
3926 estats->rx_skb_alloc_failed = 0;
3927 estats->hw_csum_err = 0;
3928 for_each_queue(bp, i) {
3929 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3931 estats->driver_xoff += qstats->driver_xoff;
3932 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3933 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3934 estats->hw_csum_err += qstats->hw_csum_err;
3938 static void bnx2x_stats_update(struct bnx2x *bp)
3940 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3942 if (*stats_comp != DMAE_COMP_VAL)
3946 bnx2x_hw_stats_update(bp);
3948 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3949 BNX2X_ERR("storm stats were not updated for 3 times\n");
3954 bnx2x_net_stats_update(bp);
3955 bnx2x_drv_stats_update(bp);
3957 if (bp->msglevel & NETIF_MSG_TIMER) {
3958 struct tstorm_per_client_stats *old_tclient =
3959 &bp->fp->old_tclient;
3960 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3961 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3962 struct net_device_stats *nstats = &bp->dev->stats;
3965 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3966 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3968 bnx2x_tx_avail(bp->fp),
3969 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3970 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3972 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3973 bp->fp->rx_comp_cons),
3974 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3975 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3976 "brb truncate %u\n",
3977 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3978 qstats->driver_xoff,
3979 estats->brb_drop_lo, estats->brb_truncate_lo);
3980 printk(KERN_DEBUG "tstats: checksum_discard %u "
3981 "packets_too_big_discard %lu no_buff_discard %lu "
3982 "mac_discard %u mac_filter_discard %u "
3983 "xxovrflow_discard %u brb_truncate_discard %u "
3984 "ttl0_discard %u\n",
3985 old_tclient->checksum_discard,
3986 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3987 bnx2x_hilo(&qstats->no_buff_discard_hi),
3988 estats->mac_discard, estats->mac_filter_discard,
3989 estats->xxoverflow_discard, estats->brb_truncate_discard,
3990 old_tclient->ttl0_discard);
3992 for_each_queue(bp, i) {
3993 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3994 bnx2x_fp(bp, i, tx_pkt),
3995 bnx2x_fp(bp, i, rx_pkt),
3996 bnx2x_fp(bp, i, rx_calls));
4000 bnx2x_hw_stats_post(bp);
4001 bnx2x_storm_stats_post(bp);
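/*
 * Editor's note: re-posting both the DMAE chain and the query ramrod here
 * makes collection self-sustaining -- each STATS_EVENT_UPDATE (delivered
 * from bnx2x_timer below) consumes the previous results and kicks off the
 * next round.
 */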
4004 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4006 struct dmae_command *dmae;
4008 int loader_idx = PMF_DMAE_C(bp);
4009 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4011 bp->executer_idx = 0;
4013 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4015 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4017 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4019 DMAE_CMD_ENDIANITY_DW_SWAP |
4021 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4022 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4024 if (bp->port.port_stx) {
4026 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4028 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4030 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4031 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4032 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4033 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4034 dmae->dst_addr_hi = 0;
4035 dmae->len = sizeof(struct host_port_stats) >> 2;
4037 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4038 dmae->comp_addr_hi = 0;
4041 dmae->comp_addr_lo =
4042 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4043 dmae->comp_addr_hi =
4044 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4045 dmae->comp_val = DMAE_COMP_VAL;
4053 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4054 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4055 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4056 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4057 dmae->dst_addr_lo = bp->func_stx >> 2;
4058 dmae->dst_addr_hi = 0;
4059 dmae->len = sizeof(struct host_func_stats) >> 2;
4060 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4061 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4062 dmae->comp_val = DMAE_COMP_VAL;
4068 static void bnx2x_stats_stop(struct bnx2x *bp)
4072 bnx2x_stats_comp(bp);
4075 update = (bnx2x_hw_stats_update(bp) == 0);
4077 update |= (bnx2x_storm_stats_update(bp) == 0);
4080 bnx2x_net_stats_update(bp);
4083 bnx2x_port_stats_stop(bp);
4085 bnx2x_hw_stats_post(bp);
4086 bnx2x_stats_comp(bp);
4090 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4094 static const struct {
4095 void (*action)(struct bnx2x *bp);
4096 enum bnx2x_stats_state next_state;
4097 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4100 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4101 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4102 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4103 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4106 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4107 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4108 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4109 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4113 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4115 enum bnx2x_stats_state state = bp->stats_state;
4117 bnx2x_stats_stm[state][event].action(bp);
4118 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4120 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4121 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4122 state, event, bp->stats_state);
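/*
 * Editor's note -- example walk through the table above: when the link comes
 * up, bnx2x_link_attn() calls bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 * from STATS_STATE_DISABLED this runs bnx2x_stats_start() and moves to
 * STATS_STATE_ENABLED, after which each timer tick delivers
 * STATS_EVENT_UPDATE and runs bnx2x_stats_update() while staying ENABLED.
 */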
4125 static void bnx2x_timer(unsigned long data)
4127 struct bnx2x *bp = (struct bnx2x *) data;
4129 if (!netif_running(bp->dev))
4132 if (atomic_read(&bp->intr_sem) != 0)
4136 struct bnx2x_fastpath *fp = &bp->fp[0];
4139 bnx2x_tx_int(fp, 1000);
4140 rc = bnx2x_rx_int(fp, 1000);
4143 if (!BP_NOMCP(bp)) {
4144 int func = BP_FUNC(bp);
4148 ++bp->fw_drv_pulse_wr_seq;
4149 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4150 /* TBD - add SYSTEM_TIME */
4151 drv_pulse = bp->fw_drv_pulse_wr_seq;
4152 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4154 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4155 MCP_PULSE_SEQ_MASK);
4156 /* The delta between driver pulse and mcp response
4157 * should be 1 (before mcp response) or 0 (after mcp response) */
4159 if ((drv_pulse != mcp_pulse) &&
4160 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4161 /* someone lost a heartbeat... */
4162 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4163 drv_pulse, mcp_pulse);
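/* Heartbeat check: the driver increments fw_drv_pulse_wr_seq on every
 * timer tick and writes it to the shared memory mailbox; the MCP is
 * expected to echo it back in mcp_pulse_mb.  A difference other than
 * 0 or 1 (modulo the sequence mask) means one side missed a beat.
 */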
4167 if ((bp->state == BNX2X_STATE_OPEN) ||
4168 (bp->state == BNX2X_STATE_DISABLED))
4169 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4172 mod_timer(&bp->timer, jiffies + bp->current_interval);
4175 /* end of Statistics */
4180 /* nic init service functions */
4183 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4185 int port = BP_PORT(bp);
4187 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4188 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4189 sizeof(struct ustorm_status_block)/4);
4190 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4191 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4192 sizeof(struct cstorm_status_block)/4);
4195 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4196 dma_addr_t mapping, int sb_id)
4198 int port = BP_PORT(bp);
4199 int func = BP_FUNC(bp);
4204 section = ((u64)mapping) + offsetof(struct host_status_block,
4206 sb->u_status_block.status_block_id = sb_id;
4208 REG_WR(bp, BAR_USTRORM_INTMEM +
4209 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4210 REG_WR(bp, BAR_USTRORM_INTMEM +
4211 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4213 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4214 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4216 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4217 REG_WR16(bp, BAR_USTRORM_INTMEM +
4218 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4221 section = ((u64)mapping) + offsetof(struct host_status_block,
4223 sb->c_status_block.status_block_id = sb_id;
4225 REG_WR(bp, BAR_CSTRORM_INTMEM +
4226 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4227 REG_WR(bp, BAR_CSTRORM_INTMEM +
4228 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4230 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4231 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4233 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4234 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4235 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4237 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
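/* A non-default status block has a USTORM and a CSTORM section.  For
 * each section the code above programs the storm's internal memory
 * with the DMA address of that section, records the owning function,
 * and initially disables host coalescing on every index (the
 * *_SB_HC_DISABLE writes of 1); the final bnx2x_ack_sb() enables the
 * IGU interrupt line for this status block.
 */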
4240 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4242 int func = BP_FUNC(bp);
4244 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4245 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4246 sizeof(struct ustorm_def_status_block)/4);
4247 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4248 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4249 sizeof(struct cstorm_def_status_block)/4);
4250 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4251 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4252 sizeof(struct xstorm_def_status_block)/4);
4253 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4254 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4255 sizeof(struct tstorm_def_status_block)/4);
4258 static void bnx2x_init_def_sb(struct bnx2x *bp,
4259 struct host_def_status_block *def_sb,
4260 dma_addr_t mapping, int sb_id)
4262 int port = BP_PORT(bp);
4263 int func = BP_FUNC(bp);
4264 int index, val, reg_offset;
4268 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4269 atten_status_block);
4270 def_sb->atten_status_block.status_block_id = sb_id;
4274 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4275 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4277 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4278 bp->attn_group[index].sig[0] = REG_RD(bp,
4279 reg_offset + 0x10*index);
4280 bp->attn_group[index].sig[1] = REG_RD(bp,
4281 reg_offset + 0x4 + 0x10*index);
4282 bp->attn_group[index].sig[2] = REG_RD(bp,
4283 reg_offset + 0x8 + 0x10*index);
4284 bp->attn_group[index].sig[3] = REG_RD(bp,
4285 reg_offset + 0xc + 0x10*index);
4288 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4289 HC_REG_ATTN_MSG0_ADDR_L);
4291 REG_WR(bp, reg_offset, U64_LO(section));
4292 REG_WR(bp, reg_offset + 4, U64_HI(section));
4294 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4296 val = REG_RD(bp, reg_offset);
4298 REG_WR(bp, reg_offset, val);
4301 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4302 u_def_status_block);
4303 def_sb->u_def_status_block.status_block_id = sb_id;
4305 REG_WR(bp, BAR_USTRORM_INTMEM +
4306 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4307 REG_WR(bp, BAR_USTRORM_INTMEM +
4308 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4310 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4311 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4313 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4314 REG_WR16(bp, BAR_USTRORM_INTMEM +
4315 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4318 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4319 c_def_status_block);
4320 def_sb->c_def_status_block.status_block_id = sb_id;
4322 REG_WR(bp, BAR_CSTRORM_INTMEM +
4323 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4324 REG_WR(bp, BAR_CSTRORM_INTMEM +
4325 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4327 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4328 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4330 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4331 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4332 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4335 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4336 t_def_status_block);
4337 def_sb->t_def_status_block.status_block_id = sb_id;
4339 REG_WR(bp, BAR_TSTRORM_INTMEM +
4340 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4341 REG_WR(bp, BAR_TSTRORM_INTMEM +
4342 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4344 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4345 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4347 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4348 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4349 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4352 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4353 x_def_status_block);
4354 def_sb->x_def_status_block.status_block_id = sb_id;
4356 REG_WR(bp, BAR_XSTRORM_INTMEM +
4357 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4358 REG_WR(bp, BAR_XSTRORM_INTMEM +
4359 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4361 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4362 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4364 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4365 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4366 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4368 bp->stats_pending = 0;
4369 bp->set_mac_pending = 0;
4371 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4374 static void bnx2x_update_coalesce(struct bnx2x *bp)
4376 int port = BP_PORT(bp);
4379 for_each_queue(bp, i) {
4380 int sb_id = bp->fp[i].sb_id;
4382 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4383 REG_WR8(bp, BAR_USTRORM_INTMEM +
4384 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4385 U_SB_ETH_RX_CQ_INDEX),
4387 REG_WR16(bp, BAR_USTRORM_INTMEM +
4388 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4389 U_SB_ETH_RX_CQ_INDEX),
4390 bp->rx_ticks ? 0 : 1);
4392 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4393 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4394 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4395 C_SB_ETH_TX_CQ_INDEX),
4397 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4398 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4399 C_SB_ETH_TX_CQ_INDEX),
4400 bp->tx_ticks ? 0 : 1);
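/* Per queue, the loop above programs the host-coalescing timeout for
 * the RX CQ index (USTORM) and the TX CQ index (CSTORM) from
 * rx_ticks/tx_ticks, and sets the matching HC_DISABLE flag whenever
 * the tick value is zero, i.e. a zero coalescing value turns
 * interrupt coalescing off for that index.
 */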
4404 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4405 struct bnx2x_fastpath *fp, int last)
4409 for (i = 0; i < last; i++) {
4410 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4411 struct sk_buff *skb = rx_buf->skb;
4414 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4418 if (fp->tpa_state[i] == BNX2X_TPA_START)
4419 pci_unmap_single(bp->pdev,
4420 pci_unmap_addr(rx_buf, mapping),
4422 PCI_DMA_FROMDEVICE);
4429 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4431 int func = BP_FUNC(bp);
4432 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4433 ETH_MAX_AGGREGATION_QUEUES_E1H;
4434 u16 ring_prod, cqe_ring_prod;
4437 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4439 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4441 if (bp->flags & TPA_ENABLE_FLAG) {
4443 for_each_rx_queue(bp, j) {
4444 struct bnx2x_fastpath *fp = &bp->fp[j];
4446 for (i = 0; i < max_agg_queues; i++) {
4447 fp->tpa_pool[i].skb =
4448 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4449 if (!fp->tpa_pool[i].skb) {
4450 BNX2X_ERR("Failed to allocate TPA "
4451 "skb pool for queue[%d] - "
4452 "disabling TPA on this "
4454 bnx2x_free_tpa_pool(bp, fp, i);
4455 fp->disable_tpa = 1;
4458 pci_unmap_addr_set((struct sw_rx_bd *)
4459 &bp->fp->tpa_pool[i],
4461 fp->tpa_state[i] = BNX2X_TPA_STOP;
4466 for_each_rx_queue(bp, j) {
4467 struct bnx2x_fastpath *fp = &bp->fp[j];
4470 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4471 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4473 /* "next page" elements initialization */
4475 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4476 struct eth_rx_sge *sge;
4478 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4480 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4481 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4483 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4484 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4487 bnx2x_init_sge_ring_bit_mask(fp);
4490 for (i = 1; i <= NUM_RX_RINGS; i++) {
4491 struct eth_rx_bd *rx_bd;
4493 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4495 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4496 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4498 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4499 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4503 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4504 struct eth_rx_cqe_next_page *nextpg;
4506 nextpg = (struct eth_rx_cqe_next_page *)
4507 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4509 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4510 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4512 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4513 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4516 /* Allocate SGEs and initialize the ring elements */
4517 for (i = 0, ring_prod = 0;
4518 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4520 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4521 BNX2X_ERR("was only able to allocate "
4523 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4524 /* Cleanup already allocated elements */
4525 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4526 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4527 fp->disable_tpa = 1;
4531 ring_prod = NEXT_SGE_IDX(ring_prod);
4533 fp->rx_sge_prod = ring_prod;
4535 /* Allocate BDs and initialize BD ring */
4536 fp->rx_comp_cons = 0;
4537 cqe_ring_prod = ring_prod = 0;
4538 for (i = 0; i < bp->rx_ring_size; i++) {
4539 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4540 BNX2X_ERR("was only able to allocate "
4541 "%d rx skbs on queue[%d]\n", i, j);
4542 fp->eth_q_stats.rx_skb_alloc_failed++;
4545 ring_prod = NEXT_RX_IDX(ring_prod);
4546 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4547 WARN_ON(ring_prod <= i);
4550 fp->rx_bd_prod = ring_prod;
4551 /* must not have more available CQEs than BDs */
4552 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4554 fp->rx_pkt = fp->rx_calls = 0;
4557 /* this will generate an interrupt (to the TSTORM);
4558 * must only be done after the chip is initialized */
4560 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4565 REG_WR(bp, BAR_USTRORM_INTMEM +
4566 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4567 U64_LO(fp->rx_comp_mapping));
4568 REG_WR(bp, BAR_USTRORM_INTMEM +
4569 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4570 U64_HI(fp->rx_comp_mapping));
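/* Each BD/SGE/RCQ ring spans several pages; the last element(s) of
 * every page are initialized above as "next page" pointers so the
 * hardware can follow the chain, and the producers are published to
 * the chip through bnx2x_update_rx_prod().  The final write hands the
 * RCQ base address to the USTORM (a hardware workaround, as the
 * USTORM_MEM_WORKAROUND offset name suggests).
 */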
4574 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4578 for_each_tx_queue(bp, j) {
4579 struct bnx2x_fastpath *fp = &bp->fp[j];
4581 for (i = 1; i <= NUM_TX_RINGS; i++) {
4582 struct eth_tx_bd *tx_bd =
4583 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4586 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4587 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4589 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4590 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4593 fp->tx_pkt_prod = 0;
4594 fp->tx_pkt_cons = 0;
4597 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4602 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4604 int func = BP_FUNC(bp);
4606 spin_lock_init(&bp->spq_lock);
4608 bp->spq_left = MAX_SPQ_PENDING;
4609 bp->spq_prod_idx = 0;
4610 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4611 bp->spq_prod_bd = bp->spq;
4612 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
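/* The slow-path queue (SPQ) is a single page of ramrod entries:
 * spq_prod_bd starts at the page base, spq_last_bd marks the wrap
 * point and spq_left counts the free entries.  The page base and the
 * producer index are then published to the XSTORM below.
 */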
4614 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4615 U64_LO(bp->spq_mapping));
4617 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4618 U64_HI(bp->spq_mapping));
4620 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4624 static void bnx2x_init_context(struct bnx2x *bp)
4628 for_each_queue(bp, i) {
4629 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4630 struct bnx2x_fastpath *fp = &bp->fp[i];
4631 u8 cl_id = fp->cl_id;
4632 u8 sb_id = FP_SB_ID(fp);
4634 context->ustorm_st_context.common.sb_index_numbers =
4635 BNX2X_RX_SB_INDEX_NUM;
4636 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4637 context->ustorm_st_context.common.status_block_id = sb_id;
4638 context->ustorm_st_context.common.flags =
4639 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4640 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4641 context->ustorm_st_context.common.statistics_counter_id =
4643 context->ustorm_st_context.common.mc_alignment_log_size =
4644 BNX2X_RX_ALIGN_SHIFT;
4645 context->ustorm_st_context.common.bd_buff_size =
4647 context->ustorm_st_context.common.bd_page_base_hi =
4648 U64_HI(fp->rx_desc_mapping);
4649 context->ustorm_st_context.common.bd_page_base_lo =
4650 U64_LO(fp->rx_desc_mapping);
4651 if (!fp->disable_tpa) {
4652 context->ustorm_st_context.common.flags |=
4653 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4654 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4655 context->ustorm_st_context.common.sge_buff_size =
4656 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4658 context->ustorm_st_context.common.sge_page_base_hi =
4659 U64_HI(fp->rx_sge_mapping);
4660 context->ustorm_st_context.common.sge_page_base_lo =
4661 U64_LO(fp->rx_sge_mapping);
4664 context->ustorm_ag_context.cdu_usage =
4665 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4666 CDU_REGION_NUMBER_UCM_AG,
4667 ETH_CONNECTION_TYPE);
4669 context->xstorm_st_context.tx_bd_page_base_hi =
4670 U64_HI(fp->tx_desc_mapping);
4671 context->xstorm_st_context.tx_bd_page_base_lo =
4672 U64_LO(fp->tx_desc_mapping);
4673 context->xstorm_st_context.db_data_addr_hi =
4674 U64_HI(fp->tx_prods_mapping);
4675 context->xstorm_st_context.db_data_addr_lo =
4676 U64_LO(fp->tx_prods_mapping);
4677 context->xstorm_st_context.statistics_data = (fp->cl_id |
4678 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4679 context->cstorm_st_context.sb_index_number =
4680 C_SB_ETH_TX_CQ_INDEX;
4681 context->cstorm_st_context.status_block_id = sb_id;
4683 context->xstorm_ag_context.cdu_reserved =
4684 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4685 CDU_REGION_NUMBER_XCM_AG,
4686 ETH_CONNECTION_TYPE);
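/* Each ETH connection context filled above has a USTORM part (RX BD
 * and SGE page addresses, buffer sizes, TPA flags), an XSTORM part
 * (TX BD page address, doorbell data address, statistics id) and a
 * CSTORM part (TX completion index and status block id); the
 * cdu_reserved/cdu_usage fields encode the CID, CDU region and
 * connection type for the CDU.
 */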
4690 static void bnx2x_init_ind_table(struct bnx2x *bp)
4692 int func = BP_FUNC(bp);
4695 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4699 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4700 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4701 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4702 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4703 BP_CL_ID(bp) + (i % bp->num_rx_queues));
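/* RSS indirection table: TSTORM_INDIRECTION_TABLE_SIZE one-byte
 * entries, each naming a client id; RX queues are assigned
 * round-robin (i % num_rx_queues) on top of the base client id, and
 * the table is skipped entirely when multi_mode is
 * ETH_RSS_MODE_DISABLED.
 */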
4706 static void bnx2x_set_client_config(struct bnx2x *bp)
4708 struct tstorm_eth_client_config tstorm_client = {0};
4709 int port = BP_PORT(bp);
4712 tstorm_client.mtu = bp->dev->mtu;
4713 tstorm_client.config_flags =
4714 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4715 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4717 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4718 tstorm_client.config_flags |=
4719 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4720 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4724 if (bp->flags & TPA_ENABLE_FLAG) {
4725 tstorm_client.max_sges_for_packet =
4726 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4727 tstorm_client.max_sges_for_packet =
4728 ((tstorm_client.max_sges_for_packet +
4729 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4730 PAGES_PER_SGE_SHIFT;
4732 tstorm_client.config_flags |=
4733 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
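/* max_sges_for_packet is first computed in SGE pages
 * (SGE_PAGE_ALIGN(mtu) >> SGE_PAGE_SHIFT) and then rounded up to a
 * whole number of SGE elements of PAGES_PER_SGE pages each.  For
 * example, assuming 4K pages and one page per SGE, an MTU of 9000
 * aligns to 12K, i.e. 3 pages, i.e. 3 SGEs per packet.
 */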
4736 for_each_queue(bp, i) {
4737 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4739 REG_WR(bp, BAR_TSTRORM_INTMEM +
4740 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4741 ((u32 *)&tstorm_client)[0]);
4742 REG_WR(bp, BAR_TSTRORM_INTMEM +
4743 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4744 ((u32 *)&tstorm_client)[1]);
4747 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4748 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4751 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4753 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4754 int mode = bp->rx_mode;
4755 int mask = (1 << BP_L_ID(bp));
4756 int func = BP_FUNC(bp);
4759 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4762 case BNX2X_RX_MODE_NONE: /* no Rx */
4763 tstorm_mac_filter.ucast_drop_all = mask;
4764 tstorm_mac_filter.mcast_drop_all = mask;
4765 tstorm_mac_filter.bcast_drop_all = mask;
4767 case BNX2X_RX_MODE_NORMAL:
4768 tstorm_mac_filter.bcast_accept_all = mask;
4770 case BNX2X_RX_MODE_ALLMULTI:
4771 tstorm_mac_filter.mcast_accept_all = mask;
4772 tstorm_mac_filter.bcast_accept_all = mask;
4774 case BNX2X_RX_MODE_PROMISC:
4775 tstorm_mac_filter.ucast_accept_all = mask;
4776 tstorm_mac_filter.mcast_accept_all = mask;
4777 tstorm_mac_filter.bcast_accept_all = mask;
4780 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4784 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4785 REG_WR(bp, BAR_TSTRORM_INTMEM +
4786 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4787 ((u32 *)&tstorm_mac_filter)[i]);
4789 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4790 ((u32 *)&tstorm_mac_filter)[i]); */
4793 if (mode != BNX2X_RX_MODE_NONE)
4794 bnx2x_set_client_config(bp);
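/* The RX mode is translated into per-function accept/drop masks in
 * the TSTORM MAC filter configuration: the mask is the bit of this
 * function's logical id, and NONE/NORMAL/ALLMULTI/PROMISC select
 * which of the unicast/multicast/broadcast classes are dropped or
 * accepted before the per-client config is (re)written.
 */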
4797 static void bnx2x_init_internal_common(struct bnx2x *bp)
4801 if (bp->flags & TPA_ENABLE_FLAG) {
4802 struct tstorm_eth_tpa_exist tpa = {0};
4806 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4808 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4812 /* Zero this manually as its initialization is
4813 currently missing in the initTool */
4814 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4815 REG_WR(bp, BAR_USTRORM_INTMEM +
4816 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4819 static void bnx2x_init_internal_port(struct bnx2x *bp)
4821 int port = BP_PORT(bp);
4823 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4824 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4825 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4826 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4829 /* Calculates the sum of vn_min_rates.
4830 It's needed for further normalizing of the min_rates.
4832 Returns: the sum of vn_min_rates, or
4834 0 - if all the min_rates are 0.
4835 In the latter case the fairness algorithm should be deactivated.
4836 If not all min_rates are zero, then those that are zero will be set to 1.
4838 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4841 int port = BP_PORT(bp);
4844 bp->vn_weight_sum = 0;
4845 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4846 int func = 2*vn + port;
4848 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4849 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4850 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4852 /* Skip hidden vns */
4853 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4856 /* If min rate is zero - set it to 1 */
4858 vn_min_rate = DEF_MIN_RATE;
4862 bp->vn_weight_sum += vn_min_rate;
4865 /* ... only if all min rates are zeros - disable fairness */
4867 bp->vn_weight_sum = 0;
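/* vn_weight_sum accumulates the per-VN minimal bandwidth read from
 * the MF configuration (the MIN_BW field scaled by 100), skipping
 * hidden VNs and substituting DEF_MIN_RATE for zero entries; if every
 * VN reported zero, the sum is forced back to 0 so that fairness
 * stays disabled.
 */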
4870 static void bnx2x_init_internal_func(struct bnx2x *bp)
4872 struct tstorm_eth_function_common_config tstorm_config = {0};
4873 struct stats_indication_flags stats_flags = {0};
4874 int port = BP_PORT(bp);
4875 int func = BP_FUNC(bp);
4881 tstorm_config.config_flags = MULTI_FLAGS(bp);
4882 tstorm_config.rss_result_mask = MULTI_MASK;
4885 tstorm_config.config_flags |=
4886 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4888 tstorm_config.leading_client_id = BP_L_ID(bp);
4890 REG_WR(bp, BAR_TSTRORM_INTMEM +
4891 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4892 (*(u32 *)&tstorm_config));
4894 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4895 bnx2x_set_storm_rx_mode(bp);
4897 for_each_queue(bp, i) {
4898 u8 cl_id = bp->fp[i].cl_id;
4900 /* reset xstorm per client statistics */
4901 offset = BAR_XSTRORM_INTMEM +
4902 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4904 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4905 REG_WR(bp, offset + j*4, 0);
4907 /* reset tstorm per client statistics */
4908 offset = BAR_TSTRORM_INTMEM +
4909 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4911 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4912 REG_WR(bp, offset + j*4, 0);
4914 /* reset ustorm per client statistics */
4915 offset = BAR_USTRORM_INTMEM +
4916 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4918 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4919 REG_WR(bp, offset + j*4, 0);
4922 /* Init statistics related context */
4923 stats_flags.collect_eth = 1;
4925 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4926 ((u32 *)&stats_flags)[0]);
4927 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4928 ((u32 *)&stats_flags)[1]);
4930 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4931 ((u32 *)&stats_flags)[0]);
4932 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4933 ((u32 *)&stats_flags)[1]);
4935 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4936 ((u32 *)&stats_flags)[0]);
4937 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4938 ((u32 *)&stats_flags)[1]);
4940 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4941 ((u32 *)&stats_flags)[0]);
4942 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4943 ((u32 *)&stats_flags)[1]);
4945 REG_WR(bp, BAR_XSTRORM_INTMEM +
4946 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4947 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4948 REG_WR(bp, BAR_XSTRORM_INTMEM +
4949 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4950 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4952 REG_WR(bp, BAR_TSTRORM_INTMEM +
4953 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4954 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4955 REG_WR(bp, BAR_TSTRORM_INTMEM +
4956 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4957 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4959 REG_WR(bp, BAR_USTRORM_INTMEM +
4960 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4961 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4962 REG_WR(bp, BAR_USTRORM_INTMEM +
4963 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4964 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4966 if (CHIP_IS_E1H(bp)) {
4967 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4969 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4971 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4973 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4976 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4980 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4982 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4983 SGE_PAGE_SIZE * PAGES_PER_SGE),
4985 for_each_rx_queue(bp, i) {
4986 struct bnx2x_fastpath *fp = &bp->fp[i];
4988 REG_WR(bp, BAR_USTRORM_INTMEM +
4989 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4990 U64_LO(fp->rx_comp_mapping));
4991 REG_WR(bp, BAR_USTRORM_INTMEM +
4992 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4993 U64_HI(fp->rx_comp_mapping));
4995 REG_WR16(bp, BAR_USTRORM_INTMEM +
4996 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
5000 /* dropless flow control */
5001 if (CHIP_IS_E1H(bp)) {
5002 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5004 rx_pause.bd_thr_low = 250;
5005 rx_pause.cqe_thr_low = 250;
5007 rx_pause.sge_thr_low = 0;
5008 rx_pause.bd_thr_high = 350;
5009 rx_pause.cqe_thr_high = 350;
5010 rx_pause.sge_thr_high = 0;
5012 for_each_rx_queue(bp, i) {
5013 struct bnx2x_fastpath *fp = &bp->fp[i];
5015 if (!fp->disable_tpa) {
5016 rx_pause.sge_thr_low = 150;
5017 rx_pause.sge_thr_high = 250;
5021 offset = BAR_USTRORM_INTMEM +
5022 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5025 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5027 REG_WR(bp, offset + j*4,
5028 ((u32 *)&rx_pause)[j]);
5032 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5034 /* Init rate shaping and fairness contexts */
5038 /* During init there is no active link;
5039 until the link is up, set the link rate to 10Gbps */
5040 bp->link_vars.line_speed = SPEED_10000;
5041 bnx2x_init_port_minmax(bp);
5043 bnx2x_calc_vn_weight_sum(bp);
5045 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5046 bnx2x_init_vn_minmax(bp, 2*vn + port);
5048 /* Enable rate shaping and fairness */
5049 bp->cmng.flags.cmng_enables =
5050 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5051 if (bp->vn_weight_sum)
5052 bp->cmng.flags.cmng_enables |=
5053 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5055 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5056 " fairness will be disabled\n");
5058 /* rate shaping and fairness are disabled */
5060 "single function mode minmax will be disabled\n");
5064 /* Store it to internal memory */
5066 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5067 REG_WR(bp, BAR_XSTRORM_INTMEM +
5068 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5069 ((u32 *)(&bp->cmng))[i]);
5072 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5074 switch (load_code) {
5075 case FW_MSG_CODE_DRV_LOAD_COMMON:
5076 bnx2x_init_internal_common(bp);
5079 case FW_MSG_CODE_DRV_LOAD_PORT:
5080 bnx2x_init_internal_port(bp);
5083 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5084 bnx2x_init_internal_func(bp);
5088 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5093 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5097 for_each_queue(bp, i) {
5098 struct bnx2x_fastpath *fp = &bp->fp[i];
5101 fp->state = BNX2X_FP_STATE_CLOSED;
5103 fp->cl_id = BP_L_ID(bp) + i;
5104 fp->sb_id = fp->cl_id;
5106 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5107 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5108 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5110 bnx2x_update_fpsb_idx(fp);
5113 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5115 bnx2x_update_dsb_idx(bp);
5116 bnx2x_update_coalesce(bp);
5117 bnx2x_init_rx_rings(bp);
5118 bnx2x_init_tx_ring(bp);
5119 bnx2x_init_sp_ring(bp);
5120 bnx2x_init_context(bp);
5121 bnx2x_init_internal(bp, load_code);
5122 bnx2x_init_ind_table(bp);
5123 bnx2x_stats_init(bp);
5125 /* At this point, we are ready for interrupts */
5126 atomic_set(&bp->intr_sem, 0);
5128 /* flush all before enabling interrupts */
5132 bnx2x_int_enable(bp);
5135 /* end of nic init */
5138 /* gzip service functions */
5141 static int bnx2x_gunzip_init(struct bnx2x *bp)
5143 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5144 &bp->gunzip_mapping);
5145 if (bp->gunzip_buf == NULL)
5148 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5149 if (bp->strm == NULL)
5152 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5154 if (bp->strm->workspace == NULL)
5164 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5165 bp->gunzip_mapping);
5166 bp->gunzip_buf = NULL;
5169 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5170 " decompression\n", bp->dev->name);
5174 static void bnx2x_gunzip_end(struct bnx2x *bp)
5176 kfree(bp->strm->workspace);
5181 if (bp->gunzip_buf) {
5182 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5183 bp->gunzip_mapping);
5184 bp->gunzip_buf = NULL;
5188 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5192 /* check gzip header */
5193 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5200 if (zbuf[3] & FNAME)
5201 while ((zbuf[n++] != 0) && (n < len));
5203 bp->strm->next_in = zbuf + n;
5204 bp->strm->avail_in = len - n;
5205 bp->strm->next_out = bp->gunzip_buf;
5206 bp->strm->avail_out = FW_BUF_SIZE;
5208 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5212 rc = zlib_inflate(bp->strm, Z_FINISH);
5213 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5214 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5215 bp->dev->name, bp->strm->msg);
5217 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5218 if (bp->gunzip_outlen & 0x3)
5219 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5220 " gunzip_outlen (%d) not aligned\n",
5221 bp->dev->name, bp->gunzip_outlen);
5222 bp->gunzip_outlen >>= 2;
5224 zlib_inflateEnd(bp->strm);
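/* Summary of the gunzip flow: validate the gzip magic (0x1f 0x8b) and
 * the deflate method, optionally skip an embedded file name (FNAME
 * flag), then run a raw inflate (-MAX_WBITS, since the gzip header
 * was consumed by hand) into gunzip_buf; gunzip_outlen is checked for
 * 32-bit alignment and reported in dwords.
 */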
5226 if (rc == Z_STREAM_END)
5232 /* nic load/unload */
5235 /* General service functions */
5238 /* send a NIG loopback debug packet */
5239 static void bnx2x_lb_pckt(struct bnx2x *bp)
5243 /* Ethernet source and destination addresses */
5244 wb_write[0] = 0x55555555;
5245 wb_write[1] = 0x55555555;
5246 wb_write[2] = 0x20; /* SOP */
5247 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5249 /* NON-IP protocol */
5250 wb_write[0] = 0x09000000;
5251 wb_write[1] = 0x55555555;
5252 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5253 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
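/* Each 3-dword write to NIG_REG_DEBUG_PACKET_LB injects part of a
 * loopback frame: the first carries the (dummy) Ethernet addresses
 * and the SOP marker, the second a non-IP payload word and the EOP
 * marker, giving a minimal 0x10-byte debug packet.
 */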
5256 /* some of the internal memories
5257 * are not directly readable from the driver;
5258 * to test them we send debug packets */
5260 static int bnx2x_int_mem_test(struct bnx2x *bp)
5266 if (CHIP_REV_IS_FPGA(bp))
5268 else if (CHIP_REV_IS_EMUL(bp))
5273 DP(NETIF_MSG_HW, "start part1\n");
5275 /* Disable inputs of parser neighbor blocks */
5276 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5277 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5278 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5279 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5281 /* Write 0 to parser credits for CFC search request */
5282 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5284 /* send Ethernet packet */
5287 /* TODO: do I reset the NIG statistics? */
5288 /* Wait until NIG register shows 1 packet of size 0x10 */
5289 count = 1000 * factor;
5292 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5293 val = *bnx2x_sp(bp, wb_data[0]);
5301 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5305 /* Wait until PRS register shows 1 packet */
5306 count = 1000 * factor;
5308 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5316 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5320 /* Reset and init BRB, PRS */
5321 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5323 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5325 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5326 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5328 DP(NETIF_MSG_HW, "part2\n");
5330 /* Disable inputs of parser neighbor blocks */
5331 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5332 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5333 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5334 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5336 /* Write 0 to parser credits for CFC search request */
5337 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5339 /* send 10 Ethernet packets */
5340 for (i = 0; i < 10; i++)
5343 /* Wait until NIG register shows 10 + 1
5344 packets of size 11*0x10 = 0xb0 */
5345 count = 1000 * factor;
5348 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5349 val = *bnx2x_sp(bp, wb_data[0]);
5357 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5361 /* Wait until PRS register shows 2 packets */
5362 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5364 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5366 /* Write 1 to parser credits for CFC search request */
5367 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5369 /* Wait until PRS register shows 3 packets */
5370 msleep(10 * factor);
5371 /* Wait until NIG register shows 1 packet of size 0x10 */
5372 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5374 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5376 /* clear NIG EOP FIFO */
5377 for (i = 0; i < 11; i++)
5378 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5379 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5381 BNX2X_ERR("clear of NIG failed\n");
5385 /* Reset and init BRB, PRS, NIG */
5386 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5388 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5390 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5391 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5394 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5397 /* Enable inputs of parser neighbor blocks */
5398 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5399 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5400 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5401 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5403 DP(NETIF_MSG_HW, "done\n");
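/* The self test above works by isolating the parser: its neighbour
 * blocks are disabled and the CFC search credit is zeroed so that the
 * injected debug packets pile up where the NIG and PRS packet
 * counters can observe them; BRB and PRS are then reset and
 * re-initialized and the parser inputs restored.  'factor' only
 * stretches the timeouts for FPGA/emulation platforms.
 */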
5408 static void enable_blocks_attention(struct bnx2x *bp)
5410 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5411 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5412 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5413 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5414 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5415 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5416 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5417 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5418 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5419 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5420 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5421 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5422 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5423 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5424 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5425 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5426 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5427 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5428 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5429 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5430 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5431 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5432 if (CHIP_REV_IS_FPGA(bp))
5433 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5435 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5436 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5437 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5438 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5439 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5440 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5441 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5442 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5443 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5444 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5448 static void bnx2x_reset_common(struct bnx2x *bp)
5451 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5453 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5456 static int bnx2x_init_common(struct bnx2x *bp)
5460 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5462 bnx2x_reset_common(bp);
5463 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5464 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5466 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5467 if (CHIP_IS_E1H(bp))
5468 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5470 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5472 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5474 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5475 if (CHIP_IS_E1(bp)) {
5476 /* enable HW interrupt from PXP on USDM overflow
5477 bit 16 on INT_MASK_0 */
5478 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5481 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5485 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5486 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5487 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5488 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5489 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5490 /* make sure this value is 0 */
5491 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5493 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5494 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5495 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5496 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5497 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5500 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5502 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5503 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5504 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5507 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5508 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5510 /* let the HW do its magic ... */
5512 /* finish PXP init */
5513 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5515 BNX2X_ERR("PXP2 CFG failed\n");
5518 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5520 BNX2X_ERR("PXP2 RD_INIT failed\n");
5524 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5525 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5527 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5529 /* clean the DMAE memory */
5531 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5533 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5534 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5535 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5536 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5538 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5539 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5540 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5541 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5543 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5544 /* soft reset pulse */
5545 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5546 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5549 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5552 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5553 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5554 if (!CHIP_REV_IS_SLOW(bp)) {
5555 /* enable hw interrupt from doorbell Q */
5556 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5559 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5560 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5561 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5563 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5564 if (CHIP_IS_E1H(bp))
5565 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5567 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5568 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5569 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5570 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5572 if (CHIP_IS_E1H(bp)) {
5573 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5574 STORM_INTMEM_SIZE_E1H/2);
5576 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5577 0, STORM_INTMEM_SIZE_E1H/2);
5578 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5579 STORM_INTMEM_SIZE_E1H/2);
5581 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5582 0, STORM_INTMEM_SIZE_E1H/2);
5583 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5584 STORM_INTMEM_SIZE_E1H/2);
5586 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5587 0, STORM_INTMEM_SIZE_E1H/2);
5588 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5589 STORM_INTMEM_SIZE_E1H/2);
5591 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5592 0, STORM_INTMEM_SIZE_E1H/2);
5594 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5595 STORM_INTMEM_SIZE_E1);
5596 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5597 STORM_INTMEM_SIZE_E1);
5598 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5599 STORM_INTMEM_SIZE_E1);
5600 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5601 STORM_INTMEM_SIZE_E1);
5604 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5605 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5606 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5607 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5610 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5612 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5615 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5616 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5617 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5619 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5620 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5621 REG_WR(bp, i, 0xc0cac01a);
5622 /* TODO: replace with something meaningful */
5624 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5625 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5627 if (sizeof(union cdu_context) != 1024)
5628 /* we currently assume that a context is 1024 bytes */
5629 printk(KERN_ALERT PFX "please adjust the size of"
5630 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5632 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5633 val = (4 << 24) + (0 << 12) + 1024;
5634 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5635 if (CHIP_IS_E1(bp)) {
5636 /* !!! fix pxp client credit until excel update */
5637 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5638 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5641 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5642 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5643 /* enable context validation interrupt from CFC */
5644 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5646 /* set the thresholds to prevent CFC/CDU race */
5647 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5649 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5650 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5652 /* PXPCS COMMON comes here */
5653 /* Reset PCIE errors for debug */
5654 REG_WR(bp, 0x2814, 0xffffffff);
5655 REG_WR(bp, 0x3820, 0xffffffff);
5657 /* EMAC0 COMMON comes here */
5658 /* EMAC1 COMMON comes here */
5659 /* DBU COMMON comes here */
5660 /* DBG COMMON comes here */
5662 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5663 if (CHIP_IS_E1H(bp)) {
5664 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5665 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5668 if (CHIP_REV_IS_SLOW(bp))
5671 /* finish CFC init */
5672 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5674 BNX2X_ERR("CFC LL_INIT failed\n");
5677 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5679 BNX2X_ERR("CFC AC_INIT failed\n");
5682 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5684 BNX2X_ERR("CFC CAM_INIT failed\n");
5687 REG_WR(bp, CFC_REG_DEBUG0, 0);
5689 /* read NIG statistic
5690 to see if this is our first up since powerup */
5691 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5692 val = *bnx2x_sp(bp, wb_data[0]);
5694 /* do internal memory self test */
5695 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5696 BNX2X_ERR("internal mem self test failed\n");
5700 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5701 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5702 /* Fan failure is indicated by SPIO 5 */
5703 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5704 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5706 /* set to active low mode */
5707 val = REG_RD(bp, MISC_REG_SPIO_INT);
5708 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5709 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5710 REG_WR(bp, MISC_REG_SPIO_INT, val);
5712 /* enable interrupt to signal the IGU */
5713 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5714 val |= (1 << MISC_REGISTERS_SPIO_5);
5715 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5722 /* clear PXP2 attentions */
5723 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5725 enable_blocks_attention(bp);
5727 if (!BP_NOMCP(bp)) {
5728 bnx2x_acquire_phy_lock(bp);
5729 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5730 bnx2x_release_phy_lock(bp);
5732 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5737 static int bnx2x_init_port(struct bnx2x *bp)
5739 int port = BP_PORT(bp);
5743 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5745 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5747 /* Port PXP comes here */
5748 /* Port PXP2 comes here */
5753 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5754 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5755 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5756 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5761 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5762 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5763 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5764 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5769 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5770 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5771 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5772 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5774 /* Port CMs come here */
5775 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5776 (port ? XCM_PORT1_END : XCM_PORT0_END));
5778 /* Port QM comes here */
5780 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5781 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5783 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5784 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5786 /* Port DQ comes here */
5788 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5789 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5790 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5791 /* no pause for emulation and FPGA */
5796 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5797 else if (bp->dev->mtu > 4096) {
5798 if (bp->flags & ONE_PORT_FLAG)
5802 /* (24*1024 + val*4)/256 */
5803 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5806 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5807 high = low + 56; /* 14*1024/256 */
5809 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5810 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
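/* The BRB pause thresholds are programmed in 256-byte blocks, e.g.
 * high = low + 56 corresponds to an extra 14KB (14*1024/256) of
 * headroom; the low threshold itself depends on the MTU and on
 * whether the device runs in one-port mode.
 */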
5813 /* Port PRS comes here */
5814 /* Port TSDM comes here */
5815 /* Port CSDM comes here */
5816 /* Port USDM comes here */
5817 /* Port XSDM comes here */
5818 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5819 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5820 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5821 port ? USEM_PORT1_END : USEM_PORT0_END);
5822 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5823 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5824 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5825 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5826 /* Port UPB comes here */
5827 /* Port XPB comes here */
5829 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5830 port ? PBF_PORT1_END : PBF_PORT0_END);
5832 /* configure PBF to work without PAUSE mtu 9000 */
5833 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5835 /* update threshold */
5836 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5837 /* update init credit */
5838 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5841 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5843 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5846 /* tell the searcher where the T2 table is */
5847 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5849 wb_write[0] = U64_LO(bp->t2_mapping);
5850 wb_write[1] = U64_HI(bp->t2_mapping);
5851 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5852 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5853 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5854 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5856 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5857 /* Port SRCH comes here */
5859 /* Port CDU comes here */
5860 /* Port CFC comes here */
5862 if (CHIP_IS_E1(bp)) {
5863 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5864 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5866 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5867 port ? HC_PORT1_END : HC_PORT0_END);
5869 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5870 MISC_AEU_PORT0_START,
5871 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5872 /* init aeu_mask_attn_func_0/1:
5873 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5874 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5875 * bits 4-7 are used for "per vn group attention" */
5876 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5877 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5879 /* Port PXPCS comes here */
5880 /* Port EMAC0 comes here */
5881 /* Port EMAC1 comes here */
5882 /* Port DBU comes here */
5883 /* Port DBG comes here */
5884 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5885 port ? NIG_PORT1_END : NIG_PORT0_END);
5887 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5889 if (CHIP_IS_E1H(bp)) {
5890 /* 0x2 disable e1hov, 0x1 enable */
5891 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5892 (IS_E1HMF(bp) ? 0x1 : 0x2));
5894 /* support pause requests from USDM, TSDM and BRB */
5895 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5898 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5899 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5900 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5904 /* Port MCP comes here */
5905 /* Port DMAE comes here */
5907 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5908 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5909 /* add SPIO 5 to group 0 */
5910 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5911 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5912 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5919 bnx2x__link_reset(bp);
5924 #define ILT_PER_FUNC (768/2)
5925 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5926 /* the phys address is shifted right 12 bits and has a
5927 valid bit (1) added at the 53rd bit;
5928 then, since this is a wide register(TM),
5929 we split it into two 32-bit writes */
5931 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5932 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5933 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5934 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5936 #define CNIC_ILT_LINES 0
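/* Worked example of the ILT address encoding: for a page at physical
 * address 0x123456000, ONCHIP_ADDR1() yields 0x00123456 (address bits
 * 12-43) and ONCHIP_ADDR2() yields 0x00100000 (address bits 44 and up
 * plus the valid bit at position 20 of the high word).  PXP_ONE_ILT(x)
 * packs the same ILT line as both the first and last entry of a
 * one-line PXP_ILT_RANGE.
 */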
5938 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5942 if (CHIP_IS_E1H(bp))
5943 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5945 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5947 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5950 static int bnx2x_init_func(struct bnx2x *bp)
5952 int port = BP_PORT(bp);
5953 int func = BP_FUNC(bp);
5957 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5959 /* set MSI reconfigure capability */
5960 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5961 val = REG_RD(bp, addr);
5962 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5963 REG_WR(bp, addr, val);
5965 i = FUNC_ILT_BASE(func);
5967 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5968 if (CHIP_IS_E1H(bp)) {
5969 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5970 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5972 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5973 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5976 if (CHIP_IS_E1H(bp)) {
5977 for (i = 0; i < 9; i++)
5978 bnx2x_init_block(bp,
5979 cm_start[func][i], cm_end[func][i]);
5981 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5982 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5985 /* HC init per function */
5986 if (CHIP_IS_E1H(bp)) {
5987 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5989 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5990 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5992 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5994 /* Reset PCIE errors for debug */
5995 REG_WR(bp, 0x2114, 0xffffffff);
5996 REG_WR(bp, 0x2120, 0xffffffff);
6001 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6005 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6006 BP_FUNC(bp), load_code);
6009 mutex_init(&bp->dmae_mutex);
6010 bnx2x_gunzip_init(bp);
6012 switch (load_code) {
6013 case FW_MSG_CODE_DRV_LOAD_COMMON:
6014 rc = bnx2x_init_common(bp);
6019 case FW_MSG_CODE_DRV_LOAD_PORT:
6021 rc = bnx2x_init_port(bp);
6026 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6028 rc = bnx2x_init_func(bp);
6034 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6038 if (!BP_NOMCP(bp)) {
6039 int func = BP_FUNC(bp);
6041 bp->fw_drv_pulse_wr_seq =
6042 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6043 DRV_PULSE_SEQ_MASK);
6044 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6045 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6046 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6050 /* this needs to be done before gunzip end */
6051 bnx2x_zero_def_sb(bp);
6052 for_each_queue(bp, i)
6053 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6056 bnx2x_gunzip_end(bp);
6061 /* send the MCP a request, block until there is a reply */
6062 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6064 int func = BP_FUNC(bp);
6065 u32 seq = ++bp->fw_seq;
6068 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6070 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6071 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6074 /* let the FW do its magic ... */
6077 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6079 /* Give the FW up to 2 seconds (200*10ms) */
6080 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6082 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6083 cnt*delay, rc, seq);
6085 /* is this a reply to our command? */
6086 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6087 rc &= FW_MSG_CODE_MASK;
6091 BNX2X_ERR("FW failed to respond!\n");
6099 static void bnx2x_free_mem(struct bnx2x *bp)
6102 #define BNX2X_PCI_FREE(x, y, size) \
6105 pci_free_consistent(bp->pdev, size, x, y); \
6111 #define BNX2X_FREE(x) \
6123 for_each_queue(bp, i) {
6126 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6127 bnx2x_fp(bp, i, status_blk_mapping),
6128 sizeof(struct host_status_block) +
6129 sizeof(struct eth_tx_db_data));
6132 for_each_rx_queue(bp, i) {
6134 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6135 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6136 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6137 bnx2x_fp(bp, i, rx_desc_mapping),
6138 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6140 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6141 bnx2x_fp(bp, i, rx_comp_mapping),
6142 sizeof(struct eth_fast_path_rx_cqe) *
6146 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6147 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6148 bnx2x_fp(bp, i, rx_sge_mapping),
6149 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6152 for_each_tx_queue(bp, i) {
6154 /* fastpath tx rings: tx_buf tx_desc */
6155 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6156 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6157 bnx2x_fp(bp, i, tx_desc_mapping),
6158 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6160 /* end of fastpath */
6162 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6163 sizeof(struct host_def_status_block));
6165 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6166 sizeof(struct bnx2x_slowpath));
6169 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6170 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6171 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6172 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6174 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6176 #undef BNX2X_PCI_FREE
6180 static int bnx2x_alloc_mem(struct bnx2x *bp)
6183 #define BNX2X_PCI_ALLOC(x, y, size) \
6185 x = pci_alloc_consistent(bp->pdev, size, y); \
6187 goto alloc_mem_err; \
6188 memset(x, 0, size); \
6191 #define BNX2X_ALLOC(x, size) \
6193 x = vmalloc(size); \
6195 goto alloc_mem_err; \
6196 memset(x, 0, size); \
6203 for_each_queue(bp, i) {
6204 bnx2x_fp(bp, i, bp) = bp;
6207 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6208 &bnx2x_fp(bp, i, status_blk_mapping),
6209 sizeof(struct host_status_block) +
6210 sizeof(struct eth_tx_db_data));
6213 for_each_rx_queue(bp, i) {
6215 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6216 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6217 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6218 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6219 &bnx2x_fp(bp, i, rx_desc_mapping),
6220 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6222 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6223 &bnx2x_fp(bp, i, rx_comp_mapping),
6224 sizeof(struct eth_fast_path_rx_cqe) *
6228 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6229 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6230 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6231 &bnx2x_fp(bp, i, rx_sge_mapping),
6232 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6235 for_each_tx_queue(bp, i) {
6237 bnx2x_fp(bp, i, hw_tx_prods) =
6238 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6240 bnx2x_fp(bp, i, tx_prods_mapping) =
6241 bnx2x_fp(bp, i, status_blk_mapping) +
6242 sizeof(struct host_status_block);
6244 /* fastpath tx rings: tx_buf tx_desc */
6245 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6246 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6247 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6248 &bnx2x_fp(bp, i, tx_desc_mapping),
6249 sizeof(struct eth_tx_bd) * NUM_TX_BD);
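/* Note the shared page layout set up above: each fastpath allocates
 * one coherent area holding its host_status_block immediately
 * followed by eth_tx_db_data, so hw_tx_prods points right after the
 * status block and tx_prods_mapping is status_blk_mapping plus
 * sizeof(struct host_status_block).
 */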
6251 /* end of fastpath */
6253 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6254 sizeof(struct host_def_status_block));
6256 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6257 sizeof(struct bnx2x_slowpath));
6260 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6263 for (i = 0; i < 64*1024; i += 64) {
6264 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6265 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6268 /* allocate searcher T2 table
6269 we allocate 1/4 of alloc num for T2
6270 (which is not entered into the ILT) */
6271 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6274 for (i = 0; i < 16*1024; i += 64)
6275 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6277 /* now fixup the last line in the block to point to the next block */
6278 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
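/* The T2 table is a chain of 64-byte entries: bytes 56..63 of each entry
 * hold the DMA address of the next entry, and the last entry points back to
 * the start of the block, closing the ring.
 */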
6280 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6281 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6283 /* QM queues (128*MAX_CONN) */
6284 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6287 /* Slow path ring */
6288 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6296 #undef BNX2X_PCI_ALLOC
6300 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6304 for_each_tx_queue(bp, i) {
6305 struct bnx2x_fastpath *fp = &bp->fp[i];
6307 u16 bd_cons = fp->tx_bd_cons;
6308 u16 sw_prod = fp->tx_pkt_prod;
6309 u16 sw_cons = fp->tx_pkt_cons;
6311 while (sw_cons != sw_prod) {
6312 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6318 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6322 for_each_rx_queue(bp, j) {
6323 struct bnx2x_fastpath *fp = &bp->fp[j];
6325 for (i = 0; i < NUM_RX_BD; i++) {
6326 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6327 struct sk_buff *skb = rx_buf->skb;
6332 pci_unmap_single(bp->pdev,
6333 pci_unmap_addr(rx_buf, mapping),
6335 PCI_DMA_FROMDEVICE);
6340 if (!fp->disable_tpa)
6341 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6342 ETH_MAX_AGGREGATION_QUEUES_E1 :
6343 ETH_MAX_AGGREGATION_QUEUES_E1H);
6347 static void bnx2x_free_skbs(struct bnx2x *bp)
6349 bnx2x_free_tx_skbs(bp);
6350 bnx2x_free_rx_skbs(bp);
6353 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6357 free_irq(bp->msix_table[0].vector, bp->dev);
6358 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6359 bp->msix_table[0].vector);
6361 for_each_queue(bp, i) {
6362 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6363 "state %x\n", i, bp->msix_table[i + offset].vector,
6364 bnx2x_fp(bp, i, state));
6366 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6370 static void bnx2x_free_irq(struct bnx2x *bp)
6372 if (bp->flags & USING_MSIX_FLAG) {
6373 bnx2x_free_msix_irqs(bp);
6374 pci_disable_msix(bp->pdev);
6375 bp->flags &= ~USING_MSIX_FLAG;
6377 } else if (bp->flags & USING_MSI_FLAG) {
6378 free_irq(bp->pdev->irq, bp->dev);
6379 pci_disable_msi(bp->pdev);
6380 bp->flags &= ~USING_MSI_FLAG;
6383 free_irq(bp->pdev->irq, bp->dev);
6386 static int bnx2x_enable_msix(struct bnx2x *bp)
6388 int i, rc, offset = 1;
6391 bp->msix_table[0].entry = igu_vec;
6392 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6394 for_each_queue(bp, i) {
6395 igu_vec = BP_L_ID(bp) + offset + i;
6396 bp->msix_table[i + offset].entry = igu_vec;
6397 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6398 "(fastpath #%u)\n", i + offset, igu_vec, i);
6401 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6402 BNX2X_NUM_QUEUES(bp) + offset);
6404 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6408 bp->flags |= USING_MSIX_FLAG;
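/* MSI-X vector layout: entry 0 carries the slowpath (default status block)
 * interrupt, entries 1..num_queues map one-to-one to the fastpath queues;
 * the IGU vector numbers are derived from BP_L_ID(bp), the slowpath offset
 * and the queue index.
 */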
6413 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6415 int i, rc, offset = 1;
6417 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6418 bp->dev->name, bp->dev);
6420 BNX2X_ERR("request sp irq failed\n");
6424 for_each_queue(bp, i) {
6425 struct bnx2x_fastpath *fp = &bp->fp[i];
6427 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6428 rc = request_irq(bp->msix_table[i + offset].vector,
6429 bnx2x_msix_fp_int, 0, fp->name, fp);
6431 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6432 bnx2x_free_msix_irqs(bp);
6436 fp->state = BNX2X_FP_STATE_IRQ;
6439 i = BNX2X_NUM_QUEUES(bp);
6441 printk(KERN_INFO PFX
6442 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6443 bp->dev->name, bp->msix_table[0].vector,
6444 bp->msix_table[offset].vector,
6445 bp->msix_table[offset + i - 1].vector);
6447 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6448 bp->dev->name, bp->msix_table[0].vector,
6449 bp->msix_table[offset + i - 1].vector);
6454 static int bnx2x_enable_msi(struct bnx2x *bp)
6458 rc = pci_enable_msi(bp->pdev);
6460 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6463 bp->flags |= USING_MSI_FLAG;
6468 static int bnx2x_req_irq(struct bnx2x *bp)
6470 unsigned long flags;
6473 if (bp->flags & USING_MSI_FLAG)
6476 flags = IRQF_SHARED;
6478 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6479 bp->dev->name, bp->dev);
6481 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6486 static void bnx2x_napi_enable(struct bnx2x *bp)
6490 for_each_rx_queue(bp, i)
6491 napi_enable(&bnx2x_fp(bp, i, napi));
6494 static void bnx2x_napi_disable(struct bnx2x *bp)
6498 for_each_rx_queue(bp, i)
6499 napi_disable(&bnx2x_fp(bp, i, napi));
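/* Re-arm the datapath. intr_sem is treated here as a disable-depth count
 * (a descriptive note, assuming the matching netif_stop/int_disable path
 * increments it): NAPI, interrupts and the tx queues are only re-enabled
 * once the count drops back to zero.
 */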
6502 static void bnx2x_netif_start(struct bnx2x *bp)
6504 if (atomic_dec_and_test(&bp->intr_sem)) {
6505 if (netif_running(bp->dev)) {
6506 bnx2x_napi_enable(bp);
6507 bnx2x_int_enable(bp);
6508 if (bp->state == BNX2X_STATE_OPEN)
6509 netif_tx_wake_all_queues(bp->dev);
6514 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6516 bnx2x_int_disable_sync(bp, disable_hw);
6517 bnx2x_napi_disable(bp);
6518 if (netif_running(bp->dev)) {
6519 netif_tx_disable(bp->dev);
6520 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6525 * Init service functions
6528 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6530 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6531 int port = BP_PORT(bp);
6534 * unicasts 0-31:port0 32-63:port1
6535 * multicast 64-127:port0 128-191:port1
6537 config->hdr.length = 2;
6538 config->hdr.offset = port ? 32 : 0;
6539 config->hdr.client_id = BP_CL_ID(bp);
6540 config->hdr.reserved1 = 0;
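/* Each CAM entry stores the 6-byte MAC as three 16-bit words; the swab16
 * on each byte pair below puts them in what appears to be the byte order
 * the hardware CAM expects.
 */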
6543 config->config_table[0].cam_entry.msb_mac_addr =
6544 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6545 config->config_table[0].cam_entry.middle_mac_addr =
6546 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6547 config->config_table[0].cam_entry.lsb_mac_addr =
6548 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6549 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6551 config->config_table[0].target_table_entry.flags = 0;
6553 CAM_INVALIDATE(config->config_table[0]);
6554 config->config_table[0].target_table_entry.client_id = 0;
6555 config->config_table[0].target_table_entry.vlan_id = 0;
6557 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6558 (set ? "setting" : "clearing"),
6559 config->config_table[0].cam_entry.msb_mac_addr,
6560 config->config_table[0].cam_entry.middle_mac_addr,
6561 config->config_table[0].cam_entry.lsb_mac_addr);
6564 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6565 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6566 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6567 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6569 config->config_table[1].target_table_entry.flags =
6570 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6572 CAM_INVALIDATE(config->config_table[1]);
6573 config->config_table[1].target_table_entry.client_id = 0;
6574 config->config_table[1].target_table_entry.vlan_id = 0;
6576 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6577 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6578 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6581 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6583 struct mac_configuration_cmd_e1h *config =
6584 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6586 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6587 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6591 /* CAM allocation for E1H
6592 * unicasts: by func number
6593 * multicast: 20+FUNC*20, 20 each
6595 config->hdr.length = 1;
6596 config->hdr.offset = BP_FUNC(bp);
6597 config->hdr.client_id = BP_CL_ID(bp);
6598 config->hdr.reserved1 = 0;
6601 config->config_table[0].msb_mac_addr =
6602 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6603 config->config_table[0].middle_mac_addr =
6604 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6605 config->config_table[0].lsb_mac_addr =
6606 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6607 config->config_table[0].client_id = BP_L_ID(bp);
6608 config->config_table[0].vlan_id = 0;
6609 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6611 config->config_table[0].flags = BP_PORT(bp);
6613 config->config_table[0].flags =
6614 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6616 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6617 (set ? "setting" : "clearing"),
6618 config->config_table[0].msb_mac_addr,
6619 config->config_table[0].middle_mac_addr,
6620 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6622 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6623 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6624 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6627 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6628 int *state_p, int poll)
6630 /* can take a while if any port is running */
6633 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6634 poll ? "polling" : "waiting", state, idx);
6639 bnx2x_rx_int(bp->fp, 10);
6640 /* if index is different from 0
6641 * the reply for some commands will
6642 * be on the non default queue
6645 bnx2x_rx_int(&bp->fp[idx], 10);
6648 mb(); /* state is changed by bnx2x_sp_event() */
6649 if (*state_p == state)
6656 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6657 poll ? "polling" : "waiting", state, idx);
6658 #ifdef BNX2X_STOP_ON_ERROR
6665 static int bnx2x_setup_leading(struct bnx2x *bp)
6669 /* reset IGU state */
6670 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6673 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6675 /* Wait for completion */
6676 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6681 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6683 struct bnx2x_fastpath *fp = &bp->fp[index];
6685 /* reset IGU state */
6686 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6689 fp->state = BNX2X_FP_STATE_OPENING;
6690 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6693 /* Wait for completion */
6694 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6698 static int bnx2x_poll(struct napi_struct *napi, int budget);
6700 static void bnx2x_set_int_mode(struct bnx2x *bp)
6708 bp->num_rx_queues = num_queues;
6709 bp->num_tx_queues = num_queues;
6711 "set number of queues to %d\n", num_queues);
6716 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6717 num_queues = min_t(u32, num_online_cpus(),
6718 BNX2X_MAX_QUEUES(bp));
6721 bp->num_rx_queues = num_queues;
6722 bp->num_tx_queues = num_queues;
6723 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6724 " number of tx queues to %d\n",
6725 bp->num_rx_queues, bp->num_tx_queues);
6726 /* if we can't use MSI-X we only need one fp,
6727 * so try to enable MSI-X with the requested number of fp's
6728 * and fall back to MSI or legacy INTx with one fp
6730 if (bnx2x_enable_msix(bp)) {
6731 /* failed to enable MSI-X */
6733 bp->num_rx_queues = num_queues;
6734 bp->num_tx_queues = num_queues;
6736 BNX2X_ERR("Multi requested but failed to "
6737 "enable MSI-X set number of "
6738 "queues to %d\n", num_queues);
6742 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6745 static void bnx2x_set_rx_mode(struct net_device *dev);
6747 /* must be called with rtnl_lock */
6748 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6752 #ifdef BNX2X_STOP_ON_ERROR
6753 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6754 if (unlikely(bp->panic))
6758 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6760 bnx2x_set_int_mode(bp);
6762 if (bnx2x_alloc_mem(bp))
6765 for_each_rx_queue(bp, i)
6766 bnx2x_fp(bp, i, disable_tpa) =
6767 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6769 for_each_rx_queue(bp, i)
6770 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6773 #ifdef BNX2X_STOP_ON_ERROR
6774 for_each_rx_queue(bp, i) {
6775 struct bnx2x_fastpath *fp = &bp->fp[i];
6777 fp->poll_no_work = 0;
6779 fp->poll_max_calls = 0;
6780 fp->poll_complete = 0;
6784 bnx2x_napi_enable(bp);
6786 if (bp->flags & USING_MSIX_FLAG) {
6787 rc = bnx2x_req_msix_irqs(bp);
6789 pci_disable_msix(bp->pdev);
6793 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6794 bnx2x_enable_msi(bp);
6796 rc = bnx2x_req_irq(bp);
6798 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6799 if (bp->flags & USING_MSI_FLAG)
6800 pci_disable_msi(bp->pdev);
6803 if (bp->flags & USING_MSI_FLAG) {
6804 bp->dev->irq = bp->pdev->irq;
6805 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6806 bp->dev->name, bp->pdev->irq);
6810 /* Send LOAD_REQUEST command to MCP.
6811 The response tells us which type of LOAD command to run:
6812 if this is the first port to be initialized,
6813 the common blocks must be initialized as well, otherwise not
6815 if (!BP_NOMCP(bp)) {
6816 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6818 BNX2X_ERR("MCP response failure, aborting\n");
6822 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6823 rc = -EBUSY; /* other port in diagnostic mode */
6828 int port = BP_PORT(bp);
6830 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6831 load_count[0], load_count[1], load_count[2]);
6833 load_count[1 + port]++;
6834 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6835 load_count[0], load_count[1], load_count[2]);
6836 if (load_count[0] == 1)
6837 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6838 else if (load_count[1 + port] == 1)
6839 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6841 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
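/* Without an MCP the driver keeps its own load accounting: load_count[0]
 * counts all loaded functions, load_count[1 + port] counts the functions on
 * this port. The first function overall performs the COMMON init, the first
 * on a port performs the PORT init, everyone else does FUNCTION-only init.
 */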
6844 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6845 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6849 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6852 rc = bnx2x_init_hw(bp, load_code);
6854 BNX2X_ERR("HW init failed, aborting\n");
6858 /* Setup NIC internals and enable interrupts */
6859 bnx2x_nic_init(bp, load_code);
6861 /* Send LOAD_DONE command to MCP */
6862 if (!BP_NOMCP(bp)) {
6863 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6865 BNX2X_ERR("MCP response failure, aborting\n");
6871 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6873 rc = bnx2x_setup_leading(bp);
6875 BNX2X_ERR("Setup leading failed!\n");
6879 if (CHIP_IS_E1H(bp))
6880 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6881 BNX2X_ERR("!!! mf_cfg function disabled\n");
6882 bp->state = BNX2X_STATE_DISABLED;
6885 if (bp->state == BNX2X_STATE_OPEN)
6886 for_each_nondefault_queue(bp, i) {
6887 rc = bnx2x_setup_multi(bp, i);
6893 bnx2x_set_mac_addr_e1(bp, 1);
6895 bnx2x_set_mac_addr_e1h(bp, 1);
6898 bnx2x_initial_phy_init(bp);
6900 /* Start fast path */
6901 switch (load_mode) {
6903 /* Tx queues should only be re-enabled */
6904 netif_tx_wake_all_queues(bp->dev);
6905 /* Initialize the receive filter. */
6906 bnx2x_set_rx_mode(bp->dev);
6910 netif_tx_start_all_queues(bp->dev);
6911 /* Initialize the receive filter. */
6912 bnx2x_set_rx_mode(bp->dev);
6916 /* Initialize the receive filter. */
6917 bnx2x_set_rx_mode(bp->dev);
6918 bp->state = BNX2X_STATE_DIAG;
6926 bnx2x__link_status_update(bp);
6928 /* start the timer */
6929 mod_timer(&bp->timer, jiffies + bp->current_interval);
6935 bnx2x_int_disable_sync(bp, 1);
6936 if (!BP_NOMCP(bp)) {
6937 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6938 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6941 /* Free SKBs, SGEs, TPA pool and driver internals */
6942 bnx2x_free_skbs(bp);
6943 for_each_rx_queue(bp, i)
6944 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6949 bnx2x_napi_disable(bp);
6950 for_each_rx_queue(bp, i)
6951 netif_napi_del(&bnx2x_fp(bp, i, napi));
6954 /* TBD we really need to reset the chip
6955 if we want to recover from this */
6959 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6961 struct bnx2x_fastpath *fp = &bp->fp[index];
6964 /* halt the connection */
6965 fp->state = BNX2X_FP_STATE_HALTING;
6966 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
6968 /* Wait for completion */
6969 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6971 if (rc) /* timeout */
6974 /* delete cfc entry */
6975 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6977 /* Wait for completion */
6978 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6983 static int bnx2x_stop_leading(struct bnx2x *bp)
6985 u16 dsb_sp_prod_idx;
6986 /* if the other port is handling traffic,
6987 this can take a lot of time */
6993 /* Send HALT ramrod */
6994 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6995 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6997 /* Wait for completion */
6998 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6999 &(bp->fp[0].state), 1);
7000 if (rc) /* timeout */
7003 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7005 /* Send PORT_DELETE ramrod */
7006 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7008 /* Wait for completion to arrive on default status block
7009 we are going to reset the chip anyway
7010 so there is not much to do if this times out
7012 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7014 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7015 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7016 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7017 #ifdef BNX2X_STOP_ON_ERROR
7026 rmb(); /* Refresh the dsb_sp_prod */
7028 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7029 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7034 static void bnx2x_reset_func(struct bnx2x *bp)
7036 int port = BP_PORT(bp);
7037 int func = BP_FUNC(bp);
7041 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7042 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7045 base = FUNC_ILT_BASE(func);
7046 for (i = base; i < base + ILT_PER_FUNC; i++)
7047 bnx2x_ilt_wr(bp, i, 0);
7050 static void bnx2x_reset_port(struct bnx2x *bp)
7052 int port = BP_PORT(bp);
7055 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7057 /* Do not rcv packets to BRB */
7058 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7059 /* Do not direct rcv packets that are not for MCP to the BRB */
7060 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7061 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7064 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7067 /* Check for BRB port occupancy */
7068 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7070 DP(NETIF_MSG_IFDOWN,
7071 "BRB1 is not empty %d blocks are occupied\n", val);
7073 /* TODO: Close Doorbell port? */
7076 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7078 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7079 BP_FUNC(bp), reset_code);
7081 switch (reset_code) {
7082 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7083 bnx2x_reset_port(bp);
7084 bnx2x_reset_func(bp);
7085 bnx2x_reset_common(bp);
7088 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7089 bnx2x_reset_port(bp);
7090 bnx2x_reset_func(bp);
7093 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7094 bnx2x_reset_func(bp);
7098 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7103 /* must be called with rtnl_lock */
7104 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7106 int port = BP_PORT(bp);
7110 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7112 bp->rx_mode = BNX2X_RX_MODE_NONE;
7113 bnx2x_set_storm_rx_mode(bp);
7115 bnx2x_netif_stop(bp, 1);
7117 del_timer_sync(&bp->timer);
7118 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7119 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7120 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7125 /* Wait until tx fastpath tasks complete */
7126 for_each_tx_queue(bp, i) {
7127 struct bnx2x_fastpath *fp = &bp->fp[i];
7131 while (bnx2x_has_tx_work_unload(fp)) {
7133 bnx2x_tx_int(fp, 1000);
7135 BNX2X_ERR("timeout waiting for queue[%d]\n",
7137 #ifdef BNX2X_STOP_ON_ERROR
7149 /* Give HW time to discard old tx messages */
7152 if (CHIP_IS_E1(bp)) {
7153 struct mac_configuration_cmd *config =
7154 bnx2x_sp(bp, mcast_config);
7156 bnx2x_set_mac_addr_e1(bp, 0);
7158 for (i = 0; i < config->hdr.length; i++)
7159 CAM_INVALIDATE(config->config_table[i]);
7161 config->hdr.length = i;
7162 if (CHIP_REV_IS_SLOW(bp))
7163 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7165 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7166 config->hdr.client_id = BP_CL_ID(bp);
7167 config->hdr.reserved1 = 0;
7169 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7170 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7171 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7174 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7176 bnx2x_set_mac_addr_e1h(bp, 0);
7178 for (i = 0; i < MC_HASH_SIZE; i++)
7179 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7182 if (unload_mode == UNLOAD_NORMAL)
7183 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7185 else if (bp->flags & NO_WOL_FLAG) {
7186 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7187 if (CHIP_IS_E1H(bp))
7188 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7190 } else if (bp->wol) {
7191 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7192 u8 *mac_addr = bp->dev->dev_addr;
7194 /* The mac address is written to entries 1-4 to
7195 preserve entry 0 which is used by the PMF */
7196 u8 entry = (BP_E1HVN(bp) + 1)*8;
7198 val = (mac_addr[0] << 8) | mac_addr[1];
7199 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7201 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7202 (mac_addr[4] << 8) | mac_addr[5];
7203 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
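/* Each EMAC MAC-match entry occupies 8 bytes of register space (two 32-bit
 * writes: the two high MAC bytes, then the four low ones), which appears to
 * be why the per-vn entry offset above is (BP_E1HVN(bp) + 1) * 8.
 */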
7205 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7208 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7210 /* Close multi and leading connections;
7211 completions for ramrods are collected synchronously */
7212 for_each_nondefault_queue(bp, i)
7213 if (bnx2x_stop_multi(bp, i))
7216 rc = bnx2x_stop_leading(bp);
7218 BNX2X_ERR("Stop leading failed!\n");
7219 #ifdef BNX2X_STOP_ON_ERROR
7228 reset_code = bnx2x_fw_command(bp, reset_code);
7230 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7231 load_count[0], load_count[1], load_count[2]);
7233 load_count[1 + port]--;
7234 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7235 load_count[0], load_count[1], load_count[2]);
7236 if (load_count[0] == 0)
7237 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7238 else if (load_count[1 + port] == 0)
7239 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7241 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
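/* Mirror of the no-MCP load accounting: the last function overall requests
 * the COMMON unload, the last one on the port requests the PORT unload, and
 * any other function unloads only itself.
 */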
7244 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7245 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7246 bnx2x__link_reset(bp);
7248 /* Reset the chip */
7249 bnx2x_reset_chip(bp, reset_code);
7251 /* Report UNLOAD_DONE to MCP */
7253 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7256 /* Free SKBs, SGEs, TPA pool and driver internals */
7257 bnx2x_free_skbs(bp);
7258 for_each_rx_queue(bp, i)
7259 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7260 for_each_rx_queue(bp, i)
7261 netif_napi_del(&bnx2x_fp(bp, i, napi));
7264 bp->state = BNX2X_STATE_CLOSED;
7266 netif_carrier_off(bp->dev);
7271 static void bnx2x_reset_task(struct work_struct *work)
7273 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7275 #ifdef BNX2X_STOP_ON_ERROR
7276 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7277 " so reset not done to allow debug dump,\n"
7278 KERN_ERR " you will need to reboot when done\n");
7284 if (!netif_running(bp->dev))
7285 goto reset_task_exit;
7287 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7288 bnx2x_nic_load(bp, LOAD_NORMAL);
7294 /* end of nic load/unload */
7299 * Init service functions
7302 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7305 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7306 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7307 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7308 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7309 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7310 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7311 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7312 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7314 BNX2X_ERR("Unsupported function index: %d\n", func);
7319 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7321 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7323 /* Flush all outstanding writes */
7326 /* Pretend to be function 0 */
7328 /* Flush the GRC transaction (in the chip) */
7329 new_val = REG_RD(bp, reg);
7331 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7336 /* From now on we are in the "like-E1" mode */
7337 bnx2x_int_disable(bp);
7339 /* Flush all outstanding writes */
7342 /* Restore the original function settings */
7343 REG_WR(bp, reg, orig_func);
7344 new_val = REG_RD(bp, reg);
7345 if (new_val != orig_func) {
7346 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7347 orig_func, new_val);
7352 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7354 if (CHIP_IS_E1H(bp))
7355 bnx2x_undi_int_disable_e1h(bp, func);
7357 bnx2x_int_disable(bp);
7360 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7364 /* Check if there is any driver already loaded */
7365 val = REG_RD(bp, MISC_REG_UNPREPARED);
7367 /* Check if it is the UNDI driver
7368 * UNDI driver initializes CID offset for normal bell to 0x7
7370 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7371 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7373 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7375 int func = BP_FUNC(bp);
7379 /* clear the UNDI indication */
7380 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7382 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7384 /* try unload UNDI on port 0 */
7387 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7388 DRV_MSG_SEQ_NUMBER_MASK);
7389 reset_code = bnx2x_fw_command(bp, reset_code);
7391 /* if UNDI is loaded on the other port */
7392 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7394 /* send "DONE" for previous unload */
7395 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7397 /* unload UNDI on port 1 */
7400 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7401 DRV_MSG_SEQ_NUMBER_MASK);
7402 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7404 bnx2x_fw_command(bp, reset_code);
7407 /* now it's safe to release the lock */
7408 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7410 bnx2x_undi_int_disable(bp, func);
7412 /* close input traffic and wait for it */
7413 /* Do not rcv packets to BRB */
7415 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7416 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7417 /* Do not direct rcv packets that are not for MCP to
7420 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7421 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7424 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7425 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7428 /* save NIG port swap info */
7429 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7430 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7433 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7436 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7438 /* take the NIG out of reset and restore swap values */
7440 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7441 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7442 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7443 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7445 /* send unload done to the MCP */
7446 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7448 /* restore our func and fw_seq */
7451 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7452 DRV_MSG_SEQ_NUMBER_MASK);
7455 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7459 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7461 u32 val, val2, val3, val4, id;
7464 /* Get the chip revision id and number. */
7465 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7466 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7467 id = ((val & 0xffff) << 16);
7468 val = REG_RD(bp, MISC_REG_CHIP_REV);
7469 id |= ((val & 0xf) << 12);
7470 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7471 id |= ((val & 0xff) << 4);
7472 val = REG_RD(bp, MISC_REG_BOND_ID);
7474 bp->common.chip_id = id;
7475 bp->link_params.chip_id = bp->common.chip_id;
7476 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7478 val = (REG_RD(bp, 0x2874) & 0x55);
7479 if ((bp->common.chip_id & 0x1) ||
7480 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7481 bp->flags |= ONE_PORT_FLAG;
7482 BNX2X_DEV_INFO("single port device\n");
7485 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7486 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7487 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7488 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7489 bp->common.flash_size, bp->common.flash_size);
7491 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7492 bp->link_params.shmem_base = bp->common.shmem_base;
7493 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7495 if (!bp->common.shmem_base ||
7496 (bp->common.shmem_base < 0xA0000) ||
7497 (bp->common.shmem_base >= 0xC0000)) {
7498 BNX2X_DEV_INFO("MCP not active\n");
7499 bp->flags |= NO_MCP_FLAG;
7503 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7504 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7505 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7506 BNX2X_ERR("BAD MCP validity signature\n");
7508 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7509 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7511 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7512 SHARED_HW_CFG_LED_MODE_MASK) >>
7513 SHARED_HW_CFG_LED_MODE_SHIFT);
7515 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7516 bp->common.bc_ver = val;
7517 BNX2X_DEV_INFO("bc_ver %X\n", val);
7518 if (val < BNX2X_BC_VER) {
7519 /* for now only warn
7520 * later we might need to enforce this */
7521 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7522 " please upgrade BC\n", BNX2X_BC_VER, val);
7525 if (BP_E1HVN(bp) == 0) {
7526 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7527 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7529 /* no WOL capability for E1HVN != 0 */
7530 bp->flags |= NO_WOL_FLAG;
7532 BNX2X_DEV_INFO("%sWoL capable\n",
7533 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7535 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7536 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7537 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7538 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7540 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7541 val, val2, val3, val4);
7544 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7547 int port = BP_PORT(bp);
7550 switch (switch_cfg) {
7552 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7555 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7556 switch (ext_phy_type) {
7557 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7558 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7561 bp->port.supported |= (SUPPORTED_10baseT_Half |
7562 SUPPORTED_10baseT_Full |
7563 SUPPORTED_100baseT_Half |
7564 SUPPORTED_100baseT_Full |
7565 SUPPORTED_1000baseT_Full |
7566 SUPPORTED_2500baseX_Full |
7571 SUPPORTED_Asym_Pause);
7574 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7575 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7578 bp->port.supported |= (SUPPORTED_10baseT_Half |
7579 SUPPORTED_10baseT_Full |
7580 SUPPORTED_100baseT_Half |
7581 SUPPORTED_100baseT_Full |
7582 SUPPORTED_1000baseT_Full |
7587 SUPPORTED_Asym_Pause);
7591 BNX2X_ERR("NVRAM config error. "
7592 "BAD SerDes ext_phy_config 0x%x\n",
7593 bp->link_params.ext_phy_config);
7597 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7599 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7602 case SWITCH_CFG_10G:
7603 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7606 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7607 switch (ext_phy_type) {
7608 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7609 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7612 bp->port.supported |= (SUPPORTED_10baseT_Half |
7613 SUPPORTED_10baseT_Full |
7614 SUPPORTED_100baseT_Half |
7615 SUPPORTED_100baseT_Full |
7616 SUPPORTED_1000baseT_Full |
7617 SUPPORTED_2500baseX_Full |
7618 SUPPORTED_10000baseT_Full |
7623 SUPPORTED_Asym_Pause);
7626 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7627 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7630 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7633 SUPPORTED_Asym_Pause);
7636 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7637 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7640 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7641 SUPPORTED_1000baseT_Full |
7644 SUPPORTED_Asym_Pause);
7647 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7648 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7651 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7652 SUPPORTED_1000baseT_Full |
7656 SUPPORTED_Asym_Pause);
7659 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7660 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7663 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7664 SUPPORTED_2500baseX_Full |
7665 SUPPORTED_1000baseT_Full |
7669 SUPPORTED_Asym_Pause);
7672 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7673 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7676 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7680 SUPPORTED_Asym_Pause);
7683 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7684 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7685 bp->link_params.ext_phy_config);
7689 BNX2X_ERR("NVRAM config error. "
7690 "BAD XGXS ext_phy_config 0x%x\n",
7691 bp->link_params.ext_phy_config);
7695 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7697 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7702 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7703 bp->port.link_config);
7706 bp->link_params.phy_addr = bp->port.phy_addr;
7708 /* mask what we support according to speed_cap_mask */
7709 if (!(bp->link_params.speed_cap_mask &
7710 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7711 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7713 if (!(bp->link_params.speed_cap_mask &
7714 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7715 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7717 if (!(bp->link_params.speed_cap_mask &
7718 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7719 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7721 if (!(bp->link_params.speed_cap_mask &
7722 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7723 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7725 if (!(bp->link_params.speed_cap_mask &
7726 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7727 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7728 SUPPORTED_1000baseT_Full);
7730 if (!(bp->link_params.speed_cap_mask &
7731 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7732 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7734 if (!(bp->link_params.speed_cap_mask &
7735 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7736 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7738 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7741 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7743 bp->link_params.req_duplex = DUPLEX_FULL;
7745 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7746 case PORT_FEATURE_LINK_SPEED_AUTO:
7747 if (bp->port.supported & SUPPORTED_Autoneg) {
7748 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7749 bp->port.advertising = bp->port.supported;
7752 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7754 if ((ext_phy_type ==
7755 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7757 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7758 /* force 10G, no AN */
7759 bp->link_params.req_line_speed = SPEED_10000;
7760 bp->port.advertising =
7761 (ADVERTISED_10000baseT_Full |
7765 BNX2X_ERR("NVRAM config error. "
7766 "Invalid link_config 0x%x"
7767 " Autoneg not supported\n",
7768 bp->port.link_config);
7773 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7774 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7775 bp->link_params.req_line_speed = SPEED_10;
7776 bp->port.advertising = (ADVERTISED_10baseT_Full |
7779 BNX2X_ERR("NVRAM config error. "
7780 "Invalid link_config 0x%x"
7781 " speed_cap_mask 0x%x\n",
7782 bp->port.link_config,
7783 bp->link_params.speed_cap_mask);
7788 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7789 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7790 bp->link_params.req_line_speed = SPEED_10;
7791 bp->link_params.req_duplex = DUPLEX_HALF;
7792 bp->port.advertising = (ADVERTISED_10baseT_Half |
7795 BNX2X_ERR("NVRAM config error. "
7796 "Invalid link_config 0x%x"
7797 " speed_cap_mask 0x%x\n",
7798 bp->port.link_config,
7799 bp->link_params.speed_cap_mask);
7804 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7805 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7806 bp->link_params.req_line_speed = SPEED_100;
7807 bp->port.advertising = (ADVERTISED_100baseT_Full |
7810 BNX2X_ERR("NVRAM config error. "
7811 "Invalid link_config 0x%x"
7812 " speed_cap_mask 0x%x\n",
7813 bp->port.link_config,
7814 bp->link_params.speed_cap_mask);
7819 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7820 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7821 bp->link_params.req_line_speed = SPEED_100;
7822 bp->link_params.req_duplex = DUPLEX_HALF;
7823 bp->port.advertising = (ADVERTISED_100baseT_Half |
7826 BNX2X_ERR("NVRAM config error. "
7827 "Invalid link_config 0x%x"
7828 " speed_cap_mask 0x%x\n",
7829 bp->port.link_config,
7830 bp->link_params.speed_cap_mask);
7835 case PORT_FEATURE_LINK_SPEED_1G:
7836 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7837 bp->link_params.req_line_speed = SPEED_1000;
7838 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7841 BNX2X_ERR("NVRAM config error. "
7842 "Invalid link_config 0x%x"
7843 " speed_cap_mask 0x%x\n",
7844 bp->port.link_config,
7845 bp->link_params.speed_cap_mask);
7850 case PORT_FEATURE_LINK_SPEED_2_5G:
7851 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7852 bp->link_params.req_line_speed = SPEED_2500;
7853 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7856 BNX2X_ERR("NVRAM config error. "
7857 "Invalid link_config 0x%x"
7858 " speed_cap_mask 0x%x\n",
7859 bp->port.link_config,
7860 bp->link_params.speed_cap_mask);
7865 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7866 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7867 case PORT_FEATURE_LINK_SPEED_10G_KR:
7868 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7869 bp->link_params.req_line_speed = SPEED_10000;
7870 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7873 BNX2X_ERR("NVRAM config error. "
7874 "Invalid link_config 0x%x"
7875 " speed_cap_mask 0x%x\n",
7876 bp->port.link_config,
7877 bp->link_params.speed_cap_mask);
7883 BNX2X_ERR("NVRAM config error. "
7884 "BAD link speed link_config 0x%x\n",
7885 bp->port.link_config);
7886 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7887 bp->port.advertising = bp->port.supported;
7891 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7892 PORT_FEATURE_FLOW_CONTROL_MASK);
7893 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7894 !(bp->port.supported & SUPPORTED_Autoneg))
7895 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
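/* Note: req_flow_ctrl is taken straight from the NVRAM link_config; when it
 * is left on AUTO but the PHY cannot autonegotiate, it is forced to none.
 */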
7897 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7898 " advertising 0x%x\n",
7899 bp->link_params.req_line_speed,
7900 bp->link_params.req_duplex,
7901 bp->link_params.req_flow_ctrl, bp->port.advertising);
7904 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7906 int port = BP_PORT(bp);
7909 bp->link_params.bp = bp;
7910 bp->link_params.port = port;
7912 bp->link_params.serdes_config =
7913 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7914 bp->link_params.lane_config =
7915 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7916 bp->link_params.ext_phy_config =
7918 dev_info.port_hw_config[port].external_phy_config);
7919 bp->link_params.speed_cap_mask =
7921 dev_info.port_hw_config[port].speed_capability_mask);
7923 bp->port.link_config =
7924 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7926 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7927 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7928 " link_config 0x%08x\n",
7929 bp->link_params.serdes_config,
7930 bp->link_params.lane_config,
7931 bp->link_params.ext_phy_config,
7932 bp->link_params.speed_cap_mask, bp->port.link_config);
7934 bp->link_params.switch_cfg = (bp->port.link_config &
7935 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7936 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7938 bnx2x_link_settings_requested(bp);
7940 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7941 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
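/* mac_upper holds MAC bytes 0-1 and mac_lower holds bytes 2-5; unpack them
 * most-significant byte first into dev_addr[].
 */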
7942 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7943 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7944 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7945 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7946 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7947 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7948 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7949 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7952 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7954 int func = BP_FUNC(bp);
7958 bnx2x_get_common_hwinfo(bp);
7962 if (CHIP_IS_E1H(bp)) {
7964 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7966 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7967 FUNC_MF_CFG_E1HOV_TAG_MASK);
7968 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7972 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7974 func, bp->e1hov, bp->e1hov);
7976 BNX2X_DEV_INFO("Single function mode\n");
7978 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7979 " aborting\n", func);
7985 if (!BP_NOMCP(bp)) {
7986 bnx2x_get_port_hwinfo(bp);
7988 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7989 DRV_MSG_SEQ_NUMBER_MASK);
7990 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7994 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7995 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7996 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7997 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7998 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7999 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8000 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8001 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8002 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8003 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8004 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8006 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8014 /* only supposed to happen on emulation/FPGA */
8015 BNX2X_ERR("warning random MAC workaround active\n");
8016 random_ether_addr(bp->dev->dev_addr);
8017 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8023 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8025 int func = BP_FUNC(bp);
8029 /* Disable interrupt handling until HW is initialized */
8030 atomic_set(&bp->intr_sem, 1);
8032 mutex_init(&bp->port.phy_mutex);
8034 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8035 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8037 rc = bnx2x_get_hwinfo(bp);
8039 /* need to reset chip if undi was active */
8041 bnx2x_undi_unload(bp);
8043 if (CHIP_REV_IS_FPGA(bp))
8044 printk(KERN_ERR PFX "FPGA detected\n");
8046 if (BP_NOMCP(bp) && (func == 0))
8048 "MCP disabled, must load devices in order!\n");
8050 /* Set multi queue mode */
8051 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8052 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8054 "Multi disabled since int_mode requested is not MSI-X\n");
8055 multi_mode = ETH_RSS_MODE_DISABLED;
8057 bp->multi_mode = multi_mode;
8062 bp->flags &= ~TPA_ENABLE_FLAG;
8063 bp->dev->features &= ~NETIF_F_LRO;
8065 bp->flags |= TPA_ENABLE_FLAG;
8066 bp->dev->features |= NETIF_F_LRO;
8070 bp->tx_ring_size = MAX_TX_AVAIL;
8071 bp->rx_ring_size = MAX_RX_AVAIL;
8078 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8079 bp->current_interval = (poll ? poll : timer_interval);
8081 init_timer(&bp->timer);
8082 bp->timer.expires = jiffies + bp->current_interval;
8083 bp->timer.data = (unsigned long) bp;
8084 bp->timer.function = bnx2x_timer;
8090 * ethtool service functions
8093 /* All ethtool functions called with rtnl_lock */
8095 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8097 struct bnx2x *bp = netdev_priv(dev);
8099 cmd->supported = bp->port.supported;
8100 cmd->advertising = bp->port.advertising;
8102 if (netif_carrier_ok(dev)) {
8103 cmd->speed = bp->link_vars.line_speed;
8104 cmd->duplex = bp->link_vars.duplex;
8106 cmd->speed = bp->link_params.req_line_speed;
8107 cmd->duplex = bp->link_params.req_duplex;
8112 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8113 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8114 if (vn_max_rate < cmd->speed)
8115 cmd->speed = vn_max_rate;
8118 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8120 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8122 switch (ext_phy_type) {
8123 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8124 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8125 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8126 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8127 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8128 cmd->port = PORT_FIBRE;
8131 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8132 cmd->port = PORT_TP;
8135 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8136 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8137 bp->link_params.ext_phy_config);
8141 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8142 bp->link_params.ext_phy_config);
8146 cmd->port = PORT_TP;
8148 cmd->phy_address = bp->port.phy_addr;
8149 cmd->transceiver = XCVR_INTERNAL;
8151 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8152 cmd->autoneg = AUTONEG_ENABLE;
8154 cmd->autoneg = AUTONEG_DISABLE;
8159 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8160 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8161 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8162 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8163 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8164 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8165 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8170 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8172 struct bnx2x *bp = netdev_priv(dev);
8178 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8179 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8180 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8181 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8182 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8183 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8184 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8186 if (cmd->autoneg == AUTONEG_ENABLE) {
8187 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8188 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8192 /* advertise the requested speed and duplex if supported */
8193 cmd->advertising &= bp->port.supported;
8195 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8196 bp->link_params.req_duplex = DUPLEX_FULL;
8197 bp->port.advertising |= (ADVERTISED_Autoneg |
8200 } else { /* forced speed */
8201 /* advertise the requested speed and duplex if supported */
8202 switch (cmd->speed) {
8204 if (cmd->duplex == DUPLEX_FULL) {
8205 if (!(bp->port.supported &
8206 SUPPORTED_10baseT_Full)) {
8208 "10M full not supported\n");
8212 advertising = (ADVERTISED_10baseT_Full |
8215 if (!(bp->port.supported &
8216 SUPPORTED_10baseT_Half)) {
8218 "10M half not supported\n");
8222 advertising = (ADVERTISED_10baseT_Half |
8228 if (cmd->duplex == DUPLEX_FULL) {
8229 if (!(bp->port.supported &
8230 SUPPORTED_100baseT_Full)) {
8232 "100M full not supported\n");
8236 advertising = (ADVERTISED_100baseT_Full |
8239 if (!(bp->port.supported &
8240 SUPPORTED_100baseT_Half)) {
8242 "100M half not supported\n");
8246 advertising = (ADVERTISED_100baseT_Half |
8252 if (cmd->duplex != DUPLEX_FULL) {
8253 DP(NETIF_MSG_LINK, "1G half not supported\n");
8257 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8258 DP(NETIF_MSG_LINK, "1G full not supported\n");
8262 advertising = (ADVERTISED_1000baseT_Full |
8267 if (cmd->duplex != DUPLEX_FULL) {
8269 "2.5G half not supported\n");
8273 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8275 "2.5G full not supported\n");
8279 advertising = (ADVERTISED_2500baseX_Full |
8284 if (cmd->duplex != DUPLEX_FULL) {
8285 DP(NETIF_MSG_LINK, "10G half not supported\n");
8289 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8290 DP(NETIF_MSG_LINK, "10G full not supported\n");
8294 advertising = (ADVERTISED_10000baseT_Full |
8299 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8303 bp->link_params.req_line_speed = cmd->speed;
8304 bp->link_params.req_duplex = cmd->duplex;
8305 bp->port.advertising = advertising;
8308 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8309 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8310 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8311 bp->port.advertising);
8313 if (netif_running(dev)) {
8314 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8321 #define PHY_FW_VER_LEN 10
8323 static void bnx2x_get_drvinfo(struct net_device *dev,
8324 struct ethtool_drvinfo *info)
8326 struct bnx2x *bp = netdev_priv(dev);
8327 u8 phy_fw_ver[PHY_FW_VER_LEN];
8329 strcpy(info->driver, DRV_MODULE_NAME);
8330 strcpy(info->version, DRV_MODULE_VERSION);
8332 phy_fw_ver[0] = '\0';
8334 bnx2x_acquire_phy_lock(bp);
8335 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8336 (bp->state != BNX2X_STATE_CLOSED),
8337 phy_fw_ver, PHY_FW_VER_LEN);
8338 bnx2x_release_phy_lock(bp);
8341 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8342 (bp->common.bc_ver & 0xff0000) >> 16,
8343 (bp->common.bc_ver & 0xff00) >> 8,
8344 (bp->common.bc_ver & 0xff),
8345 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8346 strcpy(info->bus_info, pci_name(bp->pdev));
8347 info->n_stats = BNX2X_NUM_STATS;
8348 info->testinfo_len = BNX2X_NUM_TESTS;
8349 info->eedump_len = bp->common.flash_size;
8350 info->regdump_len = 0;
8353 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8355 struct bnx2x *bp = netdev_priv(dev);
8357 if (bp->flags & NO_WOL_FLAG) {
8361 wol->supported = WAKE_MAGIC;
8363 wol->wolopts = WAKE_MAGIC;
8367 memset(&wol->sopass, 0, sizeof(wol->sopass));
8370 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8372 struct bnx2x *bp = netdev_priv(dev);
8374 if (wol->wolopts & ~WAKE_MAGIC)
8377 if (wol->wolopts & WAKE_MAGIC) {
8378 if (bp->flags & NO_WOL_FLAG)
8388 static u32 bnx2x_get_msglevel(struct net_device *dev)
8390 struct bnx2x *bp = netdev_priv(dev);
8392 return bp->msglevel;
8395 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8397 struct bnx2x *bp = netdev_priv(dev);
8399 if (capable(CAP_NET_ADMIN))
8400 bp->msglevel = level;
8403 static int bnx2x_nway_reset(struct net_device *dev)
8405 struct bnx2x *bp = netdev_priv(dev);
8410 if (netif_running(dev)) {
8411 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8418 static int bnx2x_get_eeprom_len(struct net_device *dev)
8420 struct bnx2x *bp = netdev_priv(dev);
8422 return bp->common.flash_size;
8425 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8427 int port = BP_PORT(bp);
8431 /* adjust timeout for emulation/FPGA */
8432 count = NVRAM_TIMEOUT_COUNT;
8433 if (CHIP_REV_IS_SLOW(bp))
8436 /* request access to nvram interface */
8437 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8438 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8440 for (i = 0; i < count*10; i++) {
8441 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8442 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8448 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8449 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8456 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8458 int port = BP_PORT(bp);
8462 /* adjust timeout for emulation/FPGA */
8463 count = NVRAM_TIMEOUT_COUNT;
8464 if (CHIP_REV_IS_SLOW(bp))
8467 /* relinquish nvram interface */
8468 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8469 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8471 for (i = 0; i < count*10; i++) {
8472 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8473 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8479 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8480 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8487 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8491 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8493 /* enable both bits, even on read */
8494 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8495 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8496 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8499 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8503 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8505 /* disable both bits, even after read */
8506 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8507 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8508 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8511 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8517 /* build the command word */
8518 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8520 /* need to clear DONE bit separately */
8521 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8523 /* address of the NVRAM to read from */
8524 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8525 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8527 /* issue a read command */
8528 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8530 /* adjust timeout for emulation/FPGA */
8531 count = NVRAM_TIMEOUT_COUNT;
8532 if (CHIP_REV_IS_SLOW(bp))
8535 /* wait for completion */
8538 for (i = 0; i < count; i++) {
8540 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8542 if (val & MCPR_NVM_COMMAND_DONE) {
8543 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8544 /* we read nvram data in cpu order
8545 * but ethtool sees it as an array of bytes
8546 * converting to big-endian will do the work */
8547 val = cpu_to_be32(val);
8557 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8564 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8566 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8571 if (offset + buf_size > bp->common.flash_size) {
8572 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8573 " buf_size (0x%x) > flash_size (0x%x)\n",
8574 offset, buf_size, bp->common.flash_size);
8578 /* request access to nvram interface */
8579 rc = bnx2x_acquire_nvram_lock(bp);
8583 /* enable access to nvram interface */
8584 bnx2x_enable_nvram_access(bp);
8586 /* read the first word(s) */
8587 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8588 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8589 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8590 memcpy(ret_buf, &val, 4);
8592 /* advance to the next dword */
8593 offset += sizeof(u32);
8594 ret_buf += sizeof(u32);
8595 buf_size -= sizeof(u32);
8600 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8601 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8602 memcpy(ret_buf, &val, 4);
8605 /* disable access to nvram interface */
8606 bnx2x_disable_nvram_access(bp);
8607 bnx2x_release_nvram_lock(bp);
8612 static int bnx2x_get_eeprom(struct net_device *dev,
8613 struct ethtool_eeprom *eeprom, u8 *eebuf)
8615 struct bnx2x *bp = netdev_priv(dev);
8618 if (!netif_running(dev))
8621 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8622 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8623 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8624 eeprom->len, eeprom->len);
8626 /* parameters already validated in ethtool_get_eeprom */
8628 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8633 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8638 /* build the command word */
8639 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8641 /* need to clear DONE bit separately */
8642 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8644 /* write the data */
8645 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8647 /* address of the NVRAM to write to */
8648 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8649 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8651 /* issue the write command */
8652 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8654 /* adjust timeout for emulation/FPGA */
8655 count = NVRAM_TIMEOUT_COUNT;
8656 if (CHIP_REV_IS_SLOW(bp))
8659 /* wait for completion */
8661 for (i = 0; i < count; i++) {
8663 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8664 if (val & MCPR_NVM_COMMAND_DONE) {
8673 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8675 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8683 if (offset + buf_size > bp->common.flash_size) {
8684 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8685 " buf_size (0x%x) > flash_size (0x%x)\n",
8686 offset, buf_size, bp->common.flash_size);
8690 /* request access to nvram interface */
8691 rc = bnx2x_acquire_nvram_lock(bp);
8695 /* enable access to nvram interface */
8696 bnx2x_enable_nvram_access(bp);
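/* A one-byte write is implemented as a read-modify-write of the aligned
 * 32-bit word: read the dword that contains the target byte, splice the new
 * byte in at BYTE_OFFSET(offset), and write the dword back.
 */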
8698 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8699 align_offset = (offset & ~0x03);
8700 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8703 val &= ~(0xff << BYTE_OFFSET(offset));
8704 val |= (*data_buf << BYTE_OFFSET(offset));
8706 /* nvram data is returned as an array of bytes
8707 * convert it back to cpu order */
8708 val = be32_to_cpu(val);
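/* Explanatory note: NVRAM is read and written one aligned dword at a time,
 * so the single-byte write used by ethtool is done as a read-modify-write:
 * the offset is aligned down (align_offset), the containing dword is fetched,
 * the target byte is spliced in at BYTE_OFFSET(offset) above, and the whole
 * dword is written back below.
 */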
8710 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8714 /* disable access to nvram interface */
8715 bnx2x_disable_nvram_access(bp);
8716 bnx2x_release_nvram_lock(bp);
8721 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8729 if (buf_size == 1) /* ethtool */
8730 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8732 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8734 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8739 if (offset + buf_size > bp->common.flash_size) {
8740 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8741 " buf_size (0x%x) > flash_size (0x%x)\n",
8742 offset, buf_size, bp->common.flash_size);
8746 /* request access to nvram interface */
8747 rc = bnx2x_acquire_nvram_lock(bp);
8751 /* enable access to nvram interface */
8752 bnx2x_enable_nvram_access(bp);
8755 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8756 while ((written_so_far < buf_size) && (rc == 0)) {
8757 if (written_so_far == (buf_size - sizeof(u32)))
8758 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8759 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8760 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8761 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8762 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
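/* Explanatory note: the flag handling above tracks NVRAM page boundaries -
 * a dword that finishes the buffer or finishes an NVRAM_PAGE_SIZE page is
 * marked MCPR_NVM_COMMAND_LAST, and the first dword of each new page is
 * marked MCPR_NVM_COMMAND_FIRST again, so every page is programmed as its
 * own first..last sequence.
 */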
8764 memcpy(&val, data_buf, 4);
8766 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8768 /* advance to the next dword */
8769 offset += sizeof(u32);
8770 data_buf += sizeof(u32);
8771 written_so_far += sizeof(u32);
8775 /* disable access to nvram interface */
8776 bnx2x_disable_nvram_access(bp);
8777 bnx2x_release_nvram_lock(bp);
8782 static int bnx2x_set_eeprom(struct net_device *dev,
8783 struct ethtool_eeprom *eeprom, u8 *eebuf)
8785 struct bnx2x *bp = netdev_priv(dev);
8788 if (!netif_running(dev))
8791 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8792 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8793 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8794 eeprom->len, eeprom->len);
8796 /* parameters already validated in ethtool_set_eeprom */
8798 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8799 if (eeprom->magic == 0x00504859)
8802 bnx2x_acquire_phy_lock(bp);
8803 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8804 bp->link_params.ext_phy_config,
8805 (bp->state != BNX2X_STATE_CLOSED),
8806 eebuf, eeprom->len);
8807 if ((bp->state == BNX2X_STATE_OPEN) ||
8808 (bp->state == BNX2X_STATE_DISABLED)) {
8809 rc |= bnx2x_link_reset(&bp->link_params,
8811 rc |= bnx2x_phy_init(&bp->link_params,
8814 bnx2x_release_phy_lock(bp);
8816 } else /* Only the PMF can access the PHY */
8819 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8824 static int bnx2x_get_coalesce(struct net_device *dev,
8825 struct ethtool_coalesce *coal)
8827 struct bnx2x *bp = netdev_priv(dev);
8829 memset(coal, 0, sizeof(struct ethtool_coalesce));
8831 coal->rx_coalesce_usecs = bp->rx_ticks;
8832 coal->tx_coalesce_usecs = bp->tx_ticks;
8837 static int bnx2x_set_coalesce(struct net_device *dev,
8838 struct ethtool_coalesce *coal)
8840 struct bnx2x *bp = netdev_priv(dev);
8842 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8843 if (bp->rx_ticks > 3000)
8844 bp->rx_ticks = 3000;
8846 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8847 if (bp->tx_ticks > 0x3000)
8848 bp->tx_ticks = 0x3000;
8850 if (netif_running(dev))
8851 bnx2x_update_coalesce(bp);
8856 static void bnx2x_get_ringparam(struct net_device *dev,
8857 struct ethtool_ringparam *ering)
8859 struct bnx2x *bp = netdev_priv(dev);
8861 ering->rx_max_pending = MAX_RX_AVAIL;
8862 ering->rx_mini_max_pending = 0;
8863 ering->rx_jumbo_max_pending = 0;
8865 ering->rx_pending = bp->rx_ring_size;
8866 ering->rx_mini_pending = 0;
8867 ering->rx_jumbo_pending = 0;
8869 ering->tx_max_pending = MAX_TX_AVAIL;
8870 ering->tx_pending = bp->tx_ring_size;
8873 static int bnx2x_set_ringparam(struct net_device *dev,
8874 struct ethtool_ringparam *ering)
8876 struct bnx2x *bp = netdev_priv(dev);
8879 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8880 (ering->tx_pending > MAX_TX_AVAIL) ||
8881 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8884 bp->rx_ring_size = ering->rx_pending;
8885 bp->tx_ring_size = ering->tx_pending;
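/* Explanatory note: the check above requires tx_pending to exceed
 * MAX_SKB_FRAGS + 4 so that even a maximally fragmented skb (one BD per
 * fragment plus start/parsing/last BDs) leaves the ring usable;
 * bnx2x_start_xmit() stops the queue once fewer than MAX_SKB_FRAGS + 3
 * descriptors remain free.
 */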
8887 if (netif_running(dev)) {
8888 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8889 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8895 static void bnx2x_get_pauseparam(struct net_device *dev,
8896 struct ethtool_pauseparam *epause)
8898 struct bnx2x *bp = netdev_priv(dev);
8900 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8901 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8903 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8904 BNX2X_FLOW_CTRL_RX);
8905 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8906 BNX2X_FLOW_CTRL_TX);
8908 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8909 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8910 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8913 static int bnx2x_set_pauseparam(struct net_device *dev,
8914 struct ethtool_pauseparam *epause)
8916 struct bnx2x *bp = netdev_priv(dev);
8921 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8922 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8923 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8925 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8927 if (epause->rx_pause)
8928 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8930 if (epause->tx_pause)
8931 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8933 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8934 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8936 if (epause->autoneg) {
8937 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8938 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8942 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8943 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8947 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8949 if (netif_running(dev)) {
8950 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8957 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8959 struct bnx2x *bp = netdev_priv(dev);
8963 /* TPA requires Rx CSUM offloading */
8964 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8965 if (!(dev->features & NETIF_F_LRO)) {
8966 dev->features |= NETIF_F_LRO;
8967 bp->flags |= TPA_ENABLE_FLAG;
8971 } else if (dev->features & NETIF_F_LRO) {
8972 dev->features &= ~NETIF_F_LRO;
8973 bp->flags &= ~TPA_ENABLE_FLAG;
8977 if (changed && netif_running(dev)) {
8978 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8979 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8985 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8987 struct bnx2x *bp = netdev_priv(dev);
8992 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8994 struct bnx2x *bp = netdev_priv(dev);
8999 /* Disable TPA when Rx CSUM is disabled; otherwise all
9000 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9002 u32 flags = ethtool_op_get_flags(dev);
9004 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9010 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9013 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9014 dev->features |= NETIF_F_TSO6;
9016 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9017 dev->features &= ~NETIF_F_TSO6;
9023 static const struct {
9024 char string[ETH_GSTRING_LEN];
9025 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9026 { "register_test (offline)" },
9027 { "memory_test (offline)" },
9028 { "loopback_test (offline)" },
9029 { "nvram_test (online)" },
9030 { "interrupt_test (online)" },
9031 { "link_test (online)" },
9032 { "idle check (online)" }
9035 static int bnx2x_self_test_count(struct net_device *dev)
9037 return BNX2X_NUM_TESTS;
9040 static int bnx2x_test_registers(struct bnx2x *bp)
9042 int idx, i, rc = -ENODEV;
9044 int port = BP_PORT(bp);
9045 static const struct {
9050 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9051 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9052 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9053 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9054 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9055 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9056 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9057 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9058 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9059 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9060 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9061 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9062 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9063 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9064 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9065 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9066 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9067 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9068 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9069 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9070 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9071 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9072 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9073 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9074 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9075 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9076 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9077 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9078 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9079 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9080 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9081 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9082 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9083 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9084 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9085 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9086 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9087 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9089 { 0xffffffff, 0, 0x00000000 }
9092 if (!netif_running(bp->dev))
9095 /* Run the test twice:
9096 first writing 0x00000000, then writing 0xffffffff */
9097 for (idx = 0; idx < 2; idx++) {
9104 wr_val = 0xffffffff;
9108 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9109 u32 offset, mask, save_val, val;
9111 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9112 mask = reg_tbl[i].mask;
9114 save_val = REG_RD(bp, offset);
9116 REG_WR(bp, offset, wr_val);
9117 val = REG_RD(bp, offset);
9119 /* Restore the original register's value */
9120 REG_WR(bp, offset, save_val);
9122 /* verify that the value is as expected */
9123 if ((val & mask) != (wr_val & mask))
9134 static int bnx2x_test_memory(struct bnx2x *bp)
9136 int i, j, rc = -ENODEV;
9138 static const struct {
9142 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9143 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9144 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9145 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9146 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9147 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9148 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9152 static const struct {
9158 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9159 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9160 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9161 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9162 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9163 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9165 { NULL, 0xffffffff, 0, 0 }
9168 if (!netif_running(bp->dev))
9171 /* Go through all the memories */
9172 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9173 for (j = 0; j < mem_tbl[i].size; j++)
9174 REG_RD(bp, mem_tbl[i].offset + j*4);
9176 /* Check the parity status */
9177 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9178 val = REG_RD(bp, prty_tbl[i].offset);
9179 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9180 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9182 "%s is 0x%x\n", prty_tbl[i].name, val);
9193 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9198 while (bnx2x_link_test(bp) && cnt--)
9202 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9204 unsigned int pkt_size, num_pkts, i;
9205 struct sk_buff *skb;
9206 unsigned char *packet;
9207 struct bnx2x_fastpath *fp = &bp->fp[0];
9208 u16 tx_start_idx, tx_idx;
9209 u16 rx_start_idx, rx_idx;
9211 struct sw_tx_bd *tx_buf;
9212 struct eth_tx_bd *tx_bd;
9214 union eth_rx_cqe *cqe;
9216 struct sw_rx_bd *rx_buf;
9220 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
9221 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9222 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9224 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
9226 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
9227 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9228 /* wait until link state is restored */
9230 while (cnt-- && bnx2x_test_link(&bp->link_params,
9237 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9240 goto test_loopback_exit;
9242 packet = skb_put(skb, pkt_size);
9243 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9244 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9245 for (i = ETH_HLEN; i < pkt_size; i++)
9246 packet[i] = (unsigned char) (i & 0xff);
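/* Explanatory note: the loopback frame built above addresses our own MAC,
 * zeroes the rest of the Ethernet header and fills the payload with an
 * incrementing byte pattern; the receive side of this test re-checks the
 * pattern byte by byte once the frame has looped back through the MAC or
 * PHY.
 */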
9249 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9250 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9252 pkt_prod = fp->tx_pkt_prod++;
9253 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9254 tx_buf->first_bd = fp->tx_bd_prod;
9257 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9258 mapping = pci_map_single(bp->pdev, skb->data,
9259 skb_headlen(skb), PCI_DMA_TODEVICE);
9260 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9261 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9262 tx_bd->nbd = cpu_to_le16(1);
9263 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9264 tx_bd->vlan = cpu_to_le16(pkt_prod);
9265 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9266 ETH_TX_BD_FLAGS_END_BD);
9267 tx_bd->general_data = ((UNICAST_ADDRESS <<
9268 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9272 fp->hw_tx_prods->bds_prod =
9273 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9274 mb(); /* FW restriction: must not reorder writing nbd and packets */
9275 fp->hw_tx_prods->packets_prod =
9276 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9277 DOORBELL(bp, FP_IDX(fp), 0);
9283 bp->dev->trans_start = jiffies;
9287 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9288 if (tx_idx != tx_start_idx + num_pkts)
9289 goto test_loopback_exit;
9291 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9292 if (rx_idx != rx_start_idx + num_pkts)
9293 goto test_loopback_exit;
9295 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9296 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9297 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9298 goto test_loopback_rx_exit;
9300 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9301 if (len != pkt_size)
9302 goto test_loopback_rx_exit;
9304 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9306 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9307 for (i = ETH_HLEN; i < pkt_size; i++)
9308 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9309 goto test_loopback_rx_exit;
9313 test_loopback_rx_exit:
9315 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9316 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9317 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9318 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9320 /* Update producers */
9321 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9325 bp->link_params.loopback_mode = LOOPBACK_NONE;
9330 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9334 if (!netif_running(bp->dev))
9335 return BNX2X_LOOPBACK_FAILED;
9337 bnx2x_netif_stop(bp, 1);
9338 bnx2x_acquire_phy_lock(bp);
9340 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9341 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9342 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9345 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9346 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9347 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9350 bnx2x_release_phy_lock(bp);
9351 bnx2x_netif_start(bp);
9356 #define CRC32_RESIDUAL 0xdebb20e3
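/* Explanatory note: 0xdebb20e3 is the expected CRC-32 residue - running the
 * CRC over a region that already carries its stored CRC-32 leaves this
 * constant in the accumulator, so bnx2x_test_nvram() only has to compare
 * ether_crc_le() of each directory region against CRC32_RESIDUAL instead of
 * locating the checksum field itself.
 */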
9358 static int bnx2x_test_nvram(struct bnx2x *bp)
9360 static const struct {
9364 { 0, 0x14 }, /* bootstrap */
9365 { 0x14, 0xec }, /* dir */
9366 { 0x100, 0x350 }, /* manuf_info */
9367 { 0x450, 0xf0 }, /* feature_info */
9368 { 0x640, 0x64 }, /* upgrade_key_info */
9370 { 0x708, 0x70 }, /* manuf_key_info */
9375 u8 *data = (u8 *)buf;
9379 rc = bnx2x_nvram_read(bp, 0, data, 4);
9381 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9382 goto test_nvram_exit;
9385 magic = be32_to_cpu(buf[0]);
9386 if (magic != 0x669955aa) {
9387 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9389 goto test_nvram_exit;
9392 for (i = 0; nvram_tbl[i].size; i++) {
9394 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9398 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9399 goto test_nvram_exit;
9402 csum = ether_crc_le(nvram_tbl[i].size, data);
9403 if (csum != CRC32_RESIDUAL) {
9405 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9407 goto test_nvram_exit;
9415 static int bnx2x_test_intr(struct bnx2x *bp)
9417 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9420 if (!netif_running(bp->dev))
9423 config->hdr.length = 0;
9425 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9427 config->hdr.offset = BP_FUNC(bp);
9428 config->hdr.client_id = BP_CL_ID(bp);
9429 config->hdr.reserved1 = 0;
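/* Explanatory note: the interrupt test posts an empty (length 0) MAC
 * configuration ramrod below and then polls set_mac_pending for up to
 * roughly 100ms; the ramrod can only complete through the slowpath
 * interrupt, so seeing the flag clear proves the interrupt path is alive.
 */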
9431 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9432 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9433 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9435 bp->set_mac_pending++;
9436 for (i = 0; i < 10; i++) {
9437 if (!bp->set_mac_pending)
9439 msleep_interruptible(10);
9448 static void bnx2x_self_test(struct net_device *dev,
9449 struct ethtool_test *etest, u64 *buf)
9451 struct bnx2x *bp = netdev_priv(dev);
9453 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9455 if (!netif_running(dev))
9458 /* offline tests are not supported in MF mode */
9460 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9462 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9465 link_up = bp->link_vars.link_up;
9466 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9467 bnx2x_nic_load(bp, LOAD_DIAG);
9468 /* wait until link state is restored */
9469 bnx2x_wait_for_link(bp, link_up);
9471 if (bnx2x_test_registers(bp) != 0) {
9473 etest->flags |= ETH_TEST_FL_FAILED;
9475 if (bnx2x_test_memory(bp) != 0) {
9477 etest->flags |= ETH_TEST_FL_FAILED;
9479 buf[2] = bnx2x_test_loopback(bp, link_up);
9481 etest->flags |= ETH_TEST_FL_FAILED;
9483 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9484 bnx2x_nic_load(bp, LOAD_NORMAL);
9485 /* wait until link state is restored */
9486 bnx2x_wait_for_link(bp, link_up);
9488 if (bnx2x_test_nvram(bp) != 0) {
9490 etest->flags |= ETH_TEST_FL_FAILED;
9492 if (bnx2x_test_intr(bp) != 0) {
9494 etest->flags |= ETH_TEST_FL_FAILED;
9497 if (bnx2x_link_test(bp) != 0) {
9499 etest->flags |= ETH_TEST_FL_FAILED;
9502 #ifdef BNX2X_EXTRA_DEBUG
9503 bnx2x_panic_dump(bp);
9507 static const struct {
9510 u8 string[ETH_GSTRING_LEN];
9511 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9512 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9513 { Q_STATS_OFFSET32(error_bytes_received_hi),
9514 8, "[%d]: rx_error_bytes" },
9515 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9516 8, "[%d]: rx_ucast_packets" },
9517 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9518 8, "[%d]: rx_mcast_packets" },
9519 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9520 8, "[%d]: rx_bcast_packets" },
9521 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9522 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9523 4, "[%d]: rx_phy_ip_err_discards"},
9524 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9525 4, "[%d]: rx_skb_alloc_discard" },
9526 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9528 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9529 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9530 8, "[%d]: tx_packets" }
9533 static const struct {
9537 #define STATS_FLAGS_PORT 1
9538 #define STATS_FLAGS_FUNC 2
9539 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9540 u8 string[ETH_GSTRING_LEN];
9541 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9542 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9543 8, STATS_FLAGS_BOTH, "rx_bytes" },
9544 { STATS_OFFSET32(error_bytes_received_hi),
9545 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9546 { STATS_OFFSET32(total_unicast_packets_received_hi),
9547 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9548 { STATS_OFFSET32(total_multicast_packets_received_hi),
9549 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9550 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9551 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9552 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9553 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9554 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9555 8, STATS_FLAGS_PORT, "rx_align_errors" },
9556 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9557 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9558 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9559 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9560 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9561 8, STATS_FLAGS_PORT, "rx_fragments" },
9562 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9563 8, STATS_FLAGS_PORT, "rx_jabbers" },
9564 { STATS_OFFSET32(no_buff_discard_hi),
9565 8, STATS_FLAGS_BOTH, "rx_discards" },
9566 { STATS_OFFSET32(mac_filter_discard),
9567 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9568 { STATS_OFFSET32(xxoverflow_discard),
9569 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9570 { STATS_OFFSET32(brb_drop_hi),
9571 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9572 { STATS_OFFSET32(brb_truncate_hi),
9573 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9574 { STATS_OFFSET32(pause_frames_received_hi),
9575 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9576 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9577 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9578 { STATS_OFFSET32(nig_timer_max),
9579 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9580 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9581 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9582 { STATS_OFFSET32(rx_skb_alloc_failed),
9583 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9584 { STATS_OFFSET32(hw_csum_err),
9585 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9587 { STATS_OFFSET32(total_bytes_transmitted_hi),
9588 8, STATS_FLAGS_BOTH, "tx_bytes" },
9589 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9590 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9591 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9592 8, STATS_FLAGS_BOTH, "tx_packets" },
9593 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9594 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9595 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9596 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9597 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9598 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9599 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9600 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9601 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9602 8, STATS_FLAGS_PORT, "tx_deferred" },
9603 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9604 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9605 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9606 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9607 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9608 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9609 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9610 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9611 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9612 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9613 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9614 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9615 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9616 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9617 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9618 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9619 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9620 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9621 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9622 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9623 { STATS_OFFSET32(pause_frames_sent_hi),
9624 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9627 #define IS_PORT_STAT(i) \
9628 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9629 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9630 #define IS_E1HMF_MODE_STAT(bp) \
9631 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
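/* Explanatory note: IS_PORT_STAT()/IS_FUNC_STAT() classify bnx2x_stats_arr[]
 * entries by their STATS_FLAGS_* field, and IS_E1HMF_MODE_STAT() is true for
 * an E1H device in multi-function mode (unless the BNX2X_MSG_STATS debug
 * level is set); in that mode only per-function counters are reported below.
 * 64-bit counters are kept as hi/lo 32-bit halves and recombined with
 * HILO_U64() when they are copied out to ethtool.
 */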
9633 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9635 struct bnx2x *bp = netdev_priv(dev);
9638 switch (stringset) {
9642 for_each_queue(bp, i) {
9643 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9644 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9645 bnx2x_q_stats_arr[j].string, i);
9646 k += BNX2X_NUM_Q_STATS;
9648 if (IS_E1HMF_MODE_STAT(bp))
9650 for (j = 0; j < BNX2X_NUM_STATS; j++)
9651 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9652 bnx2x_stats_arr[j].string);
9654 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9655 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9657 strcpy(buf + j*ETH_GSTRING_LEN,
9658 bnx2x_stats_arr[i].string);
9665 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9670 static int bnx2x_get_stats_count(struct net_device *dev)
9672 struct bnx2x *bp = netdev_priv(dev);
9676 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9677 if (!IS_E1HMF_MODE_STAT(bp))
9678 num_stats += BNX2X_NUM_STATS;
9680 if (IS_E1HMF_MODE_STAT(bp)) {
9682 for (i = 0; i < BNX2X_NUM_STATS; i++)
9683 if (IS_FUNC_STAT(i))
9686 num_stats = BNX2X_NUM_STATS;
9692 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9693 struct ethtool_stats *stats, u64 *buf)
9695 struct bnx2x *bp = netdev_priv(dev);
9696 u32 *hw_stats, *offset;
9701 for_each_queue(bp, i) {
9702 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9703 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9704 if (bnx2x_q_stats_arr[j].size == 0) {
9705 /* skip this counter */
9709 offset = (hw_stats +
9710 bnx2x_q_stats_arr[j].offset);
9711 if (bnx2x_q_stats_arr[j].size == 4) {
9712 /* 4-byte counter */
9713 buf[k + j] = (u64) *offset;
9716 /* 8-byte counter */
9717 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9719 k += BNX2X_NUM_Q_STATS;
9721 if (IS_E1HMF_MODE_STAT(bp))
9723 hw_stats = (u32 *)&bp->eth_stats;
9724 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9725 if (bnx2x_stats_arr[j].size == 0) {
9726 /* skip this counter */
9730 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9731 if (bnx2x_stats_arr[j].size == 4) {
9732 /* 4-byte counter */
9733 buf[k + j] = (u64) *offset;
9736 /* 8-byte counter */
9737 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9740 hw_stats = (u32 *)&bp->eth_stats;
9741 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9742 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9744 if (bnx2x_stats_arr[i].size == 0) {
9745 /* skip this counter */
9750 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9751 if (bnx2x_stats_arr[i].size == 4) {
9752 /* 4-byte counter */
9753 buf[j] = (u64) *offset;
9757 /* 8-byte counter */
9758 buf[j] = HILO_U64(*offset, *(offset + 1));
9764 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9766 struct bnx2x *bp = netdev_priv(dev);
9767 int port = BP_PORT(bp);
9770 if (!netif_running(dev))
9779 for (i = 0; i < (data * 2); i++) {
9781 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9782 bp->link_params.hw_led_mode,
9783 bp->link_params.chip_id);
9785 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9786 bp->link_params.hw_led_mode,
9787 bp->link_params.chip_id);
9789 msleep_interruptible(500);
9790 if (signal_pending(current))
9794 if (bp->link_vars.link_up)
9795 bnx2x_set_led(bp, port, LED_MODE_OPER,
9796 bp->link_vars.line_speed,
9797 bp->link_params.hw_led_mode,
9798 bp->link_params.chip_id);
9803 static struct ethtool_ops bnx2x_ethtool_ops = {
9804 .get_settings = bnx2x_get_settings,
9805 .set_settings = bnx2x_set_settings,
9806 .get_drvinfo = bnx2x_get_drvinfo,
9807 .get_wol = bnx2x_get_wol,
9808 .set_wol = bnx2x_set_wol,
9809 .get_msglevel = bnx2x_get_msglevel,
9810 .set_msglevel = bnx2x_set_msglevel,
9811 .nway_reset = bnx2x_nway_reset,
9812 .get_link = ethtool_op_get_link,
9813 .get_eeprom_len = bnx2x_get_eeprom_len,
9814 .get_eeprom = bnx2x_get_eeprom,
9815 .set_eeprom = bnx2x_set_eeprom,
9816 .get_coalesce = bnx2x_get_coalesce,
9817 .set_coalesce = bnx2x_set_coalesce,
9818 .get_ringparam = bnx2x_get_ringparam,
9819 .set_ringparam = bnx2x_set_ringparam,
9820 .get_pauseparam = bnx2x_get_pauseparam,
9821 .set_pauseparam = bnx2x_set_pauseparam,
9822 .get_rx_csum = bnx2x_get_rx_csum,
9823 .set_rx_csum = bnx2x_set_rx_csum,
9824 .get_tx_csum = ethtool_op_get_tx_csum,
9825 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9826 .set_flags = bnx2x_set_flags,
9827 .get_flags = ethtool_op_get_flags,
9828 .get_sg = ethtool_op_get_sg,
9829 .set_sg = ethtool_op_set_sg,
9830 .get_tso = ethtool_op_get_tso,
9831 .set_tso = bnx2x_set_tso,
9832 .self_test_count = bnx2x_self_test_count,
9833 .self_test = bnx2x_self_test,
9834 .get_strings = bnx2x_get_strings,
9835 .phys_id = bnx2x_phys_id,
9836 .get_stats_count = bnx2x_get_stats_count,
9837 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9840 /* end of ethtool_ops */
9842 /****************************************************************************
9843 * General service functions
9844 ****************************************************************************/
9846 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9850 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9854 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9855 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9856 PCI_PM_CTRL_PME_STATUS));
9858 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9859 /* delay required during transition out of D3hot */
9864 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9868 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9870 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9873 /* No more memory access after this point until
9874 * the device is brought back to D0.
9884 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9888 /* Tell compiler that status block fields can change */
9890 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9891 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9893 return (fp->rx_comp_cons != rx_cons_sb);
9897 * net_device service functions
9900 static int bnx2x_poll(struct napi_struct *napi, int budget)
9902 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9904 struct bnx2x *bp = fp->bp;
9907 #ifdef BNX2X_STOP_ON_ERROR
9908 if (unlikely(bp->panic))
9912 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9913 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9914 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9916 bnx2x_update_fpsb_idx(fp);
9918 if (bnx2x_has_tx_work(fp))
9919 bnx2x_tx_int(fp, budget);
9921 if (bnx2x_has_rx_work(fp))
9922 work_done = bnx2x_rx_int(fp, budget);
9923 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9925 /* must not complete if we consumed the full budget */
9926 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9928 #ifdef BNX2X_STOP_ON_ERROR
9931 napi_complete(napi);
9933 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9934 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9935 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9936 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9942 /* we split the first BD into header and data BDs
9943 * to ease the pain of our fellow microcode engineers;
9944 * we use one mapping for both BDs.
9945 * So far this has only been observed to happen
9946 * in Other Operating Systems(TM)
9948 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9949 struct bnx2x_fastpath *fp,
9950 struct eth_tx_bd **tx_bd, u16 hlen,
9951 u16 bd_prod, int nbd)
9953 struct eth_tx_bd *h_tx_bd = *tx_bd;
9954 struct eth_tx_bd *d_tx_bd;
9956 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9958 /* first fix first BD */
9959 h_tx_bd->nbd = cpu_to_le16(nbd);
9960 h_tx_bd->nbytes = cpu_to_le16(hlen);
9962 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9963 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9964 h_tx_bd->addr_lo, h_tx_bd->nbd);
9966 /* now get a new data BD
9967 * (after the pbd) and fill it */
9968 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9969 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9971 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9972 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9974 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9975 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9976 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9978 /* this marks the BD as one that has no individual mapping;
9979 * the FW ignores this flag in a BD that is not marked as a start BD
9981 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9982 DP(NETIF_MSG_TX_QUEUED,
9983 "TSO split data size is %d (%x:%x)\n",
9984 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9986 /* update tx_bd for marking the last BD flag */
9992 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9995 csum = (u16) ~csum_fold(csum_sub(csum,
9996 csum_partial(t_header - fix, fix, 0)));
9999 csum = (u16) ~csum_fold(csum_add(csum,
10000 csum_partial(t_header, -fix, 0)));
10002 return swab16(csum);
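/* Explanatory note: bnx2x_csum_fix() corrects the pseudo checksum when the
 * checksum start offset seen by the stack differs from what the parsing BD
 * expects - the one's complement sum of the "fix" bytes in front of (or at
 * the start of) the transport header is subtracted from or added to the
 * running sum, and the result is byte swapped for the firmware.
 */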
10005 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10009 if (skb->ip_summed != CHECKSUM_PARTIAL)
10013 if (skb->protocol == ntohs(ETH_P_IPV6)) {
10015 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10016 rc |= XMIT_CSUM_TCP;
10020 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10021 rc |= XMIT_CSUM_TCP;
10025 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10028 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10034 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10035 /* check if packet requires linearization (packet is too fragmented) */
10036 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10041 int first_bd_sz = 0;
10043 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10044 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10046 if (xmit_type & XMIT_GSO) {
10047 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10048 /* Check if LSO packet needs to be copied:
10049 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10050 int wnd_size = MAX_FETCH_BD - 3;
10051 /* Number of windows to check */
10052 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
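/* Explanatory note: the FW can fetch only MAX_FETCH_BD BDs per packet, so
 * for LSO the loops below slide a window of wnd_size fragments (plus the
 * linear data for the first window) across the skb; if any window carries
 * less than one MSS, a full segment could not be built from it and the skb
 * has to be linearized first.
 */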
10057 /* Headers length */
10058 hlen = (int)(skb_transport_header(skb) - skb->data) +
10061 /* Amount of data (w/o headers) on the linear part of the SKB */
10062 first_bd_sz = skb_headlen(skb) - hlen;
10064 wnd_sum = first_bd_sz;
10066 /* Calculate the first sum - it's special */
10067 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10069 skb_shinfo(skb)->frags[frag_idx].size;
10071 /* If there was data on linear skb data - check it */
10072 if (first_bd_sz > 0) {
10073 if (unlikely(wnd_sum < lso_mss)) {
10078 wnd_sum -= first_bd_sz;
10081 /* Others are easier: run through the frag list and
10082 check all windows */
10083 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10085 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10087 if (unlikely(wnd_sum < lso_mss)) {
10092 skb_shinfo(skb)->frags[wnd_idx].size;
10096 /* in the non-LSO case a packet this fragmented should always be linearized */
10103 if (unlikely(to_copy))
10104 DP(NETIF_MSG_TX_QUEUED,
10105 "Linearization IS REQUIRED for %s packet. "
10106 "num_frags %d hlen %d first_bd_sz %d\n",
10107 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10108 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10114 /* called with netif_tx_lock
10115 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10116 * netif_wake_queue()
10118 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10120 struct bnx2x *bp = netdev_priv(dev);
10121 struct bnx2x_fastpath *fp;
10122 struct netdev_queue *txq;
10123 struct sw_tx_bd *tx_buf;
10124 struct eth_tx_bd *tx_bd;
10125 struct eth_tx_parse_bd *pbd = NULL;
10126 u16 pkt_prod, bd_prod;
10128 dma_addr_t mapping;
10129 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10130 int vlan_off = (bp->e1hov ? 4 : 0);
10134 #ifdef BNX2X_STOP_ON_ERROR
10135 if (unlikely(bp->panic))
10136 return NETDEV_TX_BUSY;
10139 fp_index = skb_get_queue_mapping(skb);
10140 txq = netdev_get_tx_queue(dev, fp_index);
10142 fp = &bp->fp[fp_index];
10144 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10145 fp->eth_q_stats.driver_xoff++,
10146 netif_tx_stop_queue(txq);
10147 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10148 return NETDEV_TX_BUSY;
10151 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10152 " gso type %x xmit_type %x\n",
10153 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10154 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10156 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10157 /* First, check if we need to linearize the skb
10158 (due to FW restrictions) */
10159 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10160 /* Statistics of linearization */
10162 if (skb_linearize(skb) != 0) {
10163 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10164 "silently dropping this SKB\n");
10165 dev_kfree_skb_any(skb);
10166 return NETDEV_TX_OK;
10172 Please read carefully. First we use one BD which we mark as start,
10173 then for TSO or xsum we have a parsing info BD,
10174 and only then we have the rest of the TSO BDs.
10175 (don't forget to mark the last one as last,
10176 and to unmap only AFTER you write to the BD ...)
10177 And above all, all PBD sizes are in words - NOT DWORDS!
10180 pkt_prod = fp->tx_pkt_prod++;
10181 bd_prod = TX_BD(fp->tx_bd_prod);
10183 /* get a tx_buf and first BD */
10184 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10185 tx_bd = &fp->tx_desc_ring[bd_prod];
10187 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10188 tx_bd->general_data = (UNICAST_ADDRESS <<
10189 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10191 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10193 /* remember the first BD of the packet */
10194 tx_buf->first_bd = fp->tx_bd_prod;
10197 DP(NETIF_MSG_TX_QUEUED,
10198 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10199 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10202 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10203 (bp->flags & HW_VLAN_TX_FLAG)) {
10204 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10205 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10209 tx_bd->vlan = cpu_to_le16(pkt_prod);
10212 /* turn on parsing and get a BD */
10213 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10214 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10216 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10219 if (xmit_type & XMIT_CSUM) {
10220 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10222 /* for now NS flag is not used in Linux */
10223 pbd->global_data = (hlen |
10224 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
10225 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10227 pbd->ip_hlen = (skb_transport_header(skb) -
10228 skb_network_header(skb)) / 2;
10230 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10232 pbd->total_hlen = cpu_to_le16(hlen);
10233 hlen = hlen*2 - vlan_off;
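/* Explanatory note: the parsing BD stores header lengths in 16-bit words
 * (see the warning comment above), so hlen is accumulated in words for
 * pbd->total_hlen and converted back to a byte count here for the
 * headlen/TSO-split checks further down.
 */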
10235 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10237 if (xmit_type & XMIT_CSUM_V4)
10238 tx_bd->bd_flags.as_bitfield |=
10239 ETH_TX_BD_FLAGS_IP_CSUM;
10241 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10243 if (xmit_type & XMIT_CSUM_TCP) {
10244 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10247 s8 fix = SKB_CS_OFF(skb); /* signed! */
10249 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10250 pbd->cs_offset = fix / 2;
10252 DP(NETIF_MSG_TX_QUEUED,
10253 "hlen %d offset %d fix %d csum before fix %x\n",
10254 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10257 /* HW bug: fixup the CSUM */
10258 pbd->tcp_pseudo_csum =
10259 bnx2x_csum_fix(skb_transport_header(skb),
10262 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10263 pbd->tcp_pseudo_csum);
10267 mapping = pci_map_single(bp->pdev, skb->data,
10268 skb_headlen(skb), PCI_DMA_TODEVICE);
10270 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10271 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10272 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10273 tx_bd->nbd = cpu_to_le16(nbd);
10274 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10276 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10277 " nbytes %d flags %x vlan %x\n",
10278 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10279 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10280 le16_to_cpu(tx_bd->vlan));
10282 if (xmit_type & XMIT_GSO) {
10284 DP(NETIF_MSG_TX_QUEUED,
10285 "TSO packet len %d hlen %d total len %d tso size %d\n",
10286 skb->len, hlen, skb_headlen(skb),
10287 skb_shinfo(skb)->gso_size);
10289 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10291 if (unlikely(skb_headlen(skb) > hlen))
10292 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10295 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10296 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10297 pbd->tcp_flags = pbd_tcp_flags(skb);
10299 if (xmit_type & XMIT_GSO_V4) {
10300 pbd->ip_id = swab16(ip_hdr(skb)->id);
10301 pbd->tcp_pseudo_csum =
10302 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10303 ip_hdr(skb)->daddr,
10304 0, IPPROTO_TCP, 0));
10307 pbd->tcp_pseudo_csum =
10308 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10309 &ipv6_hdr(skb)->daddr,
10310 0, IPPROTO_TCP, 0));
10312 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10315 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10316 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10318 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10319 tx_bd = &fp->tx_desc_ring[bd_prod];
10321 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10322 frag->size, PCI_DMA_TODEVICE);
10324 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10325 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10326 tx_bd->nbytes = cpu_to_le16(frag->size);
10327 tx_bd->vlan = cpu_to_le16(pkt_prod);
10328 tx_bd->bd_flags.as_bitfield = 0;
10330 DP(NETIF_MSG_TX_QUEUED,
10331 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10332 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10333 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10336 /* now at last mark the BD as the last BD */
10337 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10339 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10340 tx_bd, tx_bd->bd_flags.as_bitfield);
10342 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10344 /* now send a tx doorbell, counting the next BD
10345 * if the packet contains or ends with it
10347 if (TX_BD_POFF(bd_prod) < nbd)
10351 DP(NETIF_MSG_TX_QUEUED,
10352 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10353 " tcp_flags %x xsum %x seq %u hlen %u\n",
10354 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10355 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10356 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10358 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10361 * Make sure that the BD data is updated before updating the producer
10362 * since FW might read the BD right after the producer is updated.
10363 * This is only applicable for weak-ordered memory model archs such
10364 * as IA-64. The following barrier is also mandatory since the FW
10365 * assumes packets must have BDs.
10369 fp->hw_tx_prods->bds_prod =
10370 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
10371 mb(); /* FW restriction: must not reorder writing nbd and packets */
10372 fp->hw_tx_prods->packets_prod =
10373 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
10374 DOORBELL(bp, FP_IDX(fp), 0);
10378 fp->tx_bd_prod += nbd;
10379 dev->trans_start = jiffies;
10381 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10382 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10383 if we put Tx into XOFF state. */
10385 netif_tx_stop_queue(txq);
10386 fp->eth_q_stats.driver_xoff++;
10387 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10388 netif_tx_wake_queue(txq);
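/* Explanatory note: the stop-then-recheck sequence above closes the race
 * with a concurrent bnx2x_tx_int() completion - if descriptors were freed
 * between the availability test and netif_tx_stop_queue(), the queue is
 * woken again immediately rather than being left stopped.
 */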
10392 return NETDEV_TX_OK;
10395 /* called with rtnl_lock */
10396 static int bnx2x_open(struct net_device *dev)
10398 struct bnx2x *bp = netdev_priv(dev);
10400 netif_carrier_off(dev);
10402 bnx2x_set_power_state(bp, PCI_D0);
10404 return bnx2x_nic_load(bp, LOAD_OPEN);
10407 /* called with rtnl_lock */
10408 static int bnx2x_close(struct net_device *dev)
10410 struct bnx2x *bp = netdev_priv(dev);
10412 /* Unload the driver, release IRQs */
10413 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10414 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10415 if (!CHIP_REV_IS_SLOW(bp))
10416 bnx2x_set_power_state(bp, PCI_D3hot);
10421 /* called with netif_tx_lock from set_multicast */
10422 static void bnx2x_set_rx_mode(struct net_device *dev)
10424 struct bnx2x *bp = netdev_priv(dev);
10425 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10426 int port = BP_PORT(bp);
10428 if (bp->state != BNX2X_STATE_OPEN) {
10429 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10433 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10435 if (dev->flags & IFF_PROMISC)
10436 rx_mode = BNX2X_RX_MODE_PROMISC;
10438 else if ((dev->flags & IFF_ALLMULTI) ||
10439 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10440 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10442 else { /* some multicasts */
10443 if (CHIP_IS_E1(bp)) {
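/* Explanatory note: on E1 (57710) each multicast address is programmed
 * into the MAC CAM through a SET_MAC ramrod using the config table built
 * below, while the E1H path further down folds the addresses into a
 * 256-bit hash filter instead.
 */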
10444 int i, old, offset;
10445 struct dev_mc_list *mclist;
10446 struct mac_configuration_cmd *config =
10447 bnx2x_sp(bp, mcast_config);
10449 for (i = 0, mclist = dev->mc_list;
10450 mclist && (i < dev->mc_count);
10451 i++, mclist = mclist->next) {
10453 config->config_table[i].
10454 cam_entry.msb_mac_addr =
10455 swab16(*(u16 *)&mclist->dmi_addr[0]);
10456 config->config_table[i].
10457 cam_entry.middle_mac_addr =
10458 swab16(*(u16 *)&mclist->dmi_addr[2]);
10459 config->config_table[i].
10460 cam_entry.lsb_mac_addr =
10461 swab16(*(u16 *)&mclist->dmi_addr[4]);
10462 config->config_table[i].cam_entry.flags =
10464 config->config_table[i].
10465 target_table_entry.flags = 0;
10466 config->config_table[i].
10467 target_table_entry.client_id = 0;
10468 config->config_table[i].
10469 target_table_entry.vlan_id = 0;
10472 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10473 config->config_table[i].
10474 cam_entry.msb_mac_addr,
10475 config->config_table[i].
10476 cam_entry.middle_mac_addr,
10477 config->config_table[i].
10478 cam_entry.lsb_mac_addr);
10480 old = config->hdr.length;
10482 for (; i < old; i++) {
10483 if (CAM_IS_INVALID(config->
10484 config_table[i])) {
10485 /* already invalidated */
10489 CAM_INVALIDATE(config->
10494 if (CHIP_REV_IS_SLOW(bp))
10495 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10497 offset = BNX2X_MAX_MULTICAST*(1 + port);
10499 config->hdr.length = i;
10500 config->hdr.offset = offset;
10501 config->hdr.client_id = bp->fp->cl_id;
10502 config->hdr.reserved1 = 0;
10504 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10505 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10506 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10509 /* Accept one or more multicasts */
10510 struct dev_mc_list *mclist;
10511 u32 mc_filter[MC_HASH_SIZE];
10512 u32 crc, bit, regidx;
10515 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10517 for (i = 0, mclist = dev->mc_list;
10518 mclist && (i < dev->mc_count);
10519 i++, mclist = mclist->next) {
10521 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10524 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10525 bit = (crc >> 24) & 0xff;
10528 mc_filter[regidx] |= (1 << bit);
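/* Explanatory note: the CRC32c of each multicast address selects one of
 * 256 filter bits - the high CRC bits pick a register in the MC_HASH_SIZE
 * array and a bit within it, and the resulting filter is written out to
 * MC_HASH_OFFSET() below.
 */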
10531 for (i = 0; i < MC_HASH_SIZE; i++)
10532 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10537 bp->rx_mode = rx_mode;
10538 bnx2x_set_storm_rx_mode(bp);
10541 /* called with rtnl_lock */
10542 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10544 struct sockaddr *addr = p;
10545 struct bnx2x *bp = netdev_priv(dev);
10547 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10550 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10551 if (netif_running(dev)) {
10552 if (CHIP_IS_E1(bp))
10553 bnx2x_set_mac_addr_e1(bp, 1);
10555 bnx2x_set_mac_addr_e1h(bp, 1);
10561 /* called with rtnl_lock */
10562 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10564 struct mii_ioctl_data *data = if_mii(ifr);
10565 struct bnx2x *bp = netdev_priv(dev);
10566 int port = BP_PORT(bp);
10571 data->phy_id = bp->port.phy_addr;
10575 case SIOCGMIIREG: {
10578 if (!netif_running(dev))
10581 mutex_lock(&bp->port.phy_mutex);
10582 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10583 DEFAULT_PHY_DEV_ADDR,
10584 (data->reg_num & 0x1f), &mii_regval);
10585 data->val_out = mii_regval;
10586 mutex_unlock(&bp->port.phy_mutex);
10591 if (!capable(CAP_NET_ADMIN))
10594 if (!netif_running(dev))
10597 mutex_lock(&bp->port.phy_mutex);
10598 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10599 DEFAULT_PHY_DEV_ADDR,
10600 (data->reg_num & 0x1f), data->val_in);
10601 mutex_unlock(&bp->port.phy_mutex);
10609 return -EOPNOTSUPP;
10612 /* called with rtnl_lock */
10613 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10615 struct bnx2x *bp = netdev_priv(dev);
10618 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10619 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10622 /* This does not race with packet allocation
10623 * because the actual alloc size is
10624 * only updated as part of load
10626 dev->mtu = new_mtu;
10628 if (netif_running(dev)) {
10629 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10630 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10636 static void bnx2x_tx_timeout(struct net_device *dev)
10638 struct bnx2x *bp = netdev_priv(dev);
10640 #ifdef BNX2X_STOP_ON_ERROR
10644 /* This allows the netif to be shut down gracefully before resetting */
10645 schedule_work(&bp->reset_task);
10649 /* called with rtnl_lock */
10650 static void bnx2x_vlan_rx_register(struct net_device *dev,
10651 struct vlan_group *vlgrp)
10653 struct bnx2x *bp = netdev_priv(dev);
10657 /* Set flags according to the required capabilities */
10658 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10660 if (dev->features & NETIF_F_HW_VLAN_TX)
10661 bp->flags |= HW_VLAN_TX_FLAG;
10663 if (dev->features & NETIF_F_HW_VLAN_RX)
10664 bp->flags |= HW_VLAN_RX_FLAG;
10666 if (netif_running(dev))
10667 bnx2x_set_client_config(bp);
10672 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10673 static void poll_bnx2x(struct net_device *dev)
10675 struct bnx2x *bp = netdev_priv(dev);
10677 disable_irq(bp->pdev->irq);
10678 bnx2x_interrupt(bp->pdev->irq, dev);
10679 enable_irq(bp->pdev->irq);
10683 static const struct net_device_ops bnx2x_netdev_ops = {
10684 .ndo_open = bnx2x_open,
10685 .ndo_stop = bnx2x_close,
10686 .ndo_start_xmit = bnx2x_start_xmit,
10687 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10688 .ndo_set_mac_address = bnx2x_change_mac_addr,
10689 .ndo_validate_addr = eth_validate_addr,
10690 .ndo_do_ioctl = bnx2x_ioctl,
10691 .ndo_change_mtu = bnx2x_change_mtu,
10692 .ndo_tx_timeout = bnx2x_tx_timeout,
10694 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10696 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10697 .ndo_poll_controller = poll_bnx2x,
10702 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10703 struct net_device *dev)
10708 SET_NETDEV_DEV(dev, &pdev->dev);
10709 bp = netdev_priv(dev);
10714 bp->func = PCI_FUNC(pdev->devfn);
10716 rc = pci_enable_device(pdev);
10718 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10722 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10723 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10726 goto err_out_disable;
10729 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10730 printk(KERN_ERR PFX "Cannot find second PCI device"
10731 " base address, aborting\n");
10733 goto err_out_disable;
10736 if (atomic_read(&pdev->enable_cnt) == 1) {
10737 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10739 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10741 goto err_out_disable;
10744 pci_set_master(pdev);
10745 pci_save_state(pdev);
10748 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10749 if (bp->pm_cap == 0) {
10750 printk(KERN_ERR PFX "Cannot find power management"
10751 " capability, aborting\n");
10753 goto err_out_release;
10756 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10757 if (bp->pcie_cap == 0) {
10758 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10761 goto err_out_release;
10764 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10765 bp->flags |= USING_DAC_FLAG;
10766 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10767 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10768 " failed, aborting\n");
10770 goto err_out_release;
10773 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10774 printk(KERN_ERR PFX "System does not support DMA,"
10777 goto err_out_release;
10780 dev->mem_start = pci_resource_start(pdev, 0);
10781 dev->base_addr = dev->mem_start;
10782 dev->mem_end = pci_resource_end(pdev, 0);
10784 dev->irq = pdev->irq;
10786 bp->regview = pci_ioremap_bar(pdev, 0);
10787 if (!bp->regview) {
10788 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10790 goto err_out_release;
10793 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10794 min_t(u64, BNX2X_DB_SIZE,
10795 pci_resource_len(pdev, 2)));
10796 if (!bp->doorbells) {
10797 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10799 goto err_out_unmap;
10802 bnx2x_set_power_state(bp, PCI_D0);
10804 /* clean indirect addresses */
10805 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10806 PCICFG_VENDOR_ID_OFFSET);
10807 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10808 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10809 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10810 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10812 dev->watchdog_timeo = TX_TIMEOUT;
10814 dev->netdev_ops = &bnx2x_netdev_ops;
10815 dev->ethtool_ops = &bnx2x_ethtool_ops;
10816 dev->features |= NETIF_F_SG;
10817 dev->features |= NETIF_F_HW_CSUM;
10818 if (bp->flags & USING_DAC_FLAG)
10819 dev->features |= NETIF_F_HIGHDMA;
10821 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10822 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10824 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10825 dev->features |= NETIF_F_TSO6;
10831 iounmap(bp->regview);
10832 bp->regview = NULL;
10834 if (bp->doorbells) {
10835 iounmap(bp->doorbells);
10836 bp->doorbells = NULL;
10840 if (atomic_read(&pdev->enable_cnt) == 1)
10841 pci_release_regions(pdev);
10844 pci_disable_device(pdev);
10845 pci_set_drvdata(pdev, NULL);
10851 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10853 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10855 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10859 /* return value of 1=2.5GHz 2=5GHz */
10860 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10862 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10864 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
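
/*
 * Illustrative sketch only (compiled out): rough order in which the PCI error
 * recovery core exercises the callbacks registered above when a bus error is
 * reported for this device.  The helper below is a sketch for illustration,
 * not a real kernel interface.
 */
#if 0
static void bnx2x_example_eeh_flow(struct pci_dev *pdev)
{
	/* 1. Error reported: detach the netdev and quiesce the NIC */
	bnx2x_io_error_detected(pdev, pci_channel_io_frozen);

	/* 2. Slot has been reset: re-enable and restore PCI state */
	bnx2x_io_slot_reset(pdev);

	/* 3. Recovery done: reload the NIC and resume traffic */
	bnx2x_io_resume(pdev);
}
#endif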

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int rc;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	rc = pci_register_driver(&bnx2x_pci_driver);
	if (rc)
		destroy_workqueue(bnx2x_wq); /* don't leak the workqueue */
	return rc;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);