/* bnx2x.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Eliezer Tamir <eliezert@broadcom.com>
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

/* define this to make the driver freeze on error
 * to allow getting debug info
 * (you will need to reboot afterwards)
 */
/*#define BNX2X_STOP_ON_ERROR*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif

#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/version.h>
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "0.40.15"
#define DRV_MODULE_RELDATE      "$DateTime: 2007/11/15 07:28:37 $"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_INFO(cvs_version, "$Revision: #404 $");
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(onefunc, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(onefunc, "enable only first function");
MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
MODULE_PARM_DESC(debug, "default debug msglevel");

module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
enum bnx2x_board_type {
        BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* locking is done by mcp */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
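
/* Usage sketch (illustrative, not part of the driver): the pair above
 * tunnels GRC register accesses through the PCI config window, e.g.
 *
 *      bnx2x_reg_wr_ind(bp, addr, val);
 *      val = bnx2x_reg_rd_ind(bp, addr);
 *
 * The window is pointed back at PCICFG_VENDOR_ID_OFFSET after each access
 * so that a stray config cycle cannot hit an arbitrary GRC address.
 */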
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                /* DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
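
/* A quick worked example (illustrative, assuming
 * sizeof(struct dmae_command) == 0x38): command slot 3 starts at
 * DMAE_REG_CMD_MEM + 3 * 0x38, and the loop above copies the command one
 * 32-bit word at a time before the write to dmae_reg_go_c[3] kicks the
 * engine.  The real struct size is whatever the HSI headers define; 0x38
 * here is only for the arithmetic.
 */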
static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
                             u32 dst_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        bnx2x_post_dmae(bp, dmae, port * 8);
        /* adjust timeout for emulation/FPGA */
        if (CHIP_REV_IS_SLOW(bp))
                timeout *= 100;

        while (*wb_comp != BNX2X_WB_COMP_VAL) {
                /* DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
                udelay(5);
                if (!timeout--) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
        }
}
static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        bnx2x_post_dmae(bp, dmae, port * 8);
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
                udelay(5);
                if (!timeout--) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
        }

        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
}
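
/* Usage sketch (illustrative): a write/read round trip through the DMAE
 * engine, assuming len32 counts 32-bit words and GRC addresses are byte
 * addresses (they are shifted right by 2 above):
 *
 *      bnx2x_write_dmae(bp, mapping, grc_addr, 2);   -- post 2 dwords
 *      bnx2x_read_dmae(bp, grc_addr, 2);             -- read them back
 *      val = *bnx2x_sp(bp, wb_data[0]);
 *
 * Completion is detected by polling the wb_comp word, which the engine
 * writes with BNX2X_WB_COMP_VAL when the transfer is done.
 */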
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        int i, j, rc = 0;
        u8 last_idx;
        const char storm[] = {"XTCU"};
        const u32 intmem_base[] = {
                BAR_XSTRORM_INTMEM,
                BAR_TSTRORM_INTMEM,
                BAR_CSTRORM_INTMEM,
                BAR_USTRORM_INTMEM
        };

        /* Go through all instances of all SEMIs */
        for (i = 0; i < 4; i++) {
                last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
                                   intmem_base[i]);
                if (last_idx)
                        BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
                                  storm[i], last_idx);

                /* print the asserts */
                for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
                        u32 row0, row1, row2, row3;

                        row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
                                      intmem_base[i]);
                        row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
                                      intmem_base[i]);
                        row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
                                      intmem_base[i]);
                        row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
                                      intmem_base[i]);

                        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                                BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
                                          " 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storm[i], j, row3, row2, row1, row0);
                                rc++;
                        } else {
                                break;
                        }
                }
        }

        return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)"
                          "  *rx_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  fp_c_idx(%x)  fp_u_idx(%x)"
                          "  bd data (%x,%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
                          fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
                          fp->fp_u_idx, hw_prods->packets_prod,
                          hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[0], rx_bd[1], sw_bd->skb);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        BNX2X_ERR("end crash dump -----------------\n");

        bp->stats_state = STATS_STATE_DISABLE;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                /* Errata A0.158 workaround */
                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}
/*
 * general service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
           (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
        REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
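
/* Illustrative example (a sketch, mirroring how the driver calls this):
 * acknowledging index 5 of the CSTORM section of status block 0 while
 * re-enabling the interrupt line would be
 *
 *      bnx2x_ack_sb(bp, 0, CSTORM_ID, 5, IGU_INT_ENABLE, 1);
 *
 * i.e. the id/storm/update/op fields are packed into a single 32-bit IGU
 * command word exactly as shown above.
 */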
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}
static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;

        if ((rx_cons_sb != fp->rx_comp_cons) ||
            (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
                return 1;

        return 0;
}
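
/* Why the MAX_RCQ_DESC_CNT test above: the completion queue leaves the
 * last entry of every page unused as a "next page" link, so a status-block
 * consumer value that lands on that entry must be bumped past it before it
 * is compared with the software consumer.  With 128-entry pages, for
 * example, index 127 really means index 128.  (The page size is whatever
 * MAX_RCQ_DESC_CNT encodes; 128 is only for illustration.)
 */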
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
        u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

        /* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, BAR_IGU_INTMEM + igu_addr); */

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
        if (result == 0) {
                BNX2X_ERR("read %x from IGU\n", result);
                REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
        }
#endif
        return result;
}
/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = tx_buf->first_bd;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("bad nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {
                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        dev_kfree_skb(skb);
        tx_buf->skb = NULL;
        tx_buf->first_bd = 0;

        return bd_idx;
}
static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        u16 used;
        u32 prod;
        u32 cons;

        /* Tell compiler that prod and cons can change */
        barrier();
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
                (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

        if (prod >= cons) {
                /* used = prod - cons - prod/size + cons/size */
                used -= NUM_TX_BD - NUM_TX_RINGS;
        }

        BUG_TRAP(used <= fp->bp->tx_ring_size);
        BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

        return (fp->bp->tx_ring_size - used);
}
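
/* Worked example (illustrative numbers): with NUM_TX_BD = 512,
 * NUM_TX_RINGS = 2 and TX_DESC_CNT = 256, each 256-entry page donates its
 * last descriptor as a "next page" pointer.  For prod = 260, cons = 250:
 *
 *      used = 512 - 2 + 260 - 250 + 250/256 - 260/256 = 519
 *      prod >= cons, so used -= 510, giving used = 9
 *
 * One less than prod - cons, because the range 250..260 crosses the
 * page-link descriptor at index 255.  The real constants come from
 * bnx2x.h; these values are only for the arithmetic.
 */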
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %d\n",
                   hw_cons, sw_cons, pkt_cons);

                /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                } */

                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}
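
/* The stop/wake protocol sketched (illustrative): the transmit path stops
 * the queue when too few descriptors remain, and the completion path above
 * wakes it once at least MAX_SKB_FRAGS + 3 descriptors are free again -
 * enough for a maximally fragmented packet plus its parse and split-header
 * descriptors.  The memory barrier above pairs with the transmit side so
 * that a racing stop cannot be missed.
 */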
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(NETIF_MSG_RX_STATUS,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                      BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply(%d)  state is %x\n",
                                  command, fp->state);
                }
                mb(); /* force bnx2x_wait_ramrod to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
                   cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected ramrod (%d)  state is %x\n",
                          command, bp->state);
        }
        mb(); /* force bnx2x_wait_ramrod to see the change */
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}
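
/* Design note (sketch): on the fast path a received buffer is either
 * replaced (bnx2x_alloc_rx_skb() posts a fresh skb at the producer and the
 * old one goes up the stack) or recycled (bnx2x_reuse_rx_skb() moves skb,
 * DMA mapping and descriptor from cons to prod, e.g. after the data was
 * copied into a small skb or an error was seen).  Either way every
 * producer slot stays backed by a mapped buffer.
 */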
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: hw_comp_cons %u  sw_comp_cons %u\n",
           fp->index, hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                unsigned int len, pad;
                struct sw_rx_bd *rx_buf;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];

                DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u  sw_comp_cons %u"
                   "  comp_ring (%u)  bd_ring (%u,%u)\n",
                   hw_comp_cons, sw_comp_cons,
                   comp_ring_cons, bd_prod, bd_cons);
                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %x\n",
                   cqe->fast_path_cqe.type,
                   cqe->fast_path_cqe.error_type_flags,
                   cqe->fast_path_cqe.status_flags,
                   cqe->fast_path_cqe.rss_hash_result,
                   cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);

                /* is this a slowpath msg? */
                if (unlikely(cqe->fast_path_cqe.type)) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;

                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        pci_dma_sync_single_for_device(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                                       pad + RX_COPY_THRESH,
                                                       PCI_DMA_FROMDEVICE);
                        prefetch(skb);
                        prefetch(((char *)(skb)) + 128);

                        /* is this an error packet? */
                        if (unlikely(cqe->fast_path_cqe.error_type_flags &
                                     ETH_RX_ERROR_FALGS)) {
                        /* do we sometimes forward error packets anyway? */
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR flags(%u) Rx packet(%u)\n",
                                   cqe->fast_path_cqe.error_type_flags,
                                   sw_comp_cons);
                                /* TBD make sure MC counts this as a drop */
                                goto reuse_rx;
                        }

                        /* Since we don't have a jumbo ring,
                         * copy small packets if mtu > 1500
                         */
                        if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                            (len <= RX_COPY_THRESH)) {
                                struct sk_buff *new_skb;

                                new_skb = netdev_alloc_skb(bp->dev,
                                                           len + pad);
                                if (new_skb == NULL) {
                                        DP(NETIF_MSG_RX_ERR,
                                           "ERROR packet dropped "
                                           "because of alloc failure\n");
                                        /* TBD count this as a drop? */
                                        goto reuse_rx;
                                }

                                /* aligned copy */
                                skb_copy_from_linear_data_offset(skb, pad,
                                                    new_skb->data + pad, len);
                                skb_reserve(new_skb, pad);
                                skb_put(new_skb, len);

                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

                                skb = new_skb;

                        } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
                                pci_unmap_single(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                                 bp->rx_buf_use_size,
                                                 PCI_DMA_FROMDEVICE);
                                skb_reserve(skb, pad);
                                skb_put(skb, len);

                        } else {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR packet dropped because "
                                   "of alloc failure\n");
reuse_rx:
                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
                                goto next_rx;
                        }

                        skb->protocol = eth_type_trans(skb, bp->dev);

                        skb->ip_summed = CHECKSUM_NONE;
                        if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;

                        /* TBD do we pass bad csum packets in promisc mode? */
                }

#ifdef BCM_VLAN
                if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
                     & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
                    && (bp->vlgrp != NULL))
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;

next_rx:
                rx_buf->skb = NULL;

                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
                rx_pkt++;

                if (rx_pkt == budget)
                        break;
        }

        fp->rx_bd_cons = bd_cons;
        fp->rx_bd_prod = bd_prod;
        fp->rx_comp_cons = sw_comp_cons;
        fp->rx_comp_prod = sw_comp_prod;

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);

        mmiowb(); /* keep prod updates ordered */

        fp->rx_pkt += rx_pkt;

        return rx_pkt;
}
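
/* Ordering note (sketch): the ring indices are stored to fp-> first, the
 * new RCQ producer is then made visible to the TSTORM via REG_WR, and
 * mmiowb() keeps that MMIO write ordered against later writes from another
 * CPU taking over the device.  A usage sketch of this entry point:
 *
 *      work = bnx2x_rx_int(fp, budget);    -- packets processed
 *
 * so the NAPI poll routine can compare work against its budget.
 */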
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;
        struct net_device *dev = bp->dev;
        int index = fp->index;

        DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
        bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        prefetch(fp->rx_cons_sb);
        prefetch(fp->tx_cons_sb);
        prefetch(&fp->status_blk->c_status_block.status_block_index);
        prefetch(&fp->status_blk->u_status_block.status_block_index);

        netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

        return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);
        u16 status = bnx2x_ack_int(bp);

        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }

        DP(NETIF_MSG_INTR, "got an interrupt  status is %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* Return here if interrupt is shared and is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        if (status & 0x2) {
                struct bnx2x_fastpath *fp = &bp->fp[0];

                prefetch(fp->rx_cons_sb);
                prefetch(fp->tx_cons_sb);
                prefetch(&fp->status_blk->c_status_block.status_block_index);
                prefetch(&fp->status_blk->u_status_block.status_block_index);

                netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

                status &= ~0x2;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status & 0x1)) {

                schedule_work(&bp->sp_task);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        DP(NETIF_MSG_INTR, "got an unknown interrupt!  (status is %u)\n",
           status);

        return IRQ_HANDLED;
}
/* end of fast path */

/*
 * General service functions
 */
static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
{
        int port = bp->port;

        NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
               ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
                SHARED_HW_CFG_LED_MODE_SHIFT));
        NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);

        /* Set blinking rate to ~15.9Hz */
        NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
               LED_BLINK_RATE_VAL);
        NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);

        /* On Ax chip versions, the LED scheme is different
           for speeds below 10G */
        if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
                NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
                NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
                NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
        }
}
static void bnx2x_leds_unset(struct bnx2x *bp)
{
        int port = bp->port;

        NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
        NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
}

static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
        u32 val = REG_RD(bp, reg);

        val |= bits;
        REG_WR(bp, reg, val);
        return val;
}

static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
{
        u32 val = REG_RD(bp, reg);

        val &= ~bits;
        REG_WR(bp, reg, val);
        return val;
}
static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 cnt;
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        u8 func = bp->port;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 1 second every 5ms */
        for (cnt = 0; cnt < 200; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
                       resource_bit);
                lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}
static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        u8 func = bp->port;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
        if (!(lock_status & resource_bit)) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }

        REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
        return 0;
}
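
/* Usage sketch (illustrative): hardware resources shared between the two
 * ports are bracketed like
 *
 *      bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *      ... touch MISC_REG_GPIO ...
 *      bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * The lock lives in MISC_REG_DRIVER_CONTROL_1: writing the resource bit to
 * the +4 (set) address attempts the claim, and reading it back confirms
 * whether this function won the race.
 */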
static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO and mask except the float bits */
        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
                break;

        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
                   gpio_num, gpio_shift);
                /* set FLOAT */
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
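
/* Worked example (illustrative): on port 1 with the swap straps inactive,
 * gpio_port = 0 ^ 1 = 1, so gpio_num 2 gives
 * gpio_shift = 2 + MISC_REGISTERS_GPIO_PORT_SHIFT and
 * gpio_mask = 1 << gpio_shift.  Driving it low then clears that mask in
 * the FLOAT field and sets it in the CLR field of MISC_REG_GPIO, e.g.
 *
 *      bnx2x_set_gpio(bp, 2, MISC_REGISTERS_GPIO_OUTPUT_LOW);
 */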
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
        u32 spio_mask = (1 << spio_num);
        u32 spio_reg;

        if ((spio_num < MISC_REGISTERS_SPIO_4) ||
            (spio_num > MISC_REGISTERS_SPIO_7)) {
                BNX2X_ERR("Invalid SPIO %d\n", spio_num);
                return -EINVAL;
        }

        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        /* read SPIO and mask except the float bits */
        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_SPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
                /* clear FLOAT and set CLR */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
                break;

        case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
                /* clear FLOAT and set SET */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
                break;

        case MISC_REGISTERS_SPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
                /* set FLOAT */
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_SPIO, spio_reg);
        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
        return 0;
}
static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 tmp;
        int i, rc = 0;

        /* DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  val 0x%08x\n",
           bp->phy_addr, reg, val); */

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* disable auto polling */
                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        tmp = ((bp->phy_addr << 21) | (reg << 16) |
               (val & EMAC_MDIO_COMM_DATA) |
               EMAC_MDIO_COMM_COMMAND_WRITE_22 |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");
                rc = -EBUSY;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* enable auto polling */
                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
        }

        return rc;
}
static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 val;
        int i, rc = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* disable auto polling */
                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        val = ((bp->phy_addr << 21) | (reg << 16) |
               EMAC_MDIO_COMM_COMMAND_READ_22 |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        val &= EMAC_MDIO_COMM_DATA;
                        break;
                }
        }

        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");
                *ret_val = 0;
                rc = -EBUSY;
        } else
                *ret_val = val;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* enable auto polling */
                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
        }

        /* DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  ret_val 0x%08x\n",
           bp->phy_addr, reg, *ret_val); */

        return rc;
}
static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
                                   u32 phy_addr, u32 reg, u32 addr, u32 val)
{
        u32 tmp;
        int i, rc = 0;

        /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
        tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
        tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
                (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
        REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        udelay(40);

        /* address */
        tmp = ((phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }
        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");
                rc = -EBUSY;
        }

        /* data */
        tmp = ((phy_addr << 21) | (reg << 16) | val |
               EMAC_MDIO_COMM_COMMAND_WRITE_45 |
               EMAC_MDIO_COMM_START_BUSY);
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }
        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");
                rc = -EBUSY;
        }

        /* unset clause 45 mode, set the MDIO clock to a faster value
         * (0x13 => 6.25MHz) and restore the AUTO poll if needed
         */
        tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
        tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);

        return rc;
}
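
/* Clause 45 access pattern (sketch): unlike clause 22, each operation is
 * two transactions on EMAC_REG_EMAC_MDIO_COMM - first an ADDRESS cycle
 * that latches the 16-bit register address within the device (reg here is
 * the devad, addr the register), then a WRITE_45 (or READ_45) cycle that
 * moves the data, each polled until START_BUSY clears.  A call therefore
 * looks like
 *
 *      bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, phy_addr,
 *                              devad, reg_addr, val);
 */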
static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
                              u32 addr, u32 val)
{
        u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

        return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
                                       reg, addr, val);
}
static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
                                  u32 phy_addr, u32 reg, u32 addr,
                                  u32 *ret_val)
{
        u32 val;
        int i, rc = 0;

        /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
        val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
        val |= (EMAC_MDIO_MODE_CLAUSE_45 |
                (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
        REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        udelay(40);

        /* address */
        val = ((phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }
        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");
                *ret_val = 0;
                rc = -EBUSY;
        }

        /* data */
        val = ((phy_addr << 21) | (reg << 16) |
               EMAC_MDIO_COMM_COMMAND_READ_45 |
               EMAC_MDIO_COMM_START_BUSY);
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        val &= EMAC_MDIO_COMM_DATA;
                        break;
                }
        }
        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");
                *ret_val = 0;
                rc = -EBUSY;
        } else
                *ret_val = val;

        /* unset clause 45 mode, set the MDIO clock to a faster value
         * (0x13 => 6.25MHz) and restore the AUTO poll if needed
         */
        val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
        val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
                val |= EMAC_MDIO_MODE_AUTO_POLL;
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);

        return rc;
}
static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
                             u32 addr, u32 *ret_val)
{
        u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

        return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
                                      reg, addr, ret_val);
}
static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
                               u32 addr, u32 val)
{
        int i;
        u32 rd_val;

        for (i = 0; i < 10; i++) {
                bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
                bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
                /* if the read value is not the same as the value we wrote,
                   we should write it again */
                if (rd_val == val)
                        return 0;
        }
        BNX2X_ERR("MDIO write in CL45 failed\n");
        return -EBUSY;
}
static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
{
        switch (pause_result) {                 /* ASYM P ASYM P */
        case 0xb:                               /*  1  0   1  1 */
                bp->flow_ctrl = FLOW_CTRL_TX;
                break;

        case 0xe:                               /*  1  1   1  0 */
                bp->flow_ctrl = FLOW_CTRL_RX;
                break;

        case 0x5:                               /*  0  1   0  1 */
        case 0x7:                               /*  0  1   1  1 */
        case 0xd:                               /*  1  1   0  1 */
        case 0xf:                               /*  1  1   1  1 */
                bp->flow_ctrl = FLOW_CTRL_BOTH;
                break;

        default:
                break;
        }
}
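
/* How to read the table above (sketch): pause_result packs the local
 * advertisement in bits 3:2 and the link partner's in bits 1:0, each as
 * {ASYM, PAUSE}.  For 0xb the local side advertises {1,0} and the partner
 * {1,1}: only the local side asked for asymmetric pause, so the resolution
 * is TX-only flow control.  0xf, both sides advertising symmetric pause,
 * resolves to FLOW_CTRL_BOTH.
 */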
static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
{
        u32 ext_phy_addr;
        u32 ld_pause;           /* local */
        u32 lp_pause;           /* link partner */
        u32 an_complete;        /* AN complete */
        u32 pause_result;
        u8 ret = 0;

        ext_phy_addr = ((bp->ext_phy_config &
                         PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
                        PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

        bnx2x_mdio45_read(bp, ext_phy_addr,
                          EXT_PHY_KR_AUTO_NEG_DEVAD,
                          EXT_PHY_KR_STATUS, &an_complete);
        bnx2x_mdio45_read(bp, ext_phy_addr,
                          EXT_PHY_KR_AUTO_NEG_DEVAD,
                          EXT_PHY_KR_STATUS, &an_complete);

        if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
                ret = 1;
                bnx2x_mdio45_read(bp, ext_phy_addr,
                                  EXT_PHY_KR_AUTO_NEG_DEVAD,
                                  EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
                bnx2x_mdio45_read(bp, ext_phy_addr,
                                  EXT_PHY_KR_AUTO_NEG_DEVAD,
                                  EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
                pause_result = (ld_pause &
                                EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
                pause_result |= (lp_pause &
                                EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
                DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
                   pause_result);
                bnx2x_pause_resolve(bp, pause_result);
        }
        return ret;
}
static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
{
        u32 ld_pause;   /* local driver */
        u32 lp_pause;   /* link partner */
        u32 pause_result;

        bp->flow_ctrl = 0;

        /* resolve from gp_status in case of AN complete and not sgmii */
        if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
            (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
            (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
            (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {

                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
                bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
                                  &ld_pause);
                bnx2x_mdio22_read(bp,
                        MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
                                  &lp_pause);
                pause_result = (ld_pause &
                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
                pause_result |= (lp_pause &
                                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
                DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
                bnx2x_pause_resolve(bp, pause_result);

        } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
                   !(bnx2x_ext_phy_resove_fc(bp))) {

                if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
                        switch (bp->req_flow_ctrl) {
                        case FLOW_CTRL_AUTO:
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_BOTH;
                                else
                                        bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_TX:
                                bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_RX:
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_RX;
                                break;

                        case FLOW_CTRL_BOTH:
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_BOTH;
                                else
                                        bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_NONE:
                        default:
                                break;
                        }
                } else { /* forced mode */
                        switch (bp->req_flow_ctrl) {
                        case FLOW_CTRL_AUTO:
                                DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
                                                   " req_autoneg 0x%x\n",
                                   bp->req_flow_ctrl, bp->req_autoneg);
                                break;

                        case FLOW_CTRL_TX:
                        case FLOW_CTRL_RX:
                        case FLOW_CTRL_BOTH:
                                bp->flow_ctrl = bp->req_flow_ctrl;
                                break;

                        case FLOW_CTRL_NONE:
                        default:
                                break;
                        }
                }
        }
        DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
}
static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
{
        bp->link_status = 0;

        if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
                DP(NETIF_MSG_LINK, "phy link up\n");

                bp->phy_link_up = 1;
                bp->link_status |= LINK_STATUS_LINK_UP;

                if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
                        bp->duplex = DUPLEX_FULL;
                else
                        bp->duplex = DUPLEX_HALF;

                bnx2x_flow_ctrl_resolve(bp, gp_status);

                switch (gp_status & GP_STATUS_SPEED_MASK) {
                case GP_STATUS_10M:
                        bp->line_speed = SPEED_10;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_10TFD;
                        else
                                bp->link_status |= LINK_10THD;
                        break;

                case GP_STATUS_100M:
                        bp->line_speed = SPEED_100;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_100TXFD;
                        else
                                bp->link_status |= LINK_100TXHD;
                        break;

                case GP_STATUS_1G:
                case GP_STATUS_1G_KX:
                        bp->line_speed = SPEED_1000;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_1000TFD;
                        else
                                bp->link_status |= LINK_1000THD;
                        break;

                case GP_STATUS_2_5G:
                        bp->line_speed = SPEED_2500;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_2500TFD;
                        else
                                bp->link_status |= LINK_2500THD;
                        break;

                case GP_STATUS_5G:
                case GP_STATUS_6G:
                        BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
                                  gp_status);
                        break;

                case GP_STATUS_10G_KX4:
                case GP_STATUS_10G_HIG:
                case GP_STATUS_10G_CX4:
                        bp->line_speed = SPEED_10000;
                        bp->link_status |= LINK_10GTFD;
                        break;

                case GP_STATUS_12G_HIG:
                        bp->line_speed = SPEED_12000;
                        bp->link_status |= LINK_12GTFD;
                        break;

                case GP_STATUS_12_5G:
                        bp->line_speed = SPEED_12500;
                        bp->link_status |= LINK_12_5GTFD;
                        break;

                case GP_STATUS_13G:
                        bp->line_speed = SPEED_13000;
                        bp->link_status |= LINK_13GTFD;
                        break;

                case GP_STATUS_15G:
                        bp->line_speed = SPEED_15000;
                        bp->link_status |= LINK_15GTFD;
                        break;

                case GP_STATUS_16G:
                        bp->line_speed = SPEED_16000;
                        bp->link_status |= LINK_16GTFD;
                        break;

                default:
                        BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
                                  gp_status);
                        break;
                }

                bp->link_status |= LINK_STATUS_SERDES_LINK;

                if (bp->req_autoneg & AUTONEG_SPEED) {
                        bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;

                        if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
                                bp->link_status |=
                                        LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;

                        if (bp->autoneg & AUTONEG_PARALLEL)
                                bp->link_status |=
                                        LINK_STATUS_PARALLEL_DETECTION_USED;
                }

                if (bp->flow_ctrl & FLOW_CTRL_TX)
                        bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;

                if (bp->flow_ctrl & FLOW_CTRL_RX)
                        bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;

        } else { /* link_down */
                DP(NETIF_MSG_LINK, "phy link down\n");

                bp->phy_link_up = 0;

                bp->line_speed = 0;
                bp->duplex = DUPLEX_FULL;
                bp->flow_ctrl = 0;
        }

        DP(NETIF_MSG_LINK, "gp_status 0x%x  phy_link_up %d\n"
           DP_LEVEL "  line_speed %d  duplex %d  flow_ctrl 0x%x"
                    "  link_status 0x%x\n",
           gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
           bp->flow_ctrl, bp->link_status);
}
static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
{
        int port = bp->port;

        /* first reset all status
         * we assume only one line will be changed at a time */
        bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                       (NIG_STATUS_XGXS0_LINK10G |
                        NIG_STATUS_XGXS0_LINK_STATUS |
                        NIG_STATUS_SERDES0_LINK_STATUS));
        if (bp->phy_link_up) {
                if (is_10g) {
                        /* Disable the 10G link interrupt
                         * by writing 1 to the status register
                         */
                        DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                                      NIG_STATUS_XGXS0_LINK10G);

                } else if (bp->phy_flags & PHY_XGXS_FLAG) {
                        /* Disable the link interrupt
                         * by writing 1 to the relevant lane
                         * in the status register
                         */
                        DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                                      ((1 << bp->ser_lane) <<
                                       NIG_STATUS_XGXS0_LINK_STATUS_SIZE));

                } else { /* SerDes */
                        DP(NETIF_MSG_LINK, "SerDes phy link up\n");
                        /* Disable the link interrupt
                         * by writing 1 to the status register
                         */
                        bnx2x_bits_en(bp,
                                      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
                                      NIG_STATUS_SERDES0_LINK_STATUS);
                }

        } else { /* link_down */
        }
}
static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
{
        u32 ext_phy_type;
        u32 ext_phy_addr;
        u32 val1 = 0, val2;
        u32 rx_sd, pcs_status;

        if (bp->phy_flags & PHY_XGXS_FLAG) {
                ext_phy_addr = ((bp->ext_phy_config &
                                 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
                                PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

                ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
                switch (ext_phy_type) {
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
                        DP(NETIF_MSG_LINK, "XGXS Direct\n");
                        val1 = 1;
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
                        DP(NETIF_MSG_LINK, "XGXS 8705\n");
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_WIS_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);

                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_WIS_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);

                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
                        DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
                        val1 = (rx_sd & 0x1);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
                        DP(NETIF_MSG_LINK, "XGXS 8706\n");
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);

                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);

                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PCS_DEVAD,
                                          EXT_PHY_OPT_PCS_STATUS, &pcs_status);
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_AUTO_NEG_DEVAD,
                                          EXT_PHY_OPT_AN_LINK_STATUS, &val2);

                        DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
                           "  pcs_status 0x%x  1Gbps link_status 0x%x 0x%x\n",
                           rx_sd, pcs_status, val2, (val2 & (1<<1)));
                        /* link is up if both bit 0 of pmd_rx_sd and
                         * bit 0 of pcs_status are set, or if the autoneg bit
                         * is set
                         */
                        val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
                        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

                        /* clear the interrupt LASI status register */
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_KR_PCS_DEVAD,
                                               EXT_PHY_KR_LASI_STATUS, &val2);
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_KR_PCS_DEVAD,
                                               EXT_PHY_KR_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
                           val2, val1);
                        /* Check the LASI */
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_KR_PMA_PMD_DEVAD,
                                               0x9003, &val2);
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_KR_PMA_PMD_DEVAD,
                                               0x9003, &val1);
                        DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
                           val2, val1);
                        /* Check the link status */
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_KR_PCS_DEVAD,
                                               EXT_PHY_KR_PCS_STATUS, &val2);
                        DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
                        /* Check the link status on 1.1.2 */
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_OPT_PMA_PMD_DEVAD,
                                               EXT_PHY_KR_STATUS, &val2);
                        bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
                                               ext_phy_addr,
                                               EXT_PHY_OPT_PMA_PMD_DEVAD,
                                               EXT_PHY_KR_STATUS, &val1);
                        DP(NETIF_MSG_LINK,
                           "KR PMA status 0x%x->0x%x\n", val2, val1);
                        val1 = ((val1 & 4) == 4);
                        /* If 1G was requested assume the link is up */
                        if (!(bp->req_autoneg & AUTONEG_SPEED) &&
                            (bp->req_line_speed == SPEED_1000))
                                val1 = 1;
                        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val2);
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_OPT_LASI_STATUS, &val1);
                        DP(NETIF_MSG_LINK,
                           "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_KR_STATUS, &val2);
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_OPT_PMA_PMD_DEVAD,
                                          EXT_PHY_KR_STATUS, &val1);
                        DP(NETIF_MSG_LINK,
                           "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
                        val1 = ((val1 & 4) == 4);
                        /*
                         * print the AN outcome of the SFX7101 PHY
                         */
                        bnx2x_mdio45_read(bp, ext_phy_addr,
                                          EXT_PHY_KR_AUTO_NEG_DEVAD,
                                          0x21, &val2);
                        DP(NETIF_MSG_LINK,
                           "SFX7101 AN status 0x%x->%s\n", val2,
                           (val2 & (1<<14)) ? "Master" : "Slave");
                        break;

                default:
                        DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
                           bp->ext_phy_config);
                        break;
                }

        } else { /* SerDes */
                ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
                switch (ext_phy_type) {
                case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
                        DP(NETIF_MSG_LINK, "SerDes Direct\n");
                        val1 = 1;
                        break;

                case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
                        DP(NETIF_MSG_LINK, "SerDes 5482\n");
                        val1 = 1;
                        break;

                default:
                        DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
                           bp->ext_phy_config);
                        break;
                }
        }

        return val1;
}
static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
{
        int port = bp->port;
        u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
                               NIG_REG_INGRESS_BMAC0_MEM;
        u32 wb_write[2];
        u32 val;

        DP(NETIF_MSG_LINK, "enabling BigMAC\n");
        /* reset and unreset the BigMac */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
               (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
        msleep(5);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
               (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

        /* enable access for bmac registers */
        NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);

        /* XGXS control */
        wb_write[0] = 0x3c;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
                    wb_write, 2);

        /* tx MAC SA */
        wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
                       (bp->dev->dev_addr[3] << 16) |
                       (bp->dev->dev_addr[4] << 8) |
                        bp->dev->dev_addr[5]);
        wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
                        bp->dev->dev_addr[1]);
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
                    wb_write, 2);

        /* tx control */
        val = 0xc0;
        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= 0x800000;
        wb_write[0] = val;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);

        /* set tx mtu */
        wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* - CRC */
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);

        /* mac control */
        val = 0x3;
        if (is_lb) {
                val |= 0x4;
                DP(NETIF_MSG_LINK, "enable bmac loopback\n");
        }
        wb_write[0] = val;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
                    wb_write, 2);

        /* rx control set to don't strip crc */
        val = 0x14;
        if (bp->flow_ctrl & FLOW_CTRL_RX)
                val |= 0x20;
        wb_write[0] = val;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);

        /* set rx mtu */
        wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);

        /* set cnt max size */
        wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* - VLAN */
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
                    wb_write, 2);

        /* configure safc */
        wb_write[0] = 0x1000200;
        wb_write[1] = 0;
        REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
                    wb_write, 2);

        /* fix for emulation */
        if (CHIP_REV(bp) == CHIP_REV_EMUL) {
                wb_write[0] = 0xf000;
                wb_write[1] = 0;
                REG_WR_DMAE(bp,
                            bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
                            wb_write, 2);
        }

        /* reset old bmac stats */
        memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));

        NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);

        NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
        NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);

        /* disable the NIG in/out to the emac */
        NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
        NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
        NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);

        /* enable the NIG in/out to the bmac */
        NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);

        NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
        val = 0;
        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val = 1;
        NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
        NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);

        bp->phy_flags |= PHY_BMAC_FLAG;

        bp->stats_state = STATS_STATE_ENABLE;
}
static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
{
        int port = bp->port;
        u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
                               NIG_REG_INGRESS_BMAC0_MEM;
        u32 wb_write[2];

        /* Only if the bmac is out of reset */
        if (REG_RD(bp, MISC_REG_RESET_REG_2) &
            (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
                /* Clear Rx Enable bit in BMAC_CONTROL register */
#ifdef BNX2X_DMAE_RD
                bnx2x_read_dmae(bp, bmac_addr +
                                BIGMAC_REGISTER_BMAC_CONTROL, 2);
                wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
                wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
#else
                wb_write[0] = REG_RD(bp,
                                bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
                wb_write[1] = REG_RD(bp,
                                bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
#endif
                wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
                REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
                            wb_write, 2);
        }
}
2312 static void bnx2x_emac_enable(struct bnx2x *bp)
2314 int port = bp->port;
2315 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2319 DP(NETIF_MSG_LINK, "enabling EMAC\n");
2320 /* reset and unreset the emac core */
2321 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2322 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2324 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2325 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2327 /* enable emac and not bmac */
2328 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
2331 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2332 /* Use lane 1 (of lanes 0-3) */
2333 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2334 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2337 else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2338 /* Use lane 1 (of lanes 0-3) */
2339 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2340 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2344 if (bp->phy_flags & PHY_XGXS_FLAG) {
2345 DP(NETIF_MSG_LINK, "XGXS\n");
2346 /* select the master lanes (out of 0-3) */
2347 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
2350 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2352 } else { /* SerDes */
2353 DP(NETIF_MSG_LINK, "SerDes\n");
2355 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2360 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
2362 /* init emac - use read-modify-write */
2363 /* self clear reset */
2364 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2365 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
2368 while (val & EMAC_MODE_RESET) {
2369 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2370 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2372 BNX2X_ERR("EMAC timeout!\n");
2379 EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2382 while (val & EMAC_TX_MODE_RESET) {
2383 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2384 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2386 BNX2X_ERR("EMAC timeout!\n");
2392 if (CHIP_REV_IS_SLOW(bp)) {
2393 /* config GMII mode */
2394 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2395 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2398 /* pause enable/disable */
2399 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2400 EMAC_RX_MODE_FLOW_EN);
2401 if (bp->flow_ctrl & FLOW_CTRL_RX)
2402 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2403 EMAC_RX_MODE_FLOW_EN);
2405 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2406 EMAC_TX_MODE_EXT_PAUSE_EN);
2407 if (bp->flow_ctrl & FLOW_CTRL_TX)
2408 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2409 EMAC_TX_MODE_EXT_PAUSE_EN);
2412 /* KEEP_VLAN_TAG, promiscuous */
2413 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2414 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2415 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2417 /* identify magic packets */
2418 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2419 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2421 /* enable emac for jumbo packets */
2422 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2423 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2424 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2427 NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2429 val = ((bp->dev->dev_addr[0] << 8) |
2430 bp->dev->dev_addr[1]);
2431 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2433 val = ((bp->dev->dev_addr[2] << 24) |
2434 (bp->dev->dev_addr[3] << 16) |
2435 (bp->dev->dev_addr[4] << 8) |
2436 bp->dev->dev_addr[5]);
2437 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2439 /* disable the NIG in/out to the bmac */
2440 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2441 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2442 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2444 /* enable the NIG in/out to the emac */
2445 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2447 if (bp->flow_ctrl & FLOW_CTRL_TX)
2449 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2450 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2452 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2453 /* take the BigMac out of reset */
2454 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2455 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2457 /* enable access for bmac registers */
2458 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2461 bp->phy_flags |= PHY_EMAC_FLAG;
2463 bp->stats_state = STATS_STATE_ENABLE;
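/* Aside: the MAC-match programming above splits the 6-byte station address
 * across two 32-bit registers, bytes [0:1] into MAC_MATCH and bytes [2:5]
 * into MAC_MATCH + 4, most significant byte first. A compact sketch of the
 * same layout; the helper is hypothetical and unused.
 */
static inline void bnx2x_example_set_mac_match(struct bnx2x *bp,
					       const u8 *mac)
{
	u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

	REG_WR(bp, emac_base + EMAC_REG_EMAC_MAC_MATCH,
	       (mac[0] << 8) | mac[1]);
	REG_WR(bp, emac_base + EMAC_REG_EMAC_MAC_MATCH + 4,
	       (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5]);
}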
2466 static void bnx2x_emac_program(struct bnx2x *bp)
2469 int port = bp->port;
2471 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2472 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2473 (EMAC_MODE_25G_MODE |
2474 EMAC_MODE_PORT_MII_10M |
2475 EMAC_MODE_HALF_DUPLEX));
2476 switch (bp->line_speed) {
2478 mode |= EMAC_MODE_PORT_MII_10M;
2482 mode |= EMAC_MODE_PORT_MII;
2486 mode |= EMAC_MODE_PORT_GMII;
2490 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2494 /* 10G not valid for EMAC */
2495 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2499 if (bp->duplex == DUPLEX_HALF)
2500 mode |= EMAC_MODE_HALF_DUPLEX;
2501 bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2504 bnx2x_leds_set(bp, bp->line_speed);
2507 static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2513 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2514 bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2516 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2517 bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2519 /* take bits [10:7] of lp_up2 and position them at [15:12] */
2520 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2521 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2522 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2524 if ((lp_up2 != 0) &&
2525 (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2526 /* replace tx_driver bits [15:12] */
2527 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2528 tx_driver |= lp_up2;
2529 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
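/* Aside: the lp_up2 manipulation above is the classic mask/shift idiom for
 * moving a bit-field between register layouts: isolate the source field,
 * shift it down to bit 0, then shift it up to the destination position.
 * Generic sketch (hypothetical helper, unused):
 */
static inline u32 bnx2x_example_move_field(u32 val, u32 src_mask,
					   int src_shift, int dst_shift)
{
	return ((val & src_mask) >> src_shift) << dst_shift;
}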
2533 static void bnx2x_pbf_update(struct bnx2x *bp)
2535 int port = bp->port;
2541 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2543 /* wait for init credit */
2544 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2545 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2546 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2548 while ((init_crd != crd) && count) {
2551 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2554 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2555 if (init_crd != crd)
2556 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2558 if (bp->flow_ctrl & FLOW_CTRL_RX)
2560 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2562 /* update threshold */
2563 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2564 /* update init credit */
2565 init_crd = 778; /* (800-18-4) */
2568 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2570 /* update threshold */
2571 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2572 /* update init credit */
2573 switch (bp->line_speed) {
2577 init_crd = thresh + 55 - 22;
2581 init_crd = thresh + 138 - 22;
2585 init_crd = thresh + 553 - 22;
2589 BNX2X_ERR("Invalid line_speed 0x%x\n",
2594 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2595 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2596 bp->line_speed, init_crd);
2598 /* probe the credit changes */
2599 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2601 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2604 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
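/* Aside: the init-credit arithmetic above works in 16-byte units (thresh is
 * the maximal frame size divided by 16) plus a per-speed allowance minus a
 * fixed 22-unit correction. A sketch of the same calculation, assuming the
 * 1G/2.5G/10G mapping of the elided case labels; hypothetical helper,
 * unused.
 */
static inline u32 bnx2x_example_pbf_init_crd(u32 thresh, int mbps)
{
	if (mbps <= 1000)
		return thresh + 55 - 22;
	if (mbps <= 2500)
		return thresh + 138 - 22;
	return thresh + 553 - 22;	/* 10G */
}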
2607 static void bnx2x_update_mng(struct bnx2x *bp)
2610 SHMEM_WR(bp, port_mb[bp->port].link_status,
2614 static void bnx2x_link_report(struct bnx2x *bp)
2617 netif_carrier_on(bp->dev);
2618 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2620 printk("%d Mbps ", bp->line_speed);
2622 if (bp->duplex == DUPLEX_FULL)
2623 printk("full duplex");
2625 printk("half duplex");
2627 if (bp->flow_ctrl) {
2628 if (bp->flow_ctrl & FLOW_CTRL_RX) {
2629 printk(", receive ");
2630 if (bp->flow_ctrl & FLOW_CTRL_TX)
2631 printk("& transmit ");
2633 printk(", transmit ");
2635 printk("flow control ON");
2639 } else { /* link_down */
2640 netif_carrier_off(bp->dev);
2641 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2645 static void bnx2x_link_up(struct bnx2x *bp)
2647 int port = bp->port;
2650 bnx2x_pbf_update(bp);
2653 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2655 /* update shared memory */
2656 bnx2x_update_mng(bp);
2658 /* indicate link up */
2659 bnx2x_link_report(bp);
2662 static void bnx2x_link_down(struct bnx2x *bp)
2664 int port = bp->port;
2667 if (bp->stats_state != STATS_STATE_DISABLE) {
2668 bp->stats_state = STATS_STATE_STOP;
2669 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2672 /* indicate no mac active */
2673 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2675 /* update shared memory */
2676 bnx2x_update_mng(bp);
2678 /* activate nig drain */
2679 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2682 bnx2x_bmac_rx_disable(bp);
2683 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2684 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2686 /* indicate link down */
2687 bnx2x_link_report(bp);
2690 static void bnx2x_init_mac_stats(struct bnx2x *bp);
2692 /* This function is called upon link interrupt */
2693 static void bnx2x_link_update(struct bnx2x *bp)
2695 int port = bp->port;
2700 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
2701 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
2702 " 10G %x, XGXS_LINK %x\n", port,
2703 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2704 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2705 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2706 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2707 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2708 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2709 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2713 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2714 /* avoid fast toggling */
2715 for (i = 0; i < 10; i++) {
2717 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2721 bnx2x_link_settings_status(bp, gp_status);
2723 /* anything 10G and over uses the bmac */
2724 link_10g = ((bp->line_speed >= SPEED_10000) &&
2725 (bp->line_speed <= SPEED_16000));
2727 bnx2x_link_int_ack(bp, link_10g);
2729 /* link is up only if both local phy and external phy are up */
2730 bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
2733 bnx2x_bmac_enable(bp, 0);
2734 bnx2x_leds_set(bp, SPEED_10000);
2737 bnx2x_emac_enable(bp);
2738 bnx2x_emac_program(bp);
2741 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2742 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2743 bnx2x_set_sgmii_tx_driver(bp);
2748 } else { /* link down */
2749 bnx2x_leds_unset(bp);
2750 bnx2x_link_down(bp);
2753 bnx2x_init_mac_stats(bp);
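/* Aside: the MAC selection rule used above, in isolation: any link speed
 * from 10G up to 16G runs on the BigMAC, anything slower on the EMAC.
 * Hypothetical predicate, unused.
 */
static inline int bnx2x_example_link_uses_bmac(int line_speed)
{
	return (line_speed >= SPEED_10000) && (line_speed <= SPEED_16000);
}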
2757 * Init service functions
2760 static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2762 u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2763 (bp->phy_addr + bp->ser_lane) : 0;
2765 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2766 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2769 static void bnx2x_set_master_ln(struct bnx2x *bp)
2773 /* set the master_ln for AN */
2774 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2775 bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2777 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2778 (new_master_ln | bp->ser_lane));
2781 static void bnx2x_reset_unicore(struct bnx2x *bp)
2786 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2787 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2788 /* reset the unicore */
2789 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2790 (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2792 /* wait for the reset to self clear */
2793 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2796 /* the reset erased the previous bank value */
2797 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2798 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2801 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2807 BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2808 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2812 static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2814 /* Each two bits represents a lane number:
2815 no swap is 0123 => 0x1b, so there is no need to enable the swap */
2817 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2818 if (bp->rx_lane_swap != 0x1b) {
2819 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2821 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2822 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2824 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2827 if (bp->tx_lane_swap != 0x1b) {
2828 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2830 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2832 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
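/* Aside: the lane-swap registers encode one lane number per 2-bit field,
 * which is why the identity map 0-1-2-3 packs to 0x1b (0b00011011) and the
 * code above can skip the write when rx/tx_lane_swap already reads 0x1b.
 * The field order (lane 0 in the top field) is inferred from that constant;
 * hypothetical helper, unused.
 */
static inline u32 bnx2x_example_pack_lane_map(u8 l0, u8 l1, u8 l2, u8 l3)
{
	return (l0 << 6) | (l1 << 4) | (l2 << 2) | l3;	/* 0,1,2,3 -> 0x1b */
}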
2836 static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2840 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2841 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2844 if (bp->autoneg & AUTONEG_PARALLEL) {
2845 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2847 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2849 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2852 if (bp->phy_flags & PHY_XGXS_FLAG) {
2853 DP(NETIF_MSG_LINK, "XGXS\n");
2854 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2856 bnx2x_mdio22_write(bp,
2857 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2858 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2860 bnx2x_mdio22_read(bp,
2861 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2864 if (bp->autoneg & AUTONEG_PARALLEL) {
2866 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2869 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2871 bnx2x_mdio22_write(bp,
2872 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2875 /* Disable parallel detection of HiG */
2876 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2877 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2878 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2879 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
2883 static void bnx2x_set_autoneg(struct bnx2x *bp)
2888 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2889 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2890 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2891 (bp->autoneg & AUTONEG_CL37)) {
2892 /* CL37 Autoneg Enabled */
2893 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2895 /* CL37 Autoneg Disabled */
2896 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2897 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2899 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2901 /* Enable/Disable Autodetection */
2902 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2903 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2904 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2906 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2907 (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2908 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2910 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2912 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2914 /* Enable TetonII and BAM autoneg */
2915 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2916 bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2918 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2919 (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2920 /* Enable BAM aneg Mode and TetonII aneg Mode */
2921 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2922 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2924 /* TetonII and BAM Autoneg Disabled */
2925 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2926 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2928 bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2931 /* Enable Clause 73 Aneg */
2932 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2933 (bp->autoneg & AUTONEG_CL73)) {
2934 /* Enable BAM Station Manager */
2935 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2936 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2937 (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2938 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2939 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2941 /* Merge CL73 and CL37 aneg resolution */
2942 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2944 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2946 MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2948 /* Set the CL73 AN speed */
2949 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
2950 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
2951 /* In the SerDes we support only 1G.
2952 In the XGXS we support 10G KX4,
2953 but we currently do not support KR */
2954 if (bp->phy_flags & PHY_XGXS_FLAG) {
2955 DP(NETIF_MSG_LINK, "XGXS\n");
2957 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2959 DP(NETIF_MSG_LINK, "SerDes\n");
2961 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2963 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2965 /* CL73 Autoneg Enabled */
2966 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2968 /* CL73 Autoneg Disabled */
2971 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2972 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2975 /* program SerDes, forced speed */
2976 static void bnx2x_program_serdes(struct bnx2x *bp)
2980 /* program duplex, disable autoneg */
2981 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2982 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2983 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2984 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2985 if (bp->req_duplex == DUPLEX_FULL)
2986 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2987 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2990 - needed only if the speed is greater than 1G (2.5G or 10G) */
2991 if (bp->req_line_speed > SPEED_1000) {
2992 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2993 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2994 /* clearing the speed value before setting the right speed */
2995 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2996 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2997 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2998 if (bp->req_line_speed == SPEED_10000)
3000 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
3001 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
3005 static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
3009 /* configure the 48 bits for BAM AN */
3010 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
3012 /* set extended capabilities */
3013 if (bp->advertising & ADVERTISED_2500baseX_Full)
3014 val |= MDIO_OVER_1G_UP1_2_5G;
3015 if (bp->advertising & ADVERTISED_10000baseT_Full)
3016 val |= MDIO_OVER_1G_UP1_10G;
3017 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
3019 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
3022 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
3026 /* for AN, we are always publishing full duplex */
3027 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3029 /* resolve pause mode and advertisement
3030 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
3031 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
3032 switch (bp->req_flow_ctrl) {
3033 case FLOW_CTRL_AUTO:
3034 if (bp->dev->mtu <= 4500) {
3036 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3037 bp->advertising |= (ADVERTISED_Pause |
3038 ADVERTISED_Asym_Pause);
3041 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3042 bp->advertising |= ADVERTISED_Asym_Pause;
3048 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3049 bp->advertising |= ADVERTISED_Asym_Pause;
3053 if (bp->dev->mtu <= 4500) {
3055 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3056 bp->advertising |= (ADVERTISED_Pause |
3057 ADVERTISED_Asym_Pause);
3060 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3061 bp->advertising &= ~(ADVERTISED_Pause |
3062 ADVERTISED_Asym_Pause);
3066 case FLOW_CTRL_BOTH:
3067 if (bp->dev->mtu <= 4500) {
3069 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3070 bp->advertising |= (ADVERTISED_Pause |
3071 ADVERTISED_Asym_Pause);
3074 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3075 bp->advertising |= ADVERTISED_Asym_Pause;
3079 case FLOW_CTRL_NONE:
3081 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3082 bp->advertising &= ~(ADVERTISED_Pause |
3083 ADVERTISED_Asym_Pause);
3086 } else { /* forced mode */
3087 switch (bp->req_flow_ctrl) {
3088 case FLOW_CTRL_AUTO:
3089 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3090 " req_autoneg 0x%x\n",
3091 bp->req_flow_ctrl, bp->req_autoneg);
3096 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3097 bp->advertising |= ADVERTISED_Asym_Pause;
3101 case FLOW_CTRL_BOTH:
3102 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3103 bp->advertising |= (ADVERTISED_Pause |
3104 ADVERTISED_Asym_Pause);
3107 case FLOW_CTRL_NONE:
3109 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3110 bp->advertising &= ~(ADVERTISED_Pause |
3111 ADVERTISED_Asym_Pause);
3116 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3117 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
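/* Aside: for the MTU-dependent branches above (AUTO/RX/BOTH), the pause
 * advertisement rule reduces to a single test: symmetric pause is only
 * offered while the MTU is at most 4500, otherwise only asymmetric pause
 * is advertised (presumably so the RX buffers can still absorb a full
 * frame). Sketch of that rule; hypothetical helper, unused.
 */
static inline u16 bnx2x_example_pause_adv(int mtu)
{
	if (mtu <= 4500)
		return MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
	return MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
}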
3120 static void bnx2x_restart_autoneg(struct bnx2x *bp)
3122 if (bp->autoneg & AUTONEG_CL73) {
3123 /* enable and restart clause 73 aneg */
3126 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3127 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3129 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3131 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
3132 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
3135 /* Enable and restart BAM/CL37 aneg */
3138 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3139 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3141 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3143 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3144 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
3148 static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
3152 /* in SGMII mode, the unicore is always slave */
3153 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
3154 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3156 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
3157 /* set sgmii mode (and not fiber) */
3158 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
3159 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
3160 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
3161 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3164 /* if forced speed */
3165 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3166 /* set speed, disable autoneg */
3169 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3170 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3172 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3173 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
3174 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
3176 switch (bp->req_line_speed) {
3179 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
3183 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
3186 /* there is nothing to set for 10M */
3189 /* invalid speed for SGMII */
3190 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
3191 bp->req_line_speed);
3195 /* setting the full duplex */
3196 if (bp->req_duplex == DUPLEX_FULL)
3198 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
3199 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3202 } else { /* AN mode */
3203 /* enable and restart AN */
3204 bnx2x_restart_autoneg(bp);
3208 static void bnx2x_link_int_enable(struct bnx2x *bp)
3210 int port = bp->port;
3214 /* setting the status to report on link up
3215 for either XGXS or SerDes */
3216 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
3217 (NIG_STATUS_XGXS0_LINK10G |
3218 NIG_STATUS_XGXS0_LINK_STATUS |
3219 NIG_STATUS_SERDES0_LINK_STATUS));
3221 if (bp->phy_flags & PHY_XGXS_FLAG) {
3222 mask = (NIG_MASK_XGXS0_LINK10G |
3223 NIG_MASK_XGXS0_LINK_STATUS);
3224 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
3225 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3226 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3227 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3229 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
3230 mask |= NIG_MASK_MI_INT;
3231 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3234 } else { /* SerDes */
3235 mask = NIG_MASK_SERDES0_LINK_STATUS;
3236 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
3237 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3238 if ((ext_phy_type !=
3239 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3241 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3242 mask |= NIG_MASK_MI_INT;
3243 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3247 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3249 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3250 " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3251 " 10G %x, XGXS_LINK %x\n", port,
3252 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
3253 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3254 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3255 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3256 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3257 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3258 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3262 static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
3264 u32 ext_phy_addr = ((bp->ext_phy_config &
3265 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3266 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3267 u32 fw_ver1, fw_ver2;
3269 /* Need to wait 200ms after reset */
3271 /* Boot port from external ROM
3272 * Set ser_boot_ctl bit in the MISC_CTRL1 register
3274 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3275 EXT_PHY_KR_PMA_PMD_DEVAD,
3276 EXT_PHY_KR_MISC_CTRL1, 0x0001);
3278 /* Reset internal microprocessor */
3279 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3280 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3281 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3282 /* set micro reset = 0 */
3283 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3284 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3285 EXT_PHY_KR_ROM_MICRO_RESET);
3286 /* Reset internal microprocessor */
3287 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3288 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3289 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3290 /* wait for 100ms for code download via SPI port */
3293 /* Clear ser_boot_ctl bit */
3294 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3295 EXT_PHY_KR_PMA_PMD_DEVAD,
3296 EXT_PHY_KR_MISC_CTRL1, 0x0000);
3300 /* Print the PHY FW version */
3301 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3302 EXT_PHY_KR_PMA_PMD_DEVAD,
3304 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3305 EXT_PHY_KR_PMA_PMD_DEVAD,
3308 "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
3311 static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3313 u32 ext_phy_addr = ((bp->ext_phy_config &
3314 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3315 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3317 /* Force KR or KX */
3318 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3319 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3321 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3322 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3324 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3325 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3327 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3328 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3332 static void bnx2x_ext_phy_init(struct bnx2x *bp)
3340 if (bp->phy_flags & PHY_XGXS_FLAG) {
3341 ext_phy_addr = ((bp->ext_phy_config &
3342 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3343 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3345 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3346 /* Make sure that the soft reset is off (except for the 8072:
3347 * due to the lock, it will be done inside the specific
3350 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3351 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3352 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3353 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
3354 /* Wait for soft reset to get cleared up to 1 sec */
3355 for (cnt = 0; cnt < 1000; cnt++) {
3356 bnx2x_mdio45_read(bp, ext_phy_addr,
3357 EXT_PHY_OPT_PMA_PMD_DEVAD,
3358 EXT_PHY_OPT_CNTL, &ctrl);
3359 if (!(ctrl & (1<<15)))
3364 "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3367 switch (ext_phy_type) {
3368 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3369 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3372 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3373 DP(NETIF_MSG_LINK, "XGXS 8705\n");
3375 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3376 EXT_PHY_OPT_PMA_PMD_DEVAD,
3377 EXT_PHY_OPT_PMD_MISC_CNTL,
3379 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3380 EXT_PHY_OPT_PMA_PMD_DEVAD,
3381 EXT_PHY_OPT_PHY_IDENTIFIER,
3383 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3384 EXT_PHY_OPT_PMA_PMD_DEVAD,
3385 EXT_PHY_OPT_CMU_PLL_BYPASS,
3387 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3388 EXT_PHY_OPT_WIS_DEVAD,
3389 EXT_PHY_OPT_LASI_CNTL, 0x1);
3392 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3393 DP(NETIF_MSG_LINK, "XGXS 8706\n");
3395 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3397 if (bp->req_line_speed == SPEED_10000) {
3399 "XGXS 8706 force 10Gbps\n");
3400 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3401 EXT_PHY_OPT_PMA_PMD_DEVAD,
3402 EXT_PHY_OPT_PMD_DIGITAL_CNT,
3407 "XGXS 8706 force 1Gbps\n");
3409 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3410 EXT_PHY_OPT_PMA_PMD_DEVAD,
3414 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3415 EXT_PHY_OPT_PMA_PMD_DEVAD,
3421 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3422 EXT_PHY_OPT_PMA_PMD_DEVAD,
3423 EXT_PHY_OPT_LASI_CNTL,
3427 /* Allow CL37 through CL73 */
3428 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3429 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3430 EXT_PHY_AUTO_NEG_DEVAD,
3431 EXT_PHY_OPT_AN_CL37_CL73,
3434 /* Enable Full-Duplex advertisement on CL37 */
3435 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3436 EXT_PHY_AUTO_NEG_DEVAD,
3437 EXT_PHY_OPT_AN_CL37_FD,
3439 /* Enable CL37 AN */
3440 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3441 EXT_PHY_AUTO_NEG_DEVAD,
3442 EXT_PHY_OPT_AN_CL37_AN,
3444 /* Advertise 10G/1G support */
3445 if (bp->advertising &
3446 ADVERTISED_1000baseT_Full)
3448 if (bp->advertising &
3449 ADVERTISED_10000baseT_Full)
3452 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3453 EXT_PHY_AUTO_NEG_DEVAD,
3454 EXT_PHY_OPT_AN_ADV, val);
3456 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3457 EXT_PHY_OPT_PMA_PMD_DEVAD,
3458 EXT_PHY_OPT_LASI_CNTL,
3461 /* Enable clause 73 AN */
3462 bnx2x_mdio45_write(bp, ext_phy_addr,
3463 EXT_PHY_AUTO_NEG_DEVAD,
3469 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3470 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3471 /* Wait for soft reset to get cleared up to 1 sec */
3472 for (cnt = 0; cnt < 1000; cnt++) {
3473 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3475 EXT_PHY_OPT_PMA_PMD_DEVAD,
3476 EXT_PHY_OPT_CNTL, &ctrl);
3477 if (!(ctrl & (1<<15)))
3482 "8072 control reg 0x%x (after %d ms)\n",
3485 bnx2x_bcm8072_external_rom_boot(bp);
3486 DP(NETIF_MSG_LINK, "Finished loading 8072 KR ROM\n");
3489 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3491 EXT_PHY_KR_PMA_PMD_DEVAD,
3493 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3495 EXT_PHY_KR_PMA_PMD_DEVAD,
3496 EXT_PHY_KR_LASI_CNTL, 0x0004);
3498 /* If this is forced speed, set to KR or KX
3499 * (all others are not supported)
3501 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3502 if (bp->req_line_speed == SPEED_10000) {
3503 bnx2x_bcm8072_force_10G(bp);
3505 "Forced speed 10G on 8072\n");
3508 HW_LOCK_RESOURCE_8072_MDIO);
3514 /* Advertise 10G/1G support */
3515 if (bp->advertising &
3516 ADVERTISED_1000baseT_Full)
3518 if (bp->advertising &
3519 ADVERTISED_10000baseT_Full)
3522 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3524 EXT_PHY_KR_AUTO_NEG_DEVAD,
3526 /* Add support for CL37 (passive mode) I */
3527 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3529 EXT_PHY_KR_AUTO_NEG_DEVAD,
3531 /* Add support for CL37 (passive mode) II */
3532 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3534 EXT_PHY_KR_AUTO_NEG_DEVAD,
3536 /* Add support for CL37 (passive mode) III */
3537 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3539 EXT_PHY_KR_AUTO_NEG_DEVAD,
3541 /* Restart autoneg */
3543 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3545 EXT_PHY_KR_AUTO_NEG_DEVAD,
3546 EXT_PHY_KR_CTRL, 0x1200);
3547 DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3548 "1G %ssupported 10G %ssupported\n",
3549 (val & (1<<5)) ? "" : "not ",
3550 (val & (1<<7)) ? "" : "not ");
3553 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3556 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3558 "Setting the SFX7101 LASI indication\n");
3559 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3560 EXT_PHY_OPT_PMA_PMD_DEVAD,
3561 EXT_PHY_OPT_LASI_CNTL, 0x1);
3563 "Setting the SFX7101 LED to blink on traffic\n");
3564 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3565 EXT_PHY_OPT_PMA_PMD_DEVAD,
3568 /* read-modify-write pause advertising */
3569 bnx2x_mdio45_read(bp, ext_phy_addr,
3570 EXT_PHY_KR_AUTO_NEG_DEVAD,
3571 EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3572 val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3573 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3574 if (bp->advertising & ADVERTISED_Pause)
3575 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3577 if (bp->advertising & ADVERTISED_Asym_Pause) {
3579 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
3581 DP(NETIF_MSG_LINK, "SFX7101 AN advertise 0x%x\n", val);
3582 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3583 EXT_PHY_KR_AUTO_NEG_DEVAD,
3584 EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3585 /* Restart autoneg */
3586 bnx2x_mdio45_read(bp, ext_phy_addr,
3587 EXT_PHY_KR_AUTO_NEG_DEVAD,
3588 EXT_PHY_KR_CTRL, &val);
3590 bnx2x_mdio45_write(bp, ext_phy_addr,
3591 EXT_PHY_KR_AUTO_NEG_DEVAD,
3592 EXT_PHY_KR_CTRL, val);
3596 BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
3597 bp->ext_phy_config);
3601 } else { /* SerDes */
3602 /* ext_phy_addr = ((bp->ext_phy_config &
3603 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
3604 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
3606 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3607 switch (ext_phy_type) {
3608 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3609 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3612 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3613 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3617 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3618 bp->ext_phy_config);
3624 static void bnx2x_ext_phy_reset(struct bnx2x *bp)
3627 u32 ext_phy_addr = ((bp->ext_phy_config &
3628 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3629 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3630 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3632 /* The PHY reset is controlled by GPIO 1
3633 * Give it 1ms of reset pulse
3635 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3636 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3637 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3638 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3640 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3641 MISC_REGISTERS_GPIO_OUTPUT_HIGH);
3644 if (bp->phy_flags & PHY_XGXS_FLAG) {
3645 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3646 switch (ext_phy_type) {
3647 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3648 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3651 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3652 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3653 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
3654 bnx2x_mdio45_write(bp, ext_phy_addr,
3655 EXT_PHY_OPT_PMA_PMD_DEVAD,
3656 EXT_PHY_OPT_CNTL, 0xa040);
3659 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3660 DP(NETIF_MSG_LINK, "XGXS 8072\n");
3661 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3662 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3664 EXT_PHY_KR_PMA_PMD_DEVAD,
3666 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3669 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3670 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
3674 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
3675 bp->ext_phy_config);
3679 } else { /* SerDes */
3680 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3681 switch (ext_phy_type) {
3682 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3683 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3686 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3687 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3691 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3692 bp->ext_phy_config);
3698 static void bnx2x_link_initialize(struct bnx2x *bp)
3700 int port = bp->port;
3702 /* disable attentions */
3703 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3704 (NIG_MASK_XGXS0_LINK_STATUS |
3705 NIG_MASK_XGXS0_LINK10G |
3706 NIG_MASK_SERDES0_LINK_STATUS |
3709 /* Activate the external PHY */
3710 bnx2x_ext_phy_reset(bp);
3712 bnx2x_set_aer_mmd(bp);
3714 if (bp->phy_flags & PHY_XGXS_FLAG)
3715 bnx2x_set_master_ln(bp);
3717 /* reset the SerDes and wait for the reset bit to return low */
3718 bnx2x_reset_unicore(bp);
3720 bnx2x_set_aer_mmd(bp);
3722 /* setting the masterLn_def again after the reset */
3723 if (bp->phy_flags & PHY_XGXS_FLAG) {
3724 bnx2x_set_master_ln(bp);
3725 bnx2x_set_swap_lanes(bp);
3728 /* Set Parallel Detect */
3729 if (bp->req_autoneg & AUTONEG_SPEED)
3730 bnx2x_set_parallel_detection(bp);
3732 if (bp->phy_flags & PHY_XGXS_FLAG) {
3733 if (bp->req_line_speed &&
3734 bp->req_line_speed < SPEED_1000) {
3735 bp->phy_flags |= PHY_SGMII_FLAG;
3737 bp->phy_flags &= ~PHY_SGMII_FLAG;
3741 if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
3744 rx_eq = ((bp->serdes_config &
3745 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3746 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3748 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
3749 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
3750 bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
3751 MDIO_SET_REG_BANK(bp, bank);
3752 bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
3754 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
3755 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
3758 /* forced speed requested? */
3759 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3760 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3762 /* disable autoneg */
3763 bnx2x_set_autoneg(bp);
3765 /* program speed and duplex */
3766 bnx2x_program_serdes(bp);
3768 } else { /* AN_mode */
3769 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3772 bnx2x_set_brcm_cl37_advertisment(bp);
3774 /* program duplex & pause advertisement (for aneg) */
3775 bnx2x_set_ieee_aneg_advertisment(bp);
3777 /* enable autoneg */
3778 bnx2x_set_autoneg(bp);
3780 /* enable and restart AN */
3781 bnx2x_restart_autoneg(bp);
3784 } else { /* SGMII mode */
3785 DP(NETIF_MSG_LINK, "SGMII\n");
3787 bnx2x_initialize_sgmii_process(bp);
3790 /* init ext phy and enable link state int */
3791 bnx2x_ext_phy_init(bp);
3793 /* enable the interrupt */
3794 bnx2x_link_int_enable(bp);
3797 static void bnx2x_phy_deassert(struct bnx2x *bp)
3799 int port = bp->port;
3802 if (bp->phy_flags & PHY_XGXS_FLAG) {
3803 DP(NETIF_MSG_LINK, "XGXS\n");
3804 val = XGXS_RESET_BITS;
3806 } else { /* SerDes */
3807 DP(NETIF_MSG_LINK, "SerDes\n");
3808 val = SERDES_RESET_BITS;
3811 val = val << (port*16);
3813 /* reset and unreset the SerDes/XGXS */
3814 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3816 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3819 static int bnx2x_phy_init(struct bnx2x *bp)
3821 DP(NETIF_MSG_LINK, "started\n");
3822 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3823 bp->phy_flags |= PHY_EMAC_FLAG;
3825 bp->line_speed = SPEED_10000;
3826 bp->duplex = DUPLEX_FULL;
3827 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3828 bnx2x_emac_enable(bp);
3829 bnx2x_link_report(bp);
3832 } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3833 bp->phy_flags |= PHY_BMAC_FLAG;
3835 bp->line_speed = SPEED_10000;
3836 bp->duplex = DUPLEX_FULL;
3837 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3838 bnx2x_bmac_enable(bp, 0);
3839 bnx2x_link_report(bp);
3843 bnx2x_phy_deassert(bp);
3844 bnx2x_link_initialize(bp);
3850 static void bnx2x_link_reset(struct bnx2x *bp)
3852 int port = bp->port;
3853 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3855 /* update shared memory */
3856 bp->link_status = 0;
3857 bnx2x_update_mng(bp);
3859 /* disable attentions */
3860 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3861 (NIG_MASK_XGXS0_LINK_STATUS |
3862 NIG_MASK_XGXS0_LINK10G |
3863 NIG_MASK_SERDES0_LINK_STATUS |
3866 /* activate nig drain */
3867 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3869 /* disable nig egress interface */
3870 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3871 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3873 /* Stop BigMac rx */
3874 bnx2x_bmac_rx_disable(bp);
3877 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3881 /* The PHY reset is controlled by GPIO 1
3882 * Hold it as output low
3884 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3885 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3886 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3887 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3888 DP(NETIF_MSG_LINK, "reset external PHY\n");
3891 /* reset the SerDes/XGXS */
3892 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3893 (0x1ff << (port*16)));
3896 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3897 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3899 /* disable nig ingress interface */
3900 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
3901 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
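/* Aside: the reset writes above address a single port's block by shifting
 * the port-0 reset bit left by the port index; a set bit in RESET_REG_2
 * means the block is out of reset. The matching check (compare
 * bnx2x_bmac_rx_disable()), as a hypothetical unused helper:
 */
static inline int bnx2x_example_bmac_out_of_reset(struct bnx2x *bp, int port)
{
	return !!(REG_RD(bp, MISC_REG_RESET_REG_2) &
		  (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
}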
3907 #ifdef BNX2X_XGXS_LB
3908 static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3910 int port = bp->port;
3915 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3917 /* change the uni_phy_addr in the nig */
3918 REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
3920 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3922 /* change the aer mmd */
3923 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3924 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3926 /* config combo IEEE0 control reg for loopback */
3927 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3928 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3931 /* set aer mmd back */
3932 bnx2x_set_aer_mmd(bp);
3935 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3940 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3942 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3943 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3945 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3947 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3952 /* end of PHY/MAC */
3957 * General service functions
3960 /* the slow path queue is odd since completions arrive on the fastpath ring */
3961 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3962 u32 data_hi, u32 data_lo, int common)
3964 int port = bp->port;
3967 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
3968 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3969 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3970 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3972 #ifdef BNX2X_STOP_ON_ERROR
3973 if (unlikely(bp->panic))
3977 spin_lock(&bp->spq_lock);
3979 if (!bp->spq_left) {
3980 BNX2X_ERR("BUG! SPQ ring full!\n");
3981 spin_unlock(&bp->spq_lock);
3986 /* CID needs port number to be encoded in it */
3987 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3988 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3990 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3992 bp->spq_prod_bd->hdr.type |=
3993 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3995 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3996 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
4000 if (bp->spq_prod_bd == bp->spq_last_bd) {
4001 bp->spq_prod_bd = bp->spq;
4002 bp->spq_prod_idx = 0;
4003 DP(NETIF_MSG_TIMER, "end of spq\n");
4010 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
4013 spin_unlock(&bp->spq_lock);
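/* Aside: the producer advance shown above follows the usual ring pattern,
 * walk the BD pointer and wrap both it and the index at the last BD. A
 * sketch under that assumption (the non-wrap branch is inferred, not taken
 * verbatim from this file); hypothetical helper, unused.
 */
static inline void bnx2x_example_spq_advance(struct bnx2x *bp)
{
	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;	/* wrap to ring start */
		bp->spq_prod_idx = 0;
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
}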
4017 /* acquire split MCP access lock register */
4018 static int bnx2x_lock_alr(struct bnx2x *bp)
4025 for (j = 0; j < i*10; j++) {
4027 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4028 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
4029 if (val & (1L << 31))
4035 if (!(val & (1L << 31))) {
4036 BNX2X_ERR("Cannot acquire nvram interface\n");
4044 /* Release split MCP access lock register */
4045 static void bnx2x_unlock_alr(struct bnx2x *bp)
4049 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4052 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
4054 struct host_def_status_block *def_sb = bp->def_status_blk;
4057 barrier(); /* status block is written to by the chip */
4059 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
4060 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4063 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
4064 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
4067 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
4068 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
4071 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
4072 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
4075 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
4076 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
4083 * slow path service functions
4086 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4088 int port = bp->port;
4089 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
4090 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4091 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4092 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4093 NIG_REG_MASK_INTERRUPT_PORT0;
4095 if (~bp->aeu_mask & (asserted & 0xff))
4096 BNX2X_ERR("IGU ERROR\n");
4097 if (bp->attn_state & asserted)
4098 BNX2X_ERR("IGU ERROR\n");
4100 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4101 bp->aeu_mask, asserted);
4102 bp->aeu_mask &= ~(asserted & 0xff);
4103 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
4105 REG_WR(bp, aeu_addr, bp->aeu_mask);
4107 bp->attn_state |= asserted;
4109 if (asserted & ATTN_HARD_WIRED_MASK) {
4110 if (asserted & ATTN_NIG_FOR_FUNC) {
4112 /* save nig interrupt mask */
4113 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
4114 REG_WR(bp, nig_int_mask_addr, 0);
4116 bnx2x_link_update(bp);
4118 /* handle unicore attn? */
4120 if (asserted & ATTN_SW_TIMER_4_FUNC)
4121 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4123 if (asserted & GPIO_2_FUNC)
4124 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4126 if (asserted & GPIO_3_FUNC)
4127 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4129 if (asserted & GPIO_4_FUNC)
4130 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4133 if (asserted & ATTN_GENERAL_ATTN_1) {
4134 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4135 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4137 if (asserted & ATTN_GENERAL_ATTN_2) {
4138 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4139 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4141 if (asserted & ATTN_GENERAL_ATTN_3) {
4142 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4143 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4146 if (asserted & ATTN_GENERAL_ATTN_4) {
4147 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4148 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4150 if (asserted & ATTN_GENERAL_ATTN_5) {
4151 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4152 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4154 if (asserted & ATTN_GENERAL_ATTN_6) {
4155 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4156 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4160 } /* if hardwired */
4162 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
4163 asserted, BAR_IGU_INTMEM + igu_addr);
4164 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
4166 /* now set back the mask */
4167 if (asserted & ATTN_NIG_FOR_FUNC)
4168 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
4171 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4173 int port = bp->port;
4177 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4179 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4180 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4182 val = REG_RD(bp, reg_offset);
4183 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4184 REG_WR(bp, reg_offset, val);
4186 BNX2X_ERR("SPIO5 hw attention\n");
4188 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
4189 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
4190 /* Fan failure attention */
4192 /* The PHY reset is controlled by GPIO 1 */
4193 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
4194 MISC_REGISTERS_GPIO_OUTPUT_LOW);
4195 /* Low power mode is controlled by GPIO 2 */
4196 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4197 MISC_REGISTERS_GPIO_OUTPUT_LOW);
4198 /* mark the failure */
4199 bp->ext_phy_config &=
4200 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4201 bp->ext_phy_config |=
4202 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4204 dev_info.port_hw_config[port].
4205 external_phy_config,
4206 bp->ext_phy_config);
4207 /* log the failure */
4208 printk(KERN_ERR PFX "Fan Failure on Network"
4209 " Controller %s has caused the driver to"
4210 " shutdown the card to prevent permanent"
4211 " damage. Please contact Dell Support for"
4212 " assistance\n", bp->dev->name);
4221 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4225 if (attn & BNX2X_DOORQ_ASSERT) {
4227 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4228 BNX2X_ERR("DB hw attention 0x%x\n", val);
4229 /* DORQ discard attention */
4231 BNX2X_ERR("FATAL error from DORQ\n");
4235 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4239 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4241 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4242 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4243 /* CFC error attention */
4245 BNX2X_ERR("FATAL error from CFC\n");
4248 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4250 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4251 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4252 /* RQ_USDMDP_FIFO_OVERFLOW */
4254 BNX2X_ERR("FATAL error from PXP\n");
4258 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4260 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4262 if (attn & BNX2X_MC_ASSERT_BITS) {
4264 BNX2X_ERR("MC assert!\n");
4265 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4266 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4267 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4268 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4271 } else if (attn & BNX2X_MCP_ASSERT) {
4273 BNX2X_ERR("MCP assert!\n");
4274 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4275 bnx2x_mc_assert(bp);
4278 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4281 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4283 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4284 BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
4288 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4290 struct attn_route attn;
4291 struct attn_route group_mask;
4292 int port = bp->port;
4297 /* need to take HW lock because MCP or the other port might also
4298 try to handle this event */
4301 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4302 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4303 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4304 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4305 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
4307 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4308 if (deasserted & (1 << index)) {
4309 group_mask = bp->attn_group[index];
4311 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
4312 (unsigned long long)group_mask.sig[0]);
4314 bnx2x_attn_int_deasserted3(bp,
4315 attn.sig[3] & group_mask.sig[3]);
4316 bnx2x_attn_int_deasserted1(bp,
4317 attn.sig[1] & group_mask.sig[1]);
4318 bnx2x_attn_int_deasserted2(bp,
4319 attn.sig[2] & group_mask.sig[2]);
4320 bnx2x_attn_int_deasserted0(bp,
4321 attn.sig[0] & group_mask.sig[0]);
4323 if ((attn.sig[0] & group_mask.sig[0] &
4324 HW_INTERRUT_ASSERT_SET_0) ||
4325 (attn.sig[1] & group_mask.sig[1] &
4326 HW_INTERRUT_ASSERT_SET_1) ||
4327 (attn.sig[2] & group_mask.sig[2] &
4328 HW_INTERRUT_ASSERT_SET_2))
4329 BNX2X_ERR("FATAL HW block attention"
4330 " set0 0x%x set1 0x%x"
4332 (attn.sig[0] & group_mask.sig[0] &
4333 HW_INTERRUT_ASSERT_SET_0),
4334 (attn.sig[1] & group_mask.sig[1] &
4335 HW_INTERRUT_ASSERT_SET_1),
4336 (attn.sig[2] & group_mask.sig[2] &
4337 HW_INTERRUT_ASSERT_SET_2));
4339 if ((attn.sig[0] & group_mask.sig[0] &
4340 HW_PRTY_ASSERT_SET_0) ||
4341 (attn.sig[1] & group_mask.sig[1] &
4342 HW_PRTY_ASSERT_SET_1) ||
4343 (attn.sig[2] & group_mask.sig[2] &
4344 HW_PRTY_ASSERT_SET_2))
4345 BNX2X_ERR("FATAL HW block parity attention\n");
4349 bnx2x_unlock_alr(bp);
4351 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
4354 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
4355 val, BAR_IGU_INTMEM + reg_addr); */
4356 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
4358 if (bp->aeu_mask & (deasserted & 0xff))
4359 BNX2X_ERR("IGU BUG\n");
4360 if (~bp->attn_state & deasserted)
4361 BNX2X_ERR("IGU BUG\n");
4363 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4364 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4366 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
4367 bp->aeu_mask |= (deasserted & 0xff);
4369 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
4370 REG_WR(bp, reg_addr, bp->aeu_mask);
4372 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4373 bp->attn_state &= ~deasserted;
4374 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4377 static void bnx2x_attn_int(struct bnx2x *bp)
4379 /* read local copy of bits */
4380 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
4381 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
4382 u32 attn_state = bp->attn_state;
4384 /* look for changed bits */
4385 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4386 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4389 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4390 attn_bits, attn_ack, asserted, deasserted);
4392 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4393 BNX2X_ERR("bad attention state\n");
4395 /* handle bits that were raised */
4397 bnx2x_attn_int_asserted(bp, asserted);
4400 bnx2x_attn_int_deasserted(bp, deasserted);
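/* Aside: the edge detection above in isolation. A bit is newly asserted if
 * it is raised in attn_bits but neither acknowledged nor already tracked in
 * attn_state; newly deasserted is the mirror image. Hypothetical helper,
 * unused.
 */
static inline void bnx2x_example_attn_edges(u32 bits, u32 ack, u32 state,
					    u32 *asserted, u32 *deasserted)
{
	*asserted = bits & ~ack & ~state;
	*deasserted = ~bits & ack & state;
}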
4403 static void bnx2x_sp_task(struct work_struct *work)
4405 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
4408 /* Return here if interrupt is disabled */
4409 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4410 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
4414 status = bnx2x_update_dsb_idx(bp);
4416 BNX2X_ERR("spurious slowpath interrupt!\n");
4418 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
4424 /* CStorm events: query_stats, port delete ramrod */
4426 bp->stat_pending = 0;
4428 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
4430 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
4432 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
4434 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
4436 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
4441 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
4443 struct net_device *dev = dev_instance;
4444 struct bnx2x *bp = netdev_priv(dev);
4446 /* Return here if interrupt is disabled */
4447 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4448 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
4452 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
4454 #ifdef BNX2X_STOP_ON_ERROR
4455 if (unlikely(bp->panic))
4459 schedule_work(&bp->sp_task);
4464 /* end of slow path */
4468 /****************************************************************************
4470 ****************************************************************************/
4472 #define UPDATE_STAT(s, t) \
4474 estats->t += new->s - old->s; \
4478 /* sum[hi:lo] += add[hi:lo] */
4479 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
4482 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
4485 /* difference = minuend - subtrahend */
4486 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
4488 if (m_lo < s_lo) { /* underflow */ \
4489 d_hi = m_hi - s_hi; \
4490 if (d_hi > 0) { /* we can borrow 1 */ \
4492 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
4493 } else { /* m_hi <= s_hi */ \
4497 } else { /* m_lo >= s_lo */ \
4498 if (m_hi < s_hi) { \
4501 } else { /* m_hi >= s_hi */ \
4502 d_hi = m_hi - s_hi; \
4503 d_lo = m_lo - s_lo; \
4508 /* minuend -= subtrahend */
4509 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
4511 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
4514 #define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
4516 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
4517 diff.lo, new->s_lo, old->s_lo); \
4518 old->s_hi = new->s_hi; \
4519 old->s_lo = new->s_lo; \
4520 ADD_64(estats->t_hi, diff.hi, \
4521 estats->t_lo, diff.lo); \
4524 /* sum[hi:lo] += add */
4525 #define ADD_EXTEND_64(s_hi, s_lo, a) \
4528 s_hi += (s_lo < a) ? 1 : 0; \
4531 #define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
4533 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
4536 #define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
4538 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
4539 old_tclient->s = le32_to_cpu(tclient->s); \
4540 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
4544 * General service functions
4547 static inline long bnx2x_hilo(u32 *hiref)
4549 u32 lo = *(hiref + 1);
4550 #if (BITS_PER_LONG == 64)
4553 return HILO_U64(hi, lo);
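/* Aside: the statistics macros above all build on the same split-64-bit
 * arithmetic, since the hardware exposes counters as hi/lo u32 pairs. The
 * carry out of the low word is detected through unsigned wrap-around, as in
 * this sketch of ADD_64 in function form (hypothetical, unused):
 */
static inline void bnx2x_example_add64(u32 *hi, u32 *lo, u32 a_hi, u32 a_lo)
{
	*lo += a_lo;
	/* unsigned overflow wraps, so (*lo < a_lo) flags the carry */
	*hi += a_hi + ((*lo < a_lo) ? 1 : 0);
}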
4560 * Init service functions
4563 static void bnx2x_init_mac_stats(struct bnx2x *bp)
4565 struct dmae_command *dmae;
4566 int port = bp->port;
4567 int loader_idx = port * 8;
4571 bp->executer_idx = 0;
4574 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4575 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4577 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4579 DMAE_CMD_ENDIANITY_DW_SWAP |
4581 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4584 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
4586 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4587 dmae->opcode = opcode;
4588 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
4590 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
4592 dmae->dst_addr_lo = bp->fw_mb >> 2;
4593 dmae->dst_addr_hi = 0;
4594 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
4597 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4598 dmae->comp_addr_hi = 0;
4601 dmae->comp_addr_lo = 0;
4602 dmae->comp_addr_hi = 0;
4608 /* no need to collect statistics while the link is down */
4612 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4613 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4614 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4616 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4618 DMAE_CMD_ENDIANITY_DW_SWAP |
4620 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4622 if (bp->phy_flags & PHY_BMAC_FLAG) {
4624 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4625 NIG_REG_INGRESS_BMAC0_MEM);
4627 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4628 BIGMAC_REGISTER_TX_STAT_GTBYT */
4629 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4630 dmae->opcode = opcode;
4631 dmae->src_addr_lo = (mac_addr +
4632 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4633 dmae->src_addr_hi = 0;
4634 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4635 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4636 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4637 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4638 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4639 dmae->comp_addr_hi = 0;
4642 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4643 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4644 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4645 dmae->opcode = opcode;
4646 dmae->src_addr_lo = (mac_addr +
4647 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4648 dmae->src_addr_hi = 0;
4649 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4650 offsetof(struct bmac_stats, rx_gr64));
4651 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4652 offsetof(struct bmac_stats, rx_gr64));
4653 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4654 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4655 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4656 dmae->comp_addr_hi = 0;
4659 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4661 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4663 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4664 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4665 dmae->opcode = opcode;
4666 dmae->src_addr_lo = (mac_addr +
4667 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4668 dmae->src_addr_hi = 0;
4669 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4670 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4671 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4672 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4673 dmae->comp_addr_hi = 0;
4676 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4677 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4678 dmae->opcode = opcode;
4679 dmae->src_addr_lo = (mac_addr +
4680 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4681 dmae->src_addr_hi = 0;
4682 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4683 offsetof(struct emac_stats,
4684 rx_falsecarriererrors));
4685 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4686 offsetof(struct emac_stats,
4687 rx_falsecarriererrors));
4689 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4690 dmae->comp_addr_hi = 0;
4693 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4694 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4695 dmae->opcode = opcode;
4696 dmae->src_addr_lo = (mac_addr +
4697 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4698 dmae->src_addr_hi = 0;
4699 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4700 offsetof(struct emac_stats,
4702 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4703 offsetof(struct emac_stats,
4705 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4706 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4707 dmae->comp_addr_hi = 0;
4712 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4713 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4714 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4715 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4717 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4719 DMAE_CMD_ENDIANITY_DW_SWAP |
4721 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4722 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4723 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4724 dmae->src_addr_hi = 0;
4725 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
4726 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
4727 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
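/* len stops short of the last two dwords of struct nig_stats
 * (the 'done' marker and its pad), presumably so the data copy
 * cannot clobber the 'done' dword used as the completion address
 * below and later polled by the driver.
 */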
4728 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
4729 offsetof(struct nig_stats, done));
4730 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
4731 offsetof(struct nig_stats, done));
4732 dmae->comp_val = 0xffffffff;
4735 static void bnx2x_init_stats(struct bnx2x *bp)
4737 int port = bp->port;
4739 bp->stats_state = STATS_STATE_DISABLE;
4740 bp->executer_idx = 0;
4742 bp->old_brb_discard = REG_RD(bp,
4743 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4745 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
4746 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
4747 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4749 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
4750 REG_WR(bp, BAR_XSTRORM_INTMEM +
4751 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4753 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
4754 REG_WR(bp, BAR_TSTRORM_INTMEM +
4755 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4757 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
4758 REG_WR(bp, BAR_CSTRORM_INTMEM +
4759 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4761 REG_WR(bp, BAR_XSTRORM_INTMEM +
4762 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4763 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4764 REG_WR(bp, BAR_XSTRORM_INTMEM +
4765 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4766 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4768 REG_WR(bp, BAR_TSTRORM_INTMEM +
4769 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4770 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4771 REG_WR(bp, BAR_TSTRORM_INTMEM +
4772 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4773 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4776 static void bnx2x_stop_stats(struct bnx2x *bp)
4779 if (bp->stats_state != STATS_STATE_DISABLE) {
4782 bp->stats_state = STATS_STATE_STOP;
4783 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
4785 while (bp->stats_state != STATS_STATE_DISABLE) {
4787 BNX2X_ERR("timeout waiting for stats stop\n");
4794 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
4798 * Statistics service functions
4801 static void bnx2x_update_bmac_stats(struct bnx2x *bp)
4805 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
4806 struct bmac_stats *old = &bp->old_bmac;
4807 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4812 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
4813 tx_gtbyt.lo, total_bytes_transmitted_lo);
4815 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
4816 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
4817 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4819 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
4820 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
4821 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4823 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
4824 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
4825 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
4826 estats->total_unicast_packets_transmitted_lo, sum.lo);
4828 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
4829 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
4830 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
4831 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
4832 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
4833 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
4834 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
4835 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
4836 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
4837 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
4838 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
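/* Note: the BMAC splits large frames over several TX size buckets
 * (gt2047/gt4095/gt9216/gt16383); they are all accumulated into
 * the single 1523-9022 byte counter above.
 */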
4840 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
4841 UPDATE_STAT(rx_grund.lo, runt_packets_received);
4842 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
4843 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
4844 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
4845 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
4846 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
4847 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
4849 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
4850 rx_grerb.lo, stat_IfHCInBadOctets_lo);
4851 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
4852 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
4853 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
4854 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
4855 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
4858 static void bnx2x_update_emac_stats(struct bnx2x *bp)
4860 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
4861 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4863 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
4864 total_bytes_transmitted_lo);
4865 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
4866 total_unicast_packets_transmitted_hi,
4867 total_unicast_packets_transmitted_lo);
4868 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
4869 total_multicast_packets_transmitted_hi,
4870 total_multicast_packets_transmitted_lo);
4871 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
4872 total_broadcast_packets_transmitted_hi,
4873 total_broadcast_packets_transmitted_lo);
4875 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
4876 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
4877 estats->single_collision_transmit_frames +=
4878 new->tx_dot3statssinglecollisionframes;
4879 estats->multiple_collision_transmit_frames +=
4880 new->tx_dot3statsmultiplecollisionframes;
4881 estats->late_collision_frames += new->tx_dot3statslatecollisions;
4882 estats->excessive_collision_frames +=
4883 new->tx_dot3statsexcessivecollisions;
4884 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4885 estats->frames_transmitted_65_127_bytes +=
4886 new->tx_etherstatspkts65octetsto127octets;
4887 estats->frames_transmitted_128_255_bytes +=
4888 new->tx_etherstatspkts128octetsto255octets;
4889 estats->frames_transmitted_256_511_bytes +=
4890 new->tx_etherstatspkts256octetsto511octets;
4891 estats->frames_transmitted_512_1023_bytes +=
4892 new->tx_etherstatspkts512octetsto1023octets;
4893 estats->frames_transmitted_1024_1522_bytes +=
4894 new->tx_etherstatspkts1024octetsto1522octet;
4895 estats->frames_transmitted_1523_9022_bytes +=
4896 new->tx_etherstatspktsover1522octets;
4898 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4899 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4900 estats->false_carrier_detections += new->rx_falsecarriererrors;
4901 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4902 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4903 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4904 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4905 estats->control_frames_received += new->rx_maccontrolframesreceived;
4906 estats->error_runt_packets_received += new->rx_etherstatsfragments;
4907 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4909 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4910 stat_IfHCInBadOctets_lo);
4911 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4912 stat_IfHCOutBadOctets_lo);
4913 estats->stat_Dot3statsInternalMacTransmitErrors +=
4914 new->tx_dot3statsinternalmactransmiterrors;
4915 estats->stat_Dot3StatsCarrierSenseErrors +=
4916 new->rx_dot3statscarriersenseerrors;
4917 estats->stat_Dot3StatsDeferredTransmissions +=
4918 new->tx_dot3statsdeferredtransmissions;
4919 estats->stat_FlowControlDone += new->tx_flowcontroldone;
4920 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4923 static int bnx2x_update_storm_stats(struct bnx2x *bp)
4925 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4926 struct tstorm_common_stats *tstats = &stats->tstorm_common;
4927 struct tstorm_per_client_stats *tclient =
4928 &tstats->client_statistics[0];
4929 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4930 struct xstorm_common_stats *xstats = &stats->xstorm_common;
4931 struct nig_stats *nstats = bnx2x_sp(bp, nig);
4932 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4935 /* are DMAE stats valid? */
4936 if (nstats->done != 0xffffffff) {
4937 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4941 /* are storm stats valid? */
4942 if (tstats->done.hi != 0xffffffff) {
4943 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4946 if (xstats->done.hi != 0xffffffff) {
4947 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4951 estats->total_bytes_received_hi =
4952 estats->valid_bytes_received_hi =
4953 le32_to_cpu(tclient->total_rcv_bytes.hi);
4954 estats->total_bytes_received_lo =
4955 estats->valid_bytes_received_lo =
4956 le32_to_cpu(tclient->total_rcv_bytes.lo);
4957 ADD_64(estats->total_bytes_received_hi,
4958 le32_to_cpu(tclient->rcv_error_bytes.hi),
4959 estats->total_bytes_received_lo,
4960 le32_to_cpu(tclient->rcv_error_bytes.lo));
4962 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4963 total_unicast_packets_received_hi,
4964 total_unicast_packets_received_lo);
4965 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4966 total_multicast_packets_received_hi,
4967 total_multicast_packets_received_lo);
4968 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4969 total_broadcast_packets_received_hi,
4970 total_broadcast_packets_received_lo);
4972 estats->frames_received_64_bytes = MAC_STX_NA;
4973 estats->frames_received_65_127_bytes = MAC_STX_NA;
4974 estats->frames_received_128_255_bytes = MAC_STX_NA;
4975 estats->frames_received_256_511_bytes = MAC_STX_NA;
4976 estats->frames_received_512_1023_bytes = MAC_STX_NA;
4977 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4978 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4980 estats->x_total_sent_bytes_hi =
4981 le32_to_cpu(xstats->total_sent_bytes.hi);
4982 estats->x_total_sent_bytes_lo =
4983 le32_to_cpu(xstats->total_sent_bytes.lo);
4984 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4986 estats->t_rcv_unicast_bytes_hi =
4987 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4988 estats->t_rcv_unicast_bytes_lo =
4989 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4990 estats->t_rcv_broadcast_bytes_hi =
4991 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4992 estats->t_rcv_broadcast_bytes_lo =
4993 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4994 estats->t_rcv_multicast_bytes_hi =
4995 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4996 estats->t_rcv_multicast_bytes_lo =
4997 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4998 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
5000 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
5001 estats->packets_too_big_discard =
5002 le32_to_cpu(tclient->packets_too_big_discard);
5003 estats->jabber_packets_received = estats->packets_too_big_discard +
5004 estats->stat_Dot3statsFramesTooLong;
5005 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
5006 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
5007 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
5008 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
5009 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
5010 estats->brb_truncate_discard =
5011 le32_to_cpu(tstats->brb_truncate_discard);
5013 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
5014 bp->old_brb_discard = nstats->brb_discard;
5016 estats->brb_packet = nstats->brb_packet;
5017 estats->brb_truncate = nstats->brb_truncate;
5018 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
5019 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
5020 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
5021 estats->mng_discard = nstats->mng_discard;
5022 estats->mng_octet_inp = nstats->mng_octet_inp;
5023 estats->mng_octet_out = nstats->mng_octet_out;
5024 estats->mng_packet_inp = nstats->mng_packet_inp;
5025 estats->mng_packet_out = nstats->mng_packet_out;
5026 estats->pbf_octets = nstats->pbf_octets;
5027 estats->pbf_packet = nstats->pbf_packet;
5028 estats->safc_inp = nstats->safc_inp;
5030 xstats->done.hi = 0;
5031 tstats->done.hi = 0;
5037 static void bnx2x_update_net_stats(struct bnx2x *bp)
5039 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5040 struct net_device_stats *nstats = &bp->dev->stats;
5042 nstats->rx_packets =
5043 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
5044 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
5045 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
5047 nstats->tx_packets =
5048 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
5049 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
5050 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
5052 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
5054 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
5056 nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
5057 nstats->tx_dropped = 0;
5060 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
5062 nstats->collisions = estats->single_collision_transmit_frames +
5063 estats->multiple_collision_transmit_frames +
5064 estats->late_collision_frames +
5065 estats->excessive_collision_frames;
5067 nstats->rx_length_errors = estats->runt_packets_received +
5068 estats->jabber_packets_received;
5069 nstats->rx_over_errors = estats->brb_discard +
5070 estats->brb_truncate_discard;
5071 nstats->rx_crc_errors = estats->crc_receive_errors;
5072 nstats->rx_frame_errors = estats->alignment_errors;
5073 nstats->rx_fifo_errors = estats->no_buff_discard;
5074 nstats->rx_missed_errors = estats->xxoverflow_discard;
5076 nstats->rx_errors = nstats->rx_length_errors +
5077 nstats->rx_over_errors +
5078 nstats->rx_crc_errors +
5079 nstats->rx_frame_errors +
5080 nstats->rx_fifo_errors +
5081 nstats->rx_missed_errors;
5083 nstats->tx_aborted_errors = estats->late_collision_frames +
5084 estats->excessive_collision_frames;
5085 nstats->tx_carrier_errors = estats->false_carrier_detections;
5086 nstats->tx_fifo_errors = 0;
5087 nstats->tx_heartbeat_errors = 0;
5088 nstats->tx_window_errors = 0;
5090 nstats->tx_errors = nstats->tx_aborted_errors +
5091 nstats->tx_carrier_errors;
5093 estats->mac_stx_start = ++estats->mac_stx_end;
5096 static void bnx2x_update_stats(struct bnx2x *bp)
5100 if (!bnx2x_update_storm_stats(bp)) {
5102 if (bp->phy_flags & PHY_BMAC_FLAG) {
5103 bnx2x_update_bmac_stats(bp);
5105 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
5106 bnx2x_update_emac_stats(bp);
5108 } else { /* unreached */
5109 BNX2X_ERR("no MAC active\n");
5113 bnx2x_update_net_stats(bp);
5116 if (bp->msglevel & NETIF_MSG_TIMER) {
5117 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5118 struct net_device_stats *nstats = &bp->dev->stats;
5120 printk(KERN_DEBUG "%s:\n", bp->dev->name);
5121 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
5123 bnx2x_tx_avail(bp->fp),
5124 *bp->fp->tx_cons_sb, nstats->tx_packets);
5125 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
5127 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
5128 *bp->fp->rx_cons_sb, nstats->rx_packets);
5129 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
5130 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
5131 estats->driver_xoff, estats->brb_discard);
5132 printk(KERN_DEBUG "tstats: checksum_discard %u "
5133 "packets_too_big_discard %u no_buff_discard %u "
5134 "mac_discard %u mac_filter_discard %u "
5135 "xxovrflow_discard %u brb_truncate_discard %u "
5136 "ttl0_discard %u\n",
5137 estats->checksum_discard,
5138 estats->packets_too_big_discard,
5139 estats->no_buff_discard, estats->mac_discard,
5140 estats->mac_filter_discard, estats->xxoverflow_discard,
5141 estats->brb_truncate_discard, estats->ttl0_discard);
5143 for_each_queue(bp, i) {
5144 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
5145 bnx2x_fp(bp, i, tx_pkt),
5146 bnx2x_fp(bp, i, rx_pkt),
5147 bnx2x_fp(bp, i, rx_calls));
5151 if (bp->state != BNX2X_STATE_OPEN) {
5152 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
5156 #ifdef BNX2X_STOP_ON_ERROR
5157 if (unlikely(bp->panic))
5162 if (bp->executer_idx) {
5163 struct dmae_command *dmae = &bp->dmae;
5164 int port = bp->port;
5165 int loader_idx = port * 8;
5167 memset(dmae, 0, sizeof(struct dmae_command));
5169 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
5170 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
5171 DMAE_CMD_DST_RESET |
5173 DMAE_CMD_ENDIANITY_B_DW_SWAP |
5175 DMAE_CMD_ENDIANITY_DW_SWAP |
5177 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
5178 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
5179 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
5180 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
5181 sizeof(struct dmae_command) *
5182 (loader_idx + 1)) >> 2;
5183 dmae->dst_addr_hi = 0;
5184 dmae->len = sizeof(struct dmae_command) >> 2;
5185 dmae->len--; /* !!! for A0/1 only */
5186 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
5187 dmae->comp_addr_hi = 0;
5190 bnx2x_post_dmae(bp, dmae, loader_idx);
5193 if (bp->stats_state != STATS_STATE_ENABLE) {
5194 bp->stats_state = STATS_STATE_DISABLE;
5198 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
5199 /* stats ramrod has its own slot on the spe */
5201 bp->stat_pending = 1;
5205 static void bnx2x_timer(unsigned long data)
5207 struct bnx2x *bp = (struct bnx2x *) data;
5209 if (!netif_running(bp->dev))
5212 if (atomic_read(&bp->intr_sem) != 0)
5216 struct bnx2x_fastpath *fp = &bp->fp[0];
5219 bnx2x_tx_int(fp, 1000);
5220 rc = bnx2x_rx_int(fp, 1000);
5224 int port = bp->port;
5228 ++bp->fw_drv_pulse_wr_seq;
5229 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5230 /* TBD - add SYSTEM_TIME */
5231 drv_pulse = bp->fw_drv_pulse_wr_seq;
5232 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
5234 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
5235 MCP_PULSE_SEQ_MASK);
5236 /* The delta between driver pulse and mcp response
5237 * should be 1 (before mcp response) or 0 (after mcp response)
5239 if ((drv_pulse != mcp_pulse) &&
5240 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5241 /* someone lost a heartbeat... */
5242 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5243 drv_pulse, mcp_pulse);
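/* Example: after the driver writes drv_pulse = 5, a healthy MCP
 * reports mcp_pulse = 5 (response already sent) or 4 (response
 * still pending); any other value means a lost heartbeat.
 */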
5247 if (bp->stats_state == STATS_STATE_DISABLE)
5250 bnx2x_update_stats(bp);
5253 mod_timer(&bp->timer, jiffies + bp->current_interval);
5256 /* end of Statistics */
5261 * nic init service functions
5264 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5265 dma_addr_t mapping, int id)
5267 int port = bp->port;
5272 section = ((u64)mapping) + offsetof(struct host_status_block,
5274 sb->u_status_block.status_block_id = id;
5276 REG_WR(bp, BAR_USTRORM_INTMEM +
5277 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5278 REG_WR(bp, BAR_USTRORM_INTMEM +
5279 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5282 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5283 REG_WR16(bp, BAR_USTRORM_INTMEM +
5284 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5287 section = ((u64)mapping) + offsetof(struct host_status_block,
5289 sb->c_status_block.status_block_id = id;
5291 REG_WR(bp, BAR_CSTRORM_INTMEM +
5292 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5293 REG_WR(bp, BAR_CSTRORM_INTMEM +
5294 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5297 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5298 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5299 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5301 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5304 static void bnx2x_init_def_sb(struct bnx2x *bp,
5305 struct host_def_status_block *def_sb,
5306 dma_addr_t mapping, int id)
5308 int port = bp->port;
5309 int index, val, reg_offset;
5313 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5314 atten_status_block);
5315 def_sb->atten_status_block.status_block_id = id;
5317 bp->def_att_idx = 0;
5320 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5321 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5323 for (index = 0; index < 3; index++) {
5324 bp->attn_group[index].sig[0] = REG_RD(bp,
5325 reg_offset + 0x10*index);
5326 bp->attn_group[index].sig[1] = REG_RD(bp,
5327 reg_offset + 0x4 + 0x10*index);
5328 bp->attn_group[index].sig[2] = REG_RD(bp,
5329 reg_offset + 0x8 + 0x10*index);
5330 bp->attn_group[index].sig[3] = REG_RD(bp,
5331 reg_offset + 0xc + 0x10*index);
5334 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5335 MISC_REG_AEU_MASK_ATTN_FUNC_0));
5337 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5338 HC_REG_ATTN_MSG0_ADDR_L);
5340 REG_WR(bp, reg_offset, U64_LO(section));
5341 REG_WR(bp, reg_offset + 4, U64_HI(section));
5343 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5345 val = REG_RD(bp, reg_offset);
5347 REG_WR(bp, reg_offset, val);
5350 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5351 u_def_status_block);
5352 def_sb->u_def_status_block.status_block_id = id;
5356 REG_WR(bp, BAR_USTRORM_INTMEM +
5357 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5358 REG_WR(bp, BAR_USTRORM_INTMEM +
5359 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5361 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
5364 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5365 REG_WR16(bp, BAR_USTRORM_INTMEM +
5366 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5369 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5370 c_def_status_block);
5371 def_sb->c_def_status_block.status_block_id = id;
5375 REG_WR(bp, BAR_CSTRORM_INTMEM +
5376 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5377 REG_WR(bp, BAR_CSTRORM_INTMEM +
5378 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5380 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
5383 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5384 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5385 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5388 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5389 t_def_status_block);
5390 def_sb->t_def_status_block.status_block_id = id;
5394 REG_WR(bp, BAR_TSTRORM_INTMEM +
5395 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5396 REG_WR(bp, BAR_TSTRORM_INTMEM +
5397 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5399 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
5402 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5403 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5404 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5407 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5408 x_def_status_block);
5409 def_sb->x_def_status_block.status_block_id = id;
5413 REG_WR(bp, BAR_XSTRORM_INTMEM +
5414 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5415 REG_WR(bp, BAR_XSTRORM_INTMEM +
5416 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5418 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
5421 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5422 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5423 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5425 bp->stat_pending = 0;
5427 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5430 static void bnx2x_update_coalesce(struct bnx2x *bp)
5432 int port = bp->port;
5435 for_each_queue(bp, i) {
5437 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5438 REG_WR8(bp, BAR_USTRORM_INTMEM +
5439 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5440 HC_INDEX_U_ETH_RX_CQ_CONS),
5441 bp->rx_ticks_int/12);
5442 REG_WR16(bp, BAR_USTRORM_INTMEM +
5443 USTORM_SB_HC_DISABLE_OFFSET(port, i,
5444 HC_INDEX_U_ETH_RX_CQ_CONS),
5445 bp->rx_ticks_int ? 0 : 1);
5447 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5448 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5449 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5450 HC_INDEX_C_ETH_TX_CQ_CONS),
5451 bp->tx_ticks_int/12);
5452 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5453 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
5454 HC_INDEX_C_ETH_TX_CQ_CONS),
5455 bp->tx_ticks_int ? 0 : 1);
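/* The /12 above reflects the host coalescing timeout granularity
 * (the register appears to be programmed in units of ~12us);
 * ticks == 0 disables timeout-based coalescing, which is why the
 * HC_DISABLE flag is written as 1 in that case.
 */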
5459 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5463 int port = bp->port;
5465 bp->rx_buf_use_size = bp->dev->mtu;
5467 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
5468 bp->rx_buf_size = bp->rx_buf_use_size + 64;
5470 for_each_queue(bp, j) {
5471 struct bnx2x_fastpath *fp = &bp->fp[j];
5474 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5476 for (i = 1; i <= NUM_RX_RINGS; i++) {
5477 struct eth_rx_bd *rx_bd;
5479 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5481 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5482 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5484 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5485 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
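/* The last two BDs of each page hold a "next page" pointer;
 * i % NUM_RX_RINGS wraps the final page back to page 0, linking
 * all pages into one circular descriptor chain.
 */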
5489 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5490 struct eth_rx_cqe_next_page *nextpg;
5492 nextpg = (struct eth_rx_cqe_next_page *)
5493 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5495 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5496 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5498 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5499 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5502 /* rx completion queue */
5503 fp->rx_comp_cons = ring_prod = 0;
5505 for (i = 0; i < bp->rx_ring_size; i++) {
5506 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5507 BNX2X_ERR("was only able to allocate "
5511 ring_prod = NEXT_RX_IDX(ring_prod);
5512 BUG_TRAP(ring_prod > i);
5515 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
5516 fp->rx_pkt = fp->rx_calls = 0;
5518 /* Warning! this will generate an interrupt (to the TSTORM) */
5519 /* must only be done when chip is initialized */
5520 REG_WR(bp, BAR_TSTRORM_INTMEM +
5521 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
5525 REG_WR(bp, BAR_USTRORM_INTMEM +
5526 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
5527 U64_LO(fp->rx_comp_mapping));
5528 REG_WR(bp, BAR_USTRORM_INTMEM +
5529 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
5530 U64_HI(fp->rx_comp_mapping));
5534 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5538 for_each_queue(bp, j) {
5539 struct bnx2x_fastpath *fp = &bp->fp[j];
5541 for (i = 1; i <= NUM_TX_RINGS; i++) {
5542 struct eth_tx_bd *tx_bd =
5543 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
5546 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5547 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5549 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5550 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5553 fp->tx_pkt_prod = 0;
5554 fp->tx_pkt_cons = 0;
5557 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5562 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5564 int port = bp->port;
5566 spin_lock_init(&bp->spq_lock);
5568 bp->spq_left = MAX_SPQ_PENDING;
5569 bp->spq_prod_idx = 0;
5570 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5571 bp->spq_prod_bd = bp->spq;
5572 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5574 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
5575 U64_LO(bp->spq_mapping));
5576 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
5577 U64_HI(bp->spq_mapping));
5579 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
5583 static void bnx2x_init_context(struct bnx2x *bp)
5587 for_each_queue(bp, i) {
5588 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5589 struct bnx2x_fastpath *fp = &bp->fp[i];
5591 context->xstorm_st_context.tx_bd_page_base_hi =
5592 U64_HI(fp->tx_desc_mapping);
5593 context->xstorm_st_context.tx_bd_page_base_lo =
5594 U64_LO(fp->tx_desc_mapping);
5595 context->xstorm_st_context.db_data_addr_hi =
5596 U64_HI(fp->tx_prods_mapping);
5597 context->xstorm_st_context.db_data_addr_lo =
5598 U64_LO(fp->tx_prods_mapping);
5600 context->ustorm_st_context.rx_bd_page_base_hi =
5601 U64_HI(fp->rx_desc_mapping);
5602 context->ustorm_st_context.rx_bd_page_base_lo =
5603 U64_LO(fp->rx_desc_mapping);
5604 context->ustorm_st_context.status_block_id = i;
5605 context->ustorm_st_context.sb_index_number =
5606 HC_INDEX_U_ETH_RX_CQ_CONS;
5607 context->ustorm_st_context.rcq_base_address_hi =
5608 U64_HI(fp->rx_comp_mapping);
5609 context->ustorm_st_context.rcq_base_address_lo =
5610 U64_LO(fp->rx_comp_mapping);
5611 context->ustorm_st_context.flags =
5612 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
5613 context->ustorm_st_context.mc_alignment_size = 64;
5614 context->ustorm_st_context.num_rss = bp->num_queues;
5616 context->cstorm_st_context.sb_index_number =
5617 HC_INDEX_C_ETH_TX_CQ_CONS;
5618 context->cstorm_st_context.status_block_id = i;
5620 context->xstorm_ag_context.cdu_reserved =
5621 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5622 CDU_REGION_NUMBER_XCM_AG,
5623 ETH_CONNECTION_TYPE);
5624 context->ustorm_ag_context.cdu_usage =
5625 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5626 CDU_REGION_NUMBER_UCM_AG,
5627 ETH_CONNECTION_TYPE);
5631 static void bnx2x_init_ind_table(struct bnx2x *bp)
5633 int port = bp->port;
5639 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5640 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
5641 i % bp->num_queues);
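/* For illustration: with num_queues = 4 the table is filled
 * 0,1,2,3,0,1,... so RSS hash buckets are spread round-robin
 * over the active queues.
 */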
5643 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5646 static void bnx2x_set_client_config(struct bnx2x *bp)
5649 int mode = bp->rx_mode;
5651 int i, port = bp->port;
5652 struct tstorm_eth_client_config tstorm_client = {0};
5654 tstorm_client.mtu = bp->dev->mtu;
5655 tstorm_client.statistics_counter_id = 0;
5656 tstorm_client.config_flags =
5657 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
5659 if (mode && bp->vlgrp) {
5660 tstorm_client.config_flags |=
5661 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
5662 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5665 if (mode != BNX2X_RX_MODE_PROMISC)
5666 tstorm_client.drop_flags =
5667 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
5669 for_each_queue(bp, i) {
5670 REG_WR(bp, BAR_TSTRORM_INTMEM +
5671 TSTORM_CLIENT_CONFIG_OFFSET(port, i),
5672 ((u32 *)&tstorm_client)[0]);
5673 REG_WR(bp, BAR_TSTRORM_INTMEM +
5674 TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
5675 ((u32 *)&tstorm_client)[1]);
5678 /* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
5679 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
5682 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5684 int mode = bp->rx_mode;
5685 int port = bp->port;
5686 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5689 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
5692 case BNX2X_RX_MODE_NONE: /* no Rx */
5693 tstorm_mac_filter.ucast_drop_all = 1;
5694 tstorm_mac_filter.mcast_drop_all = 1;
5695 tstorm_mac_filter.bcast_drop_all = 1;
5697 case BNX2X_RX_MODE_NORMAL:
5698 tstorm_mac_filter.bcast_accept_all = 1;
5700 case BNX2X_RX_MODE_ALLMULTI:
5701 tstorm_mac_filter.mcast_accept_all = 1;
5702 tstorm_mac_filter.bcast_accept_all = 1;
5704 case BNX2X_RX_MODE_PROMISC:
5705 tstorm_mac_filter.ucast_accept_all = 1;
5706 tstorm_mac_filter.mcast_accept_all = 1;
5707 tstorm_mac_filter.bcast_accept_all = 1;
5710 BNX2X_ERR("bad rx mode (%d)\n", mode);
5713 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5714 REG_WR(bp, BAR_TSTRORM_INTMEM +
5715 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
5716 ((u32 *)&tstorm_mac_filter)[i]);
5718 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5719 ((u32 *)&tstorm_mac_filter)[i]); */
5722 if (mode != BNX2X_RX_MODE_NONE)
5723 bnx2x_set_client_config(bp);
5726 static void bnx2x_init_internal(struct bnx2x *bp)
5728 int port = bp->port;
5729 struct tstorm_eth_function_common_config tstorm_config = {0};
5730 struct stats_indication_flags stats_flags = {0};
5733 tstorm_config.config_flags = MULTI_FLAGS;
5734 tstorm_config.rss_result_mask = MULTI_MASK;
5737 REG_WR(bp, BAR_TSTRORM_INTMEM +
5738 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
5739 (*(u32 *)&tstorm_config));
5741 /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
5742 (*(u32 *)&tstorm_config)); */
5744 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5745 bnx2x_set_storm_rx_mode(bp);
5747 stats_flags.collect_eth = cpu_to_le32(1);
5749 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
5750 ((u32 *)&stats_flags)[0]);
5751 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
5752 ((u32 *)&stats_flags)[1]);
5754 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
5755 ((u32 *)&stats_flags)[0]);
5756 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
5757 ((u32 *)&stats_flags)[1]);
5759 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
5760 ((u32 *)&stats_flags)[0]);
5761 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
5762 ((u32 *)&stats_flags)[1]);
5764 /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
5765 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
5768 static void bnx2x_nic_init(struct bnx2x *bp)
5772 for_each_queue(bp, i) {
5773 struct bnx2x_fastpath *fp = &bp->fp[i];
5775 fp->state = BNX2X_FP_STATE_CLOSED;
5776 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
5777 bp, fp->status_blk, i);
5779 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
5782 bnx2x_init_def_sb(bp, bp->def_status_blk,
5783 bp->def_status_blk_mapping, 0x10);
5784 bnx2x_update_coalesce(bp);
5785 bnx2x_init_rx_rings(bp);
5786 bnx2x_init_tx_ring(bp);
5787 bnx2x_init_sp_ring(bp);
5788 bnx2x_init_context(bp);
5789 bnx2x_init_internal(bp);
5790 bnx2x_init_stats(bp);
5791 bnx2x_init_ind_table(bp);
5792 bnx2x_int_enable(bp);
5796 /* end of nic init */
5799 * gzip service functions
5802 static int bnx2x_gunzip_init(struct bnx2x *bp)
5804 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5805 &bp->gunzip_mapping);
5806 if (bp->gunzip_buf == NULL)
5809 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5810 if (bp->strm == NULL)
5813 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5815 if (bp->strm->workspace == NULL)
5825 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5826 bp->gunzip_mapping);
5827 bp->gunzip_buf = NULL;
5830 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5831 " decompression\n", bp->dev->name);
5835 static void bnx2x_gunzip_end(struct bnx2x *bp)
5837 kfree(bp->strm->workspace);
5842 if (bp->gunzip_buf) {
5843 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5844 bp->gunzip_mapping);
5845 bp->gunzip_buf = NULL;
5849 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5853 /* check gzip header */
5854 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5861 if (zbuf[3] & FNAME)
5862 while ((zbuf[n++] != 0) && (n < len));
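/* A gzip member begins with magic bytes 0x1f 0x8b, a method byte
 * (Z_DEFLATED) and a flags byte; when FNAME is set, the loop above
 * skips the NUL-terminated file name that follows the fixed header
 * before the raw deflate data is handed to zlib (-MAX_WBITS below
 * selects headerless inflate).
 */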
5864 bp->strm->next_in = zbuf + n;
5865 bp->strm->avail_in = len - n;
5866 bp->strm->next_out = bp->gunzip_buf;
5867 bp->strm->avail_out = FW_BUF_SIZE;
5869 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5873 rc = zlib_inflate(bp->strm, Z_FINISH);
5874 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5875 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5876 bp->dev->name, bp->strm->msg);
5878 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5879 if (bp->gunzip_outlen & 0x3)
5880 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5881 " gunzip_outlen (%d) not aligned\n",
5882 bp->dev->name, bp->gunzip_outlen);
5883 bp->gunzip_outlen >>= 2;
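/* gunzip_outlen is converted from bytes to dwords; presumably the
 * downstream init code consumes the uncompressed image one 32-bit
 * word at a time, hence the alignment check above.
 */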
5885 zlib_inflateEnd(bp->strm);
5887 if (rc == Z_STREAM_END)
5893 /* nic load/unload */
5896 * general service functions
5899 /* send a NIG loopback debug packet */
5900 static void bnx2x_lb_pckt(struct bnx2x *bp)
5906 /* Ethernet source and destination addresses */
5908 wb_write[0] = 0x55555555;
5909 wb_write[1] = 0x55555555;
5910 wb_write[2] = 0x20; /* SOP */
5911 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5913 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5914 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5916 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5919 /* NON-IP protocol */
5921 wb_write[0] = 0x09000000;
5922 wb_write[1] = 0x55555555;
5923 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5924 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5926 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5927 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5928 /* EOP, eop_bvalid = 0 */
5929 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5933 /* some of the internal memories
5934 * are not directly readable from the driver;
5935 * to test them we send debug packets
5937 static int bnx2x_int_mem_test(struct bnx2x *bp)
5943 switch (CHIP_REV(bp)) {
5955 DP(NETIF_MSG_HW, "start part1\n");
5957 /* Disable inputs of parser neighbor blocks */
5958 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5959 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5960 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5961 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5963 /* Write 0 to parser credits for CFC search request */
5964 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5966 /* send Ethernet packet */
5969 /* TODO: do we need to reset the NIG statistics? */
5970 /* Wait until NIG register shows 1 packet of size 0x10 */
5971 count = 1000 * factor;
5973 #ifdef BNX2X_DMAE_RD
5974 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5975 val = *bnx2x_sp(bp, wb_data[0]);
5977 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5978 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5987 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5991 /* Wait until PRS register shows 1 packet */
5992 count = 1000 * factor;
5994 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6003 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6007 /* Reset and init BRB, PRS */
6008 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
6010 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
6012 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6013 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6015 DP(NETIF_MSG_HW, "part2\n");
6017 /* Disable inputs of parser neighbor blocks */
6018 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6019 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6020 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6021 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
6023 /* Write 0 to parser credits for CFC search request */
6024 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6026 /* send 10 Ethernet packets */
6027 for (i = 0; i < 10; i++)
6030 /* Wait until NIG register shows 10 + 1
6031 packets of size 11*0x10 = 0xb0 */
6032 count = 1000 * factor;
6034 #ifdef BNX2X_DMAE_RD
6035 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6036 val = *bnx2x_sp(bp, wb_data[0]);
6038 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6039 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6048 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6052 /* Wait until PRS register shows 2 packets */
6053 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6055 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6057 /* Write 1 to parser credits for CFC search request */
6058 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6060 /* Wait until PRS register shows 3 packets */
6061 msleep(10 * factor);
6062 /* Wait until NIG register shows 1 packet of size 0x10 */
6063 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6065 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6067 /* clear NIG EOP FIFO */
6068 for (i = 0; i < 11; i++)
6069 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6070 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6072 BNX2X_ERR("clear of NIG failed\n");
6076 /* Reset and init BRB, PRS, NIG */
6077 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6079 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6081 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6082 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6085 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6088 /* Enable inputs of parser neighbor blocks */
6089 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6090 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6091 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6092 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
6094 DP(NETIF_MSG_HW, "done\n");
6099 static void enable_blocks_attention(struct bnx2x *bp)
6101 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6102 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6103 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6104 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6105 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6106 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6107 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6108 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6109 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6110 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6111 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6112 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6113 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6114 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6115 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6116 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6117 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6118 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6119 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6120 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6121 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6122 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6123 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
6124 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6125 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6126 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6127 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6128 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6129 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6130 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6131 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6132 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
6135 static int bnx2x_function_init(struct bnx2x *bp, int mode)
6137 int func = bp->port;
6138 int port = func ? PORT1 : PORT0;
6144 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
6145 if ((func != 0) && (func != 1)) {
6146 BNX2X_ERR("BAD function number (%d)\n", func);
6150 bnx2x_gunzip_init(bp);
6152 if (mode & 0x1) { /* init common */
6153 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
6155 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6157 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6159 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
6161 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6163 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6165 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
6166 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
6170 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6171 /* enable HW interrupt from PXP on USDM
6172 overflow bit 16 on INT_MASK_0 */
6173 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6177 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6178 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6179 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6180 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6181 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6182 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
6184 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6185 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6186 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6187 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6188 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6193 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6196 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
6198 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6199 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6200 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6203 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
6205 /* let the HW do its magic ... */
6208 (can be moved up if we want to use the DMAE) */
6209 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6211 BNX2X_ERR("PXP2 CFG failed\n");
6215 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6217 BNX2X_ERR("PXP2 RD_INIT failed\n");
6221 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6222 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6224 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6226 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
6227 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
6228 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
6229 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
6231 #ifdef BNX2X_DMAE_RD
6232 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6233 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6234 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6235 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6237 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
6238 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
6239 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
6240 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
6241 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
6242 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
6243 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
6244 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
6245 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
6246 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
6247 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
6248 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
6250 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
6251 /* soft reset pulse */
6252 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6253 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6256 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
6258 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
6259 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
6260 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6261 /* enable hw interrupt from doorbell Q */
6262 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6265 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6267 if (CHIP_REV_IS_SLOW(bp)) {
6268 /* fix for emulation and FPGA for no pause */
6269 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
6270 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
6271 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
6272 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
6275 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6277 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
6278 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
6279 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
6280 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
6282 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6283 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6284 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6285 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6287 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
6288 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
6289 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
6290 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
6293 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6295 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6298 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
6299 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
6300 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
6302 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6303 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6304 REG_WR(bp, i, 0xc0cac01a);
6305 /* TODO: replace with something meaningful */
6307 /* SRCH COMMON comes here */
6308 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6310 if (sizeof(union cdu_context) != 1024) {
6311 /* we currently assume that a context is 1024 bytes */
6312 printk(KERN_ALERT PFX "please adjust the size of"
6313 " cdu_context(%ld)\n",
6314 (long)sizeof(union cdu_context));
6316 val = (4 << 24) + (0 << 12) + 1024;
6317 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6318 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
6320 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
6321 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6323 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
6324 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
6325 MISC_AEU_COMMON_END);
6326 /* RXPCS COMMON comes here */
6327 /* EMAC0 COMMON comes here */
6328 /* EMAC1 COMMON comes here */
6329 /* DBU COMMON comes here */
6330 /* DBG COMMON comes here */
6331 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
6333 if (CHIP_REV_IS_SLOW(bp))
6336 /* finish CFC init */
6337 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
6339 BNX2X_ERR("CFC LL_INIT failed\n");
6343 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
6345 BNX2X_ERR("CFC AC_INIT failed\n");
6349 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
6351 BNX2X_ERR("CFC CAM_INIT failed\n");
6355 REG_WR(bp, CFC_REG_DEBUG0, 0);
6357 /* read NIG statistic
6358 to see if this is our first up since powerup */
6359 #ifdef BNX2X_DMAE_RD
6360 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6361 val = *bnx2x_sp(bp, wb_data[0]);
6363 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6364 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6366 /* do internal memory self test */
6367 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6368 BNX2X_ERR("internal mem selftest failed\n");
6372 /* clear PXP2 attentions */
6373 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
6375 enable_blocks_attention(bp);
6376 /* enable_blocks_parity(bp); */
6378 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6379 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6380 /* Fan failure is indicated by SPIO 5 */
6381 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6382 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6384 /* set to active low mode */
6385 val = REG_RD(bp, MISC_REG_SPIO_INT);
6386 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6387 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6388 REG_WR(bp, MISC_REG_SPIO_INT, val);
6390 /* enable interrupt to signal the IGU */
6391 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6392 val |= (1 << MISC_REGISTERS_SPIO_5);
6393 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6400 } /* end of common init */
6404 /* the phys address is shifted right 12 bits and has a
6405 valid bit (1) added at bit 53;
6406 then since this is a wide register(TM)
6407 we split it into two 32 bit writes
6409 #define RQ_ONCHIP_AT_PORT_SIZE 384
6410 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6411 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6412 #define PXP_ONE_ILT(x) ((x << 10) | x)
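/* For illustration: a context at physical 0x123456789a000 gives
 * ONCHIP_ADDR1 = 0x3456789a (low 32 bits of addr >> 12) and
 * ONCHIP_ADDR2 = 0x100012 (valid bit | addr >> 44); the two dword
 * writes below form one 64-bit ILT entry.
 */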
6414 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
6416 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
6418 /* Port PXP comes here */
6419 /* Port PXP2 comes here */
6424 i = func * RQ_ONCHIP_AT_PORT_SIZE;
6426 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
6427 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
6428 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6430 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
6431 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
6432 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
6433 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
6435 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
6441 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6442 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6443 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6444 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6449 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6450 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6451 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6452 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6457 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6458 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6459 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6460 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6463 /* Port TCM comes here */
6464 /* Port UCM comes here */
6465 /* Port CCM comes here */
6466 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
6467 func ? XCM_PORT1_END : XCM_PORT0_END);
6473 for (i = 0; i < 32; i++) {
6474 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
6476 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
6478 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
6479 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
6482 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
6484 /* Port QM comes here */
6487 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6488 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6490 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
6491 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
6493 /* Port DQ comes here */
6494 /* Port BRB1 comes here */
6495 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
6496 func ? PRS_PORT1_END : PRS_PORT0_END);
6497 /* Port TSDM comes here */
6498 /* Port CSDM comes here */
6499 /* Port USDM comes here */
6500 /* Port XSDM comes here */
6501 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
6502 func ? TSEM_PORT1_END : TSEM_PORT0_END);
6503 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
6504 func ? USEM_PORT1_END : USEM_PORT0_END);
6505 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
6506 func ? CSEM_PORT1_END : CSEM_PORT0_END);
6507 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
6508 func ? XSEM_PORT1_END : XSEM_PORT0_END);
6509 /* Port UPB comes here */
6510 /* Port XSDM comes here */
6511 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
6512 func ? PBF_PORT1_END : PBF_PORT0_END);
6514 /* configure PBF to work without PAUSE mtu 9000 */
6515 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
6517 /* update threshold */
6518 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
6519 /* update init credit */
6520 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
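/* note: 9040/16 reads as the 9000 byte MTU plus header margin expressed
 * in 16-byte credit units; the extra 553 - 22 credits look like a fixed
 * pipeline allowance (an assumption based on the magic numbers above) */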
6523 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
6525 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
6528 /* tell the searcher where the T2 table is */
6529 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6531 wb_write[0] = U64_LO(bp->t2_mapping);
6532 wb_write[1] = U64_HI(bp->t2_mapping);
6533 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6534 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6535 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6536 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6538 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6539 /* Port SRCH comes here */
6541 /* Port CDU comes here */
6542 /* Port CFC comes here */
6543 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
6544 func ? HC_PORT1_END : HC_PORT0_END);
6545 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
6546 MISC_AEU_PORT0_START,
6547 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
6548 /* Port PXPCS comes here */
6549 /* Port EMAC0 comes here */
6550 /* Port EMAC1 comes here */
6551 /* Port DBU comes here */
6552 /* Port DBG comes here */
6553 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
6554 func ? NIG_PORT1_END : NIG_PORT0_END);
6555 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
6556 /* Port MCP comes here */
6557 /* Port DMAE comes here */
6559 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6560 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6561 /* add SPIO 5 to group 0 */
6562 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6563 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6564 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6571 bnx2x_link_reset(bp);
6573 /* Reset PCIE errors for debug */
6574 REG_WR(bp, 0x2114, 0xffffffff);
6575 REG_WR(bp, 0x2120, 0xffffffff);
6576 REG_WR(bp, 0x2814, 0xffffffff);
6578 /* !!! move to init_values.h */
6579 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6580 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6581 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6582 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6584 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
6585 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
6586 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
6587 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
6589 bnx2x_gunzip_end(bp);
6594 bp->fw_drv_pulse_wr_seq =
6595 (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
6596 DRV_PULSE_SEQ_MASK);
6597 bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
6598 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
6599 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
6607 /* send the MCP a request, block until there is a reply */
6608 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6610 int port = bp->port;
6611 u32 seq = ++bp->fw_seq;
6614 SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
6615 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6617 /* let the FW do its magic ... */
6618 msleep(100); /* TBD */
6620 if (CHIP_REV_IS_SLOW(bp))
6623 rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
6624 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
6626 /* is this a reply to our command? */
6627 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6628 rc &= FW_MSG_CODE_MASK;
6632 BNX2X_ERR("FW failed to respond!\n");
6640 static void bnx2x_free_mem(struct bnx2x *bp)
6643 #define BNX2X_PCI_FREE(x, y, size) \
6646 pci_free_consistent(bp->pdev, size, x, y); \
6652 #define BNX2X_FREE(x) \
6663 for_each_queue(bp, i) {
6666 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6667 bnx2x_fp(bp, i, status_blk_mapping),
6668 sizeof(struct host_status_block) +
6669 sizeof(struct eth_tx_db_data));
6671 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6672 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6673 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6674 bnx2x_fp(bp, i, tx_desc_mapping),
6675 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6677 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6678 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6679 bnx2x_fp(bp, i, rx_desc_mapping),
6680 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6682 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6683 bnx2x_fp(bp, i, rx_comp_mapping),
6684 sizeof(struct eth_fast_path_rx_cqe) *
6690 /* end of fastpath */
6692 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6693 (sizeof(struct host_def_status_block)));
6695 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6696 (sizeof(struct bnx2x_slowpath)));
6699 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6700 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6701 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6702 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6704 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
6706 #undef BNX2X_PCI_FREE
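/* The bodies of the BNX2X_PCI_FREE/BNX2X_FREE helpers above (and of the
 * BNX2X_PCI_ALLOC/BNX2X_ALLOC helpers below) are partially elided in this
 * listing.  A minimal sketch of the conventional shape such helpers take
 * -- do/while(0) wrapper, NULL check, pointer poisoning -- illustrative
 * only, not necessarily the exact original: */
#if 0
#define EXAMPLE_PCI_FREE(x, y, size)					\
	do {								\
		if (x) {						\
			pci_free_consistent(bp->pdev, size, x, y);	\
			x = NULL;					\
		}							\
	} while (0)
#endif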
6710 static int bnx2x_alloc_mem(struct bnx2x *bp)
6713 #define BNX2X_PCI_ALLOC(x, y, size) \
6715 x = pci_alloc_consistent(bp->pdev, size, y); \
6717 goto alloc_mem_err; \
6718 memset(x, 0, size); \
6721 #define BNX2X_ALLOC(x, size) \
6723 x = vmalloc(size); \
6725 goto alloc_mem_err; \
6726 memset(x, 0, size); \
6732 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
6734 for_each_queue(bp, i) {
6735 bnx2x_fp(bp, i, bp) = bp;
6738 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6739 &bnx2x_fp(bp, i, status_blk_mapping),
6740 sizeof(struct host_status_block) +
6741 sizeof(struct eth_tx_db_data));
6743 bnx2x_fp(bp, i, hw_tx_prods) =
6744 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6746 bnx2x_fp(bp, i, tx_prods_mapping) =
6747 bnx2x_fp(bp, i, status_blk_mapping) +
6748 sizeof(struct host_status_block);
6750 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6751 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6752 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6753 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6754 &bnx2x_fp(bp, i, tx_desc_mapping),
6755 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6757 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6758 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6759 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6760 &bnx2x_fp(bp, i, rx_desc_mapping),
6761 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6763 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6764 &bnx2x_fp(bp, i, rx_comp_mapping),
6765 sizeof(struct eth_fast_path_rx_cqe) *
6769 /* end of fastpath */
6771 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6772 sizeof(struct host_def_status_block));
6774 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6775 sizeof(struct bnx2x_slowpath));
6778 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6781 for (i = 0; i < 64*1024; i += 64) {
6782 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6783 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6786 /* allocate searcher T2 table
6787 we allocate 1/4 of the T1 allocation (16K) for T2
6788 (which is not entered into the ILT) */
6789 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6792 for (i = 0; i < 16*1024; i += 64)
6793 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6795 /* now fixup the last line in the block to point to the next block */
6796 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
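/* An illustrative sketch of the free-list layout built just above, under
 * the stated assumptions: 64-byte elements whose last 8 bytes hold the
 * physical address of the next element, with the final element pointing
 * back to the head.  The helper is hypothetical: */
#if 0
static void example_build_free_list(void *virt, dma_addr_t phys, int size)
{
	int i;

	/* each element links to the physically next one */
	for (i = 0; i < size; i += 64)
		*(u64 *)((char *)virt + i + 56) = (u64)(phys + i + 64);

	/* close the ring: the last element points back to the first */
	*(u64 *)((char *)virt + size - 8) = (u64)phys;
}
#endif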
6798 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6799 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6801 /* QM queues (128*MAX_CONN) */
6802 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6805 /* Slow path ring */
6806 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6814 #undef BNX2X_PCI_ALLOC
6818 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6822 for_each_queue(bp, i) {
6823 struct bnx2x_fastpath *fp = &bp->fp[i];
6825 u16 bd_cons = fp->tx_bd_cons;
6826 u16 sw_prod = fp->tx_pkt_prod;
6827 u16 sw_cons = fp->tx_pkt_cons;
6829 BUG_TRAP(fp->tx_buf_ring != NULL);
6831 while (sw_cons != sw_prod) {
6832 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6838 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6842 for_each_queue(bp, j) {
6843 struct bnx2x_fastpath *fp = &bp->fp[j];
6845 BUG_TRAP(fp->rx_buf_ring != NULL);
6847 for (i = 0; i < NUM_RX_BD; i++) {
6848 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6849 struct sk_buff *skb = rx_buf->skb;
6854 pci_unmap_single(bp->pdev,
6855 pci_unmap_addr(rx_buf, mapping),
6856 bp->rx_buf_use_size,
6857 PCI_DMA_FROMDEVICE);
6865 static void bnx2x_free_skbs(struct bnx2x *bp)
6867 bnx2x_free_tx_skbs(bp);
6868 bnx2x_free_rx_skbs(bp);
6871 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6875 free_irq(bp->msix_table[0].vector, bp->dev);
6876 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6877 bp->msix_table[0].vector);
6879 for_each_queue(bp, i) {
6880 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6881 "state(%x)\n", i, bp->msix_table[i + 1].vector,
6882 bnx2x_fp(bp, i, state));
6884 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6885 BNX2X_ERR("IRQ of fp #%d being freed while "
6886 "state != closed\n", i);
6888 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
6893 static void bnx2x_free_irq(struct bnx2x *bp)
6896 if (bp->flags & USING_MSIX_FLAG) {
6898 bnx2x_free_msix_irqs(bp);
6899 pci_disable_msix(bp->pdev);
6901 bp->flags &= ~USING_MSIX_FLAG;
6904 free_irq(bp->pdev->irq, bp->dev);
6907 static int bnx2x_enable_msix(struct bnx2x *bp)
6912 bp->msix_table[0].entry = 0;
6913 for_each_queue(bp, i)
6914 bp->msix_table[i + 1].entry = i + 1;
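/* entry 0 carries the slowpath/default status block interrupt; entries
 * 1..num_queues map one vector per fastpath queue */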
6916 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
6917 bp->num_queues + 1)) {
6918 BNX2X_LOG("failed to enable MSI-X\n");
6923 bp->flags |= USING_MSIX_FLAG;
6930 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6935 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6936 bp->dev->name, bp->dev);
6939 BNX2X_ERR("request sp irq failed\n");
6943 for_each_queue(bp, i) {
6944 rc = request_irq(bp->msix_table[i + 1].vector,
6945 bnx2x_msix_fp_int, 0,
6946 bp->dev->name, &bp->fp[i]);
6949 BNX2X_ERR("request fp #%d irq failed "
6951 bnx2x_free_msix_irqs(bp);
6955 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6963 static int bnx2x_req_irq(struct bnx2x *bp)
6966 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6967 IRQF_SHARED, bp->dev->name, bp->dev);
6969 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6976 * Init service functions
6979 static void bnx2x_set_mac_addr(struct bnx2x *bp)
6981 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6984 * unicasts 0-31:port0 32-63:port1
6985 * multicast 64-127:port0 128-191:port1
6987 config->hdr.length_6b = 2;
6988 config->hdr.offset = bp->port ? 31 : 0;
6989 config->hdr.reserved0 = 0;
6990 config->hdr.reserved1 = 0;
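/* each 16-bit chunk of the MAC is byte-swapped below, so that (on a
 * little-endian host) e.g. dev_addr[0] lands in the high byte of
 * msb_mac_addr -- apparently the byte order the CAM expects */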
6993 config->config_table[0].cam_entry.msb_mac_addr =
6994 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6995 config->config_table[0].cam_entry.middle_mac_addr =
6996 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6997 config->config_table[0].cam_entry.lsb_mac_addr =
6998 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6999 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
7000 config->config_table[0].target_table_entry.flags = 0;
7001 config->config_table[0].target_table_entry.client_id = 0;
7002 config->config_table[0].target_table_entry.vlan_id = 0;
7004 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
7005 config->config_table[0].cam_entry.msb_mac_addr,
7006 config->config_table[0].cam_entry.middle_mac_addr,
7007 config->config_table[0].cam_entry.lsb_mac_addr);
7010 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
7011 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
7012 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
7013 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
7014 config->config_table[1].target_table_entry.flags =
7015 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7016 config->config_table[1].target_table_entry.client_id = 0;
7017 config->config_table[1].target_table_entry.vlan_id = 0;
7019 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7020 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7021 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
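/* bnx2x_wait_ramrod() below polls *state_p -- which is updated from the
 * slowpath event handler -- until it reaches the expected state; with
 * 'poll' set the caller drives the Rx completion processing itself
 * instead of relying on interrupts */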
7024 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7025 int *state_p, int poll)
7027 /* can take a while if any port is running */
7030 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7031 poll ? "polling" : "waiting", state, idx);
7038 bnx2x_rx_int(bp->fp, 10);
7039 /* if index is different from 0
7040 * the reply for some commands will
7041 * be on the non-default queue
7044 bnx2x_rx_int(&bp->fp[idx], 10);
7047 mb(); /* state is changed by bnx2x_sp_event()*/
7049 if (*state_p == state)
7058 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7059 poll ? "polling" : "waiting", state, idx);
7064 static int bnx2x_setup_leading(struct bnx2x *bp)
7067 /* reset IGU state */
7068 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7071 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7073 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7077 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7080 /* reset IGU state */
7081 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7084 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
7085 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
7087 /* Wait for completion */
7088 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7089 &(bp->fp[index].state), 0);
7094 static int bnx2x_poll(struct napi_struct *napi, int budget);
7095 static void bnx2x_set_rx_mode(struct net_device *dev);
7097 static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
7102 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7104 /* Send LOAD_REQUEST command to MCP.
7105 Returns the type of LOAD command: if it is the
7106 first port to be initialized, common blocks should be
7107 initialized; otherwise they should not.
7110 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7112 BNX2X_ERR("MCP response failure, unloading\n");
7115 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7116 BNX2X_ERR("MCP refused load request, unloading\n");
7117 return -EBUSY; /* other port in diagnostic mode */
7120 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7123 /* if we can't use MSI-X we only need one fp,
7124 * so try to enable MSI-X with the requested number of fps
7125 * and fall back to INT#A with one fp
7131 if ((use_multi > 1) && (use_multi <= 16))
7132 /* user requested number */
7133 bp->num_queues = use_multi;
7134 else if (use_multi == 1)
7135 bp->num_queues = num_online_cpus();
7139 if (bnx2x_enable_msix(bp)) {
7140 /* failed to enable msix */
7143 BNX2X_ERR("Multi requested but failed"
7144 " to enable MSI-X\n");
7149 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
7151 if (bnx2x_alloc_mem(bp))
7155 if (bp->flags & USING_MSIX_FLAG) {
7156 if (bnx2x_req_msix_irqs(bp)) {
7157 pci_disable_msix(bp->pdev);
7162 if (bnx2x_req_irq(bp)) {
7163 BNX2X_ERR("IRQ request failed, aborting\n");
7169 for_each_queue(bp, i)
7170 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7175 if (bnx2x_function_init(bp,
7176 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) {
7177 BNX2X_ERR("HW init failed, aborting\n");
7182 atomic_set(&bp->intr_sem, 0);
7185 /* Setup NIC internals and enable interrupts */
7188 /* Send LOAD_DONE command to MCP */
7190 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7192 BNX2X_ERR("MCP response failure, unloading\n");
7193 goto load_int_disable;
7197 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7199 /* Enable Rx interrupt handling before sending the ramrod
7200 as it's completed on Rx FP queue */
7201 for_each_queue(bp, i)
7202 napi_enable(&bnx2x_fp(bp, i, napi));
7204 if (bnx2x_setup_leading(bp))
7205 goto load_stop_netif;
7207 for_each_nondefault_queue(bp, i)
7208 if (bnx2x_setup_multi(bp, i))
7209 goto load_stop_netif;
7211 bnx2x_set_mac_addr(bp);
7215 /* Start fast path */
7216 if (req_irq) { /* IRQ is only requested from bnx2x_open */
7217 netif_start_queue(bp->dev);
7218 if (bp->flags & USING_MSIX_FLAG)
7219 printk(KERN_INFO PFX "%s: using MSI-X\n",
7222 /* Otherwise the Tx queue should only be re-enabled */
7223 } else if (netif_running(bp->dev)) {
7224 netif_wake_queue(bp->dev);
7225 bnx2x_set_rx_mode(bp->dev);
7228 /* start the timer */
7229 mod_timer(&bp->timer, jiffies + bp->current_interval);
7234 for_each_queue(bp, i)
7235 napi_disable(&bnx2x_fp(bp, i, napi));
7238 bnx2x_int_disable_sync(bp);
7240 bnx2x_free_skbs(bp);
7246 /* TBD we really need to reset the chip
7247 if we want to recover from this */
7252 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7254 int port = bp->port;
7260 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
7262 /* Do not rcv packets to BRB */
7263 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7264 /* Do not direct rcv packets that are not for MCP to the BRB */
7265 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7266 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7268 /* Configure IGU and AEU */
7269 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
7270 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7272 /* TODO: Close Doorbell port? */
7279 base = port * RQ_ONCHIP_AT_PORT_SIZE;
7280 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
7282 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
7284 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
7285 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
7289 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7291 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7293 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7298 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7303 /* halt the connection */
7304 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
7305 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
7308 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7309 &(bp->fp[index].state), 1);
7310 if (rc) /* timeout */
7313 /* delete cfc entry */
7314 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7316 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7317 &(bp->fp[index].state), 1);
7322 static void bnx2x_stop_leading(struct bnx2x *bp)
7324 u16 dsb_sp_prod_idx;
7325 /* if the other port is handling traffic,
7326 this can take a lot of time */
7331 /* Send HALT ramrod */
7332 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7333 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
7335 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7336 &(bp->fp[0].state), 1))
7339 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7341 /* Send PORT_DELETE ramrod */
7342 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7344 /* Wait for completion to arrive on the default status block.
7345 We are going to reset the chip anyway,
7346 so there is not much to do if this times out
7348 while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
7353 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
7354 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7355 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7357 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7358 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7362 static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
7367 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7369 del_timer_sync(&bp->timer);
7371 bp->rx_mode = BNX2X_RX_MODE_NONE;
7372 bnx2x_set_storm_rx_mode(bp);
7374 if (netif_running(bp->dev)) {
7375 netif_tx_disable(bp->dev);
7376 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7379 /* Wait until all fast path tasks complete */
7380 for_each_queue(bp, i) {
7381 struct bnx2x_fastpath *fp = &bp->fp[i];
7384 while (bnx2x_has_work(fp) && (timeout--))
7387 BNX2X_ERR("timeout waiting for queue[%d]\n", i);
7390 /* Wait until stat ramrod returns and all SP tasks complete */
7392 while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) &&
7396 for_each_queue(bp, i)
7397 napi_disable(&bnx2x_fp(bp, i, napi));
7398 /* Disable interrupts after Tx and Rx are disabled on stack level */
7399 bnx2x_int_disable_sync(bp);
7401 if (bp->flags & NO_WOL_FLAG)
7402 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7405 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7406 u8 *mac_addr = bp->dev->dev_addr;
7407 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
7408 EMAC_MODE_ACPI_RCVD);
7410 EMAC_WR(EMAC_REG_EMAC_MODE, val);
7412 val = (mac_addr[0] << 8) | mac_addr[1];
7413 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
7415 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7416 (mac_addr[4] << 8) | mac_addr[5];
7417 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
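/* worked example: for MAC 00:10:18:aa:bb:cc the two writes above leave
 * EMAC_MAC_MATCH holding 0x00000010 and EMAC_MAC_MATCH + 4 holding
 * 0x18aabbcc */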
7419 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7422 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7424 /* Close multi and leading connections */
7425 for_each_nondefault_queue(bp, i)
7426 if (bnx2x_stop_multi(bp, i))
7429 bnx2x_stop_leading(bp);
7430 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
7431 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
7432 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
7433 "state 0x%x fp[0].state 0x%x\n",
7434 bp->state, bp->fp[0].state);
7438 bnx2x_link_reset(bp);
7441 reset_code = bnx2x_fw_command(bp, reset_code);
7443 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7449 /* Reset the chip */
7450 bnx2x_reset_chip(bp, reset_code);
7452 /* Report UNLOAD_DONE to MCP */
7454 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7456 /* Free SKBs and driver internals */
7457 bnx2x_free_skbs(bp);
7460 bp->state = BNX2X_STATE_CLOSED;
7462 netif_carrier_off(bp->dev);
7467 /* end of nic load/unload */
7472 * Init service functions
7475 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
7477 int port = bp->port;
7482 switch (switch_cfg) {
7484 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7486 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
7487 switch (ext_phy_type) {
7488 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7489 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7492 bp->supported |= (SUPPORTED_10baseT_Half |
7493 SUPPORTED_10baseT_Full |
7494 SUPPORTED_100baseT_Half |
7495 SUPPORTED_100baseT_Full |
7496 SUPPORTED_1000baseT_Full |
7497 SUPPORTED_2500baseX_Full |
7498 SUPPORTED_TP | SUPPORTED_FIBRE |
7501 SUPPORTED_Asym_Pause);
7504 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7505 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7508 bp->phy_flags |= PHY_SGMII_FLAG;
7510 bp->supported |= (SUPPORTED_10baseT_Half |
7511 SUPPORTED_10baseT_Full |
7512 SUPPORTED_100baseT_Half |
7513 SUPPORTED_100baseT_Full |
7514 SUPPORTED_1000baseT_Full |
7515 SUPPORTED_TP | SUPPORTED_FIBRE |
7518 SUPPORTED_Asym_Pause);
7522 BNX2X_ERR("NVRAM config error. "
7523 "BAD SerDes ext_phy_config 0x%x\n",
7524 bp->ext_phy_config);
7528 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7530 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7533 case SWITCH_CFG_10G:
7534 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7536 bp->phy_flags |= PHY_XGXS_FLAG;
7538 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7539 switch (ext_phy_type) {
7540 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7541 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7544 bp->supported |= (SUPPORTED_10baseT_Half |
7545 SUPPORTED_10baseT_Full |
7546 SUPPORTED_100baseT_Half |
7547 SUPPORTED_100baseT_Full |
7548 SUPPORTED_1000baseT_Full |
7549 SUPPORTED_2500baseX_Full |
7550 SUPPORTED_10000baseT_Full |
7551 SUPPORTED_TP | SUPPORTED_FIBRE |
7554 SUPPORTED_Asym_Pause);
7557 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7558 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7561 bp->supported |= (SUPPORTED_10000baseT_Full |
7564 SUPPORTED_Asym_Pause);
7567 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7568 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7571 bp->supported |= (SUPPORTED_10000baseT_Full |
7572 SUPPORTED_1000baseT_Full |
7576 SUPPORTED_Asym_Pause);
7579 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7580 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7583 bp->supported |= (SUPPORTED_10000baseT_Full |
7584 SUPPORTED_1000baseT_Full |
7588 SUPPORTED_Asym_Pause);
7591 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7592 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7595 bp->supported |= (SUPPORTED_10000baseT_Full |
7599 SUPPORTED_Asym_Pause);
7603 BNX2X_ERR("NVRAM config error. "
7604 "BAD XGXS ext_phy_config 0x%x\n",
7605 bp->ext_phy_config);
7609 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7611 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7613 bp->ser_lane = ((bp->lane_config &
7614 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
7615 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
7616 bp->rx_lane_swap = ((bp->lane_config &
7617 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
7618 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
7619 bp->tx_lane_swap = ((bp->lane_config &
7620 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
7621 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
7622 BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
7623 bp->rx_lane_swap, bp->tx_lane_swap);
7627 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7632 /* mask what we support according to speed_cap_mask */
7633 if (!(bp->speed_cap_mask &
7634 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7635 bp->supported &= ~SUPPORTED_10baseT_Half;
7637 if (!(bp->speed_cap_mask &
7638 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7639 bp->supported &= ~SUPPORTED_10baseT_Full;
7641 if (!(bp->speed_cap_mask &
7642 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7643 bp->supported &= ~SUPPORTED_100baseT_Half;
7645 if (!(bp->speed_cap_mask &
7646 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7647 bp->supported &= ~SUPPORTED_100baseT_Full;
7649 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7650 bp->supported &= ~(SUPPORTED_1000baseT_Half |
7651 SUPPORTED_1000baseT_Full);
7653 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7654 bp->supported &= ~SUPPORTED_2500baseX_Full;
7656 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7657 bp->supported &= ~SUPPORTED_10000baseT_Full;
7659 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
7662 static void bnx2x_link_settings_requested(struct bnx2x *bp)
7664 bp->req_autoneg = 0;
7665 bp->req_duplex = DUPLEX_FULL;
7667 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7668 case PORT_FEATURE_LINK_SPEED_AUTO:
7669 if (bp->supported & SUPPORTED_Autoneg) {
7670 bp->req_autoneg |= AUTONEG_SPEED;
7671 bp->req_line_speed = 0;
7672 bp->advertising = bp->supported;
7674 if (XGXS_EXT_PHY_TYPE(bp) ==
7675 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
7676 /* force 10G, no AN */
7677 bp->req_line_speed = SPEED_10000;
7679 (ADVERTISED_10000baseT_Full |
7683 BNX2X_ERR("NVRAM config error. "
7684 "Invalid link_config 0x%x"
7685 " Autoneg not supported\n",
7691 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7692 if (bp->supported & SUPPORTED_10baseT_Full) {
7693 bp->req_line_speed = SPEED_10;
7694 bp->advertising = (ADVERTISED_10baseT_Full |
7697 BNX2X_ERR("NVRAM config error. "
7698 "Invalid link_config 0x%x"
7699 " speed_cap_mask 0x%x\n",
7700 bp->link_config, bp->speed_cap_mask);
7705 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7706 if (bp->supported & SUPPORTED_10baseT_Half) {
7707 bp->req_line_speed = SPEED_10;
7708 bp->req_duplex = DUPLEX_HALF;
7709 bp->advertising = (ADVERTISED_10baseT_Half |
7712 BNX2X_ERR("NVRAM config error. "
7713 "Invalid link_config 0x%x"
7714 " speed_cap_mask 0x%x\n",
7715 bp->link_config, bp->speed_cap_mask);
7720 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7721 if (bp->supported & SUPPORTED_100baseT_Full) {
7722 bp->req_line_speed = SPEED_100;
7723 bp->advertising = (ADVERTISED_100baseT_Full |
7726 BNX2X_ERR("NVRAM config error. "
7727 "Invalid link_config 0x%x"
7728 " speed_cap_mask 0x%x\n",
7729 bp->link_config, bp->speed_cap_mask);
7734 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7735 if (bp->supported & SUPPORTED_100baseT_Half) {
7736 bp->req_line_speed = SPEED_100;
7737 bp->req_duplex = DUPLEX_HALF;
7738 bp->advertising = (ADVERTISED_100baseT_Half |
7741 BNX2X_ERR("NVRAM config error. "
7742 "Invalid link_config 0x%x"
7743 " speed_cap_mask 0x%x\n",
7744 bp->link_config, bp->speed_cap_mask);
7749 case PORT_FEATURE_LINK_SPEED_1G:
7750 if (bp->supported & SUPPORTED_1000baseT_Full) {
7751 bp->req_line_speed = SPEED_1000;
7752 bp->advertising = (ADVERTISED_1000baseT_Full |
7755 BNX2X_ERR("NVRAM config error. "
7756 "Invalid link_config 0x%x"
7757 " speed_cap_mask 0x%x\n",
7758 bp->link_config, bp->speed_cap_mask);
7763 case PORT_FEATURE_LINK_SPEED_2_5G:
7764 if (bp->supported & SUPPORTED_2500baseX_Full) {
7765 bp->req_line_speed = SPEED_2500;
7766 bp->advertising = (ADVERTISED_2500baseX_Full |
7769 BNX2X_ERR("NVRAM config error. "
7770 "Invalid link_config 0x%x"
7771 " speed_cap_mask 0x%x\n",
7772 bp->link_config, bp->speed_cap_mask);
7777 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7778 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7779 case PORT_FEATURE_LINK_SPEED_10G_KR:
7780 if (bp->supported & SUPPORTED_10000baseT_Full) {
7781 bp->req_line_speed = SPEED_10000;
7782 bp->advertising = (ADVERTISED_10000baseT_Full |
7785 BNX2X_ERR("NVRAM config error. "
7786 "Invalid link_config 0x%x"
7787 " speed_cap_mask 0x%x\n",
7788 bp->link_config, bp->speed_cap_mask);
7794 BNX2X_ERR("NVRAM config error. "
7795 "BAD link speed link_config 0x%x\n",
7797 bp->req_autoneg |= AUTONEG_SPEED;
7798 bp->req_line_speed = 0;
7799 bp->advertising = bp->supported;
7802 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
7803 bp->req_line_speed, bp->req_duplex);
7805 bp->req_flow_ctrl = (bp->link_config &
7806 PORT_FEATURE_FLOW_CONTROL_MASK);
7807 if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
7808 (bp->supported & SUPPORTED_Autoneg))
7809 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
7811 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
7812 " advertising 0x%x\n",
7813 bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
7816 static void bnx2x_get_hwinfo(struct bnx2x *bp)
7818 u32 val, val2, val3, val4, id;
7819 int port = bp->port;
7822 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7823 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
7825 /* Get the chip revision id and number. */
7826 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7827 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7828 id = ((val & 0xffff) << 16);
7829 val = REG_RD(bp, MISC_REG_CHIP_REV);
7830 id |= ((val & 0xf) << 12);
7831 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7832 id |= ((val & 0xff) << 4);
7833 val = REG_RD(bp, MISC_REG_BOND_ID);
7834 id |= (val & 0xf);
7836 BNX2X_DEV_INFO("chip ID is %x\n", id);
7838 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
7839 BNX2X_DEV_INFO("MCP not active\n");
7844 val = SHMEM_RD(bp, validity_map[port]);
7845 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7846 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7847 BNX2X_ERR("BAD MCP validity signature\n");
7849 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
7850 DRV_MSG_SEQ_NUMBER_MASK);
7852 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7853 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7855 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7857 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7858 bp->ext_phy_config =
7860 dev_info.port_hw_config[port].external_phy_config);
7861 bp->speed_cap_mask =
7863 dev_info.port_hw_config[port].speed_capability_mask);
7866 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7868 BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
7869 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
7870 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
7872 bp->hw_config, bp->board, bp->serdes_config,
7873 bp->lane_config, bp->ext_phy_config,
7874 bp->speed_cap_mask, bp->link_config, bp->fw_seq);
7876 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
7877 bnx2x_link_settings_supported(bp, switch_cfg);
7879 bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
7880 /* for now disable cl73 */
7881 bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
7882 BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
7884 bnx2x_link_settings_requested(bp);
7886 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7887 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7888 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7889 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7890 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7891 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7892 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7893 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7895 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7898 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7899 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7900 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7901 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7903 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7904 val, val2, val3, val4);
7908 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
7909 BNX2X_DEV_INFO("bc_ver %X\n", val);
7910 if (val < BNX2X_BC_VER) {
7911 /* for now only warn
7912 * later we might need to enforce this */
7913 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7914 " please upgrade BC\n", BNX2X_BC_VER, val);
7920 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7921 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
7922 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7923 bp->flash_size, bp->flash_size);
7927 set_mac: /* only supposed to happen on emulation/FPGA */
7928 BNX2X_ERR("warning rendom MAC workaround active\n");
7929 random_ether_addr(bp->dev->dev_addr);
7930 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7935 * ethtool service functions
7938 /* All ethtool functions called with rtnl_lock */
7940 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7942 struct bnx2x *bp = netdev_priv(dev);
7944 cmd->supported = bp->supported;
7945 cmd->advertising = bp->advertising;
7947 if (netif_carrier_ok(dev)) {
7948 cmd->speed = bp->line_speed;
7949 cmd->duplex = bp->duplex;
7951 cmd->speed = bp->req_line_speed;
7952 cmd->duplex = bp->req_duplex;
7955 if (bp->phy_flags & PHY_XGXS_FLAG) {
7956 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7958 switch (ext_phy_type) {
7959 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7961 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7962 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7963 cmd->port = PORT_FIBRE;
7966 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7967 cmd->port = PORT_TP;
7971 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7972 bp->ext_phy_config);
7975 cmd->port = PORT_TP;
7977 cmd->phy_address = bp->phy_addr;
7978 cmd->transceiver = XCVR_INTERNAL;
7980 if (bp->req_autoneg & AUTONEG_SPEED)
7981 cmd->autoneg = AUTONEG_ENABLE;
7983 cmd->autoneg = AUTONEG_DISABLE;
7988 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7989 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7990 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7991 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7992 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7993 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7994 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7999 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8001 struct bnx2x *bp = netdev_priv(dev);
8004 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8005 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8006 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8007 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8008 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8009 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8010 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8012 switch (cmd->port) {
8014 if (!(bp->supported & SUPPORTED_TP)) {
8015 DP(NETIF_MSG_LINK, "TP not supported\n");
8019 if (bp->phy_flags & PHY_XGXS_FLAG) {
8020 bnx2x_link_reset(bp);
8021 bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
8022 bnx2x_phy_deassert(bp);
8027 if (!(bp->supported & SUPPORTED_FIBRE)) {
8028 DP(NETIF_MSG_LINK, "FIBRE not supported\n");
8032 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
8033 bnx2x_link_reset(bp);
8034 bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
8035 bnx2x_phy_deassert(bp);
8040 DP(NETIF_MSG_LINK, "Unknown port type\n");
8044 if (cmd->autoneg == AUTONEG_ENABLE) {
8045 if (!(bp->supported & SUPPORTED_Autoneg)) {
8046 DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
8050 /* advertise the requested speed and duplex if supported */
8051 cmd->advertising &= bp->supported;
8053 bp->req_autoneg |= AUTONEG_SPEED;
8054 bp->req_line_speed = 0;
8055 bp->req_duplex = DUPLEX_FULL;
8056 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
8058 } else { /* forced speed */
8059 /* advertise the requested speed and duplex if supported */
8060 switch (cmd->speed) {
8062 if (cmd->duplex == DUPLEX_FULL) {
8063 if (!(bp->supported &
8064 SUPPORTED_10baseT_Full)) {
8066 "10M full not supported\n");
8070 advertising = (ADVERTISED_10baseT_Full |
8073 if (!(bp->supported &
8074 SUPPORTED_10baseT_Half)) {
8076 "10M half not supported\n");
8080 advertising = (ADVERTISED_10baseT_Half |
8086 if (cmd->duplex == DUPLEX_FULL) {
8087 if (!(bp->supported &
8088 SUPPORTED_100baseT_Full)) {
8090 "100M full not supported\n");
8094 advertising = (ADVERTISED_100baseT_Full |
8097 if (!(bp->supported &
8098 SUPPORTED_100baseT_Half)) {
8100 "100M half not supported\n");
8104 advertising = (ADVERTISED_100baseT_Half |
8110 if (cmd->duplex != DUPLEX_FULL) {
8111 DP(NETIF_MSG_LINK, "1G half not supported\n");
8115 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
8116 DP(NETIF_MSG_LINK, "1G full not supported\n");
8120 advertising = (ADVERTISED_1000baseT_Full |
8125 if (cmd->duplex != DUPLEX_FULL) {
8127 "2.5G half not supported\n");
8131 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
8133 "2.5G full not supported\n");
8137 advertising = (ADVERTISED_2500baseX_Full |
8142 if (cmd->duplex != DUPLEX_FULL) {
8143 DP(NETIF_MSG_LINK, "10G half not supported\n");
8147 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
8148 DP(NETIF_MSG_LINK, "10G full not supported\n");
8152 advertising = (ADVERTISED_10000baseT_Full |
8157 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8161 bp->req_autoneg &= ~AUTONEG_SPEED;
8162 bp->req_line_speed = cmd->speed;
8163 bp->req_duplex = cmd->duplex;
8164 bp->advertising = advertising;
8167 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
8168 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8169 bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
8172 bnx2x_stop_stats(bp);
8173 bnx2x_link_initialize(bp);
8178 static void bnx2x_get_drvinfo(struct net_device *dev,
8179 struct ethtool_drvinfo *info)
8181 struct bnx2x *bp = netdev_priv(dev);
8183 strcpy(info->driver, DRV_MODULE_NAME);
8184 strcpy(info->version, DRV_MODULE_VERSION);
8185 snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
8186 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
8187 BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
8189 strcpy(info->bus_info, pci_name(bp->pdev));
8190 info->n_stats = BNX2X_NUM_STATS;
8191 info->testinfo_len = BNX2X_NUM_TESTS;
8192 info->eedump_len = bp->flash_size;
8193 info->regdump_len = 0;
8196 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8198 struct bnx2x *bp = netdev_priv(dev);
8200 if (bp->flags & NO_WOL_FLAG) {
8204 wol->supported = WAKE_MAGIC;
8206 wol->wolopts = WAKE_MAGIC;
8210 memset(&wol->sopass, 0, sizeof(wol->sopass));
8213 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8215 struct bnx2x *bp = netdev_priv(dev);
8217 if (wol->wolopts & ~WAKE_MAGIC)
8220 if (wol->wolopts & WAKE_MAGIC) {
8221 if (bp->flags & NO_WOL_FLAG)
8231 static u32 bnx2x_get_msglevel(struct net_device *dev)
8233 struct bnx2x *bp = netdev_priv(dev);
8235 return bp->msglevel;
8238 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8240 struct bnx2x *bp = netdev_priv(dev);
8242 if (capable(CAP_NET_ADMIN))
8243 bp->msglevel = level;
8246 static int bnx2x_nway_reset(struct net_device *dev)
8248 struct bnx2x *bp = netdev_priv(dev);
8250 if (bp->state != BNX2X_STATE_OPEN) {
8251 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8255 bnx2x_stop_stats(bp);
8256 bnx2x_link_initialize(bp);
8261 static int bnx2x_get_eeprom_len(struct net_device *dev)
8263 struct bnx2x *bp = netdev_priv(dev);
8265 return bp->flash_size;
8268 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8270 int port = bp->port;
8274 /* adjust timeout for emulation/FPGA */
8275 count = NVRAM_TIMEOUT_COUNT;
8276 if (CHIP_REV_IS_SLOW(bp))
8279 /* request access to nvram interface */
8280 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8281 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8283 for (i = 0; i < count*10; i++) {
8284 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8285 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8291 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8292 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
8299 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8301 int port = bp->port;
8305 /* adjust timeout for emulation/FPGA */
8306 count = NVRAM_TIMEOUT_COUNT;
8307 if (CHIP_REV_IS_SLOW(bp))
8310 /* relinquish nvram interface */
8311 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8312 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8314 for (i = 0; i < count*10; i++) {
8315 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8316 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8322 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8323 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
8330 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8334 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8336 /* enable both bits, even on read */
8337 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8338 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8339 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8342 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8346 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8348 /* disable both bits, even after read */
8349 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8350 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8351 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8354 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8360 /* build the command word */
8361 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8363 /* need to clear DONE bit separately */
8364 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8366 /* address of the NVRAM to read from */
8367 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8368 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8370 /* issue a read command */
8371 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8373 /* adjust timeout for emulation/FPGA */
8374 count = NVRAM_TIMEOUT_COUNT;
8375 if (CHIP_REV_IS_SLOW(bp))
8378 /* wait for completion */
8381 for (i = 0; i < count; i++) {
8383 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8385 if (val & MCPR_NVM_COMMAND_DONE) {
8386 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8387 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8388 /* we read nvram data in cpu order
8389 * but ethtool sees it as an array of bytes
8390 * converting to big-endian will do the work */
8391 val = cpu_to_be32(val);
8401 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8408 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8410 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8415 if (offset + buf_size > bp->flash_size) {
8416 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8417 " buf_size (0x%x) > flash_size (0x%x)\n",
8418 offset, buf_size, bp->flash_size);
8422 /* request access to nvram interface */
8423 rc = bnx2x_acquire_nvram_lock(bp);
8427 /* enable access to nvram interface */
8428 bnx2x_enable_nvram_access(bp);
8430 /* read the first word(s) */
8431 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8432 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8433 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8434 memcpy(ret_buf, &val, 4);
8436 /* advance to the next dword */
8437 offset += sizeof(u32);
8438 ret_buf += sizeof(u32);
8439 buf_size -= sizeof(u32);
8444 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8445 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8446 memcpy(ret_buf, &val, 4);
8449 /* disable access to nvram interface */
8450 bnx2x_disable_nvram_access(bp);
8451 bnx2x_release_nvram_lock(bp);
8456 static int bnx2x_get_eeprom(struct net_device *dev,
8457 struct ethtool_eeprom *eeprom, u8 *eebuf)
8459 struct bnx2x *bp = netdev_priv(dev);
8462 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8463 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8464 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8465 eeprom->len, eeprom->len);
8467 /* parameters already validated in ethtool_get_eeprom */
8469 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8474 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8479 /* build the command word */
8480 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8482 /* need to clear DONE bit separately */
8483 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8485 /* write the data */
8486 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8488 /* address of the NVRAM to write to */
8489 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8490 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8492 /* issue the write command */
8493 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8495 /* adjust timeout for emulation/FPGA */
8496 count = NVRAM_TIMEOUT_COUNT;
8497 if (CHIP_REV_IS_SLOW(bp))
8500 /* wait for completion */
8502 for (i = 0; i < count; i++) {
8504 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8505 if (val & MCPR_NVM_COMMAND_DONE) {
8514 #define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))
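/* worked example: offset 0x102 addresses byte lane 2 of its aligned
 * dword, so BYTE_OFFSET(0x102) == 16 and the read-modify-write mask in
 * bnx2x_nvram_write1() becomes 0xff << 16 */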
8516 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8524 if (offset + buf_size > bp->flash_size) {
8525 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8526 " buf_size (0x%x) > flash_size (0x%x)\n",
8527 offset, buf_size, bp->flash_size);
8531 /* request access to nvram interface */
8532 rc = bnx2x_acquire_nvram_lock(bp);
8536 /* enable access to nvram interface */
8537 bnx2x_enable_nvram_access(bp);
8539 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8540 align_offset = (offset & ~0x03);
8541 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8544 val &= ~(0xff << BYTE_OFFSET(offset));
8545 val |= (*data_buf << BYTE_OFFSET(offset));
8547 /* nvram data is returned as an array of bytes
8548 * convert it back to cpu order */
8549 val = be32_to_cpu(val);
8551 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8553 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8557 /* disable access to nvram interface */
8558 bnx2x_disable_nvram_access(bp);
8559 bnx2x_release_nvram_lock(bp);
8564 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8572 if (buf_size == 1) { /* ethtool */
8573 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8576 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8578 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8583 if (offset + buf_size > bp->flash_size) {
8584 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8585 " buf_size (0x%x) > flash_size (0x%x)\n",
8586 offset, buf_size, bp->flash_size);
8590 /* request access to nvram interface */
8591 rc = bnx2x_acquire_nvram_lock(bp);
8595 /* enable access to nvram interface */
8596 bnx2x_enable_nvram_access(bp);
8599 cmd_flags = MCPR_NVM_COMMAND_FIRST;
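/* each NVRAM page must be bracketed by its own flags: LAST is raised on
 * the final dword of the buffer and on the last dword of each page,
 * FIRST on the first dword of each new page */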
8600 while ((written_so_far < buf_size) && (rc == 0)) {
8601 if (written_so_far == (buf_size - sizeof(u32)))
8602 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8603 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8604 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8605 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8606 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8608 memcpy(&val, data_buf, 4);
8609 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8611 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8613 /* advance to the next dword */
8614 offset += sizeof(u32);
8615 data_buf += sizeof(u32);
8616 written_so_far += sizeof(u32);
8620 /* disable access to nvram interface */
8621 bnx2x_disable_nvram_access(bp);
8622 bnx2x_release_nvram_lock(bp);
8627 static int bnx2x_set_eeprom(struct net_device *dev,
8628 struct ethtool_eeprom *eeprom, u8 *eebuf)
8630 struct bnx2x *bp = netdev_priv(dev);
8633 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8634 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8635 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8636 eeprom->len, eeprom->len);
8638 /* parameters already validated in ethtool_set_eeprom */
8640 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8645 static int bnx2x_get_coalesce(struct net_device *dev,
8646 struct ethtool_coalesce *coal)
8648 struct bnx2x *bp = netdev_priv(dev);
8650 memset(coal, 0, sizeof(struct ethtool_coalesce));
8652 coal->rx_coalesce_usecs = bp->rx_ticks;
8653 coal->tx_coalesce_usecs = bp->tx_ticks;
8654 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8659 static int bnx2x_set_coalesce(struct net_device *dev,
8660 struct ethtool_coalesce *coal)
8662 struct bnx2x *bp = netdev_priv(dev);
8664 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8665 if (bp->rx_ticks > 3000)
8666 bp->rx_ticks = 3000;
8668 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8669 if (bp->tx_ticks > 0x3000)
8670 bp->tx_ticks = 0x3000;
8672 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8673 if (bp->stats_ticks > 0xffff00)
8674 bp->stats_ticks = 0xffff00;
8675 bp->stats_ticks &= 0xffff00;
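/* the low byte is masked off after clamping; presumably the hardware
 * only honours stats intervals in multiples of 0x100 usec (an assumption
 * read off the 0xffff00 mask, not verified against the spec) */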
8677 if (netif_running(bp->dev))
8678 bnx2x_update_coalesce(bp);
8683 static void bnx2x_get_ringparam(struct net_device *dev,
8684 struct ethtool_ringparam *ering)
8686 struct bnx2x *bp = netdev_priv(dev);
8688 ering->rx_max_pending = MAX_RX_AVAIL;
8689 ering->rx_mini_max_pending = 0;
8690 ering->rx_jumbo_max_pending = 0;
8692 ering->rx_pending = bp->rx_ring_size;
8693 ering->rx_mini_pending = 0;
8694 ering->rx_jumbo_pending = 0;
8696 ering->tx_max_pending = MAX_TX_AVAIL;
8697 ering->tx_pending = bp->tx_ring_size;
8700 static int bnx2x_set_ringparam(struct net_device *dev,
8701 struct ethtool_ringparam *ering)
8703 struct bnx2x *bp = netdev_priv(dev);
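/* sanity bounds: the Tx ring must at least accommodate one maximally
 * fragmented skb (MAX_SKB_FRAGS BDs) plus a few spare BDs */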
8705 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8706 (ering->tx_pending > MAX_TX_AVAIL) ||
8707 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8710 bp->rx_ring_size = ering->rx_pending;
8711 bp->tx_ring_size = ering->tx_pending;
8713 if (netif_running(bp->dev)) {
8714 bnx2x_nic_unload(bp, 0);
8715 bnx2x_nic_load(bp, 0);
8721 static void bnx2x_get_pauseparam(struct net_device *dev,
8722 struct ethtool_pauseparam *epause)
8724 struct bnx2x *bp = netdev_priv(dev);
8727 ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
8728 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
8729 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
8731 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8732 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8733 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8736 static int bnx2x_set_pauseparam(struct net_device *dev,
8737 struct ethtool_pauseparam *epause)
8739 struct bnx2x *bp = netdev_priv(dev);
8741 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8742 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8743 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8745 if (epause->autoneg) {
8746 if (!(bp->supported & SUPPORTED_Autoneg)) {
8747 DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
8751 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
8753 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
8755 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
8757 if (epause->rx_pause)
8758 bp->req_flow_ctrl |= FLOW_CTRL_RX;
8759 if (epause->tx_pause)
8760 bp->req_flow_ctrl |= FLOW_CTRL_TX;
8762 if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
8763 (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
8764 bp->req_flow_ctrl = FLOW_CTRL_NONE;
8766 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
8767 bp->req_autoneg, bp->req_flow_ctrl);
8769 bnx2x_stop_stats(bp);
8770 bnx2x_link_initialize(bp);
8775 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8777 struct bnx2x *bp = netdev_priv(dev);
8782 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8784 struct bnx2x *bp = netdev_priv(dev);
8790 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8793 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8795 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8800 char string[ETH_GSTRING_LEN];
8801 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8802 { "MC Errors (online)" }
8805 static int bnx2x_self_test_count(struct net_device *dev)
8807 return BNX2X_NUM_TESTS;
8810 static void bnx2x_self_test(struct net_device *dev,
8811 struct ethtool_test *etest, u64 *buf)
8813 struct bnx2x *bp = netdev_priv(dev);
8816 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8818 if (bp->state != BNX2X_STATE_OPEN) {
8819 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8823 stats_state = bp->stats_state;
8824 bnx2x_stop_stats(bp);
8826 if (bnx2x_mc_assert(bp) != 0) {
8828 etest->flags |= ETH_TEST_FL_FAILED;
8831 #ifdef BNX2X_EXTRA_DEBUG
8832 bnx2x_panic_dump(bp);
8834 bp->stats_state = stats_state;
8838 char string[ETH_GSTRING_LEN];
8839 } bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
8841 { "rx_error_bytes"},
8843 { "tx_error_bytes"},
8844 { "rx_ucast_packets"},
8845 { "rx_mcast_packets"},
8846 { "rx_bcast_packets"},
8847 { "tx_ucast_packets"},
8848 { "tx_mcast_packets"},
8849 { "tx_bcast_packets"},
8850 { "tx_mac_errors"}, /* 10 */
8851 { "tx_carrier_errors"},
8853 { "rx_align_errors"},
8854 { "tx_single_collisions"},
8855 { "tx_multi_collisions"},
8857 { "tx_excess_collisions"},
8858 { "tx_late_collisions"},
8859 { "tx_total_collisions"},
8860 { "rx_fragments"}, /* 20 */
8862 { "rx_undersize_packets"},
8863 { "rx_oversize_packets"},
8865 { "rx_xoff_frames"},
8867 { "tx_xoff_frames"},
8868 { "rx_mac_ctrl_frames"},
8869 { "rx_filtered_packets"},
8870 { "rx_discards"}, /* 30 */
8876 #define STATS_OFFSET32(offset_name) \
8877 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
8879 static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
8880 STATS_OFFSET32(total_bytes_received_hi),
8881 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
8882 STATS_OFFSET32(total_bytes_transmitted_hi),
8883 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
8884 STATS_OFFSET32(total_unicast_packets_received_hi),
8885 STATS_OFFSET32(total_multicast_packets_received_hi),
8886 STATS_OFFSET32(total_broadcast_packets_received_hi),
8887 STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8888 STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8889 STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
8890 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
8891 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
8892 STATS_OFFSET32(crc_receive_errors),
8893 STATS_OFFSET32(alignment_errors),
8894 STATS_OFFSET32(single_collision_transmit_frames),
8895 STATS_OFFSET32(multiple_collision_transmit_frames),
8896 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
8897 STATS_OFFSET32(excessive_collision_frames),
8898 STATS_OFFSET32(late_collision_frames),
8899 STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
8900 STATS_OFFSET32(runt_packets_received), /* 20 */
8901 STATS_OFFSET32(jabber_packets_received),
8902 STATS_OFFSET32(error_runt_packets_received),
8903 STATS_OFFSET32(error_jabber_packets_received),
8904 STATS_OFFSET32(pause_xon_frames_received),
8905 STATS_OFFSET32(pause_xoff_frames_received),
8906 STATS_OFFSET32(pause_xon_frames_transmitted),
8907 STATS_OFFSET32(pause_xoff_frames_transmitted),
8908 STATS_OFFSET32(control_frames_received),
8909 STATS_OFFSET32(mac_filter_discard),
8910 STATS_OFFSET32(no_buff_discard), /* 30 */
8911 STATS_OFFSET32(brb_discard),
8912 STATS_OFFSET32(brb_truncate_discard),
8913 STATS_OFFSET32(xxoverflow_discard)
8916 static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
8917 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
8918 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
8919 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
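/* widths for the table above: 8 = 64-bit counter kept as a hi/lo pair of
 * dwords, 4 = plain 32-bit counter, 0 = counter not exported */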
8923 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8925 switch (stringset) {
8927 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
8931 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8936 static int bnx2x_get_stats_count(struct net_device *dev)
8938 return BNX2X_NUM_STATS;
8941 static void bnx2x_get_ethtool_stats(struct net_device *dev,
8942 struct ethtool_stats *stats, u64 *buf)
8944 struct bnx2x *bp = netdev_priv(dev);
8945 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
8948 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8949 if (bnx2x_stats_len_arr[i] == 0) {
8950 /* skip this counter */
8958 if (bnx2x_stats_len_arr[i] == 4) {
8959 /* 4-byte counter */
8960 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
8963 /* 8-byte counter */
8964 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
8965 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
8969 static int bnx2x_phys_id(struct net_device *dev, u32 data)
8971 struct bnx2x *bp = netdev_priv(dev);
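/* each loop iteration below toggles the LED and sleeps ~500 ms, so an
 * on/off pair takes about a second and 'data' is roughly the requested
 * blink time in seconds */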
8977 for (i = 0; i < (data * 2); i++) {
8979 bnx2x_leds_set(bp, SPEED_1000);
8981 bnx2x_leds_unset(bp);
8983 msleep_interruptible(500);
8984 if (signal_pending(current))
8989 bnx2x_leds_set(bp, bp->line_speed);
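/* Example: "ethtool -p ethX 3" lands here with data == 3 and runs six
 * half-second steps (LED forced on for even i, off for odd i), i.e.
 * roughly three seconds of blinking before the LED is handed back to
 * the link-speed logic above.
 */
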
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
/* end of ethtool_ops */

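/* With these hooks wired up, "ethtool -S ethX" is served by
 * bnx2x_get_stats_count() plus bnx2x_get_ethtool_stats(), and
 * "ethtool -p ethX <seconds>" by bnx2x_phys_id() above.
 */
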
/****************************************************************************
 * General service functions
 ****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev,
				      bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* D3hot */

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

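/* PMCSR note for bnx2x_set_power_state(): PCI_PM_CTRL_STATE_MASK covers
 * the two power-state bits of the PM control/status register (0 = D0,
 * 3 = D3hot), so the D0 branch clears them while the D3hot branch sets
 * both, optionally arming PCI_PM_CTRL_PME_ENABLE when wake-on-LAN is
 * configured.
 */
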
/****************************************************************************
 * net_device service functions
 ****************************************************************************/

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;

	DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 (dev->mc_count > BNX2X_MAX_MULTICAST))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		int i, old, offset;
		struct dev_mc_list *mclist;
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0, mclist = dev->mc_list;
		     mclist && (i < dev->mc_count);
		     i++, mclist = mclist->next) {

			config->config_table[i].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
			config->config_table[i].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
			config->config_table[i].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
			config->config_table[i].cam_entry.flags =
					cpu_to_le16(bp->port);
			config->config_table[i].target_table_entry.flags = 0;
			config->config_table[i].target_table_entry.
								client_id = 0;
			config->config_table[i].target_table_entry.
								vlan_id = 0;

			DP(NETIF_MSG_IFUP,
			   "setting MCAST[%d] (%04x:%04x:%04x)\n",
			   i, config->config_table[i].cam_entry.msb_mac_addr,
			   config->config_table[i].cam_entry.middle_mac_addr,
			   config->config_table[i].cam_entry.lsb_mac_addr);
		}
		old = config->hdr.length_6b;

		for (; i < old; i++) {
			if (CAM_IS_INVALID(config->config_table[i])) {
				i--; /* already invalidated */
				break;
			}

			CAM_INVALIDATE(config->config_table[i]);
		}

		if (CHIP_REV_IS_SLOW(bp))
			offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
		else
			offset = BNX2X_MAX_MULTICAST*(1 + bp->port);

		config->hdr.length_6b = i;
		config->hdr.offset = offset;
		config->hdr.reserved0 = 0;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

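/* Worked example of the CAM packing above: on a little-endian host the
 * multicast address 01:00:5e:01:02:03 is read as the three u16 values
 * 0x0001, 0x015e and 0x0302, which swab16() turns into the big-endian
 * halves 0x0100, 0x5e01 and 0x0203 seen in the DP() printout.
 */
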
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
		bnx2x_tx_int(fp, budget);

	if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* bnx2x_has_work() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !bnx2x_has_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(bp->dev, napi);

		bnx2x_ack_sb(bp, fp->index, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return work_done;
}

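/* Ordering note for bnx2x_poll(): the rmb() above keeps the
 * bnx2x_has_work() check from reading stale status-block indices; if it
 * could be reordered before the rx/tx processing, the poller might
 * complete NAPI and re-enable the IGU interrupt (IGU_INT_ENABLE) while
 * unserviced completions were already posted in host memory.
 */
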
/* Called with netif_tx_lock.
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index = 0;
	dma_addr_t mapping;
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = smp_processor_id() % (bp->num_queues);

	fp = &bp->fp[fp_index];
	if (unlikely(bnx2x_tx_avail(fp) <
					(skb_shinfo(skb)->nr_frags + 3))) {
		bp->slowpath->eth_stats.driver_xoff++;
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	/*
	This is a bit ugly. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO bds.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	I would like to thank DovH for this mess.
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = fp->tx_bd_prod;
	bd_prod = TX_BD(bd_prod);

	/* get a tx_buf and first bd */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	tx_bd->general_data |= 1; /* header nbd */

	/* remember the first bd of the packet */
	tx_buf->first_bd = bd_prod;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		struct iphdr *iph = ip_hdr(skb);
		u8 len;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;

		/* turn on parsing and get a bd */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];
		len = ((u8 *)iph - (u8 *)skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (len |
				    ((skb->protocol == htons(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
		pbd->ip_hlen = ip_hdrlen(skb) / 2;
		pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
		if (iph->protocol == IPPROTO_TCP) {
			struct tcphdr *th = tcp_hdr(skb);

			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_TCP_CSUM;
			pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
			pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
			pbd->tcp_pseudo_csum = swab16(th->check);

		} else if (iph->protocol == IPPROTO_UDP) {
			struct udphdr *uh = udp_hdr(skb);

			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_TCP_CSUM;
			pbd->total_hlen += cpu_to_le16(4);
			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = 5; /* 10 >> 1 */
			pbd->tcp_pseudo_csum = 0;
			/* HW bug: we need to subtract 10 bytes before the
			 * UDP header from the csum
			 */
			uh->check = (u16) ~csum_fold(csum_sub(uh->check,
				csum_partial(((u8 *)(uh)-10), 10, 0)));
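			/* Worked example of the fixup above: cs_offset
			 * is given in 16-bit words, so 5 means the
			 * device starts summing 10 bytes (the tail of
			 * the IP header) before the UDP header;
			 * csum_sub() folds those same 10 bytes back out
			 * of uh->check so the checksum on the wire
			 * still ends up correct.
			 */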
		}
	}

	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb->len, PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %u\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
	   tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);

	if (skb_shinfo(skb)->gso_size &&
	    (skb->len > (bp->dev->mtu + ETH_HLEN))) {
		int hlen = 2 * le16_to_cpu(pbd->total_hlen);

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (tx_bd->nbytes > cpu_to_le16(hlen)) {
			/* we split the first bd into headers and data bds
			 * to ease the pain of our fellow microcode engineers
			 * we use one mapping for both bds
			 * So far this has only been observed to happen
			 * in Other Operating Systems(TM)
			 */

			/* first fix first bd */
			nbd++;
			tx_bd->nbd = cpu_to_le16(nbd);
			tx_bd->nbytes = cpu_to_le16(hlen);

			/* we only print this as an error
			 * because we don't think this will ever happen.
			 */
			BNX2X_ERR("TSO split header size is %d (%x:%x)"
				  " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
				  tx_bd->addr_lo, tx_bd->nbd);

			/* now get a new data bd
			 * (after the pbd) and fill it */
			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
			tx_bd = &fp->tx_desc_ring[bd_prod];

			tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
			tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
			tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
			tx_bd->vlan = cpu_to_le16(pkt_prod);
			/* this marks the bd
			 * as one that has no individual mapping
			 * the FW ignores this flag in a bd not marked start
			 */
			tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
			DP(NETIF_MSG_TX_QUEUED,
			   "TSO split data size is %d (%x:%x)\n",
			   tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
		}

		if (!pbd) {
			/* supposed to be unreached
			 * (and therefore not handled properly...)
			 */
			BNX2X_ERR("LSO with no PBD\n");
			BUG();
		}

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));
		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;
		DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
		   " addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the bd as the last bd */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	tx_buf->skb = skb;

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next bd
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, pbd->total_hlen);

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);

	fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod += cpu_to_le32(1);
	DOORBELL(bp, fp_index, 0);

	fp->tx_bd_prod = bd_prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_stop_queue(dev);
		bp->slowpath->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

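/* BD accounting example for bnx2x_start_xmit(): a checksummed TCP skb
 * with two page fragments takes a start BD, a parse BD and two frag
 * BDs, matching nbd = nr_frags + 2 = 4 above; the extra nbd++ cases
 * (TSO header split, the next BD counted before the doorbell when the
 * packet contains or ends with it) only grow that count.
 */
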
/* Called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, 1);
}

/* Called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, 1);

	if (!CHIP_REV_IS_SLOW(bp))
		bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* Called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2x_set_mac_addr(bp);

	return 0;
}

/* Called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		if (bp->state == BNX2X_STATE_OPEN) {
			err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
						&mii_regval);

			data->val_out = mii_regval;
		} else
			err = -EAGAIN;
		spin_unlock_bh(&bp->phy_lock);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		if (bp->state == BNX2X_STATE_OPEN)
			err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
						 data->val_in);
		else
			err = -EAGAIN;
		spin_unlock_bh(&bp->phy_lock);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

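/* Note on the MII ioctls above: reg_num is masked with 0x1f because
 * clause-22 MDIO (what bnx2x_mdio22_read()/bnx2x_mdio22_write()
 * implement) only addresses 32 registers per PHY; userspace tools such
 * as mii-tool reach this path via SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG.
 */
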
/* Called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, 0);
		bnx2x_nic_load(bp, 0);
	}

	return 0;
}

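/* Note that an MTU change on a running interface therefore means a full
 * reload: e.g. "ip link set ethX mtu 9000" briefly takes the link down
 * while the rings are reallocated for the new buffer size during load.
 */
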
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

/* Called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	if (!netif_running(bp->dev))
		return;

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state);
		goto reset_task_exit;
	}

	bnx2x_nic_unload(bp, 0);
	bnx2x_nic_load(bp, 0);

reset_task_exit:
	return;
}

static int __devinit bnx2x_init_board(struct pci_dev *pdev,
				      struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->port = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources,"
		       " aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);

	INIT_WORK(&bp->reset_task, bnx2x_reset_task);
	INIT_WORK(&bp->sp_task, bnx2x_sp_task);

	dev->base_addr = pci_resource_start(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr,
				      pci_resource_len(pdev, 0));
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					pci_resource_len(pdev, 2));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	bnx2x_get_hwinfo(bp);

	if (CHIP_REV(bp) == CHIP_REV_FPGA) {
		printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
		       " will only init first device\n");
		onefunc = 1;
		nomcp = 1;
	}

	if (nomcp) {
		printk(KERN_ERR PFX "MCP disabled, will only"
		       " init first device\n");
		onefunc = 1;
	}

	if (onefunc && bp->port) {
		printk(KERN_ERR PFX "Second device disabled, exiting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->tx_quick_cons_trip_int = 0xff;
	bp->tx_quick_cons_trip = 0xff;
	bp->tx_ticks_int = 50;
	bp->tx_ticks = 50;

	bp->rx_quick_cons_trip_int = 0xff;
	bp->rx_quick_cons_trip = 0xff;
	bp->rx_ticks_int = 25;
	bp->rx_ticks = 25;

	bp->stats_ticks = 1000000 & 0xffff00;

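	/* Worked out: 1000000 & 0xffff00 = 0xf4200 = 999936, so
	 * statistics are refreshed roughly once per second; the low
	 * byte is masked off, presumably to match the granularity the
	 * device expects for this interval.
	 */
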
	bp->timer_interval = HZ;
	bp->current_interval = (poll ? poll : HZ);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;
	int port = PCI_FUNC(pdev->devfn);
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	netif_carrier_off(dev);

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	if (port && onefunc) {
		printk(KERN_ERR PFX "second function disabled. exiting\n");
		free_netdev(dev);
		return 0;
	}

	rc = bnx2x_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->hard_start_xmit = bnx2x_start_xmit;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->open = bnx2x_open;
	dev->stop = bnx2x_close;
	dev->set_multicast_list = bnx2x_set_rx_mode;
	dev->set_mac_address = bnx2x_change_mac_addr;
	dev->do_ioctl = bnx2x_ioctl;
	dev->change_mtu = bnx2x_change_mtu;
	dev->tx_timeout = bnx2x_tx_timeout;
	dev->vlan_rx_register = bnx2x_vlan_rx_register;
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2x;
#endif
	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		if (bp->doorbells)
			iounmap(bp->doorbells);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->name,
	       ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
	       ((CHIP_ID(bp) & 0x0ff0) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));

	return 0;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		/* we get here if init_one() fails */
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}

	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev)
		return 0;

	if (!netif_running(dev))
		return 0;

	bp = netdev_priv(dev);

	bnx2x_nic_unload(bp, 0);

	netif_device_detach(dev);

	pci_save_state(pdev);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}

	if (!netif_running(dev))
		return 0;

	bp = netdev_priv(dev);

	pci_restore_state(pdev);
	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, 0);

	return rc;
}

static struct pci_driver bnx2x_pci_driver = {
	.name       = DRV_MODULE_NAME,
	.id_table   = bnx2x_pci_tbl,
	.probe      = bnx2x_init_one,
	.remove     = __devexit_p(bnx2x_remove_one),
	.suspend    = bnx2x_suspend,
	.resume     = bnx2x_resume,
};

static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);