/* bnx2x.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Eliezer Tamir <eliezert@broadcom.com>
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

/* define this to make the driver freeze on error
 * to allow getting debug info
 * (you will need to reboot afterwards)
 */
/*#define BNX2X_STOP_ON_ERROR*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
        #define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/version.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "0.40.15"
#define DRV_MODULE_RELDATE      "$DateTime: 2007/11/15 07:28:37 $"
#define BNX2X_BC_VER            0x040009

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 577xx 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_INFO(cvs_version, "$Revision: #356 $");

static int use_inta;
static int poll;
static int onefunc;
static int nomcp;
static int debug;
static int use_multi;

module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(onefunc, int, 0);
module_param(nomcp, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(onefunc, "enable only first function");
MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
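
/* Usage sketch (illustrative, not from this file): the parameters above are
 * plain ints set on the modprobe command line, e.g.
 *
 *      modprobe bnx2x use_inta=1 debug=0x1
 *
 * where the value of "debug" is assumed to follow the NETIF_MSG_* msglevel
 * bit convention.
 */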

enum bnx2x_board_type {
        BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}
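
/* The write above tunnels through the GRC window in PCI config space:
 * PCICFG_GRC_ADDRESS selects the target register, PCICFG_GRC_DATA carries
 * the value, and the window is then parked at PCICFG_VENDOR_ID_OFFSET so
 * that stray config cycles cannot hit a live device register.
 */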

#ifdef BNX2X_IND_RD
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
#endif

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

/*              DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
                             u32 dst_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

/*
        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
*/
/*
        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
*/

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        /* adjust timeout for emulation/FPGA */
        if (CHIP_REV_IS_SLOW(bp))
                timeout *= 100;
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
/*              DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
}
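
/* Illustrative usage sketch (buffer name and size assumed, not a verbatim
 * caller): to DMA four dwords from host memory into GRC space, stage the
 * data in a coherent buffer and pass its bus address:
 *
 *      u32 *wb = bnx2x_sp(bp, wb_data[0]);
 *      memcpy(wb, src, 4 * sizeof(u32));
 *      bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), grc_dst, 4);
 *
 * Note that len32 counts dwords while dst_addr is a byte address (hence the
 * >> 2 above), and completion is polled through the wb_comp word.
 */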

#ifdef BNX2X_DMAE_RD
static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

/*
        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
*/

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
/*
        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
*/
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        int i, j;
        int rc = 0;
        char last_idx;
        const char storm[] = {"XTCU"};
        const u32 intmem_base[] = {
                BAR_XSTRORM_INTMEM,
                BAR_TSTRORM_INTMEM,
                BAR_CSTRORM_INTMEM,
                BAR_USTRORM_INTMEM
        };

        /* Go through all instances of all SEMIs */
        for (i = 0; i < 4; i++) {
                last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
                                   intmem_base[i]);
                BNX2X_ERR("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
                          storm[i], last_idx);

                /* print the asserts */
                for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
                        u32 row0, row1, row2, row3;

                        row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
                                      intmem_base[i]);
                        row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
                                      intmem_base[i]);
                        row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
                                      intmem_base[i]);
                        row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
                                      intmem_base[i]);

                        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                                BNX2X_ERR("DATA %cSTORM_ASSERT_INDEX 0x%x ="
                                          " 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storm[i], j, row3, row2, row1, row0);
                                rc++;
                        } else {
                                break;
                        }
                }
        }
        return rc;
}
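
/* Each storm's assert list is scanned until the invalid-opcode sentinel and
 * the total number of asserts found is returned.  The XSTORM_* offsets are
 * reused against every storm's intmem base, which assumes the list layout
 * is identical across storms.
 */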
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_ERR PFX "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_ERR PFX "%s", (char *)data);
        }
367         printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)"
                          "  *rx_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  fp_c_idx(%x)  fp_u_idx(%x)"
                          "  bd data(%x,%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
                          fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
                          fp->fp_u_idx, hw_prods->packets_prod,
                          hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[0], rx_bd[1], sw_bd->skb);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_t_idx(%u)"
                  "  def_x_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);


        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");

        bp->stats_state = STATS_STATE_DISABLE;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}

static void bnx2x_enable_int(struct bnx2x *bp)
{
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  msi %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);
}

static void bnx2x_disable_int(struct bnx2x *bp)
{
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_disable_int_sync(struct bnx2x *bp)
{

        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_disable_int(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);

}

/* fast path code */

/*
 * general service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

/*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
           (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
        REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
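
/* The ack is a single 32-bit write to the IGU: the new status block index
 * plus the sb id, storm id, update flag and interrupt mode, packed into
 * struct igu_ack_register and landing at this port's INT_ACK address.
 */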

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}
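
/* The return value is a small bitmask: bit 0 set means the CSTORM index
 * moved, bit 1 the USTORM index; 0 means the status block is unchanged.
 */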

static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;

        if ((rx_cons_sb != fp->rx_comp_cons) ||
            (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
                return 1;

        return 0;
}
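
/* The MAX_RCQ_DESC_CNT adjustment skips the last entry of each RCQ page,
 * which holds the next-page pointer rather than a real completion (assumed
 * from the ring layout; bnx2x_rx_int() applies the same test).
 */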

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
        u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

/*      DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, BAR_IGU_INTMEM + igu_addr); */

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
        if (result == 0) {
                BNX2X_ERR("read %x from IGU\n", result);
                REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
        }
#endif
        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = tx_buf->first_bd;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("bad nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        BUG_TRAP(skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return bd_idx;
}

static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        u16 used;
        u32 prod;
        u32 cons;

        /* Tell compiler that prod and cons can change */
        barrier();
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
                (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

        if (prod >= cons) {
                /* used = prod - cons - prod/size + cons/size */
                used -= NUM_TX_BD - NUM_TX_RINGS;
        }

        BUG_TRAP(used <= fp->bp->tx_ring_size);
        BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

        return (fp->bp->tx_ring_size - used);
}
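
/* Worked example for the arithmetic above (illustrative numbers): every TX
 * page donates its last BD to a next-page pointer, hence the per-page
 * cons/TX_DESC_CNT and prod/TX_DESC_CNT corrections.  With prod = 10 and
 * cons = 5 on the same page the corrections cancel, used = 5, and the
 * function returns tx_ring_size - 5 BDs still free for start_xmit().
 */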

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %d\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);

        }
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(NETIF_MSG_RX_STATUS,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply(%d)  state is %x\n",
                                  command, fp->state);
                }
                mb(); /* force bnx2x_wait_ramrod to see the change */
                return;
        }
        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_PORT_DEL | BNX2X_STATE_CLOSING_WAIT4_DELETE):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_DELETED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected ramrod (%d)  state is %x\n",
                          command, bp->state);
        }

        mb(); /* force bnx2x_wait_ramrod to see the change */
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(mapping))) {

                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
           fp->index, hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                unsigned int len, pad;
                struct sw_rx_bd *rx_buf;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];

                DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u  sw_comp_cons %u"
                   "  comp_ring (%u)  bd_ring (%u,%u)\n",
                   hw_comp_cons, sw_comp_cons,
                   comp_ring_cons, bd_prod, bd_cons);
                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %x\n",
                   cqe->fast_path_cqe.type,
                   cqe->fast_path_cqe.error_type_flags,
                   cqe->fast_path_cqe.status_flags,
                   cqe->fast_path_cqe.rss_hash_result,
                   cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);

                /* is this a slowpath msg? */
                if (unlikely(cqe->fast_path_cqe.type)) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;

                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        pci_dma_sync_single_for_device(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                                       pad + RX_COPY_THRESH,
                                                       PCI_DMA_FROMDEVICE);
                        prefetch(skb);
                        prefetch(((char *)(skb)) + 128);

                        /* is this an error packet? */
                        if (unlikely(cqe->fast_path_cqe.error_type_flags &
                                                        ETH_RX_ERROR_FALGS)) {
                        /* do we sometimes forward error packets anyway? */
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR flags(%u) Rx packet(%u)\n",
                                   cqe->fast_path_cqe.error_type_flags,
                                   sw_comp_cons);
                                /* TBD make sure MC counts this as a drop */
                                goto reuse_rx;
                        }

                        /* Since we don't have a jumbo ring
                         * copy small packets if mtu > 1500
                         */
                        if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                            (len <= RX_COPY_THRESH)) {
                                struct sk_buff *new_skb;

                                new_skb = netdev_alloc_skb(bp->dev,
                                                           len + pad);
                                if (new_skb == NULL) {
                                        DP(NETIF_MSG_RX_ERR,
                                           "ERROR packet dropped "
                                           "because of alloc failure\n");
                                        /* TBD count this as a drop? */
                                        goto reuse_rx;
                                }

                                /* aligned copy */
                                skb_copy_from_linear_data_offset(skb, pad,
                                                    new_skb->data + pad, len);
                                skb_reserve(new_skb, pad);
                                skb_put(new_skb, len);

                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

                                skb = new_skb;

                        } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
                                pci_unmap_single(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                                 bp->rx_buf_use_size,
                                                 PCI_DMA_FROMDEVICE);
                                skb_reserve(skb, pad);
                                skb_put(skb, len);

                        } else {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR packet dropped because "
                                   "of alloc failure\n");
reuse_rx:
                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
                                goto next_rx;
                        }

                        skb->protocol = eth_type_trans(skb, bp->dev);

                        skb->ip_summed = CHECKSUM_NONE;
                        if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;

                        /* TBD do we pass bad csum packets in promisc */
                }

#ifdef BCM_VLAN
                if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
                                & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
                    && (bp->vlgrp != NULL))
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
                else
#endif
                netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;

next_rx:
                rx_buf->skb = NULL;

                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
                rx_pkt++;

                if (rx_pkt == budget)
                        break;
        } /* while */

        fp->rx_bd_cons = bd_cons;
        fp->rx_bd_prod = bd_prod;
        fp->rx_comp_cons = sw_comp_cons;
        fp->rx_comp_prod = sw_comp_prod;

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);

        mmiowb(); /* keep prod updates ordered */

        fp->rx_pkt += rx_pkt;
        fp->rx_calls++;

        return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;
        struct net_device *dev = bp->dev;
        int index = fp->index;

        DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
        bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        prefetch(fp->rx_cons_sb);
        prefetch(fp->tx_cons_sb);
        prefetch(&fp->status_blk->c_status_block.status_block_index);
        prefetch(&fp->status_blk->u_status_block.status_block_index);

        netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
        return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);
        u16 status = bnx2x_ack_int(bp);

        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }

        DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* Return here if interrupt is shared and is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        if (status & 0x2) {
                struct bnx2x_fastpath *fp = &bp->fp[0];

                prefetch(fp->rx_cons_sb);
                prefetch(fp->tx_cons_sb);
                prefetch(&fp->status_blk->c_status_block.status_block_index);
                prefetch(&fp->status_blk->u_status_block.status_block_index);

                netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

                status &= ~0x2;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status & 0x1)) {

                schedule_work(&bp->sp_task);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
           status);

        return IRQ_HANDLED;
}
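
/* Status word decoding in the INT#A handler above: bit 1 signals work on
 * fastpath queue 0 and is handed to NAPI, bit 0 signals a slowpath event
 * and wakes sp_task; whatever is left over is reported as unknown.
 */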

/* end of fast path */

/* PHY/MAC */

/*
 * General service functions
 */

static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
{
        int port = bp->port;

        NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
               ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
                SHARED_HW_CFG_LED_MODE_SHIFT));
        NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);

        /* Set blinking rate to ~15.9Hz */
        NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
               LED_BLINK_RATE_VAL);
        NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);

        /* On Ax chip versions for speeds less than 10G
           LED scheme is different */
        if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
                NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
                NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
                NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
        }
}

static void bnx2x_leds_unset(struct bnx2x *bp)
{
        int port = bp->port;

        NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
        NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
}

static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
        u32 val = REG_RD(bp, reg);

        val |= bits;
        REG_WR(bp, reg, val);
        return val;
}

static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
{
        u32 val = REG_RD(bp, reg);

        val &= ~bits;
        REG_WR(bp, reg, val);
        return val;
}

static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
{
        int rc;
        u32 tmp, i;
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

/*      DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  val 0x%08x\n",
           bp->phy_addr, reg, val); */

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        tmp = ((bp->phy_addr << 21) | (reg << 16) |
               (val & EMAC_MDIO_COMM_DATA) |
               EMAC_MDIO_COMM_COMMAND_WRITE_22 |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");

                rc = -EBUSY;
        } else {
                rc = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
        }

        return rc;
}
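
/* Clause-22 MDIO frame layout used by the helpers here: the PHY address
 * sits in bits 25:21 of EMAC_REG_EMAC_MDIO_COMM, the register in 20:16 and
 * the data in 15:0; START_BUSY is then polled (50 x 10us) until the EMAC
 * completes the cycle.  Auto-polling is paused around the access so the
 * EMAC does not drive MDIO on its own.
 */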

static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 val, i;
        int rc;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        val = ((bp->phy_addr << 21) | (reg << 16) |
               EMAC_MDIO_COMM_COMMAND_READ_22 |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        val &= EMAC_MDIO_COMM_DATA;
                        break;
                }
        }

        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");

                *ret_val = 0x0;
                rc = -EBUSY;
        } else {
                *ret_val = val;
                rc = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
        }

/*      DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  ret_val 0x%08x\n",
           bp->phy_addr, reg, *ret_val); */

        return rc;
}

static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
{
        int rc = 0;
        u32 tmp, i;
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        /* set clause 45 mode */
        tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
        tmp |= EMAC_MDIO_MODE_CLAUSE_45;
        EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);

        /* address */
        tmp = ((bp->phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");

                rc = -EBUSY;
        } else {
                /* data */
                tmp = ((bp->phy_addr << 21) | (reg << 16) | val |
                       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
                       EMAC_MDIO_COMM_START_BUSY);
                EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

                for (i = 0; i < 50; i++) {
                        udelay(10);

                        tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                                udelay(5);
                                break;
                        }
                }

                if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                        BNX2X_ERR("write phy register failed\n");

                        rc = -EBUSY;
                }
        }

        /* unset clause 45 mode */
        tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
        tmp &= ~EMAC_MDIO_MODE_CLAUSE_45;
        EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
        }

        return rc;
}

static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr,
                             u32 *ret_val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 val, i;
        int rc = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        /* set clause 45 mode */
        val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
        val |= EMAC_MDIO_MODE_CLAUSE_45;
        EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);

        /* address */
        val = ((bp->phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");

                *ret_val = 0;
                rc = -EBUSY;
        } else {
                /* data */
                val = ((bp->phy_addr << 21) | (reg << 16) |
                       EMAC_MDIO_COMM_COMMAND_READ_45 |
                       EMAC_MDIO_COMM_START_BUSY);
                EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

                for (i = 0; i < 50; i++) {
                        udelay(10);

                        val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                                val &= EMAC_MDIO_COMM_DATA;
                                break;
                        }
                }

                if (val & EMAC_MDIO_COMM_START_BUSY) {
                        BNX2X_ERR("read phy register failed\n");

                        val = 0;
                        rc = -EBUSY;
                }

                *ret_val = val;
        }

        /* unset clause 45 mode */
        val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
        val &= ~EMAC_MDIO_MODE_CLAUSE_45;
        EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
        }

        return rc;
}

static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
{
        int i;
        u32 rd_val;

        might_sleep();
        for (i = 0; i < 10; i++) {
                bnx2x_mdio45_write(bp, reg, addr, val);
                msleep(5);
                bnx2x_mdio45_read(bp, reg, addr, &rd_val);
                /* if the read value is not the same as the value we wrote,
                   we should write it again */
                if (rd_val == val)
                        return 0;
        }
        BNX2X_ERR("MDIO write in CL45 failed\n");
        return -EBUSY;
}
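
/* A verifying write: up to 10 write/read-back attempts, 5 ms apart, before
 * giving up with -EBUSY -- a guard against PHYs that occasionally drop
 * clause-45 writes.
 */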

/*
 * link management
 */

static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
{
        u32 ld_pause;   /* local driver */
        u32 lp_pause;   /* link partner */
        u32 pause_result;

        bp->flow_ctrl = 0;

        /* resolve from gp_status in case of AN complete and not SGMII */
1486         if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
1487             (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1488             (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
1489             (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
1490
1491                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
1492                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1493                                   &ld_pause);
1494                 bnx2x_mdio22_read(bp,
1495                         MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1496                                   &lp_pause);
1497                 pause_result = (ld_pause &
1498                                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1499                 pause_result |= (lp_pause &
1500                                  MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1501                 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
1502
1503                 switch (pause_result) {                 /* ASYM P ASYM P */
1504                 case 0xb:                               /*   1  0   1  1 */
1505                         bp->flow_ctrl = FLOW_CTRL_TX;
1506                         break;
1507
1508                 case 0xe:                               /*   1  1   1  0 */
1509                         bp->flow_ctrl = FLOW_CTRL_RX;
1510                         break;
1511
1512                 case 0x5:                               /*   0  1   0  1 */
1513                 case 0x7:                               /*   0  1   1  1 */
1514                 case 0xd:                               /*   1  1   0  1 */
1515                 case 0xf:                               /*   1  1   1  1 */
1516                         bp->flow_ctrl = FLOW_CTRL_BOTH;
1517                         break;
1518
1519                 default:
1520                         break;
1521                 }
1522
1523         } else { /* forced mode */
1524                 switch (bp->req_flow_ctrl) {
1525                 case FLOW_CTRL_AUTO:
1526                         if (bp->dev->mtu <= 4500)
1527                                 bp->flow_ctrl = FLOW_CTRL_BOTH;
1528                         else
1529                                 bp->flow_ctrl = FLOW_CTRL_TX;
1530                         break;
1531
1532                 case FLOW_CTRL_TX:
1533                 case FLOW_CTRL_RX:
1534                 case FLOW_CTRL_BOTH:
1535                         bp->flow_ctrl = bp->req_flow_ctrl;
1536                         break;
1537
1538                 case FLOW_CTRL_NONE:
1539                 default:
1540                         break;
1541                 }
1542         }
1543         DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
1544 }
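/* Editor's note: the nibble switched on above packs the local PAUSE and
 * ASM_DIR advertisement bits into bits [3:2] and the link partner's into
 * bits [1:0]; the cases then follow the IEEE 802.3 Annex 28B pause
 * resolution.  For example, local advertising PAUSE|ASM_DIR against a
 * partner advertising ASM_DIR only gives (0x3 << 2) | 0x2 = 0xe, i.e.
 * FLOW_CTRL_RX.  Building the nibble generically (illustration only):
 */
#if 0
static u32 pause_result_nibble(u32 local_adv2, u32 partner_adv2)
{
        /* each argument: bit 1 = ASM_DIR, bit 0 = PAUSE */
        return ((local_adv2 & 3) << 2) | (partner_adv2 & 3);
}
#endif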
1545
1546 static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
1547 {
1548         bp->link_status = 0;
1549
1550         if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
1551                 DP(NETIF_MSG_LINK, "link up\n");
1552
1553                 bp->link_up = 1;
1554                 bp->link_status |= LINK_STATUS_LINK_UP;
1555
1556                 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
1557                         bp->duplex = DUPLEX_FULL;
1558                 else
1559                         bp->duplex = DUPLEX_HALF;
1560
1561                 bnx2x_flow_ctrl_resolve(bp, gp_status);
1562
1563                 switch (gp_status & GP_STATUS_SPEED_MASK) {
1564                 case GP_STATUS_10M:
1565                         bp->line_speed = SPEED_10;
1566                         if (bp->duplex == DUPLEX_FULL)
1567                                 bp->link_status |= LINK_10TFD;
1568                         else
1569                                 bp->link_status |= LINK_10THD;
1570                         break;
1571
1572                 case GP_STATUS_100M:
1573                         bp->line_speed = SPEED_100;
1574                         if (bp->duplex == DUPLEX_FULL)
1575                                 bp->link_status |= LINK_100TXFD;
1576                         else
1577                                 bp->link_status |= LINK_100TXHD;
1578                         break;
1579
1580                 case GP_STATUS_1G:
1581                 case GP_STATUS_1G_KX:
1582                         bp->line_speed = SPEED_1000;
1583                         if (bp->duplex == DUPLEX_FULL)
1584                                 bp->link_status |= LINK_1000TFD;
1585                         else
1586                                 bp->link_status |= LINK_1000THD;
1587                         break;
1588
1589                 case GP_STATUS_2_5G:
1590                         bp->line_speed = SPEED_2500;
1591                         if (bp->duplex == DUPLEX_FULL)
1592                                 bp->link_status |= LINK_2500TFD;
1593                         else
1594                                 bp->link_status |= LINK_2500THD;
1595                         break;
1596
1597                 case GP_STATUS_5G:
1598                 case GP_STATUS_6G:
1599                         BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
1600                                   gp_status);
1601                         break;
1602
1603                 case GP_STATUS_10G_KX4:
1604                 case GP_STATUS_10G_HIG:
1605                 case GP_STATUS_10G_CX4:
1606                         bp->line_speed = SPEED_10000;
1607                         bp->link_status |= LINK_10GTFD;
1608                         break;
1609
1610                 case GP_STATUS_12G_HIG:
1611                         bp->line_speed = SPEED_12000;
1612                         bp->link_status |= LINK_12GTFD;
1613                         break;
1614
1615                 case GP_STATUS_12_5G:
1616                         bp->line_speed = SPEED_12500;
1617                         bp->link_status |= LINK_12_5GTFD;
1618                         break;
1619
1620                 case GP_STATUS_13G:
1621                         bp->line_speed = SPEED_13000;
1622                         bp->link_status |= LINK_13GTFD;
1623                         break;
1624
1625                 case GP_STATUS_15G:
1626                         bp->line_speed = SPEED_15000;
1627                         bp->link_status |= LINK_15GTFD;
1628                         break;
1629
1630                 case GP_STATUS_16G:
1631                         bp->line_speed = SPEED_16000;
1632                         bp->link_status |= LINK_16GTFD;
1633                         break;
1634
1635                 default:
1636                         BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
1637                                   gp_status);
1638                         break;
1639                 }
1640
1641                 bp->link_status |= LINK_STATUS_SERDES_LINK;
1642
1643                 if (bp->req_autoneg & AUTONEG_SPEED) {
1644                         bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
1645
1646                         if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
1647                                 bp->link_status |=
1648                                         LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1649
1650                         if (bp->autoneg & AUTONEG_PARALLEL)
1651                                 bp->link_status |=
1652                                         LINK_STATUS_PARALLEL_DETECTION_USED;
1653                 }
1654
1655                 if (bp->flow_ctrl & FLOW_CTRL_TX)
1656                        bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1657
1658                 if (bp->flow_ctrl & FLOW_CTRL_RX)
1659                        bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1660
1661         } else { /* link_down */
1662                 DP(NETIF_MSG_LINK, "link down\n");
1663
1664                 bp->link_up = 0;
1665
1666                 bp->line_speed = 0;
1667                 bp->duplex = DUPLEX_FULL;
1668                 bp->flow_ctrl = 0;
1669         }
1670
1671         DP(NETIF_MSG_LINK, "gp_status 0x%x  link_up %d\n"
1672            DP_LEVEL "  line_speed %d  duplex %d  flow_ctrl 0x%x"
1673                     "  link_status 0x%x\n",
1674            gp_status, bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl,
1675            bp->link_status);
1676 }
1677
1678 static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
1679 {
1680         int port = bp->port;
1681
1682         /* first reset all status
1683          * we assume only one line will change at a time */
1684         bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1685                        (NIG_XGXS0_LINK_STATUS |
1686                         NIG_SERDES0_LINK_STATUS |
1687                         NIG_STATUS_INTERRUPT_XGXS0_LINK10G));
1688         if (bp->link_up) {
1689                 if (is_10g) {
1690                         /* Disable the 10G link interrupt
1691                          * by writing 1 to the status register
1692                          */
1693                         DP(NETIF_MSG_LINK, "10G XGXS link up\n");
1694                         bnx2x_bits_en(bp,
1695                                       NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1696                                       NIG_STATUS_INTERRUPT_XGXS0_LINK10G);
1697
1698                 } else if (bp->phy_flags & PHY_XGXS_FLAG) {
1699                         /* Disable the link interrupt
1700                          * by writing 1 to the relevant lane
1701                          * in the status register
1702                          */
1703                         DP(NETIF_MSG_LINK, "1G XGXS link up\n");
1704                         bnx2x_bits_en(bp,
1705                                       NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1706                                       ((1 << bp->ser_lane) <<
1707                                        NIG_XGXS0_LINK_STATUS_SIZE));
1708
1709                 } else { /* SerDes */
1710                         DP(NETIF_MSG_LINK, "SerDes link up\n");
1711                         /* Disable the link interrupt
1712                          * by writing 1 to the status register
1713                          */
1714                         bnx2x_bits_en(bp,
1715                                       NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1716                                       NIG_SERDES0_LINK_STATUS);
1717                 }
1718
1719         } else { /* link_down */
1720         }
1721 }
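/* Editor's sketch: bnx2x_bits_en()/bnx2x_bits_dis(), used above to ack the
 * latched NIG status bits, are plain read-modify-write helpers over GRC
 * registers; assuming only REG_RD()/REG_WR() semantics they reduce to
 * something like this (illustration only):
 */
#if 0
static void bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
        u32 val = REG_RD(bp, reg);

        REG_WR(bp, reg, val | bits);
}

static void bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
{
        u32 val = REG_RD(bp, reg);

        REG_WR(bp, reg, val & ~bits);
}
#endif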
1722
1723 static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
1724 {
1725         u32 ext_phy_type;
1726         u32 ext_phy_addr;
1727         u32 local_phy;
1728         u32 val = 0;
1729         u32 rx_sd, pcs_status;
1730
1731         if (bp->phy_flags & PHY_XGXS_FLAG) {
1732                 local_phy = bp->phy_addr;
1733                 ext_phy_addr = ((bp->ext_phy_config &
1734                                  PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1735                                 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1736                 bp->phy_addr = (u8)ext_phy_addr;
1737
1738                 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
1739                 switch (ext_phy_type) {
1740                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1741                         DP(NETIF_MSG_LINK, "XGXS Direct\n");
1742                         val = 1;
1743                         break;
1744
1745                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1746                         DP(NETIF_MSG_LINK, "XGXS 8705\n");
1747                         bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
1748                                           EXT_PHY_OPT_LASI_STATUS, &val);
1749                         DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);
1750
1751                         bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
1752                                           EXT_PHY_OPT_LASI_STATUS, &val);
1753                         DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);
1754
1755                         bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1756                                           EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
1757                         val = (rx_sd & 0x1);
1758                         break;
1759
1760                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
1761                         DP(NETIF_MSG_LINK, "XGXS 8706\n");
1762                         bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1763                                           EXT_PHY_OPT_LASI_STATUS, &val);
1764                         DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);
1765
1766                         bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1767                                           EXT_PHY_OPT_LASI_STATUS, &val);
1768                         DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);
1769
1770                         bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1771                                           EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
1772                         bnx2x_mdio45_read(bp, EXT_PHY_OPT_PCS_DEVAD,
1773                                          EXT_PHY_OPT_PCS_STATUS, &pcs_status);
1774                         DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
1775                            "  pcs_status 0x%x\n", rx_sd, pcs_status);
1776                         /* link is up if both bit 0 of pmd_rx and
1777                          * bit 0 of pcs_status are set
1778                          */
1779                         val = (rx_sd & pcs_status);
1780                         break;
1781
1782                 default:
1783                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
1784                            bp->ext_phy_config);
1785                         val = 0;
1786                         break;
1787                 }
1788                 bp->phy_addr = local_phy;
1789
1790         } else { /* SerDes */
1791                 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
1792                 switch (ext_phy_type) {
1793                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
1794                         DP(NETIF_MSG_LINK, "SerDes Direct\n");
1795                         val = 1;
1796                         break;
1797
1798                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
1799                         DP(NETIF_MSG_LINK, "SerDes 5482\n");
1800                         val = 1;
1801                         break;
1802
1803                 default:
1804                         DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
1805                            bp->ext_phy_config);
1806                         val = 0;
1807                         break;
1808                 }
1809         }
1810
1811         return val;
1812 }
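/* Editor's note: the LASI status register is read twice in each case above
 * because such status bits are typically latched -- the first read returns
 * (and clears) the latched value, the second reflects the current state.
 * A generic sketch of sampling a latched register (illustration only):
 */
#if 0
static u32 mdio45_read_latched(struct bnx2x *bp, u32 devad, u32 reg)
{
        u32 val;

        bnx2x_mdio45_read(bp, devad, reg, &val);        /* clear the latch */
        bnx2x_mdio45_read(bp, devad, reg, &val);        /* current state */
        return val;
}
#endif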
1813
1814 static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
1815 {
1816         int port = bp->port;
1817         u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
1818                                NIG_REG_INGRESS_BMAC0_MEM;
1819         u32 wb_write[2];
1820         u32 val;
1821
1822         DP(NETIF_MSG_LINK, "enabling BigMAC\n");
1823         /* reset and unreset the BigMac */
1824         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1825                (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1826         msleep(5);
1827         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1828                (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1829
1830         /* enable access for bmac registers */
1831         NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
1832
1833         /* XGXS control */
1834         wb_write[0] = 0x3c;
1835         wb_write[1] = 0;
1836         REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
1837                     wb_write, 2);
1838
1839         /* tx MAC SA */
1840         wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
1841                        (bp->dev->dev_addr[3] << 16) |
1842                        (bp->dev->dev_addr[4] << 8) |
1843                         bp->dev->dev_addr[5]);
1844         wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
1845                         bp->dev->dev_addr[1]);
1846         REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
1847                     wb_write, 2);
1848
1849         /* tx control */
1850         val = 0xc0;
1851         if (bp->flow_ctrl & FLOW_CTRL_TX)
1852                 val |= 0x800000;
1853         wb_write[0] = val;
1854         wb_write[1] = 0;
1855         REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);
1856
1857         /* set tx mtu */
1858         wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
1859         wb_write[1] = 0;
1860         REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);
1861
1862         /* mac control */
1863         val = 0x3;
1864         if (is_lb) {
1865                 val |= 0x4;
1866                 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
1867         }
1868         wb_write[0] = val;
1869         wb_write[1] = 0;
1870         REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
1871                     wb_write, 2);
1872
1873         /* rx control: set to not strip CRC */
1874         val = 0x14;
1875         if (bp->flow_ctrl & FLOW_CTRL_RX)
1876                 val |= 0x20;
1877         wb_write[0] = val;
1878         wb_write[1] = 0;
1879         REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);
1880
1881         /* set rx mtu */
1882         wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1883         wb_write[1] = 0;
1884         REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);
1885
1886         /* set cnt max size */
1887         wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
1888         wb_write[1] = 0;
1889         REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
1890                     wb_write, 2);
1891
1892         /* configure safc */
1893         wb_write[0] = 0x1000200;
1894         wb_write[1] = 0;
1895         REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
1896                     wb_write, 2);
1897
1898         /* fix for emulation */
1899         if (CHIP_REV(bp) == CHIP_REV_EMUL) {
1900                 wb_write[0] = 0xf000;
1901                 wb_write[1] = 0;
1902                 REG_WR_DMAE(bp,
1903                             bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
1904                             wb_write, 2);
1905         }
1906
1907         /* reset old bmac stats */
1908         memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
1909
1910         NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);
1911
1912         /* select XGXS */
1913         NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
1914         NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
1915
1916         /* disable the NIG in/out to the emac */
1917         NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
1918         NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
1919         NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
1920
1921         /* enable the NIG in/out to the bmac */
1922         NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
1923
1924         NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
1925         val = 0;
1926         if (bp->flow_ctrl & FLOW_CTRL_TX)
1927                 val = 1;
1928         NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
1929         NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
1930
1931         bp->phy_flags |= PHY_BMAC_FLAG;
1932
1933         bp->stats_state = STATS_STATE_ENABLE;
1934 }
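/* Editor's note: the BigMAC takes the 6-byte station address split over
 * two 32-bit words as written above; for a hypothetical address
 * 00:10:18:aa:bb:cc that is wb_write[0] = 0x18aabbcc (bytes 2..5) and
 * wb_write[1] = 0x00000010 (bytes 0..1).  Packing sketch (illustration
 * only):
 */
#if 0
static void mac_to_wb(const u8 *mac, u32 wb[2])
{
        wb[0] = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
        wb[1] = (mac[0] << 8) | mac[1];
}
#endif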
1935
1936 static void bnx2x_emac_enable(struct bnx2x *bp)
1937 {
1938         int port = bp->port;
1939         u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1940         u32 val;
1941         int timeout;
1942
1943         DP(NETIF_MSG_LINK, "enabling EMAC\n");
1944         /* reset and unreset the emac core */
1945         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1946                (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
1947         msleep(5);
1948         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1949                (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
1950
1951         /* enable emac and not bmac */
1952         NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
1953
1954         /* for Palladium */
1955         if (CHIP_REV(bp) == CHIP_REV_EMUL) {
1956                 /* Use lane 1 (of lanes 0-3) */
1957                 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
1958                 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
1959         }
1960         /* for fpga */
1961         else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
1962                 /* Use lane 1 (of lanes 0-3) */
1963                 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
1964                 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
1965         }
1966         /* ASIC */
1967         else {
1968                 if (bp->phy_flags & PHY_XGXS_FLAG) {
1969                         DP(NETIF_MSG_LINK, "XGXS\n");
1970                         /* select the master lanes (out of 0-3) */
1971                         NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
1972                                bp->ser_lane);
1973                         /* select XGXS */
1974                         NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
1975
1976                 } else { /* SerDes */
1977                         DP(NETIF_MSG_LINK, "SerDes\n");
1978                         /* select SerDes */
1979                         NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
1980                 }
1981         }
1982
1983         /* enable emac */
1984         NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
1985
1986         /* init emac - use read-modify-write */
1987         /* self clear reset */
1988         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1989         EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
1990
1991         timeout = 200;
1992         do {
1993                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1994                 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
1995                 if (!timeout) {
1996                         BNX2X_ERR("EMAC timeout!\n");
1997                         break;
1998                 }
1999                 timeout--;
2000         } while (val & EMAC_MODE_RESET);
2001
2002         /* reset tx part */
2003         EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2004
2005         timeout = 200;
2006         do {
2007                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2008                 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2009                 if (!timeout) {
2010                         BNX2X_ERR("EMAC timeout!\n");
2011                         break;
2012                 }
2013                 timeout--;
2014         } while (val & EMAC_TX_MODE_RESET);
2015
2016         if (CHIP_REV_IS_SLOW(bp)) {
2017                 /* config GMII mode */
2018                 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2019                 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2020
2021         } else { /* ASIC */
2022                 /* pause enable/disable */
2023                 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2024                                EMAC_RX_MODE_FLOW_EN);
2025                 if (bp->flow_ctrl & FLOW_CTRL_RX)
2026                         bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2027                                       EMAC_RX_MODE_FLOW_EN);
2028
2029                 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2030                                EMAC_TX_MODE_EXT_PAUSE_EN);
2031                 if (bp->flow_ctrl & FLOW_CTRL_TX)
2032                         bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2033                                       EMAC_TX_MODE_EXT_PAUSE_EN);
2034         }
2035
2036         /* KEEP_VLAN_TAG, promiscuous */
2037         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2038         val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2039         EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2040
2041         /* identify magic packets */
2042         val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2043         EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2044
2045         /* enable emac for jumbo packets */
2046         EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2047                 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2048                  (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2049
2050         /* strip CRC */
2051         NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2052
2053         val = ((bp->dev->dev_addr[0] << 8) |
2054                 bp->dev->dev_addr[1]);
2055         EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2056
2057         val = ((bp->dev->dev_addr[2] << 24) |
2058                (bp->dev->dev_addr[3] << 16) |
2059                (bp->dev->dev_addr[4] << 8) |
2060                 bp->dev->dev_addr[5]);
2061         EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2062
2063         /* disable the NIG in/out to the bmac */
2064         NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2065         NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2066         NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2067
2068         /* enable the NIG in/out to the emac */
2069         NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2070         val = 0;
2071         if (bp->flow_ctrl & FLOW_CTRL_TX)
2072                 val = 1;
2073         NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2074         NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2075
2076         if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2077                 /* take the BigMac out of reset */
2078                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2079                        (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2080
2081                 /* enable access for bmac registers */
2082                 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2083         }
2084
2085         bp->phy_flags |= PHY_EMAC_FLAG;
2086
2087         bp->stats_state = STATS_STATE_ENABLE;
2088 }
2089
2090 static void bnx2x_emac_program(struct bnx2x *bp)
2091 {
2092         u16 mode = 0;
2093         int port = bp->port;
2094
2095         DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2096         bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2097                        (EMAC_MODE_25G_MODE |
2098                         EMAC_MODE_PORT_MII_10M |
2099                         EMAC_MODE_HALF_DUPLEX));
2100         switch (bp->line_speed) {
2101         case SPEED_10:
2102                 mode |= EMAC_MODE_PORT_MII_10M;
2103                 break;
2104
2105         case SPEED_100:
2106                 mode |= EMAC_MODE_PORT_MII;
2107                 break;
2108
2109         case SPEED_1000:
2110                 mode |= EMAC_MODE_PORT_GMII;
2111                 break;
2112
2113         case SPEED_2500:
2114                 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2115                 break;
2116
2117         default:
2118                 /* 10G not valid for EMAC */
2119                 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2120                 break;
2121         }
2122
2123         if (bp->duplex == DUPLEX_HALF)
2124                 mode |= EMAC_MODE_HALF_DUPLEX;
2125         bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2126                       mode);
2127
2128         bnx2x_leds_set(bp, bp->line_speed);
2129 }
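/* Editor's note: the EMAC covers 10M through 2.5G only -- MII_10M for 10M,
 * MII for 100M, GMII for 1G, GMII plus 25G_MODE for 2.5G -- while 10G and
 * above is the BigMAC's job (see bnx2x_bmac_enable).  The mapping as a
 * helper (illustration only):
 */
#if 0
static u16 emac_mode_bits(int line_speed)
{
        switch (line_speed) {
        case SPEED_10:   return EMAC_MODE_PORT_MII_10M;
        case SPEED_100:  return EMAC_MODE_PORT_MII;
        case SPEED_1000: return EMAC_MODE_PORT_GMII;
        case SPEED_2500: return EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII;
        default:         return 0;      /* not valid for the EMAC */
        }
}
#endif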
2130
2131 static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2132 {
2133         u32 lp_up2;
2134         u32 tx_driver;
2135
2136         /* read precomp */
2137         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2138         bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2139
2140         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2141         bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2142
2143         /* bits [10:7] of lp_up2, repositioned at [15:12] */
2144         lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2145                    MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2146                   MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2147
2148         if ((lp_up2 != 0) &&
2149             (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2150                 /* replace tx_driver bits [15:12] */
2151                 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2152                 tx_driver |= lp_up2;
2153                 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
2154         }
2155 }
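/* Editor's note: the link partner's preemphasis request travels in LP_UP2
 * bits [10:7] and is re-homed into TX_DRIVER bits [15:12], which under the
 * masks is just a left shift by 5.  Assuming shift constants of 7 and 12
 * (as the comment above implies), the move is (illustration only):
 */
#if 0
static u32 lp_up2_to_tx_driver(u32 lp_up2)
{
        /* extract bits [10:7], reposition at [15:12] */
        return ((lp_up2 >> 7) & 0xf) << 12;
}
#endif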
2156
2157 static void bnx2x_pbf_update(struct bnx2x *bp)
2158 {
2159         int port = bp->port;
2160         u32 init_crd, crd;
2161         u32 count = 1000;
2162         u32 pause = 0;
2163
2165         /* disable port */
2166         REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2167
2168         /* wait for init credit */
2169         init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2170         crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2171         DP(NETIF_MSG_LINK, "init_crd 0x%x  crd 0x%x\n", init_crd, crd);
2172
2173         while ((init_crd != crd) && count) {
2174                 msleep(5);
2175
2176                 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2177                 count--;
2178         }
2179         crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2180         if (init_crd != crd)
2181                 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2182
2183         if (bp->flow_ctrl & FLOW_CTRL_RX)
2184                 pause = 1;
2185         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2186         if (pause) {
2187                 /* update threshold */
2188                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2189                 /* update init credit */
2190                 init_crd = 778;         /* (800-18-4) */
2191
2192         } else {
2193                 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2194
2195                 /* update threshold */
2196                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2197                 /* update init credit */
2198                 switch (bp->line_speed) {
2199                 case SPEED_10:
2200                 case SPEED_100:
2201                 case SPEED_1000:
2202                         init_crd = thresh + 55 - 22;
2203                         break;
2204
2205                 case SPEED_2500:
2206                         init_crd = thresh + 138 - 22;
2207                         break;
2208
2209                 case SPEED_10000:
2210                         init_crd = thresh + 553 - 22;
2211                         break;
2212
2213                 default:
2214                         BNX2X_ERR("Invalid line_speed 0x%x\n",
2215                                   bp->line_speed);
2216                         break;
2217                 }
2218         }
2219         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2220         DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2221            bp->line_speed, init_crd);
2222
2223         /* probe the credit changes */
2224         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2225         msleep(5);
2226         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2227
2228         /* enable port */
2229         REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2230 }
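/* Editor's note: a worked example of the credit math above, assuming the
 * jumbo MTU plus overhead comes to, say, 9700 bytes (the real constants
 * live in bnx2x.h): thresh = 9700/16 = 606 16-byte units, so at 10G
 * init_crd = 606 + 553 - 22 = 1137.  As a sketch (illustration only):
 */
#if 0
static u32 pbf_init_crd(u32 max_frame_bytes, int line_speed)
{
        u32 thresh = max_frame_bytes / 16;      /* 16-byte units */

        switch (line_speed) {
        case SPEED_10000: return thresh + 553 - 22;
        case SPEED_2500:  return thresh + 138 - 22;
        default:          return thresh + 55 - 22;
        }
}
#endif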
2231
2232 static void bnx2x_update_mng(struct bnx2x *bp)
2233 {
2234         if (!nomcp)
2235                 SHMEM_WR(bp, drv_fw_mb[bp->port].link_status,
2236                          bp->link_status);
2237 }
2238
2239 static void bnx2x_link_report(struct bnx2x *bp)
2240 {
2241         if (bp->link_up) {
2242                 netif_carrier_on(bp->dev);
2243                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2244
2245                 printk("%d Mbps ", bp->line_speed);
2246
2247                 if (bp->duplex == DUPLEX_FULL)
2248                         printk("full duplex");
2249                 else
2250                         printk("half duplex");
2251
2252                 if (bp->flow_ctrl) {
2253                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
2254                                 printk(", receive ");
2255                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
2256                                         printk("& transmit ");
2257                         } else {
2258                                 printk(", transmit ");
2259                         }
2260                         printk("flow control ON");
2261                 }
2262                 printk("\n");
2263
2264         } else { /* link_down */
2265                 netif_carrier_off(bp->dev);
2266                 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2267         }
2268 }
2269
2270 static void bnx2x_link_up(struct bnx2x *bp)
2271 {
2272         int port = bp->port;
2273
2274         /* PBF - link up */
2275         bnx2x_pbf_update(bp);
2276
2277         /* disable drain */
2278         NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2279
2280         /* update shared memory */
2281         bnx2x_update_mng(bp);
2282
2283         /* indicate link up */
2284         bnx2x_link_report(bp);
2285 }
2286
2287 static void bnx2x_link_down(struct bnx2x *bp)
2288 {
2289         int port = bp->port;
2290
2291         /* notify stats */
2292         if (bp->stats_state != STATS_STATE_DISABLE) {
2293                 bp->stats_state = STATS_STATE_STOP;
2294                 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2295         }
2296
2297         /* indicate link down */
2298         bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2299
2300         /* reset BigMac */
2301         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2302                (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2303
2304         /* ignore drain flag interrupt */
2305         /* activate nig drain */
2306         NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2307
2308         /* update shared memory */
2309         bnx2x_update_mng(bp);
2310
2311         /* indicate link down */
2312         bnx2x_link_report(bp);
2313 }
2314
2315 static void bnx2x_init_mac_stats(struct bnx2x *bp);
2316
2317 /* This function is called upon link interrupt */
2318 static void bnx2x_link_update(struct bnx2x *bp)
2319 {
2320         u32 gp_status;
2321         int port = bp->port;
2322         int i;
2323         int link_10g;
2324
2325         DP(NETIF_MSG_LINK, "port %x, is xgxs %x, stat_mask 0x%x,"
2326            " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
2327            " 10G %x, XGXS_LINK %x\n", port, (bp->phy_flags & PHY_XGXS_FLAG),
2328            REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2329            REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2330            REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2331            REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2332            REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2333            REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2334         );
2335
2336         might_sleep();
2337         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2338         /* avoid fast toggling */
2339         for (i = 0 ; i < 10 ; i++) {
2340                 msleep(10);
2341                 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2342                                   &gp_status);
2343         }
2344
2345         bnx2x_link_settings_status(bp, gp_status);
2346
2347         /* anything 10G and over uses the bmac */
2348         link_10g = ((bp->line_speed >= SPEED_10000) &&
2349                     (bp->line_speed <= SPEED_16000));
2350
2351         bnx2x_link_int_ack(bp, link_10g);
2352
2353         /* link is up only if both local phy and external phy are up */
2354         if (bp->link_up && bnx2x_ext_phy_is_link_up(bp)) {
2355                 if (link_10g) {
2356                         bnx2x_bmac_enable(bp, 0);
2357                         bnx2x_leds_set(bp, SPEED_10000);
2358
2359                 } else {
2360                         bnx2x_emac_enable(bp);
2361                         bnx2x_emac_program(bp);
2362
2363                         /* AN complete? */
2364                         if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2365                                 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2366                                         bnx2x_set_sgmii_tx_driver(bp);
2367                         }
2368                 }
2369                 bnx2x_link_up(bp);
2370
2371         } else { /* link down */
2372                 bnx2x_leds_unset(bp);
2373                 bnx2x_link_down(bp);
2374         }
2375
2376         bnx2x_init_mac_stats(bp);
2377 }
2378
2379 /*
2380  * Init service functions
2381  */
2382
2383 static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2384 {
2385         u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2386                                         (bp->phy_addr + bp->ser_lane) : 0;
2387
2388         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2389         bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2390 }
2391
2392 static void bnx2x_set_master_ln(struct bnx2x *bp)
2393 {
2394         u32 new_master_ln;
2395
2396         /* set the master_ln for AN */
2397         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2398         bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2399                           &new_master_ln);
2400         bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2401                            (new_master_ln | bp->ser_lane));
2402 }
2403
2404 static void bnx2x_reset_unicore(struct bnx2x *bp)
2405 {
2406         u32 mii_control;
2407         int i;
2408
2409         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2410         bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2411         /* reset the unicore */
2412         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2413                            (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2414
2415         /* wait for the reset to self clear */
2416         for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2417                 udelay(5);
2418
2419                 /* the reset erased the previous bank value */
2420                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2421                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2422                                   &mii_control);
2423
2424                 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2425                         udelay(5);
2426                         return;
2427                 }
2428         }
2429
2430         BNX2X_ERR("BUG! unicore is still in reset!\n");
2431 }
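/* Editor's sketch: the unicore reset wipes the MDIO bank selection, which
 * is why the bank is re-selected on every poll iteration above.  Ignoring
 * the bank handling, waiting for a self-clearing control bit reduces to
 * (illustration only):
 */
#if 0
static int wait_self_clear(struct bnx2x *bp, u32 reg, u32 bit, int tries)
{
        u32 val;

        while (tries--) {
                udelay(5);
                bnx2x_mdio22_read(bp, reg, &val);
                if (!(val & bit))
                        return 0;
        }
        return -EBUSY;
}
#endif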
2432
2433 static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2434 {
2435         /* Each two bits represent a lane number:
2436            no swap is 0123 => 0x1b, so there is no need to enable the swap */
2437
2438         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2439         if (bp->rx_lane_swap != 0x1b) {
2440                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2441                                    (bp->rx_lane_swap |
2442                                     MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2443                                    MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2444         } else {
2445                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2446         }
2447
2448         if (bp->tx_lane_swap != 0x1b) {
2449                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2450                                    (bp->tx_lane_swap |
2451                                     MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2452         } else {
2453                 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2454         }
2455 }
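/* Editor's note: decoding the lane map used above -- each lane gets a
 * 2-bit field, so the identity map 0-1-2-3 is 0b00011011 = 0x1b and
 * anything else needs the swap enables set.  Assuming lane 0 sits in the
 * most significant pair, extracting lane i looks like (illustration only):
 */
#if 0
static u32 lane_of(u32 swap_map, int i)
{
        return (swap_map >> (2 * (3 - i))) & 3;
}
#endif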
2456
2457 static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2458 {
2459         u32 control2;
2460
2461         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2462         bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2463                           &control2);
2464
2465         if (bp->autoneg & AUTONEG_PARALLEL) {
2466                 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2467         } else {
2468                 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2469         }
2470         bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2471                            control2);
2472
2473         if (bp->phy_flags & PHY_XGXS_FLAG) {
2474                 DP(NETIF_MSG_LINK, "XGXS\n");
2475                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2476
2477                 bnx2x_mdio22_write(bp,
2478                                    MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2479                                MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2480
2481                 bnx2x_mdio22_read(bp,
2482                                  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2483                                   &control2);
2484
2485                 if (bp->autoneg & AUTONEG_PARALLEL) {
2486                         control2 |=
2487                     MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2488                 } else {
2489                         control2 &=
2490                    ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2491                 }
2492                 bnx2x_mdio22_write(bp,
2493                                  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2494                                    control2);
2495         }
2496 }
2497
2498 static void bnx2x_set_autoneg(struct bnx2x *bp)
2499 {
2500         u32 reg_val;
2501
2502         /* CL37 Autoneg */
2503         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2504         bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2505         if ((bp->req_autoneg & AUTONEG_SPEED) &&
2506             (bp->autoneg & AUTONEG_CL37)) {
2507                 /* CL37 Autoneg Enabled */
2508                 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2509         } else {
2510                 /* CL37 Autoneg Disabled */
2511                 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2512                              MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2513         }
2514         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2515
2516         /* Enable/Disable Autodetection */
2517         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2518         bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2519         reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2520
2521         if ((bp->req_autoneg & AUTONEG_SPEED) &&
2522             (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2523                 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2524         } else {
2525                 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2526         }
2527         bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2528
2529         /* Enable TetonII and BAM autoneg */
2530         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2531         bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2532                           &reg_val);
2533         if ((bp->req_autoneg & AUTONEG_SPEED) &&
2534             (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2535                 /* Enable BAM aneg Mode and TetonII aneg Mode */
2536                 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2537                             MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2538         } else {
2539                 /* TetonII and BAM Autoneg Disabled */
2540                 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2541                              MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2542         }
2543         bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2544                            reg_val);
2545
2546         /* Enable Clause 73 Aneg */
2547         if ((bp->req_autoneg & AUTONEG_SPEED) &&
2548             (bp->autoneg & AUTONEG_CL73)) {
2549                 /* Enable BAM Station Manager */
2550                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2551                 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2552                                    (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2553                         MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2554                         MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2555
2556                 /* Merge CL73 and CL37 aneg resolution */
2557                 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2558                                   &reg_val);
2559                 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2560                                    (reg_val |
2561                         MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2562
2563                 /* Set the CL73 AN speed */
2564                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
2565                 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
2566                 /* In SerDes mode we support only 1G.
2567                    In XGXS mode we support 10G KX4
2568                    but currently do not support KR */
2569                 if (bp->phy_flags & PHY_XGXS_FLAG) {
2570                         DP(NETIF_MSG_LINK, "XGXS\n");
2571                         /* 10G KX4 */
2572                         reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2573                 } else {
2574                         DP(NETIF_MSG_LINK, "SerDes\n");
2575                         /* 1000M KX */
2576                         reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2577                 }
2578                 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2579
2580                 /* CL73 Autoneg Enabled */
2581                 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2582         } else {
2583                 /* CL73 Autoneg Disabled */
2584                 reg_val = 0;
2585         }
2586         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2587         bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2588 }
2589
2590 /* program SerDes, forced speed */
2591 static void bnx2x_program_serdes(struct bnx2x *bp)
2592 {
2593         u32 reg_val;
2594
2595         /* program duplex, disable autoneg */
2596         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2597         bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2598         reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2599                      MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2600         if (bp->req_duplex == DUPLEX_FULL)
2601                 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2602         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2603
2604         /* program speed
2605            - needed only if the speed is greater than 1G (2.5G or 10G) */
2606         if (bp->req_line_speed > SPEED_1000) {
2607                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2608                 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2609                 /* clearing the speed value before setting the right speed */
2610                 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2611                 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2612                             MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2613                 if (bp->req_line_speed == SPEED_10000)
2614                         reg_val |=
2615                                 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
2616                 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
2617         }
2618 }
2619
2620 static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
2621 {
2622         u32 val = 0;
2623
2624         /* configure the 48 bits for BAM AN */
2625         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2626
2627         /* set extended capabilities */
2628         if (bp->advertising & ADVERTISED_2500baseT_Full)
2629                 val |= MDIO_OVER_1G_UP1_2_5G;
2630         if (bp->advertising & ADVERTISED_10000baseT_Full)
2631                 val |= MDIO_OVER_1G_UP1_10G;
2632         bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
2633
2634         bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
2635 }
2636
2637 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
2638 {
2639         u32 an_adv;
2640
2641         /* for AN, we are always publishing full duplex */
2642         an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
2643
2644         /* set pause */
2645         switch (bp->pause_mode) {
2646         case PAUSE_SYMMETRIC:
2647                 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
2648                 break;
2649         case PAUSE_ASYMMETRIC:
2650                 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2651                 break;
2652         case PAUSE_BOTH:
2653                 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2654                 break;
2655         case PAUSE_NONE:
2656                 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
2657                 break;
2658         }
2659
2660         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2661         bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
2662 }
2663
2664 static void bnx2x_restart_autoneg(struct bnx2x *bp)
2665 {
2666         if (bp->autoneg & AUTONEG_CL73) {
2667                 /* enable and restart clause 73 aneg */
2668                 u32 an_ctrl;
2669
2670                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2671                 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2672                                   &an_ctrl);
2673                 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2674                                    (an_ctrl |
2675                                     MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
2676                                 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
2677
2678         } else {
2679                 /* Enable and restart BAM/CL37 aneg */
2680                 u32 mii_control;
2681
2682                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2683                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2684                                   &mii_control);
2685                 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2686                                    (mii_control |
2687                                     MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2688                                     MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
2689         }
2690 }
2691
2692 static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
2693 {
2694         u32 control1;
2695
2696         /* in SGMII mode, the unicore is always slave */
2697         MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2698         bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2699                           &control1);
2700         control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
2701         /* set sgmii mode (and not fiber) */
2702         control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
2703                       MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
2704                       MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
2705         bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2706                            control1);
2707
2708         /* if forced speed */
2709         if (!(bp->req_autoneg & AUTONEG_SPEED)) {
2710                 /* set speed, disable autoneg */
2711                 u32 mii_control;
2712
2713                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2714                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2715                                   &mii_control);
2716                 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2717                                MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
2718                                  MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
2719
2720                 switch (bp->req_line_speed) {
2721                 case SPEED_100:
2722                         mii_control |=
2723                                 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
2724                         break;
2725                 case SPEED_1000:
2726                         mii_control |=
2727                                 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
2728                         break;
2729                 case SPEED_10:
2730                         /* there is nothing to set for 10M */
2731                         break;
2732                 default:
2733                         /* invalid speed for SGMII */
2734                         DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
2735                            bp->req_line_speed);
2736                         break;
2737                 }
2738
2739                 /* setting the full duplex */
2740                 if (bp->req_duplex == DUPLEX_FULL)
2741                         mii_control |=
2742                                 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2743                 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2744                                    mii_control);
2745
2746         } else { /* AN mode */
2747                 /* enable and restart AN */
2748                 bnx2x_restart_autoneg(bp);
2749         }
2750 }
2751
2752 static void bnx2x_link_int_enable(struct bnx2x *bp)
2753 {
2754         int port = bp->port;
2755
2756         /* setting the status to report on link up
2757            for either XGXS or SerDes */
2758         bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2759                        (NIG_XGXS0_LINK_STATUS |
2760                         NIG_STATUS_INTERRUPT_XGXS0_LINK10G |
2761                         NIG_SERDES0_LINK_STATUS));
2762
2763         if (bp->phy_flags & PHY_XGXS_FLAG) {
2764                 /* TBD -
2765                  * in force mode (not AN) we can enable just the relevant
2766                  * interrupt
2767                  * Even in AN we might enable only one according to the AN
2768                  * speed mask
2769                  */
2770                 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2771                               (NIG_MASK_XGXS0_LINK_STATUS |
2772                                NIG_MASK_XGXS0_LINK10G));
2773                 DP(NETIF_MSG_LINK, "enable XGXS interrupt\n");
2774
2775         } else { /* SerDes */
2776                 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2777                               NIG_MASK_SERDES0_LINK_STATUS);
2778                 DP(NETIF_MSG_LINK, "enable SerDes interrupt\n");
2779         }
2780 }
2781
2782 static void bnx2x_ext_phy_init(struct bnx2x *bp)
2783 {
2784         int port = bp->port;
2785         u32 ext_phy_type;
2786         u32 ext_phy_addr;
2787         u32 local_phy;
2788
2789         if (bp->phy_flags & PHY_XGXS_FLAG) {
2790                 local_phy = bp->phy_addr;
2791                 ext_phy_addr = ((bp->ext_phy_config &
2792                                  PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2793                                 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2794
2795                 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
2796                 switch (ext_phy_type) {
2797                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2798                         DP(NETIF_MSG_LINK, "XGXS Direct\n");
2799                         break;
2800
2801                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2802                         DP(NETIF_MSG_LINK, "XGXS 8705\n");
2803                         bnx2x_bits_en(bp,
2804                                       NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2805                                       NIG_MASK_MI_INT);
2806                         DP(NETIF_MSG_LINK, "enabled external phy int\n");
2807
2808                         bp->phy_addr = (u8)ext_phy_addr;
2809                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2810                                             EXT_PHY_OPT_PMD_MISC_CNTL,
2811                                             0x8288);
2812                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2813                                             EXT_PHY_OPT_PHY_IDENTIFIER,
2814                                             0x7fbf);
2815                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2816                                             EXT_PHY_OPT_CMU_PLL_BYPASS,
2817                                             0x0100);
2818                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_WIS_DEVAD,
2819                                             EXT_PHY_OPT_LASI_CNTL, 0x1);
2820                         break;
2821
2822                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2823                         DP(NETIF_MSG_LINK, "XGXS 8706\n");
2824                         bnx2x_bits_en(bp,
2825                                       NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2826                                       NIG_MASK_MI_INT);
2827                         DP(NETIF_MSG_LINK, "enabled external phy int\n");
2828
2829                         bp->phy_addr = ext_phy_type;
2830                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2831                                             EXT_PHY_OPT_PMD_DIGITAL_CNT,
2832                                             0x400);
2833                         bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2834                                             EXT_PHY_OPT_LASI_CNTL, 0x1);
2835                         break;
2836
2837                 default:
2838                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2839                            bp->ext_phy_config);
2840                         break;
2841                 }
2842                 bp->phy_addr = local_phy;
2843
2844         } else { /* SerDes */
2845 /*              ext_phy_addr = ((bp->ext_phy_config &
2846                                  PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
2847                                 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
2848 */
2849                 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2850                 switch (ext_phy_type) {
2851                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2852                         DP(NETIF_MSG_LINK, "SerDes Direct\n");
2853                         break;
2854
2855                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2856                         DP(NETIF_MSG_LINK, "SerDes 5482\n");
2857                         bnx2x_bits_en(bp,
2858                                       NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2859                                       NIG_MASK_MI_INT);
2860                         DP(NETIF_MSG_LINK, "enabled external phy int\n");
2861                         break;
2862
2863                 default:
2864                         DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2865                            bp->ext_phy_config);
2866                         break;
2867                 }
2868         }
2869 }
2870
2871 static void bnx2x_ext_phy_reset(struct bnx2x *bp)
2872 {
2873         u32 ext_phy_type;
2874         u32 ext_phy_addr;
2875         u32 local_phy;
2876
2877         if (bp->phy_flags & PHY_XGXS_FLAG) {
2878                 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
2879                 switch (ext_phy_type) {
2880                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2881                         DP(NETIF_MSG_LINK, "XGXS Direct\n");
2882                         break;
2883
2884                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2885                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2886                         DP(NETIF_MSG_LINK, "XGXS 8705/6\n");
2887                         local_phy = bp->phy_addr;
2888                         ext_phy_addr = ((bp->ext_phy_config &
2889                                         PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2890                                         PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2891                         bp->phy_addr = (u8)ext_phy_addr;
2892                         bnx2x_mdio45_write(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2893                                            EXT_PHY_OPT_CNTL, 0xa040);
2894                         bp->phy_addr = local_phy;
2895                         break;
2896
2897                 default:
2898                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2899                            bp->ext_phy_config);
2900                         break;
2901                 }
2902
2903         } else { /* SerDes */
2904                 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2905                 switch (ext_phy_type) {
2906                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2907                         DP(NETIF_MSG_LINK, "SerDes Direct\n");
2908                         break;
2909
2910                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2911                         DP(NETIF_MSG_LINK, "SerDes 5482\n");
2912                         break;
2913
2914                 default:
2915                         DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2916                            bp->ext_phy_config);
2917                         break;
2918                 }
2919         }
2920 }
2921
2922 static void bnx2x_link_initialize(struct bnx2x *bp)
2923 {
2924         int port = bp->port;
2925
2926         /* disable attentions */
2927         bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2928                        (NIG_MASK_XGXS0_LINK_STATUS |
2929                         NIG_MASK_XGXS0_LINK10G |
2930                         NIG_MASK_SERDES0_LINK_STATUS |
2931                         NIG_MASK_MI_INT));
2932
2933         bnx2x_ext_phy_reset(bp);
2934
2935         bnx2x_set_aer_mmd(bp);
2936
2937         if (bp->phy_flags & PHY_XGXS_FLAG)
2938                 bnx2x_set_master_ln(bp);
2939
2940         /* reset the SerDes and wait for the reset bit to return low */
2941         bnx2x_reset_unicore(bp);
2942
2943         bnx2x_set_aer_mmd(bp);
2944
2945         /* setting the masterLn_def again after the reset */
2946         if (bp->phy_flags & PHY_XGXS_FLAG) {
2947                 bnx2x_set_master_ln(bp);
2948                 bnx2x_set_swap_lanes(bp);
2949         }
2950
2951         /* Set Parallel Detect */
2952         if (bp->req_autoneg & AUTONEG_SPEED)
2953                 bnx2x_set_parallel_detection(bp);
2954
2955         if (bp->phy_flags & PHY_XGXS_FLAG) {
2956                 if (bp->req_line_speed &&
2957                     bp->req_line_speed < SPEED_1000) {
2958                         bp->phy_flags |= PHY_SGMII_FLAG;
2959                 } else {
2960                         bp->phy_flags &= ~PHY_SGMII_FLAG;
2961                 }
2962         }
2963
2964         if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
2965                 u16 bank, rx_eq;
2966
2967                 rx_eq = ((bp->serdes_config &
2968                           PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
2969                          PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
2970
2971                 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
2972                 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
2973                             bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
2974                         MDIO_SET_REG_BANK(bp, bank);
2975                         bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
2976                                            ((rx_eq &
2977                                 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
2978                                 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
2979                 }
2980
2981                 /* forced speed requested? */
2982                 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
2983                         DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2984
2985                         /* disable autoneg */
2986                         bnx2x_set_autoneg(bp);
2987
2988                         /* program speed and duplex */
2989                         bnx2x_program_serdes(bp);
2990
2991                 } else { /* AN_mode */
2992                         DP(NETIF_MSG_LINK, "not SGMII, AN\n");
2993
2994                         /* AN enabled */
2995                         bnx2x_set_brcm_cl37_advertisment(bp);
2996
2997                         /* program duplex & pause advertisement (for aneg) */
2998                         bnx2x_set_ieee_aneg_advertisment(bp);
2999
3000                         /* enable autoneg */
3001                         bnx2x_set_autoneg(bp);
3002
3003                         /* enable and restart AN */
3004                         bnx2x_restart_autoneg(bp);
3005                 }
3006
3007         } else { /* SGMII mode */
3008                 DP(NETIF_MSG_LINK, "SGMII\n");
3009
3010                 bnx2x_initialize_sgmii_process(bp);
3011         }
3012
3013         /* enable the interrupt */
3014         bnx2x_link_int_enable(bp);
3015
3016         /* init ext phy and enable link state int */
3017         bnx2x_ext_phy_init(bp);
3018 }
3019
3020 static void bnx2x_phy_deassert(struct bnx2x *bp)
3021 {
3022         int port = bp->port;
3023         u32 val;
3024
3025         if (bp->phy_flags & PHY_XGXS_FLAG) {
3026                 DP(NETIF_MSG_LINK, "XGXS\n");
3027                 val = XGXS_RESET_BITS;
3028
3029         } else { /* SerDes */
3030                 DP(NETIF_MSG_LINK, "SerDes\n");
3031                 val = SERDES_RESET_BITS;
3032         }
3033
3034         val = val << (port*16);
3035
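             /* Note: the GRC reset registers come as SET/CLEAR pairs -
              * writing the block's bits to the _CLEAR address pulls the
              * block into reset, and writing the same bits to the _SET
              * address releases it again, hence the CLEAR-then-SET
              * sequence with a short sleep below.
              */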
3036         /* reset and unreset the SerDes/XGXS */
3037         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3038         msleep(5);
3039         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3040 }
3041
3042 static int bnx2x_phy_init(struct bnx2x *bp)
3043 {
3044         DP(NETIF_MSG_LINK, "started\n");
3045         if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3046                 bp->phy_flags |= PHY_EMAC_FLAG;
3047                 bp->link_up = 1;
3048                 bp->line_speed = SPEED_10000;
3049                 bp->duplex = DUPLEX_FULL;
3050                 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3051                 bnx2x_emac_enable(bp);
3052                 bnx2x_link_report(bp);
3053                 return 0;
3054
3055         } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3056                 bp->phy_flags |= PHY_BMAC_FLAG;
3057                 bp->link_up = 1;
3058                 bp->line_speed = SPEED_10000;
3059                 bp->duplex = DUPLEX_FULL;
3060                 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3061                 bnx2x_bmac_enable(bp, 0);
3062                 bnx2x_link_report(bp);
3063                 return 0;
3064
3065         } else {
3066                 bnx2x_phy_deassert(bp);
3067                 bnx2x_link_initialize(bp);
3068         }
3069
3070         return 0;
3071 }
3072
3073 static void bnx2x_link_reset(struct bnx2x *bp)
3074 {
3075         int port = bp->port;
3076
3077         /* disable attentions */
3078         bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3079                        (NIG_MASK_XGXS0_LINK_STATUS |
3080                         NIG_MASK_XGXS0_LINK10G |
3081                         NIG_MASK_SERDES0_LINK_STATUS |
3082                         NIG_MASK_MI_INT));
3083
3084         bnx2x_ext_phy_reset(bp);
3085
3086         /* reset the SerDes/XGXS */
3087         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3088                (0x1ff << (port*16)));
3089
3090         /* reset EMAC / BMAC and disable NIG interfaces */
3091         NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
3092         NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3093
3094         NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3095         NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
3096         NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3097
3098         NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3099 }
3100
3101 #ifdef BNX2X_XGXS_LB
3102 static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3103 {
3104         int port = bp->port;
3105
3106         if (is_10g) {
3107                 u32 md_devad;
3108
3109                 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3110
3111                 /* change the uni_phy_addr in the nig */
3112                 REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
3113                        &md_devad);
3114                 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3115
3116                 /* change the aer mmd */
3117                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3118                 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3119
3120                 /* config combo IEEE0 control reg for loopback */
3121                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3122                 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3123                                    0x6041);
3124
3125                 /* set aer mmd back */
3126                 bnx2x_set_aer_mmd(bp);
3127
3128                 /* and md_devad */
3129                 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3130
3131         } else {
3132                 u32 mii_control;
3133
3134                 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3135
3136                 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3137                 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3138                                   &mii_control);
3139                 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3140                                    (mii_control |
3141                                     MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3142         }
3143 }
3144 #endif
3145
3146 /* end of PHY/MAC */
3147
3148 /* slow path */
3149
3150 /*
3151  * General service functions
3152  */
3153
3154 /* the slow path queue is odd since completions arrive on the fastpath ring */
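     /* A caller posts a ramrod by passing its command, connection id and the
      * data buffer address split into hi/lo halves; a minimal sketch (the
      * command name and 'mapping' are only illustrative here):
      *
      *      bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
      *                    U64_HI(mapping), U64_LO(mapping), 0);
      *
      * The call only queues the element and rings the producer; completion
      * is reported later through the fastpath ring as noted above.
      */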
3155 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3156                          u32 data_hi, u32 data_lo, int common)
3157 {
3158         int port = bp->port;
3159
3160         DP(NETIF_MSG_TIMER,
3161            "spe (%x:%x)  command %x  hw_cid %x  data (%x:%x)  left %x\n",
3162            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3163            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3164            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3165
3166 #ifdef BNX2X_STOP_ON_ERROR
3167         if (unlikely(bp->panic))
3168                 return -EIO;
3169 #endif
3170
3171         spin_lock(&bp->spq_lock);
3172
3173         if (!bp->spq_left) {
3174                 BNX2X_ERR("BUG! SPQ ring full!\n");
3175                 spin_unlock(&bp->spq_lock);
3176                 bnx2x_panic();
3177                 return -EBUSY;
3178         }
3179         /* CID needs the port number to be encoded in it */
3180         bp->spq_prod_bd->hdr.conn_and_cmd_data =
3181                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3182                                      HW_CID(bp, cid)));
3183         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3184         if (common)
3185                 bp->spq_prod_bd->hdr.type |=
3186                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3187
3188         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3189         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
3190
3191         bp->spq_left--;
3192
3193         if (bp->spq_prod_bd == bp->spq_last_bd) {
3194                 bp->spq_prod_bd = bp->spq;
3195                 bp->spq_prod_idx = 0;
3196                 DP(NETIF_MSG_TIMER, "end of spq\n");
3197
3198         } else {
3199                 bp->spq_prod_bd++;
3200                 bp->spq_prod_idx++;
3201         }
3202
3203         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
3204                bp->spq_prod_idx);
3205
3206         spin_unlock(&bp->spq_lock);
3207         return 0;
3208 }
3209
3210 /* acquire split MCP access lock register */
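     /* The lock is taken by writing bit 31 to the ALR register and reading
      * it back: the bit reads back as set only once this function owns the
      * lock.  The loop below polls up to 1000 times with a 5 ms sleep
      * (~5 seconds) before giving up.
      */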
3211 static int bnx2x_lock_alr(struct bnx2x *bp)
3212 {
3213         int rc = 0;
3214         u32 i, j, val;
3215
3216         might_sleep();
3217         i = 100;
3218         for (j = 0; j < i*10; j++) {
3219                 val = (1UL << 31);
3220                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3221                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
3222                 if (val & (1L << 31))
3223                         break;
3224
3225                 msleep(5);
3226         }
3227
3228         if (!(val & (1L << 31))) {
3229                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3230
3231                 rc = -EBUSY;
3232         }
3233
3234         return rc;
3235 }
3236
3237 /* Release split MCP access lock register */
3238 static void bnx2x_unlock_alr(struct bnx2x *bp)
3239 {
3240         u32 val = 0;
3241
3242         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3243 }
3244
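     /* Refresh the cached default status block indices and return a bitmask
      * of the ones that changed: bit 0 - attention bits, bit 1 - CStorm,
      * bit 2 - UStorm, bit 3 - XStorm, bit 4 - TStorm.  A zero return means
      * the interrupt found nothing new to handle.
      */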
3245 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3246 {
3247         struct host_def_status_block *def_sb = bp->def_status_blk;
3248         u16 rc = 0;
3249
3250         barrier(); /* status block is written to by the chip */
3251
3252         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3253                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3254                 rc |= 1;
3255         }
3256         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
3257                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
3258                 rc |= 2;
3259         }
3260         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
3261                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
3262                 rc |= 4;
3263         }
3264         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
3265                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
3266                 rc |= 8;
3267         }
3268         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
3269                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
3270                 rc |= 16;
3271         }
3272         return rc;
3273 }
3274
3275 /*
3276  * slow path service functions
3277  */
3278
3279 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3280 {
3281         int port = bp->port;
3282         u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
3283         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3284                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
3285         u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3286                                    NIG_REG_MASK_INTERRUPT_PORT0;
3287
3288         if (~bp->aeu_mask & (asserted & 0xff))
3289                 BNX2X_ERR("IGU ERROR\n");
3290         if (bp->attn_state & asserted)
3291                 BNX2X_ERR("IGU ERROR\n");
3292
3293         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
3294            bp->aeu_mask, asserted);
3295         bp->aeu_mask &= ~(asserted & 0xff);
3296         DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
3297
3298         REG_WR(bp, aeu_addr, bp->aeu_mask);
3299
3300         bp->attn_state |= asserted;
3301
3302         if (asserted & ATTN_HARD_WIRED_MASK) {
3303                 if (asserted & ATTN_NIG_FOR_FUNC) {
3304                         u32 nig_status_port;
3305                         u32 nig_int_addr = port ?
3306                                         NIG_REG_STATUS_INTERRUPT_PORT1 :
3307                                         NIG_REG_STATUS_INTERRUPT_PORT0;
3308
3309                         bp->nig_mask = REG_RD(bp, nig_mask_addr);
3310                         REG_WR(bp, nig_mask_addr, 0);
3311
3312                         nig_status_port = REG_RD(bp, nig_int_addr);
3313                         bnx2x_link_update(bp);
3314
3315                         /* handle unicore attn? */
3316                 }
3317                 if (asserted & ATTN_SW_TIMER_4_FUNC)
3318                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3319
3320                 if (asserted & GPIO_2_FUNC)
3321                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3322
3323                 if (asserted & GPIO_3_FUNC)
3324                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3325
3326                 if (asserted & GPIO_4_FUNC)
3327                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3328
3329                 if (port == 0) {
3330                         if (asserted & ATTN_GENERAL_ATTN_1) {
3331                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3332                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3333                         }
3334                         if (asserted & ATTN_GENERAL_ATTN_2) {
3335                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3336                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3337                         }
3338                         if (asserted & ATTN_GENERAL_ATTN_3) {
3339                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3340                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3341                         }
3342                 } else {
3343                         if (asserted & ATTN_GENERAL_ATTN_4) {
3344                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3345                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3346                         }
3347                         if (asserted & ATTN_GENERAL_ATTN_5) {
3348                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3349                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3350                         }
3351                         if (asserted & ATTN_GENERAL_ATTN_6) {
3352                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3353                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3354                         }
3355                 }
3356
3357         } /* if hardwired */
3358
3359         DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
3360            asserted, BAR_IGU_INTMEM + igu_addr);
3361         REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
3362
3363         /* now set back the mask */
3364         if (asserted & ATTN_NIG_FOR_FUNC)
3365                 REG_WR(bp, nig_mask_addr, bp->nig_mask);
3366 }
3367
3368 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3369 {
3370         int port = bp->port;
3371         int index;
3372         struct attn_route attn;
3373         struct attn_route group_mask;
3374         u32 reg_addr;
3375         u32 val;
3376
3377         /* need to take HW lock because MCP or other port might also
3378            try to handle this event */
3379         bnx2x_lock_alr(bp);
3380
3381         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3382         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3383         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3384         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3385         DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
3386
3387         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3388                 if (deasserted & (1 << index)) {
3389                         group_mask = bp->attn_group[index];
3390
3391                         DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
3392                            (unsigned long long)group_mask.sig[0]);
3393
3394                         if (attn.sig[3] & group_mask.sig[3] &
3395                             EVEREST_GEN_ATTN_IN_USE_MASK) {
3396
3397                                 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
3398
3399                                         BNX2X_ERR("MC assert!\n");
3400                                         bnx2x_panic();
3401
3402                                 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
3403
3404                                         BNX2X_ERR("MCP assert!\n");
3405                                         REG_WR(bp,
3406                                              MISC_REG_AEU_GENERAL_ATTN_11, 0);
3407                                         bnx2x_mc_assert(bp);
3408
3409                                 } else {
3410                                         BNX2X_ERR("UNKNOWN HW ASSERT!\n");
3411                                 }
3412                         }
3413
3414                         if (attn.sig[1] & group_mask.sig[1] &
3415                             BNX2X_DOORQ_ASSERT) {
3416
3417                                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3418                                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3419                                 /* DORQ discard attention */
3420                                 if (val & 0x2)
3421                                         BNX2X_ERR("FATAL error from DORQ\n");
3422                         }
3423
3424                         if (attn.sig[2] & group_mask.sig[2] &
3425                             AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3426
3427                                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3428                                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3429                                 /* CFC error attention */
3430                                 if (val & 0x2)
3431                                         BNX2X_ERR("FATAL error from CFC\n");
3432                         }
3433
3434                         if (attn.sig[2] & group_mask.sig[2] &
3435                             AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3436
3437                                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3438                                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3439                                 /* RQ_USDMDP_FIFO_OVERFLOW */
3440                                 if (val & 0x18000)
3441                                         BNX2X_ERR("FATAL error from PXP\n");
3442                         }
3443
3444                         if (attn.sig[3] & group_mask.sig[3] &
3445                             EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3446
3447                                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
3448                                        0x7ff);
3449                                 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
3450                                    attn.sig[3]);
3451                         }
3452
3453                         if ((attn.sig[0] & group_mask.sig[0] &
3454                                                 HW_INTERRUT_ASSERT_SET_0) ||
3455                             (attn.sig[1] & group_mask.sig[1] &
3456                                                 HW_INTERRUT_ASSERT_SET_1) ||
3457                             (attn.sig[2] & group_mask.sig[2] &
3458                                                 HW_INTERRUT_ASSERT_SET_2))
3459                                 BNX2X_ERR("FATAL HW block attention\n");
3460
3461                         if ((attn.sig[0] & group_mask.sig[0] &
3462                                                 HW_PRTY_ASSERT_SET_0) ||
3463                             (attn.sig[1] & group_mask.sig[1] &
3464                                                 HW_PRTY_ASSERT_SET_1) ||
3465                             (attn.sig[2] & group_mask.sig[2] &
3466                                                 HW_PRTY_ASSERT_SET_2))
3467                                 BNX2X_ERR("FATAL HW block parity attention\n");
3468                 }
3469         }
3470
3471         bnx2x_unlock_alr(bp);
3472
3473         reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
3474
3475         val = ~deasserted;
3476 /*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
3477            val, BAR_IGU_INTMEM + reg_addr); */
3478         REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
3479
3480         if (bp->aeu_mask & (deasserted & 0xff))
3481                 BNX2X_ERR("IGU BUG\n");
3482         if (~bp->attn_state & deasserted)
3483                 BNX2X_ERR("IGU BUG\n");
3484
3485         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3486                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3487
3488         DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
3489         bp->aeu_mask |= (deasserted & 0xff);
3490
3491         DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
3492         REG_WR(bp, reg_addr, bp->aeu_mask);
3493
3494         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3495         bp->attn_state &= ~deasserted;
3496         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3497 }
3498
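     /* For each attention line three bits are tracked: the raw attn bit from
      * the chip, its ack, and our soft attn_state.  A line is newly asserted
      * when the bit is set but neither acked nor in our state, and
      * deasserted when the bit dropped while still acked and in our state.
      */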
3499 static void bnx2x_attn_int(struct bnx2x *bp)
3500 {
3501         /* read local copy of bits */
3502         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
3503         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
3504         u32 attn_state = bp->attn_state;
3505
3506         /* look for changed bits */
3507         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3508         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3509
3510         DP(NETIF_MSG_HW,
3511            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3512            attn_bits, attn_ack, asserted, deasserted);
3513
3514         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3515                 BNX2X_ERR("bad attention state\n");
3516
3517         /* handle bits that were raised */
3518         if (asserted)
3519                 bnx2x_attn_int_asserted(bp, asserted);
3520
3521         if (deasserted)
3522                 bnx2x_attn_int_deasserted(bp, deasserted);
3523 }
3524
3525 static void bnx2x_sp_task(struct work_struct *work)
3526 {
3527         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
3528         u16 status;
3529
3530         /* Return here if interrupt is disabled */
3531         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3532                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3533                 return;
3534         }
3535
3536         status = bnx2x_update_dsb_idx(bp);
3537         if (status == 0)
3538                 BNX2X_ERR("spurious slowpath interrupt!\n");
3539
3540         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3541
3542         if (status & 0x1) {
3543                 /* HW attentions */
3544                 bnx2x_attn_int(bp);
3545         }
3546
3547         /* CStorm events: query_stats, cfc delete ramrods */
3548         if (status & 0x2)
3549                 bp->stat_pending = 0;
3550
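             /* ack each storm index with IGU_INT_NOP and re-enable the
              * interrupt only on the final ack, so a new slowpath interrupt
              * cannot fire before all indices have been consumed */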
3551         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
3552                      IGU_INT_NOP, 1);
3553         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3554                      IGU_INT_NOP, 1);
3555         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3556                      IGU_INT_NOP, 1);
3557         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3558                      IGU_INT_NOP, 1);
3559         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3560                      IGU_INT_ENABLE, 1);
3561 }
3562
3563 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3564 {
3565         struct net_device *dev = dev_instance;
3566         struct bnx2x *bp = netdev_priv(dev);
3567
3568         /* Return here if interrupt is disabled */
3569         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3570                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3571                 return IRQ_HANDLED;
3572         }
3573
3574         bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
3575
3576 #ifdef BNX2X_STOP_ON_ERROR
3577         if (unlikely(bp->panic))
3578                 return IRQ_HANDLED;
3579 #endif
3580
3581         schedule_work(&bp->sp_task);
3582
3583         return IRQ_HANDLED;
3584 }
3585
3586 /* end of slow path */
3587
3588 /* Statistics */
3589
3590 /****************************************************************************
3591 * Macros
3592 ****************************************************************************/
3593
3594 #define UPDATE_STAT(s, t) \
3595         do { \
3596                 estats->t += new->s - old->s; \
3597                 old->s = new->s; \
3598         } while (0)
3599
3600 /* sum[hi:lo] += add[hi:lo] */
3601 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3602         do { \
3603                 s_lo += a_lo; \
3604                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3605         } while (0)
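     /* e.g. folding a 32-bit delta into a split 64-bit counter (a sketch,
      * any u32 'diff' works here):
      *
      *      ADD_64(estats->total_bytes_received_hi, 0,
      *             estats->total_bytes_received_lo, diff);
      *
      * the (s_lo < a_lo) test detects wrap-around of the low word and
      * carries 1 into the high word.
      */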
3606
3607 /* difference = minuend - subtrahend */
3608 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3609         do { \
3610                 if (m_lo < s_lo) {      /* underflow */ \
3611                         d_hi = m_hi - s_hi; \
3612                         if (d_hi > 0) { /* we can 'loan' 1 */ \
3613                                 d_hi--; \
3614                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3615                         } else {        /* m_hi <= s_hi */ \
3616                                 d_hi = 0; \
3617                                 d_lo = 0; \
3618                         } \
3619                 } else {                /* m_lo >= s_lo */ \
3620                         if (m_hi < s_hi) { \
3621                             d_hi = 0; \
3622                             d_lo = 0; \
3623                         } else {        /* m_hi >= s_hi */ \
3624                             d_hi = m_hi - s_hi; \
3625                             d_lo = m_lo - s_lo; \
3626                         } \
3627                 } \
3628         } while (0)
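     /* Note that the result saturates at zero whenever the minuend is the
      * smaller value; the hardware counters fed through this macro are
      * assumed to be monotonic between reads.
      */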
3629
3630 /* minuend -= subtrahend */
3631 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3632         do { \
3633                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3634         } while (0)
3635
3636 #define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
3637         do { \
3638                 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
3639                         diff.lo, new->s_lo, old->s_lo); \
3640                 old->s_hi = new->s_hi; \
3641                 old->s_lo = new->s_lo; \
3642                 ADD_64(estats->t_hi, diff.hi, \
3643                        estats->t_lo, diff.lo); \
3644         } while (0)
3645
3646 /* sum[hi:lo] += add */
3647 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3648         do { \
3649                 s_lo += a; \
3650                 s_hi += (s_lo < a) ? 1 : 0; \
3651         } while (0)
3652
3653 #define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
3654         do { \
3655                 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
3656         } while (0)
3657
3658 #define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
3659         do { \
3660                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3661                 old_tclient->s = le32_to_cpu(tclient->s); \
3662                 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
3663         } while (0)
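     /* The UPDATE_* macros above deliberately reference 'new', 'old',
      * 'estats', 'tclient', 'old_tclient' and 'diff' from the caller's
      * scope, so they can only be used inside the bnx2x_update_*_stats()
      * functions below.
      */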
3664
3665 /*
3666  * General service functions
3667  */
3668
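     /* Read a 64-bit statistic stored as a {hi, lo} pair of u32s; on 32-bit
      * hosts only the low word fits in a long, so the high word is dropped
      * there.
      */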
3669 static inline long bnx2x_hilo(u32 *hiref)
3670 {
3671         u32 lo = *(hiref + 1);
3672 #if (BITS_PER_LONG == 64)
3673         u32 hi = *hiref;
3674
3675         return HILO_U64(hi, lo);
3676 #else
3677         return lo;
3678 #endif
3679 }
3680
3681 /*
3682  * Init service functions
3683  */
3684
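     /* Build the chain of DMAE commands that copies the MAC and NIG
      * statistics into host memory.  Each command completes by writing
      * comp_val to comp_addr: the MAC commands target a DMAE "go" register
      * to kick the following transfer, while the final NIG command writes
      * 0xffffffff into the nig_stats 'done' word that
      * bnx2x_update_storm_stats() polls to see that the DMA finished.
      */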
3685 static void bnx2x_init_mac_stats(struct bnx2x *bp)
3686 {
3687         struct dmae_command *dmae;
3688         int port = bp->port;
3689         int loader_idx = port * 8;
3690         u32 opcode;
3691         u32 mac_addr;
3692
3693         bp->executer_idx = 0;
3694         if (bp->fw_mb) {
3695                 /* MCP */
3696                 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3697                           DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3698 #ifdef __BIG_ENDIAN
3699                           DMAE_CMD_ENDIANITY_B_DW_SWAP |
3700 #else
3701                           DMAE_CMD_ENDIANITY_DW_SWAP |
3702 #endif
3703                           (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
3704
3705                 if (bp->link_up)
3706                         opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
3707
3708                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3709                 dmae->opcode = opcode;
3710                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
3711                                            sizeof(u32));
3712                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
3713                                            sizeof(u32));
3714                 dmae->dst_addr_lo = bp->fw_mb >> 2;
3715                 dmae->dst_addr_hi = 0;
3716                 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
3717                              sizeof(u32)) >> 2;
3718                 if (bp->link_up) {
3719                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3720                         dmae->comp_addr_hi = 0;
3721                         dmae->comp_val = 1;
3722                 } else {
3723                         dmae->comp_addr_lo = 0;
3724                         dmae->comp_addr_hi = 0;
3725                         dmae->comp_val = 0;
3726                 }
3727         }
3728
3729         if (!bp->link_up) {
3730                 /* no need to collect statistics while the link is down */
3731                 return;
3732         }
3733
3734         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3735                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3736                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3737 #ifdef __BIG_ENDIAN
3738                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3739 #else
3740                   DMAE_CMD_ENDIANITY_DW_SWAP |
3741 #endif
3742                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
3743
3744         if (bp->phy_flags & PHY_BMAC_FLAG) {
3745
3746                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3747                                    NIG_REG_INGRESS_BMAC0_MEM);
3748
3749                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3750                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3751                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3752                 dmae->opcode = opcode;
3753                 dmae->src_addr_lo = (mac_addr +
3754                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3755                 dmae->src_addr_hi = 0;
3756                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3757                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3758                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3759                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3760                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3761                 dmae->comp_addr_hi = 0;
3762                 dmae->comp_val = 1;
3763
3764                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3765                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3766                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3767                 dmae->opcode = opcode;
3768                 dmae->src_addr_lo = (mac_addr +
3769                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3770                 dmae->src_addr_hi = 0;
3771                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3772                                         offsetof(struct bmac_stats, rx_gr64));
3773                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3774                                         offsetof(struct bmac_stats, rx_gr64));
3775                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3776                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3777                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3778                 dmae->comp_addr_hi = 0;
3779                 dmae->comp_val = 1;
3780
3781         } else if (bp->phy_flags & PHY_EMAC_FLAG) {
3782
3783                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3784
3785                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3786                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3787                 dmae->opcode = opcode;
3788                 dmae->src_addr_lo = (mac_addr +
3789                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3790                 dmae->src_addr_hi = 0;
3791                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3792                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3793                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3794                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3795                 dmae->comp_addr_hi = 0;
3796                 dmae->comp_val = 1;
3797
3798                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3799                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3800                 dmae->opcode = opcode;
3801                 dmae->src_addr_lo = (mac_addr +
3802                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3803                 dmae->src_addr_hi = 0;
3804                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3805                                            offsetof(struct emac_stats,
3806                                                     rx_falsecarriererrors));
3807                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3808                                            offsetof(struct emac_stats,
3809                                                     rx_falsecarriererrors));
3810                 dmae->len = 1;
3811                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3812                 dmae->comp_addr_hi = 0;
3813                 dmae->comp_val = 1;
3814
3815                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3816                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3817                 dmae->opcode = opcode;
3818                 dmae->src_addr_lo = (mac_addr +
3819                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3820                 dmae->src_addr_hi = 0;
3821                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3822                                            offsetof(struct emac_stats,
3823                                                     tx_ifhcoutoctets));
3824                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3825                                            offsetof(struct emac_stats,
3826                                                     tx_ifhcoutoctets));
3827                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3828                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3829                 dmae->comp_addr_hi = 0;
3830                 dmae->comp_val = 1;
3831         }
3832
3833         /* NIG */
3834         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3835         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3836                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3837                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3838 #ifdef __BIG_ENDIAN
3839                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3840 #else
3841                         DMAE_CMD_ENDIANITY_DW_SWAP |
3842 #endif
3843                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
3844         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3845                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3846         dmae->src_addr_hi = 0;
3847         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
3848         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
3849         dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
3850         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
3851                                     offsetof(struct nig_stats, done));
3852         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
3853                                     offsetof(struct nig_stats, done));
3854         dmae->comp_val = 0xffffffff;
3855 }
3856
3857 static void bnx2x_init_stats(struct bnx2x *bp)
3858 {
3859         int port = bp->port;
3860
3861         bp->stats_state = STATS_STATE_DISABLE;
3862         bp->executer_idx = 0;
3863
3864         bp->old_brb_discard = REG_RD(bp,
3865                                      NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3866
3867         memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
3868         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3869         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3870
3871         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
3872         REG_WR(bp, BAR_XSTRORM_INTMEM +
3873                XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
3874
3875         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
3876         REG_WR(bp, BAR_TSTRORM_INTMEM +
3877                TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
3878
3879         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
3880         REG_WR(bp, BAR_CSTRORM_INTMEM +
3881                CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
3882
3883         REG_WR(bp, BAR_XSTRORM_INTMEM +
3884                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
3885                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3886         REG_WR(bp, BAR_XSTRORM_INTMEM +
3887                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
3888                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3889
3890         REG_WR(bp, BAR_TSTRORM_INTMEM +
3891                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
3892                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3893         REG_WR(bp, BAR_TSTRORM_INTMEM +
3894                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
3895                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3896 }
3897
3898 static void bnx2x_stop_stats(struct bnx2x *bp)
3899 {
3900         might_sleep();
3901         if (bp->stats_state != STATS_STATE_DISABLE) {
3902                 int timeout = 10;
3903
3904                 bp->stats_state = STATS_STATE_STOP;
3905                 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
3906
3907                 while (bp->stats_state != STATS_STATE_DISABLE) {
3908                         if (!timeout) {
3909                                 BNX2X_ERR("timeout waiting for stats stop\n");
3910                                 break;
3911                         }
3912                         timeout--;
3913                         msleep(100);
3914                 }
3915         }
3916         DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
3917 }
3918
3919 /*
3920  * Statistics service functions
3921  */
3922
3923 static void bnx2x_update_bmac_stats(struct bnx2x *bp)
3924 {
3925         struct regp diff;
3926         struct regp sum;
3927         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
3928         struct bmac_stats *old = &bp->old_bmac;
3929         struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
3930
3931         sum.hi = 0;
3932         sum.lo = 0;
3933
3934         UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
3935                       tx_gtbyt.lo, total_bytes_transmitted_lo);
3936
3937         UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
3938                       tx_gtmca.lo, total_multicast_packets_transmitted_lo);
3939         ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
3940
3941         UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
3942                       tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
3943         ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
3944
3945         UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
3946                       tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
3947         SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
3948                estats->total_unicast_packets_transmitted_lo, sum.lo);
3949
3950         UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
3951         UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
3952         UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
3953         UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
3954         UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
3955         UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
3956         UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
3957         UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
3958         UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
3959         UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
3960         UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
3961
3962         UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
3963         UPDATE_STAT(rx_grund.lo, runt_packets_received);
3964         UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
3965         UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
3966         UPDATE_STAT(rx_grxcf.lo, control_frames_received);
3967         /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
3968         UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
3969         UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
3970
3971         UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
3972                       rx_grerb.lo, stat_IfHCInBadOctets_lo);
3973         UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
3974                       tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
3975         UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
3976         /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
3977         estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
3978 }
3979
3980 static void bnx2x_update_emac_stats(struct bnx2x *bp)
3981 {
3982         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
3983         struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
3984
3985         UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
3986                                              total_bytes_transmitted_lo);
3987         UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
3988                                         total_unicast_packets_transmitted_hi,
3989                                         total_unicast_packets_transmitted_lo);
3990         UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
3991                                       total_multicast_packets_transmitted_hi,
3992                                       total_multicast_packets_transmitted_lo);
3993         UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
3994                                       total_broadcast_packets_transmitted_hi,
3995                                       total_broadcast_packets_transmitted_lo);
3996
3997         estats->pause_xon_frames_transmitted += new->tx_outxonsent;
3998         estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
3999         estats->single_collision_transmit_frames +=
4000                                 new->tx_dot3statssinglecollisionframes;
4001         estats->multiple_collision_transmit_frames +=
4002                                 new->tx_dot3statsmultiplecollisionframes;
4003         estats->late_collision_frames += new->tx_dot3statslatecollisions;
4004         estats->excessive_collision_frames +=
4005                                 new->tx_dot3statsexcessivecollisions;
4006         estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4007         estats->frames_transmitted_65_127_bytes +=
4008                                 new->tx_etherstatspkts65octetsto127octets;
4009         estats->frames_transmitted_128_255_bytes +=
4010                                 new->tx_etherstatspkts128octetsto255octets;
4011         estats->frames_transmitted_256_511_bytes +=
4012                                 new->tx_etherstatspkts256octetsto511octets;
4013         estats->frames_transmitted_512_1023_bytes +=
4014                                 new->tx_etherstatspkts512octetsto1023octets;
4015         estats->frames_transmitted_1024_1522_bytes +=
4016                                 new->tx_etherstatspkts1024octetsto1522octet;
4017         estats->frames_transmitted_1523_9022_bytes +=
4018                                 new->tx_etherstatspktsover1522octets;
4019
4020         estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4021         estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4022         estats->false_carrier_detections += new->rx_falsecarriererrors;
4023         estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4024         estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4025         estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4026         estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4027         estats->control_frames_received += new->rx_maccontrolframesreceived;
4028         estats->error_runt_packets_received += new->rx_etherstatsfragments;
4029         estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4030
4031         UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4032                                                stat_IfHCInBadOctets_lo);
4033         UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4034                                                 stat_IfHCOutBadOctets_lo);
4035         estats->stat_Dot3statsInternalMacTransmitErrors +=
4036                                 new->tx_dot3statsinternalmactransmiterrors;
4037         estats->stat_Dot3StatsCarrierSenseErrors +=
4038                                 new->rx_dot3statscarriersenseerrors;
4039         estats->stat_Dot3StatsDeferredTransmissions +=
4040                                 new->tx_dot3statsdeferredtransmissions;
4041         estats->stat_FlowControlDone += new->tx_flowcontroldone;
4042         estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4043 }
4044
4045 static int bnx2x_update_storm_stats(struct bnx2x *bp)
4046 {
4047         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4048         struct tstorm_common_stats *tstats = &stats->tstorm_common;
4049         struct tstorm_per_client_stats *tclient =
4050                                                 &tstats->client_statistics[0];
4051         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4052         struct xstorm_common_stats *xstats = &stats->xstorm_common;
4053         struct nig_stats *nstats = bnx2x_sp(bp, nig);
4054         struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4055         u32 diff;
4056
4057         /* are DMAE stats valid? */
4058         if (nstats->done != 0xffffffff) {
4059                 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4060                 return -1;
4061         }
4062
4063         /* are storm stats valid? */
4064         if (tstats->done.hi != 0xffffffff) {
4065                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4066                 return -2;
4067         }
4068         if (xstats->done.hi != 0xffffffff) {
4069                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4070                 return -3;
4071         }
4072
4073         estats->total_bytes_received_hi =
4074         estats->valid_bytes_received_hi =
4075                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
4076         estats->total_bytes_received_lo =
4077         estats->valid_bytes_received_lo =
4078                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
4079         ADD_64(estats->total_bytes_received_hi,
4080                le32_to_cpu(tclient->rcv_error_bytes.hi),
4081                estats->total_bytes_received_lo,
4082                le32_to_cpu(tclient->rcv_error_bytes.lo));
4083
4084         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4085                                         total_unicast_packets_received_hi,
4086                                         total_unicast_packets_received_lo);
4087         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4088                                         total_multicast_packets_received_hi,
4089                                         total_multicast_packets_received_lo);
4090         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4091                                         total_broadcast_packets_received_hi,
4092                                         total_broadcast_packets_received_lo);
4093
4094         estats->frames_received_64_bytes = MAC_STX_NA;
4095         estats->frames_received_65_127_bytes = MAC_STX_NA;
4096         estats->frames_received_128_255_bytes = MAC_STX_NA;
4097         estats->frames_received_256_511_bytes = MAC_STX_NA;
4098         estats->frames_received_512_1023_bytes = MAC_STX_NA;
4099         estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4100         estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4101
4102         estats->x_total_sent_bytes_hi =
4103                                 le32_to_cpu(xstats->total_sent_bytes.hi);
4104         estats->x_total_sent_bytes_lo =
4105                                 le32_to_cpu(xstats->total_sent_bytes.lo);
4106         estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4107
4108         estats->t_rcv_unicast_bytes_hi =
4109                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4110         estats->t_rcv_unicast_bytes_lo =
4111                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4112         estats->t_rcv_broadcast_bytes_hi =
4113                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4114         estats->t_rcv_broadcast_bytes_lo =
4115                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4116         estats->t_rcv_multicast_bytes_hi =
4117                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4118         estats->t_rcv_multicast_bytes_lo =
4119                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4120         estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
4121
4122         estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
4123         estats->packets_too_big_discard =
4124                                 le32_to_cpu(tclient->packets_too_big_discard);
4125         estats->jabber_packets_received = estats->packets_too_big_discard +
4126                                           estats->stat_Dot3statsFramesTooLong;
4127         estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
4128         estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
4129         estats->mac_discard = le32_to_cpu(tclient->mac_discard);
4130         estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
4131         estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
4132         estats->brb_truncate_discard =
4133                                 le32_to_cpu(tstats->brb_truncate_discard);
4134
4135         estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
4136         bp->old_brb_discard = nstats->brb_discard;
4137
4138         estats->brb_packet = nstats->brb_packet;
4139         estats->brb_truncate = nstats->brb_truncate;
4140         estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
4141         estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
4142         estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
4143         estats->mng_discard = nstats->mng_discard;
4144         estats->mng_octet_inp = nstats->mng_octet_inp;
4145         estats->mng_octet_out = nstats->mng_octet_out;
4146         estats->mng_packet_inp = nstats->mng_packet_inp;
4147         estats->mng_packet_out = nstats->mng_packet_out;
4148         estats->pbf_octets = nstats->pbf_octets;
4149         estats->pbf_packet = nstats->pbf_packet;
4150         estats->safc_inp = nstats->safc_inp;
4151
4152         xstats->done.hi = 0;
4153         tstats->done.hi = 0;
4154         nstats->done = 0;
4155
4156         return 0;
4157 }
4158
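/* Fold the driver's extended statistics into the generic
 * struct net_device_stats that the stack reads from dev->stats.
 */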
4159 static void bnx2x_update_net_stats(struct bnx2x *bp)
4160 {
4161         struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4162         struct net_device_stats *nstats = &bp->dev->stats;
4163
4164         nstats->rx_packets =
4165                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4166                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4167                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4168
4169         nstats->tx_packets =
4170                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4171                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4172                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4173
4174         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4175
4176         nstats->tx_bytes =
4177                 bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4178
4179         nstats->rx_dropped = estats->checksum_discard +
4180                                    estats->mac_discard;
4181         nstats->tx_dropped = 0;
4182
4183         nstats->multicast =
4184                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
4185
4186         nstats->collisions =
4187                 estats->single_collision_transmit_frames +
4188                 estats->multiple_collision_transmit_frames +
4189                 estats->late_collision_frames +
4190                 estats->excessive_collision_frames;
4191
4192         nstats->rx_length_errors = estats->runt_packets_received +
4193                                    estats->jabber_packets_received;
4194         nstats->rx_over_errors = estats->no_buff_discard;
4195         nstats->rx_crc_errors = estats->crc_receive_errors;
4196         nstats->rx_frame_errors = estats->alignment_errors;
4197         nstats->rx_fifo_errors = estats->brb_discard +
4198                                        estats->brb_truncate_discard;
4199         nstats->rx_missed_errors = estats->xxoverflow_discard;
4200
4201         nstats->rx_errors = nstats->rx_length_errors +
4202                             nstats->rx_over_errors +
4203                             nstats->rx_crc_errors +
4204                             nstats->rx_frame_errors +
4205                             nstats->rx_fifo_errors;
4206
4207         nstats->tx_aborted_errors = estats->late_collision_frames +
4208                                           estats->excessive_collision_frames;
4209         nstats->tx_carrier_errors = estats->false_carrier_detections;
4210         nstats->tx_fifo_errors = 0;
4211         nstats->tx_heartbeat_errors = 0;
4212         nstats->tx_window_errors = 0;
4213
4214         nstats->tx_errors = nstats->tx_aborted_errors +
4215                             nstats->tx_carrier_errors;
4216
4217         estats->mac_stx_start = ++estats->mac_stx_end;
4218 }
4219
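/* Top level of the statistics flow: pull the storm counters, then the
 * counters of whichever MAC (BMAC or EMAC) is active, refresh the netdev
 * stats, and finally re-arm the DMAE loader and post a new STAT_QUERY
 * ramrod so the next round of counters is collected.
 */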
4220 static void bnx2x_update_stats(struct bnx2x *bp)
4221 {
4222         int i;
4223
4224         if (!bnx2x_update_storm_stats(bp)) {
4225
4226                 if (bp->phy_flags & PHY_BMAC_FLAG) {
4227                         bnx2x_update_bmac_stats(bp);
4228
4229                 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4230                         bnx2x_update_emac_stats(bp);
4231
4232                 } else { /* unreached */
4233                         BNX2X_ERR("no MAC active\n");
4234                         return;
4235                 }
4236
4237                 bnx2x_update_net_stats(bp);
4238         }
4239
4240         if (bp->msglevel & NETIF_MSG_TIMER) {
4241                 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4242                 struct net_device_stats *nstats = &bp->dev->stats;
4243
4244                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4245                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4246                                   "  tx pkt (%lx)\n",
4247                        bnx2x_tx_avail(bp->fp),
4248                        *bp->fp->tx_cons_sb, nstats->tx_packets);
4249                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4250                                   "  rx pkt (%lx)\n",
4251                        (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
4252                        *bp->fp->rx_cons_sb, nstats->rx_packets);
4253                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
4254                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
4255                        estats->driver_xoff, estats->brb_discard);
4256                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4257                         "packets_too_big_discard %u  no_buff_discard %u  "
4258                         "mac_discard %u  mac_filter_discard %u  "
4259                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4260                         "ttl0_discard %u\n",
4261                        estats->checksum_discard,
4262                        estats->packets_too_big_discard,
4263                        estats->no_buff_discard, estats->mac_discard,
4264                        estats->mac_filter_discard, estats->xxoverflow_discard,
4265                        estats->brb_truncate_discard, estats->ttl0_discard);
4266
4267                 for_each_queue(bp, i) {
4268                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4269                                bnx2x_fp(bp, i, tx_pkt),
4270                                bnx2x_fp(bp, i, rx_pkt),
4271                                bnx2x_fp(bp, i, rx_calls));
4272                 }
4273         }
4274
4275         if (bp->state != BNX2X_STATE_OPEN) {
4276                 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
4277                 return;
4278         }
4279
4280 #ifdef BNX2X_STOP_ON_ERROR
4281         if (unlikely(bp->panic))
4282                 return;
4283 #endif
4284
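        /* The "loader" is a DMAE command that copies the first prepared
         * dmae_command from host memory into the adjacent DMAE command
         * slot and triggers it on completion, so a chain of statistics
         * transfers can run without further CPU involvement.
         */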
4285         /* loader */
4286         if (bp->executer_idx) {
4287                 struct dmae_command *dmae = &bp->dmae;
4288                 int port = bp->port;
4289                 int loader_idx = port * 8;
4290
4291                 memset(dmae, 0, sizeof(struct dmae_command));
4292
4293                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4294                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4295                                 DMAE_CMD_DST_RESET |
4296 #ifdef __BIG_ENDIAN
4297                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4298 #else
4299                                 DMAE_CMD_ENDIANITY_DW_SWAP |
4300 #endif
4301                                 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4302                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
4303                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
4304                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
4305                                      sizeof(struct dmae_command) *
4306                                      (loader_idx + 1)) >> 2;
4307                 dmae->dst_addr_hi = 0;
4308                 dmae->len = sizeof(struct dmae_command) >> 2;
4309                 dmae->len--;    /* !!! for A0/1 only */
4310                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
4311                 dmae->comp_addr_hi = 0;
4312                 dmae->comp_val = 1;
4313
4314                 bnx2x_post_dmae(bp, dmae, loader_idx);
4315         }
4316
4317         if (bp->stats_state != STATS_STATE_ENABLE) {
4318                 bp->stats_state = STATS_STATE_DISABLE;
4319                 return;
4320         }
4321
4322         if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
4323                 /* stats ramrod has its own slot on the spe */
4324                 bp->spq_left++;
4325                 bp->stat_pending = 1;
4326         }
4327 }
4328
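/* Periodic driver timer: services the rings directly when running in
 * poll mode, exchanges the driver/MCP heartbeat pulses, and kicks the
 * statistics update.
 */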
4329 static void bnx2x_timer(unsigned long data)
4330 {
4331         struct bnx2x *bp = (struct bnx2x *) data;
4332
4333         if (!netif_running(bp->dev))
4334                 return;
4335
4336         if (atomic_read(&bp->intr_sem) != 0)
4337                 goto bnx2x_restart_timer;
4338
4339         if (poll) {
4340                 struct bnx2x_fastpath *fp = &bp->fp[0];
4341                 int rc;
4342
4343                 bnx2x_tx_int(fp, 1000);
4344                 rc = bnx2x_rx_int(fp, 1000);
4345         }
4346
4347         if (!nomcp && (bp->bc_ver >= 0x040003)) {
4348                 int port = bp->port;
4349                 u32 drv_pulse;
4350                 u32 mcp_pulse;
4351
4352                 ++bp->fw_drv_pulse_wr_seq;
4353                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4354                 /* TBD - add SYSTEM_TIME */
4355                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4356                 SHMEM_WR(bp, drv_fw_mb[port].drv_pulse_mb, drv_pulse);
4357
4358                 mcp_pulse = (SHMEM_RD(bp, drv_fw_mb[port].mcp_pulse_mb) &
4359                              MCP_PULSE_SEQ_MASK);
4360                 /* The delta between driver pulse and mcp response
4361                  * should be 1 (before mcp response) or 0 (after mcp response)
4362                  */
4363                 if ((drv_pulse != mcp_pulse) &&
4364                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4365                         /* someone lost a heartbeat... */
4366                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4367                                   drv_pulse, mcp_pulse);
4368                 }
4369         }
4370
4371         if (bp->stats_state == STATS_STATE_DISABLE)
4372                 goto bnx2x_restart_timer;
4373
4374         bnx2x_update_stats(bp);
4375
4376 bnx2x_restart_timer:
4377         mod_timer(&bp->timer, jiffies + bp->current_interval);
4378 }
4379
4380 /* end of Statistics */
4381
4382 /* nic init */
4383
4384 /*
4385  * nic init service functions
4386  */
4387
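/* Set up one per-queue host status block: point the USTORM and CSTORM
 * sections at their DMA addresses and start with every host-coalescing
 * index disabled until bnx2x_update_coalesce() programs the timeouts.
 */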
4388 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4389                           dma_addr_t mapping, int id)
4390 {
4391         int port = bp->port;
4392         u64 section;
4393         int index;
4394
4395         /* USTORM */
4396         section = ((u64)mapping) + offsetof(struct host_status_block,
4397                                             u_status_block);
4398         sb->u_status_block.status_block_id = id;
4399
4400         REG_WR(bp, BAR_USTRORM_INTMEM +
4401                USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
4402         REG_WR(bp, BAR_USTRORM_INTMEM +
4403                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
4404                U64_HI(section));
4405
4406         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4407                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4408                          USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
4409
4410         /* CSTORM */
4411         section = ((u64)mapping) + offsetof(struct host_status_block,
4412                                             c_status_block);
4413         sb->c_status_block.status_block_id = id;
4414
4415         REG_WR(bp, BAR_CSTRORM_INTMEM +
4416                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
4417         REG_WR(bp, BAR_CSTRORM_INTMEM +
4418                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
4419                U64_HI(section));
4420
4421         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4422                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4423                          CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
4424
4425         bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4426 }
4427
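/* The default status block additionally carries the attention bits:
 * cache the AEU routing registers into bp->attn_group[], program the HC
 * attention address/number, and then init one section per storm.
 */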
4428 static void bnx2x_init_def_sb(struct bnx2x *bp,
4429                               struct host_def_status_block *def_sb,
4430                               dma_addr_t mapping, int id)
4431 {
4432         int port = bp->port;
4433         int index, val, reg_offset;
4434         u64 section;
4435
4436         /* ATTN */
4437         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4438                                             atten_status_block);
4439         def_sb->atten_status_block.status_block_id = id;
4440
4441         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4442                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4443
4444         for (index = 0; index < 3; index++) {
4445                 bp->attn_group[index].sig[0] = REG_RD(bp,
4446                                                      reg_offset + 0x10*index);
4447                 bp->attn_group[index].sig[1] = REG_RD(bp,
4448                                                reg_offset + 0x4 + 0x10*index);
4449                 bp->attn_group[index].sig[2] = REG_RD(bp,
4450                                                reg_offset + 0x8 + 0x10*index);
4451                 bp->attn_group[index].sig[3] = REG_RD(bp,
4452                                                reg_offset + 0xc + 0x10*index);
4453         }
4454
4455         bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4456                                           MISC_REG_AEU_MASK_ATTN_FUNC_0));
4457
4458         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4459                              HC_REG_ATTN_MSG0_ADDR_L);
4460
4461         REG_WR(bp, reg_offset, U64_LO(section));
4462         REG_WR(bp, reg_offset + 4, U64_HI(section));
4463
4464         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4465
4466         val = REG_RD(bp, reg_offset);
4467         val |= id;
4468         REG_WR(bp, reg_offset, val);
4469
4470         /* USTORM */
4471         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4472                                             u_def_status_block);
4473         def_sb->u_def_status_block.status_block_id = id;
4474
4475         REG_WR(bp, BAR_USTRORM_INTMEM +
4476                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4477         REG_WR(bp, BAR_USTRORM_INTMEM +
4478                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4479                U64_HI(section));
4480         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
4481                BNX2X_BTR);
4482
4483         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4484                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4485                          USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4486
4487         /* CSTORM */
4488         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4489                                             c_def_status_block);
4490         def_sb->c_def_status_block.status_block_id = id;
4491
4492         REG_WR(bp, BAR_CSTRORM_INTMEM +
4493                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4494         REG_WR(bp, BAR_CSTRORM_INTMEM +
4495                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4496                U64_HI(section));
4497         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
4498                BNX2X_BTR);
4499
4500         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4501                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4502                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4503
4504         /* TSTORM */
4505         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4506                                             t_def_status_block);
4507         def_sb->t_def_status_block.status_block_id = id;
4508
4509         REG_WR(bp, BAR_TSTRORM_INTMEM +
4510                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4511         REG_WR(bp, BAR_TSTRORM_INTMEM +
4512                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4513                U64_HI(section));
4514         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
4515                BNX2X_BTR);
4516
4517         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4518                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4519                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4520
4521         /* XSTORM */
4522         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4523                                             x_def_status_block);
4524         def_sb->x_def_status_block.status_block_id = id;
4525
4526         REG_WR(bp, BAR_XSTRORM_INTMEM +
4527                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4528         REG_WR(bp, BAR_XSTRORM_INTMEM +
4529                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4530                U64_HI(section));
4531         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
4532                BNX2X_BTR);
4533
4534         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4535                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4536                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4537
4538         bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4539 }
4540
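/* Program the host-coalescing timeout for the Rx and Tx completion
 * indices of every queue.  The /12 scaling suggests the HC timeout
 * field counts in units of 12us; a value of 0 instead disables
 * coalescing for that index via the matching HC_DISABLE word.
 */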
4541 static void bnx2x_update_coalesce(struct bnx2x *bp)
4542 {
4543         int port = bp->port;
4544         int i;
4545
4546         for_each_queue(bp, i) {
4547
4548                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4549                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4550                         USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
4551                                                    HC_INDEX_U_ETH_RX_CQ_CONS),
4552                         bp->rx_ticks_int/12);
4553                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4554                          USTORM_SB_HC_DISABLE_OFFSET(port, i,
4555                                                    HC_INDEX_U_ETH_RX_CQ_CONS),
4556                          bp->rx_ticks_int ? 0 : 1);
4557
4558                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4559                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4560                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
4561                                                    HC_INDEX_C_ETH_TX_CQ_CONS),
4562                         bp->tx_ticks_int/12);
4563                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4564                          CSTORM_SB_HC_DISABLE_OFFSET(port, i,
4565                                                    HC_INDEX_C_ETH_TX_CQ_CONS),
4566                          bp->tx_ticks_int ? 0 : 1);
4567         }
4568 }
4569
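/* Build the Rx BD and completion (RCQ) rings: the last descriptors of
 * each page are turned into next-page pointers so the pages form a
 * circular chain, and the BD ring is then pre-filled with up to
 * rx_ring_size skbs.
 */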
4570 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4571 {
4572         u16 ring_prod;
4573         int i, j;
4574         int port = bp->port;
4575
4576         bp->rx_buf_use_size = bp->dev->mtu;
4577
4578         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4579         bp->rx_buf_size = bp->rx_buf_use_size + 64;
4580
4581         for_each_queue(bp, j) {
4582                 struct bnx2x_fastpath *fp = &bp->fp[j];
4583
4584                 fp->rx_bd_cons = 0;
4585                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4586
4587                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4588                         struct eth_rx_bd *rx_bd;
4589
4590                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4591                         rx_bd->addr_hi =
4592                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4593                                            BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4594                         rx_bd->addr_lo =
4595                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4596                                            BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4597
4598                 }
4599
4600                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4601                         struct eth_rx_cqe_next_page *nextpg;
4602
4603                         nextpg = (struct eth_rx_cqe_next_page *)
4604                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4605                         nextpg->addr_hi =
4606                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4607                                           BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4608                         nextpg->addr_lo =
4609                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4610                                           BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4611                 }
4612
4613                 /* rx completion queue */
4614                 fp->rx_comp_cons = ring_prod = 0;
4615
4616                 for (i = 0; i < bp->rx_ring_size; i++) {
4617                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4618                                 BNX2X_ERR("was only able to allocate "
4619                                           "%d rx skbs\n", i);
4620                                 break;
4621                         }
4622                         ring_prod = NEXT_RX_IDX(ring_prod);
4623                         BUG_TRAP(ring_prod > i);
4624                 }
4625
4626                 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
4627                 fp->rx_pkt = fp->rx_calls = 0;
4628
4629                 /* Warning! this will generate an interrupt (to the TSTORM) */
4630                 /* must only be done when chip is initialized */
4631                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4632                        TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
4633                 if (j != 0)
4634                         continue;
4635
4636                 REG_WR(bp, BAR_USTRORM_INTMEM +
4637                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
4638                        U64_LO(fp->rx_comp_mapping));
4639                 REG_WR(bp, BAR_USTRORM_INTMEM +
4640                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
4641                        U64_HI(fp->rx_comp_mapping));
4642         }
4643 }
4644
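/* Chain the Tx BD pages the same way: the last BD of every page points
 * at the start of the following page, wrapping after NUM_TX_RINGS.
 */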
4645 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4646 {
4647         int i, j;
4648
4649         for_each_queue(bp, j) {
4650                 struct bnx2x_fastpath *fp = &bp->fp[j];
4651
4652                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4653                         struct eth_tx_bd *tx_bd =
4654                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4655
4656                         tx_bd->addr_hi =
4657                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4658                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4659                         tx_bd->addr_lo =
4660                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4661                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4662                 }
4663
4664                 fp->tx_pkt_prod = 0;
4665                 fp->tx_pkt_cons = 0;
4666                 fp->tx_bd_prod = 0;
4667                 fp->tx_bd_cons = 0;
4668                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4669                 fp->tx_pkt = 0;
4670         }
4671 }
4672
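/* Init the slow path (SPQ) ring and tell the XSTORM where it lives;
 * ramrods posted via bnx2x_sp_post() are consumed from this ring.
 */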
4673 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4674 {
4675         int port = bp->port;
4676
4677         spin_lock_init(&bp->spq_lock);
4678
4679         bp->spq_left = MAX_SPQ_PENDING;
4680         bp->spq_prod_idx = 0;
4681         bp->dsb_sp_prod_idx = 0;
4682         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4683         bp->spq_prod_bd = bp->spq;
4684         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4685
4686         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
4687                U64_LO(bp->spq_mapping));
4688         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
4689                U64_HI(bp->spq_mapping));
4690
4691         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
4692                bp->spq_prod_idx);
4693 }
4694
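/* Fill the per-connection ethernet context of every fastpath queue:
 * BD ring base addresses, doorbell and status block bindings, and the
 * CDU validation words.
 */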
4695 static void bnx2x_init_context(struct bnx2x *bp)
4696 {
4697         int i;
4698
4699         for_each_queue(bp, i) {
4700                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4701                 struct bnx2x_fastpath *fp = &bp->fp[i];
4702
4703                 context->xstorm_st_context.tx_bd_page_base_hi =
4704                                                 U64_HI(fp->tx_desc_mapping);
4705                 context->xstorm_st_context.tx_bd_page_base_lo =
4706                                                 U64_LO(fp->tx_desc_mapping);
4707                 context->xstorm_st_context.db_data_addr_hi =
4708                                                 U64_HI(fp->tx_prods_mapping);
4709                 context->xstorm_st_context.db_data_addr_lo =
4710                                                 U64_LO(fp->tx_prods_mapping);
4711
4712                 context->ustorm_st_context.rx_bd_page_base_hi =
4713                                                 U64_HI(fp->rx_desc_mapping);
4714                 context->ustorm_st_context.rx_bd_page_base_lo =
4715                                                 U64_LO(fp->rx_desc_mapping);
4716                 context->ustorm_st_context.status_block_id = i;
4717                 context->ustorm_st_context.sb_index_number =
4718                                                 HC_INDEX_U_ETH_RX_CQ_CONS;
4719                 context->ustorm_st_context.rcq_base_address_hi =
4720                                                 U64_HI(fp->rx_comp_mapping);
4721                 context->ustorm_st_context.rcq_base_address_lo =
4722                                                 U64_LO(fp->rx_comp_mapping);
4723                 context->ustorm_st_context.flags =
4724                                 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
4725                 context->ustorm_st_context.mc_alignment_size = 64;
4726                 context->ustorm_st_context.num_rss = bp->num_queues;
4727
4728                 context->cstorm_st_context.sb_index_number =
4729                                                 HC_INDEX_C_ETH_TX_CQ_CONS;
4730                 context->cstorm_st_context.status_block_id = i;
4731
4732                 context->xstorm_ag_context.cdu_reserved =
4733                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4734                                                CDU_REGION_NUMBER_XCM_AG,
4735                                                ETH_CONNECTION_TYPE);
4736                 context->ustorm_ag_context.cdu_usage =
4737                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4738                                                CDU_REGION_NUMBER_UCM_AG,
4739                                                ETH_CONNECTION_TYPE);
4740         }
4741 }
4742
4743 static void bnx2x_init_ind_table(struct bnx2x *bp)
4744 {
4745         int port = bp->port;
4746         int i;
4747
4748         if (!is_multi(bp))
4749                 return;
4750
4751         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4752                 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4753                         i % bp->num_queues);
4754
4755         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4756 }
4757
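/* Translate the driver rx_mode into TSTORM MAC filtering flags (e.g.
 * promiscuous mode sets all three *_accept_all bits) and write the
 * config word by word into TSTORM internal memory.
 */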
4758 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4759 {
4760         int mode = bp->rx_mode;
4761         int port = bp->port;
4762         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4763         int i;
4764
4765         DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4766
4767         switch (mode) {
4768         case BNX2X_RX_MODE_NONE: /* no Rx */
4769                 tstorm_mac_filter.ucast_drop_all = 1;
4770                 tstorm_mac_filter.mcast_drop_all = 1;
4771                 tstorm_mac_filter.bcast_drop_all = 1;
4772                 break;
4773         case BNX2X_RX_MODE_NORMAL:
4774                 tstorm_mac_filter.bcast_accept_all = 1;
4775                 break;
4776         case BNX2X_RX_MODE_ALLMULTI:
4777                 tstorm_mac_filter.mcast_accept_all = 1;
4778                 tstorm_mac_filter.bcast_accept_all = 1;
4779                 break;
4780         case BNX2X_RX_MODE_PROMISC:
4781                 tstorm_mac_filter.ucast_accept_all = 1;
4782                 tstorm_mac_filter.mcast_accept_all = 1;
4783                 tstorm_mac_filter.bcast_accept_all = 1;
4784                 break;
4785         default:
4786                 BNX2X_ERR("bad rx mode (%d)\n", mode);
4787         }
4788
4789         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4790                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4791                        TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
4792                        ((u32 *)&tstorm_mac_filter)[i]);
4793
4794 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4795                    ((u32 *)&tstorm_mac_filter)[i]); */
4796         }
4797 }
4798
4799 static void bnx2x_set_client_config(struct bnx2x *bp, int client_id)
4800 {
4801 #ifdef BCM_VLAN
4802         int mode = bp->rx_mode;
4803 #endif
4804         int port = bp->port;
4805         struct tstorm_eth_client_config tstorm_client = {0};
4806
4807         tstorm_client.mtu = bp->dev->mtu;
4808         tstorm_client.statistics_counter_id = 0;
4809         tstorm_client.config_flags =
4810                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4811 #ifdef BCM_VLAN
4812         if (mode && bp->vlgrp) {
4813                 tstorm_client.config_flags |=
4814                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4815                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4816         }
4817 #endif
4818         tstorm_client.drop_flags = (TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR |
4819                                     TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR |
4820                                     TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR |
4821                                     TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR);
4822
4823         REG_WR(bp, BAR_TSTRORM_INTMEM +
4824                TSTORM_CLIENT_CONFIG_OFFSET(port, client_id),
4825                ((u32 *)&tstorm_client)[0]);
4826         REG_WR(bp, BAR_TSTRORM_INTMEM +
4827                TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) + 4,
4828                ((u32 *)&tstorm_client)[1]);
4829
4830 /*      DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
4831            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
4832 }
4833
4834 static void bnx2x_init_internal(struct bnx2x *bp)
4835 {
4836         int port = bp->port;
4837         struct tstorm_eth_function_common_config tstorm_config = {0};
4838         struct stats_indication_flags stats_flags = {0};
4839         int i;
4840
4841         if (is_multi(bp)) {
4842                 tstorm_config.config_flags = MULTI_FLAGS;
4843                 tstorm_config.rss_result_mask = MULTI_MASK;
4844         }
4845
4846         REG_WR(bp, BAR_TSTRORM_INTMEM +
4847                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
4848                (*(u32 *)&tstorm_config));
4849
4850 /*      DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4851            (*(u32 *)&tstorm_config)); */
4852
4853         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no Rx until link is up */
4854         bnx2x_set_storm_rx_mode(bp);
4855
4856         for_each_queue(bp, i)
4857                 bnx2x_set_client_config(bp, i);
4858
4859
4860         stats_flags.collect_eth = cpu_to_le32(1);
4861
4862         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
4863                ((u32 *)&stats_flags)[0]);
4864         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
4865                ((u32 *)&stats_flags)[1]);
4866
4867         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
4868                ((u32 *)&stats_flags)[0]);
4869         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
4870                ((u32 *)&stats_flags)[1]);
4871
4872         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
4873                ((u32 *)&stats_flags)[0]);
4874         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
4875                ((u32 *)&stats_flags)[1]);
4876
4877 /*      DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
4878            ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
4879 }
4880
4881 static void bnx2x_nic_init(struct bnx2x *bp)
4882 {
4883         int i;
4884
4885         for_each_queue(bp, i) {
4886                 struct bnx2x_fastpath *fp = &bp->fp[i];
4887
4888                 fp->state = BNX2X_FP_STATE_CLOSED;
4889                 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
4890                    bp, fp->status_blk, i);
4891                 fp->index = i;
4892                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
4893         }
4894
4895         bnx2x_init_def_sb(bp, bp->def_status_blk,
4896                           bp->def_status_blk_mapping, 0x10);
4897         bnx2x_update_coalesce(bp);
4898         bnx2x_init_rx_rings(bp);
4899         bnx2x_init_tx_ring(bp);
4900         bnx2x_init_sp_ring(bp);
4901         bnx2x_init_context(bp);
4902         bnx2x_init_internal(bp);
4903         bnx2x_init_stats(bp);
4904         bnx2x_init_ind_table(bp);
4905         bnx2x_enable_int(bp);
4906
4907 }
4908
4909 /* end of nic init */
4910
4911 /*
4912  * gzip service functions
4913  */
4914
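/* The firmware images are stored gzip-compressed: gunzip_init allocates
 * a DMA-coherent output buffer of FW_BUF_SIZE plus a zlib stream and
 * workspace, and bnx2x_gunzip() skips the gzip header by hand before
 * inflating the raw deflate payload (hence the -MAX_WBITS window).
 */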
4915 static int bnx2x_gunzip_init(struct bnx2x *bp)
4916 {
4917         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4918                                               &bp->gunzip_mapping);
4919         if (bp->gunzip_buf == NULL)
4920                 goto gunzip_nomem1;
4921
4922         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4923         if (bp->strm == NULL)
4924                 goto gunzip_nomem2;
4925
4926         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4927                                       GFP_KERNEL);
4928         if (bp->strm->workspace == NULL)
4929                 goto gunzip_nomem3;
4930
4931         return 0;
4932
4933 gunzip_nomem3:
4934         kfree(bp->strm);
4935         bp->strm = NULL;
4936
4937 gunzip_nomem2:
4938         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4939                             bp->gunzip_mapping);
4940         bp->gunzip_buf = NULL;
4941
4942 gunzip_nomem1:
4943         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4944                " decompression\n", bp->dev->name);
4945         return -ENOMEM;
4946 }
4947
4948 static void bnx2x_gunzip_end(struct bnx2x *bp)
4949 {
4950         kfree(bp->strm->workspace);
4951
4952         kfree(bp->strm);
4953         bp->strm = NULL;
4954
4955         if (bp->gunzip_buf) {
4956                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4957                                     bp->gunzip_mapping);
4958                 bp->gunzip_buf = NULL;
4959         }
4960 }
4961
4962 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4963 {
4964         int n, rc;
4965
4966         /* check gzip header */
4967         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4968                 return -EINVAL;
4969
4970         n = 10;
4971
4972 #define FNAME                           0x8
4973
4974         if (zbuf[3] & FNAME)
4975                 while ((zbuf[n++] != 0) && (n < len));
4976
4977         bp->strm->next_in = zbuf + n;
4978         bp->strm->avail_in = len - n;
4979         bp->strm->next_out = bp->gunzip_buf;
4980         bp->strm->avail_out = FW_BUF_SIZE;
4981
4982         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4983         if (rc != Z_OK)
4984                 return rc;
4985
4986         rc = zlib_inflate(bp->strm, Z_FINISH);
4987         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4988                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4989                        bp->dev->name, bp->strm->msg);
4990
4991         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4992         if (bp->gunzip_outlen & 0x3)
4993                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4994                                     " gunzip_outlen (%d) not aligned\n",
4995                        bp->dev->name, bp->gunzip_outlen);
4996         bp->gunzip_outlen >>= 2;
4997
4998         zlib_inflateEnd(bp->strm);
4999
5000         if (rc == Z_STREAM_END)
5001                 return 0;
5002
5003         return rc;
5004 }
5005
5006 /* nic load/unload */
5007
5008 /*
5009  * general service functions
5010  */
5011
5012 /* send a NIG loopback debug packet */
5013 static void bnx2x_lb_pckt(struct bnx2x *bp)
5014 {
5015 #ifdef USE_DMAE
5016         u32 wb_write[3];
5017 #endif
5018
5019         /* Ethernet source and destination addresses */
5020 #ifdef USE_DMAE
5021         wb_write[0] = 0x55555555;
5022         wb_write[1] = 0x55555555;
5023         wb_write[2] = 0x20;             /* SOP */
5024         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5025 #else
5026         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5027         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5028         /* SOP */
5029         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5030 #endif
5031
5032         /* NON-IP protocol */
5033 #ifdef USE_DMAE
5034         wb_write[0] = 0x09000000;
5035         wb_write[1] = 0x55555555;
5036         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5037         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5038 #else
5039         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5040         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5041         /* EOP, eop_bvalid = 0 */
5042         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5043 #endif
5044 }
5045
5046 /* some of the internal memories
5047  * are not directly readable from the driver,
5048  * so to test them we send debug packets
5049  */
5050 static int bnx2x_int_mem_test(struct bnx2x *bp)
5051 {
5052         int factor;
5053         int count, i;
5054         u32 val = 0;
5055
5056         switch (CHIP_REV(bp)) {
5057         case CHIP_REV_EMUL:
5058                 factor = 200;
5059                 break;
5060         case CHIP_REV_FPGA:
5061                 factor = 120;
5062                 break;
5063         default:
5064                 factor = 1;
5065                 break;
5066         }
5067
5068         DP(NETIF_MSG_HW, "start part1\n");
5069
5070         /* Disable inputs of parser neighbor blocks */
5071         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5072         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5073         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5074         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5075
5076         /*  Write 0 to parser credits for CFC search request */
5077         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5078
5079         /* send Ethernet packet */
5080         bnx2x_lb_pckt(bp);
5081
5082         /* TODO: do we need to reset the NIG statistics? */
5083         /* Wait until NIG register shows 1 packet of size 0x10 */
5084         count = 1000 * factor;
5085         while (count) {
5086 #ifdef BNX2X_DMAE_RD
5087                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5088                 val = *bnx2x_sp(bp, wb_data[0]);
5089 #else
5090                 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5091                 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5092 #endif
5093                 if (val == 0x10)
5094                         break;
5095
5096                 msleep(10);
5097                 count--;
5098         }
5099         if (val != 0x10) {
5100                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5101                 return -1;
5102         }
5103
5104         /* Wait until PRS register shows 1 packet */
5105         count = 1000 * factor;
5106         while (count) {
5107                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5108
5109                 if (val == 1)
5110                         break;
5111
5112                 msleep(10);
5113                 count--;
5114         }
5115         if (val != 0x1) {
5116                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5117                 return -2;
5118         }
5119
5120         /* Reset and init BRB, PRS */
5121         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
5122         msleep(50);
5123         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
5124         msleep(50);
5125         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5126         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5127
5128         DP(NETIF_MSG_HW, "part2\n");
5129
5130         /* Disable inputs of parser neighbor blocks */
5131         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5132         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5133         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5134         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5135
5136         /* Write 0 to parser credits for CFC search request */
5137         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5138
5139         /* send 10 Ethernet packets */
5140         for (i = 0; i < 10; i++)
5141                 bnx2x_lb_pckt(bp);
5142
5143         /* Wait until NIG register shows 10 + 1
5144            packets of size 11*0x10 = 0xb0 */
5145         count = 1000 * factor;
5146         while (count) {
5147 #ifdef BNX2X_DMAE_RD
5148                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5149                 val = *bnx2x_sp(bp, wb_data[0]);
5150 #else
5151                 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5152                 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5153 #endif
5154                 if (val == 0xb0)
5155                         break;
5156
5157                 msleep(10);
5158                 count--;
5159         }
5160         if (val != 0xb0) {
5161                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5162                 return -3;
5163         }
5164
5165         /* Wait until PRS register shows 2 packets */
5166         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5167         if (val != 2)
5168                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5169
5170         /* Write 1 to parser credits for CFC search request */
5171         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5172
5173         /* Wait until PRS register shows 3 packets */
5174         msleep(10 * factor);
5175         /* read the parser packet counter */
5176         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5177         if (val != 3)
5178                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5179
5180         /* clear NIG EOP FIFO */
5181         for (i = 0; i < 11; i++)
5182                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5183         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5184         if (val != 1) {
5185                 BNX2X_ERR("clear of NIG failed\n");
5186                 return -4;
5187         }
5188
5189         /* Reset and init BRB, PRS, NIG */
5190         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5191         msleep(50);
5192         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5193         msleep(50);
5194         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5195         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5196 #ifndef BCM_ISCSI
5197         /* set NIC mode */
5198         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5199 #endif
5200
5201         /* Enable inputs of parser neighbor blocks */
5202         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5203         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5204         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5205         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5206
5207         DP(NETIF_MSG_HW, "done\n");
5208
5209         return 0; /* OK */
5210 }
5211
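/* Unmask the attention interrupts of most HW blocks; in these INT_MASK
 * registers a set bit appears to mean "masked", so writing 0 enables
 * everything, while PXP2 and PBF deliberately keep a few bits masked.
 */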
5212 static void enable_blocks_attention(struct bnx2x *bp)
5213 {
5214         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5215         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5216         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5217         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5218         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5219         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5220         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5221         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5222         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5223 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5224 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5225         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5226         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5227         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5228 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5229 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5230         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5231         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5232         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5233         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5234 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5235 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5236         REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
5237         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5238         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5239         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5240 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5241 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5242         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5243         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5244 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5245         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5246 }
5247
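/* Chip/function bring-up.  Bit 0 of 'mode' requests the "common" init
 * shared by both ports: global block resets, PXP/PXP2 setup, the storm
 * processors and the remaining per-block init sequences.
 */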
5248 static int bnx2x_function_init(struct bnx2x *bp, int mode)
5249 {
5250         int func = bp->port;
5251         int port = func ? PORT1 : PORT0;
5252         u32 val, i;
5253 #ifdef USE_DMAE
5254         u32 wb_write[2];
5255 #endif
5256
5257         DP(BNX2X_MSG_MCP, "function is %d  mode is %x\n", func, mode);
5258         if ((func != 0) && (func != 1)) {
5259                 BNX2X_ERR("BAD function number (%d)\n", func);
5260                 return -ENODEV;
5261         }
5262
5263         bnx2x_gunzip_init(bp);
5264
5265         if (mode & 0x1) {       /* init common */
5266                 DP(BNX2X_MSG_MCP, "starting common init  func %d  mode %x\n",
5267                    func, mode);
5268                 REG_WR(bp, MISC_REG_RESET_REG_1, 0xffffffff);
5269                 REG_WR(bp, MISC_REG_RESET_REG_2, 0xfffc);
5270                 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5271
5272                 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5273                 msleep(30);
5274                 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5275
5276                 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5277                 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5278
5279                 bnx2x_init_pxp(bp);
5280
5281                 if (CHIP_REV(bp) == CHIP_REV_Ax) {
5282                         /* enable HW interrupt from PXP on USDM
5283                            overflow bit 16 on INT_MASK_0 */
5284                         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5285                 }
5286
5287 #ifdef __BIG_ENDIAN
5288                 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5289                 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5290                 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5291                 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5292                 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5293                 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5294
5295 /*              REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5296                 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5297                 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5298                 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5299                 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5300 #endif
5301
5302 #ifndef BCM_ISCSI
5303                 /* set NIC mode */
5304                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5305 #endif
5306
5307                 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
5308 #ifdef BCM_ISCSI
5309                 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5310                 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5311                 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5312 #endif
5313
5314                 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5315
5316                 /* let the HW do its magic ... */
5317                 msleep(100);
5318                 /* finish PXP init
5319                    (can be moved up if we want to use the DMAE) */
5320                 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5321                 if (val != 1) {
5322                         BNX2X_ERR("PXP2 CFG failed\n");
5323                         return -EBUSY;
5324                 }
5325
5326                 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5327                 if (val != 1) {
5328                         BNX2X_ERR("PXP2 RD_INIT failed\n");
5329                         return -EBUSY;
5330                 }
5331
5332                 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5333                 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5334
5335                 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5336
5337                 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5338                 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5339                 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5340                 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5341
5342 #ifdef BNX2X_DMAE_RD
5343                 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5344                 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5345                 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5346                 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5347 #else
5348                 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
5349                 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
5350                 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
5351                 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
5352                 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
5353                 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
5354                 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
5355                 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
5356                 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
5357                 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
5358                 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
5359                 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
5360 #endif
5361                 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5362                 /* soft reset pulse */
5363                 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5364                 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5365
5366 #ifdef BCM_ISCSI
5367                 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5368 #endif
5369                 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5370                 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
5371                 if (CHIP_REV(bp) == CHIP_REV_Ax) {
5372                         /* enable hw interrupt from doorbell Q */
5373                         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5374                 }
5375
5376                 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5377
5378                 if (CHIP_REV_IS_SLOW(bp)) {
5379                         /* fix for emulation and FPGA for no pause */
5380                         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5381                         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5382                         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5383                         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5384                 }
5385
5386                 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5387
5388                 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5389                 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5390                 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5391                 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5392
5393                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5394                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5395                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5396                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5397
5398                 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5399                 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5400                 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5401                 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5402
5403                 /* sync semi rtc */
5404                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5405                        0x80000000);
5406                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5407                        0x80000000);
5408
5409                 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5410                 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5411                 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5412
5413                 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5414                 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5415                         REG_WR(bp, i, 0xc0cac01a);
5416                         /* TODO: replace with something meaningful */
5417                 }
5418                 /* SRCH COMMON comes here */
5419                 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5420
5421                 if (sizeof(union cdu_context) != 1024) {
5422                         /* we currently assume that a context is 1024 bytes */
5423                         printk(KERN_ALERT PFX "please adjust the size of"
5424                                " cdu_context(%ld)\n",
5425                                (long)sizeof(union cdu_context));
5426                 }
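                /* pack the CDU global parameters: the low bits hold the
                 * 1024-byte context size checked above; the layout of the
                 * upper fields (4 and 0) is inferred, not from a datasheet
                 */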
5427                 val = (4 << 24) + (0 << 12) + 1024;
5428                 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5429                 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5430
5431                 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5432                 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5433
5434                 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5435                 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
5436                                  MISC_AEU_COMMON_END);
5437                 /* RXPCS COMMON comes here */
5438                 /* EMAC0 COMMON comes here */
5439                 /* EMAC1 COMMON comes here */
5440                 /* DBU COMMON comes here */
5441                 /* DBG COMMON comes here */
5442                 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5443
5444                 if (CHIP_REV_IS_SLOW(bp))
5445                         msleep(200);
5446
5447                 /* finish CFC init */
5448                 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
5449                 if (val != 1) {
5450                         BNX2X_ERR("CFC LL_INIT failed\n");
5451                         return -EBUSY;
5452                 }
5453
5454                 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
5455                 if (val != 1) {
5456                         BNX2X_ERR("CFC AC_INIT failed\n");
5457                         return -EBUSY;
5458                 }
5459
5460                 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
5461                 if (val != 1) {
5462                         BNX2X_ERR("CFC CAM_INIT failed\n");
5463                         return -EBUSY;
5464                 }
5465
5466                 REG_WR(bp, CFC_REG_DEBUG0, 0);
5467
5468                 /* read NIG statistic
5469                    to see if this is our first up since powerup */
5470 #ifdef BNX2X_DMAE_RD
5471                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5472                 val = *bnx2x_sp(bp, wb_data[0]);
5473 #else
5474                 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5475                 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5476 #endif
5477                 /* do internal memory self test */
5478                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5479                         BNX2X_ERR("internal mem selftest failed\n");
5480                         return -EBUSY;
5481                 }
5482
5483                 /* clear PXP2 attentions */
5484                 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
5485
5486                 enable_blocks_attention(bp);
5487                 /* enable_blocks_parity(bp); */
5488
5489         } /* end of common init */
5490
5491         /* per port init */
5492
5493         /* the physical address is shifted right 12 bits and has a
5494            1=valid bit added as its 53rd bit;
5495            since this is a wide register(TM)
5496            we split it into two 32 bit writes
5497          */
5498 #define RQ_ONCHIP_AT_PORT_SIZE  384
5499 #define ONCHIP_ADDR1(x)   ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5500 #define ONCHIP_ADDR2(x)   ((u32)((1 << 20) | ((u64)x >> 44)))
5501 #define PXP_ONE_ILT(x)    ((x << 10) | x)
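/* Illustrative note: an ILT entry is the 64-bit value
 * (ONCHIP_ADDR2(x) << 32) | ONCHIP_ADDR1(x), i.e. the page-aligned
 * address x >> 12 in bits 51:0 with the valid bit at bit 52.
 * PXP_ONE_ILT(x) appears to pack the same line index into the first
 * and last fields of a PSWRQ *_L2P register, describing a single-line
 * ILT range (inferred from the macros above, not from a datasheet).
 */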
5502
5503         DP(BNX2X_MSG_MCP, "starting per-port init  port is %x\n", func);
5504
5505         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
5506
5507         /* Port PXP comes here */
5508         /* Port PXP2 comes here */
5509
5510         /* Offset is
5511          * Port0  0
5512          * Port1  384 */
5513         i = func * RQ_ONCHIP_AT_PORT_SIZE;
5514 #ifdef USE_DMAE
5515         wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
5516         wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
5517         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5518 #else
5519         REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
5520                    ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
5521         REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
5522                    ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
5523 #endif
5524         REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
5525
5526 #ifdef BCM_ISCSI
5527         /* Port0  1
5528          * Port1  385 */
5529         i++;
5530         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5531         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5532         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5533         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5534
5535         /* Port0  2
5536          * Port1  386 */
5537         i++;
5538         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5539         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5540         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5541         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5542
5543         /* Port0  3
5544          * Port1  387 */
5545         i++;
5546         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5547         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5548         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5549         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5550 #endif
5551
5552         /* Port TCM comes here */
5553         /* Port UCM comes here */
5554         /* Port CCM comes here */
5555         bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
5556                              func ? XCM_PORT1_END : XCM_PORT0_END);
5557
5558 #ifdef USE_DMAE
5559         wb_write[0] = 0;
5560         wb_write[1] = 0;
5561 #endif
5562         for (i = 0; i < 32; i++) {
5563                 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
5564 #ifdef USE_DMAE
5565                 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
5566 #else
5567                 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
5568                 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
5569 #endif
5570         }
5571         REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
5572
5573         /* Port QM comes here */
5574
5575 #ifdef BCM_ISCSI
5576         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5577         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5578
5579         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5580                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5581 #endif
5582         /* Port DQ comes here */
5583         /* Port BRB1 comes here */
5584         bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
5585                              func ? PRS_PORT1_END : PRS_PORT0_END);
5586         /* Port TSDM comes here */
5587         /* Port CSDM comes here */
5588         /* Port USDM comes here */
5589         /* Port XSDM comes here */
5590         bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
5591                              func ? TSEM_PORT1_END : TSEM_PORT0_END);
5592         bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
5593                              func ? USEM_PORT1_END : USEM_PORT0_END);
5594         bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
5595                              func ? CSEM_PORT1_END : CSEM_PORT0_END);
5596         bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
5597                              func ? XSEM_PORT1_END : XSEM_PORT0_END);
5598         /* Port UPB comes here */
5599         /* Port XPB comes here */
5600         bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
5601                              func ? PBF_PORT1_END : PBF_PORT0_END);
5602
5603         /* configure PBF to work without PAUSE for MTU 9000 */
5604         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
5605
5606         /* update threshold */
5607         REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
5608         /* update init credit */
5609         REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
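        /* units are 16-byte lines: 9040/16 = 565 lines cover a
         * 9000-byte jumbo frame plus overhead; the extra 553 - 22
         * lines of init credit appear to be a tuning constant
         */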
5610
5611         /* probe changes */
5612         REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
5613         msleep(5);
5614         REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
5615
5616 #ifdef BCM_ISCSI
5617         /* tell the searcher where the T2 table is */
5618         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5619
5620         wb_write[0] = U64_LO(bp->t2_mapping);
5621         wb_write[1] = U64_HI(bp->t2_mapping);
5622         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5623         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5624         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5625         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5626
5627         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5628         /* Port SRCH comes here */
5629 #endif
5630         /* Port CDU comes here */
5631         /* Port CFC comes here */
5632         bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
5633                              func ? HC_PORT1_END : HC_PORT0_END);
5634         bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
5635                                     MISC_AEU_PORT0_START,
5636                              func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5637         /* Port PXPCS comes here */
5638         /* Port EMAC0 comes here */
5639         /* Port EMAC1 comes here */
5640         /* Port DBU comes here */
5641         /* Port DBG comes here */
5642         bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
5643                              func ? NIG_PORT1_END : NIG_PORT0_END);
5644         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
5645         /* Port MCP comes here */
5646         /* Port DMAE comes here */
5647
5648         bnx2x_link_reset(bp);
5649
5650         /* Reset PCIe errors for debug */
5651         REG_WR(bp, 0x2114, 0xffffffff);
5652         REG_WR(bp, 0x2120, 0xffffffff);
5653         REG_WR(bp, 0x2814, 0xffffffff);
5654
5655         /* !!! move to init_values.h */
5656         REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5657         REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5658         REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5659         REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5660
5661         REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
5662         REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
5663         REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5664         REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
5665
5666         bnx2x_gunzip_end(bp);
5667
5668         if (!nomcp) {
5669                 port = bp->port;
5670
5671                 bp->fw_drv_pulse_wr_seq =
5672                                 (SHMEM_RD(bp, drv_fw_mb[port].drv_pulse_mb) &
5673                                  DRV_PULSE_SEQ_MASK);
5674                 bp->fw_mb = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_param);
5675                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  fw_mb 0x%x\n",
5676                    bp->fw_drv_pulse_wr_seq, bp->fw_mb);
5677         } else {
5678                 bp->fw_mb = 0;
5679         }
5680
5681         return 0;
5682 }
5683
5684
5685 /* send the MCP a request, block until there is a reply */
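/* The driver mailbox carries command|seq; the firmware echoes the same
 * sequence number in its own mailbox once the command is processed, so
 * comparing against FW_MSG_SEQ_NUMBER_MASK below identifies a reply to
 * this particular request.
 */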
5686 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5687 {
5688         u32 rc = 0;
5689         u32 seq = ++bp->fw_seq;
5690         int port = bp->port;
5691
5692         SHMEM_WR(bp, drv_fw_mb[port].drv_mb_header, command|seq);
5693         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", command|seq);
5694
5695         /* let the FW do its magic ... */
5696         msleep(100); /* TBD */
5697
5698         if (CHIP_REV_IS_SLOW(bp))
5699                 msleep(900);
5700
5701         rc = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_header);
5702
5703         DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
5704
5705         /* is this a reply to our command? */
5706         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5707                 rc &= FW_MSG_CODE_MASK;
5708         } else {
5709                 /* FW BUG! */
5710                 BNX2X_ERR("FW failed to respond!\n");
5711                 bnx2x_fw_dump(bp);
5712                 rc = 0;
5713         }
5714         return rc;
5715 }
5716
5717 static void bnx2x_free_mem(struct bnx2x *bp)
5718 {
5719
5720 #define BNX2X_PCI_FREE(x, y, size) \
5721         do { \
5722                 if (x) { \
5723                         pci_free_consistent(bp->pdev, size, x, y); \
5724                         x = NULL; \
5725                         y = 0; \
5726                 } \
5727         } while (0)
5728
5729 #define BNX2X_FREE(x) \
5730         do { \
5731                 if (x) { \
5732                         vfree(x); \
5733                         x = NULL; \
5734                 } \
5735         } while (0)
5736
5737         int i;
5738
5739         /* fastpath */
5740         for_each_queue(bp, i) {
5741
5742                 /* Status blocks */
5743                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5744                                bnx2x_fp(bp, i, status_blk_mapping),
5745                                sizeof(struct host_status_block) +
5746                                sizeof(struct eth_tx_db_data));
5747
5748                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5749                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5750                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5751                                bnx2x_fp(bp, i, tx_desc_mapping),
5752                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5753
5754                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5755                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5756                                bnx2x_fp(bp, i, rx_desc_mapping),
5757                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5758
5759                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5760                                bnx2x_fp(bp, i, rx_comp_mapping),
5761                                sizeof(struct eth_fast_path_rx_cqe) *
5762                                NUM_RCQ_BD);
5763         }
5764
5765         BNX2X_FREE(bp->fp);
5766
5767         /* end of fastpath */
5768
5769         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5770                        (sizeof(struct host_def_status_block)));
5771
5772         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5773                        (sizeof(struct bnx2x_slowpath)));
5774
5775 #ifdef BCM_ISCSI
5776         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5777         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5778         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5779         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5780 #endif
5781         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
5782
5783 #undef BNX2X_PCI_FREE
5784 #undef BNX2X_FREE
5785 }
5786
5787 static int bnx2x_alloc_mem(struct bnx2x *bp)
5788 {
5789
5790 #define BNX2X_PCI_ALLOC(x, y, size) \
5791         do { \
5792                 x = pci_alloc_consistent(bp->pdev, size, y); \
5793                 if (x == NULL) \
5794                         goto alloc_mem_err; \
5795                 memset(x, 0, size); \
5796         } while (0)
5797
5798 #define BNX2X_ALLOC(x, size) \
5799         do { \
5800                 x = vmalloc(size); \
5801                 if (x == NULL) \
5802                         goto alloc_mem_err; \
5803                 memset(x, 0, size); \
5804         } while (0)
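        /* BNX2X_PCI_ALLOC returns zeroed DMA-coherent memory for rings
         * the hardware reads and writes; BNX2X_ALLOC uses vmalloc for
         * host-only bookkeeping arrays the device never touches
         */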
5805
5806         int i;
5807
5808         /* fastpath */
5809         BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
5810
5811         for_each_queue(bp, i) {
5812                 bnx2x_fp(bp, i, bp) = bp;
5813
5814                 /* Status blocks */
5815                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5816                                 &bnx2x_fp(bp, i, status_blk_mapping),
5817                                 sizeof(struct host_status_block) +
5818                                 sizeof(struct eth_tx_db_data));
5819
5820                 bnx2x_fp(bp, i, hw_tx_prods) =
5821                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5822
5823                 bnx2x_fp(bp, i, tx_prods_mapping) =
5824                                 bnx2x_fp(bp, i, status_blk_mapping) +
5825                                 sizeof(struct host_status_block);
5826
5827                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5828                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5829                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5830                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5831                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5832                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5833
5834                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5835                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5836                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5837                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5838                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5839
5840                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5841                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5842                                 sizeof(struct eth_fast_path_rx_cqe) *
5843                                 NUM_RCQ_BD);
5844
5845         }
5846         /* end of fastpath */
5847
5848         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5849                         sizeof(struct host_def_status_block));
5850
5851         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5852                         sizeof(struct bnx2x_slowpath));
5853
5854 #ifdef BCM_ISCSI
5855         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5856
5857         /* Initialize T1 */
5858         for (i = 0; i < 64*1024; i += 64) {
5859                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5860                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5861         }
5862
5863         /* allocate the searcher T2 table;
5864            we allocate 1/4 of the allocation size for T2
5865            (which is not entered into the ILT) */
5866         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5867
5868         /* Initialize T2 */
5869         for (i = 0; i < 16*1024; i += 64)
5870                 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5871
5872         /* now fixup the last line in the block to point to the next block */
5873         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
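        /* each 64-byte T2 line now ends with the physical address of the
         * next line, forming the free list that the searcher walks; the
         * store above makes the last line wrap back to the first
         */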
5874
5875         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5876         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5877
5878         /* QM queues (128*MAX_CONN) */
5879         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5880 #endif
5881
5882         /* Slow path ring */
5883         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5884
5885         return 0;
5886
5887 alloc_mem_err:
5888         bnx2x_free_mem(bp);
5889         return -ENOMEM;
5890
5891 #undef BNX2X_PCI_ALLOC
5892 #undef BNX2X_ALLOC
5893 }
5894
5895 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5896 {
5897         int i;
5898
5899         for_each_queue(bp, i) {
5900                 struct bnx2x_fastpath *fp = &bp->fp[i];
5901
5902                 u16 bd_cons = fp->tx_bd_cons;
5903                 u16 sw_prod = fp->tx_pkt_prod;
5904                 u16 sw_cons = fp->tx_pkt_cons;
5905
5906                 BUG_TRAP(fp->tx_buf_ring != NULL);
5907
5908                 while (sw_cons != sw_prod) {
5909                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5910                         sw_cons++;
5911                 }
5912         }
5913 }
5914
5915 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5916 {
5917         int i, j;
5918
5919         for_each_queue(bp, j) {
5920                 struct bnx2x_fastpath *fp = &bp->fp[j];
5921
5922                 BUG_TRAP(fp->rx_buf_ring != NULL);
5923
5924                 for (i = 0; i < NUM_RX_BD; i++) {
5925                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5926                         struct sk_buff *skb = rx_buf->skb;
5927
5928                         if (skb == NULL)
5929                                 continue;
5930
5931                         pci_unmap_single(bp->pdev,
5932                                          pci_unmap_addr(rx_buf, mapping),
5933                                          bp->rx_buf_use_size,
5934                                          PCI_DMA_FROMDEVICE);
5935
5936                         rx_buf->skb = NULL;
5937                         dev_kfree_skb(skb);
5938                 }
5939         }
5940 }
5941
5942 static void bnx2x_free_skbs(struct bnx2x *bp)
5943 {
5944         bnx2x_free_tx_skbs(bp);
5945         bnx2x_free_rx_skbs(bp);
5946 }
5947
5948 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5949 {
5950         int i;
5951
5952         free_irq(bp->msix_table[0].vector, bp->dev);
5953         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5954            bp->msix_table[0].vector);
5955
5956         for_each_queue(bp, i) {
5957                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5958                    "state(%x)\n", i, bp->msix_table[i + 1].vector,
5959                    bnx2x_fp(bp, i, state));
5960
5961                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
5962
5963                         free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
5964                         bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
5965
5966                 } else
5967                         DP(NETIF_MSG_IFDOWN, "irq not freed\n");
5968
5969         }
5970
5971 }
5972
5973 static void bnx2x_free_irq(struct bnx2x *bp)
5974 {
5975
5976         if (bp->flags & USING_MSIX_FLAG) {
5977
5978                 bnx2x_free_msix_irqs(bp);
5979                 pci_disable_msix(bp->pdev);
5980
5981                 bp->flags &= ~USING_MSIX_FLAG;
5982
5983         } else
5984                 free_irq(bp->pdev->irq, bp->dev);
5985 }
5986
5987 static int bnx2x_enable_msix(struct bnx2x *bp)
5988 {
5989
5990         int i;
5991
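        /* entry 0 is the slowpath/default status block vector;
         * entries 1..num_queues get one vector per fastpath queue
         * (see bnx2x_req_msix_irqs below)
         */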
5992         bp->msix_table[0].entry = 0;
5993         for_each_queue(bp, i)
5994                 bp->msix_table[i + 1].entry = i + 1;
5995
5996         if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
5997                             bp->num_queues + 1)) {
5998                 BNX2X_ERR("failed to enable msix\n");
5999                 return -1;
6000
6001         }
6002
6003         bp->flags |= USING_MSIX_FLAG;
6004
6005         return 0;
6006
6007 }
6008
6009
6010 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6011 {
6012
6014         int i, rc;
6015
6016         DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6017
6018         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6019                          bp->dev->name, bp->dev);
6020
6021         if (rc) {
6022                 BNX2X_ERR("request sp irq failed\n");
6023                 return -EBUSY;
6024         }
6025
6026         for_each_queue(bp, i) {
6027                 rc = request_irq(bp->msix_table[i + 1].vector,
6028                                  bnx2x_msix_fp_int, 0,
6029                                  bp->dev->name, &bp->fp[i]);
6030
6031                 if (rc) {
6032                         BNX2X_ERR("request fp #%d irq failed\n", i);
6033                         bnx2x_free_msix_irqs(bp);
6034                         return -EBUSY;
6035                 }
6036
6037                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6038
6039         }
6040
6041         return 0;
6042
6043 }
6044
6045 static int bnx2x_req_irq(struct bnx2x *bp)
6046 {
6047
6048         int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6049                              IRQF_SHARED, bp->dev->name, bp->dev);
6050         if (!rc)
6051                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6052
6053         return rc;
6054
6055 }
6056
6057 /*
6058  * Init service functions
6059  */
6060
6061 static void bnx2x_set_mac_addr(struct bnx2x *bp)
6062 {
6063         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6064
6065         /* CAM allocation
6066          * unicasts 0-31:port0 32-63:port1
6067          * multicast 64-127:port0 128-191:port1
6068          */
6069         config->hdr.length_6b = 2;
6070         config->hdr.offset = bp->port ? 32 : 0;
6071         config->hdr.reserved0 = 0;
6072         config->hdr.reserved1 = 0;
6073
6074         /* primary MAC */
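        /* each 16-bit CAM field holds two MAC bytes, byte-swapped: a MAC
         * beginning 00:10:... yields msb_mac_addr 0x0010 (illustration)
         */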
6075         config->config_table[0].cam_entry.msb_mac_addr =
6076                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6077         config->config_table[0].cam_entry.middle_mac_addr =
6078                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6079         config->config_table[0].cam_entry.lsb_mac_addr =
6080                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6081         config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
6082         config->config_table[0].target_table_entry.flags = 0;
6083         config->config_table[0].target_table_entry.client_id = 0;
6084         config->config_table[0].target_table_entry.vlan_id = 0;
6085
6086         DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6087            config->config_table[0].cam_entry.msb_mac_addr,
6088            config->config_table[0].cam_entry.middle_mac_addr,
6089            config->config_table[0].cam_entry.lsb_mac_addr);
6090
6091         /* broadcast */
6092         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6093         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6094         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6095         config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
6096         config->config_table[1].target_table_entry.flags =
6097                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6098         config->config_table[1].target_table_entry.client_id = 0;
6099         config->config_table[1].target_table_entry.vlan_id = 0;
6100
6101         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6102                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6103                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6104 }
6105
6106 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6107                              int *state_p, int poll)
6108 {
6109         /* can take a while if any port is running */
6110         int timeout = 500;
6111
6112         /* DP("waiting for state to become %d on IDX [%d]\n",
6113         state, idx); */
6114
6115         might_sleep();
6116
6117         while (timeout) {
6118
6119                 if (poll) {
6120                         bnx2x_rx_int(bp->fp, 10);
6121                         /* if the index is different from 0
6122                          * the reply for some commands will
6123                          * be on the non-default queue
6124                          */
6125                         if (idx)
6126                                 bnx2x_rx_int(&bp->fp[idx], 10);
6127                 }
6128
6129                 mb(); /* state is changed by bnx2x_sp_event()*/
6130
6131                 if (*state_p != state)
6132                         return 0;
6133
6134                 timeout--;
6135                 msleep(1);
6136
6137         }
6138
6140         /* timeout! */
6141         BNX2X_ERR("timeout waiting for ramrod %d on %d\n", state, idx);
6142         return -EBUSY;
6143
6144 }
6145
6146 static int bnx2x_setup_leading(struct bnx2x *bp)
6147 {
6148
6149         /* reset IGU state */
6150         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6151
6152         /* SETUP ramrod */
6153         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6154
6155         return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6156
6157 }
6158
6159 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6160 {
6161
6162         /* reset IGU state */
6163         bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6164
6165         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6166         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6167
6168         /* Wait for completion */
6169         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6170                                  &(bp->fp[index].state), 1);
6171
6172 }
6173
6174
6175 static int bnx2x_poll(struct napi_struct *napi, int budget);
6176 static void bnx2x_set_rx_mode(struct net_device *dev);
6177
6178 static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6179 {
6180         int rc;
6181         int i = 0;
6182
6183         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6184
6185         /* Send LOAD_REQUEST command to MCP.
6186            Returns the type of LOAD command: if it is the
6187            first port to be initialized common blocks should be
6188            initialized, otherwise - not.
6189         */
6190         if (!nomcp) {
6191                 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6192                 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6193                         return -EBUSY; /* other port in diagnostic mode */
6194                 }
6195         } else {
6196                 rc = FW_MSG_CODE_DRV_LOAD_COMMON;
6197         }
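        /* the MCP arbitrates between the ports: only the first instance
         * to send LOAD_REQ is answered with LOAD_COMMON and initializes
         * the chip-wide blocks; later loads do per-port init only
         */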
6198
6199         DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
6200
6201         /* if we can't use MSI-X we only need one fp,
6202          * so try to enable MSI-X with the requested number of fp's
6203          * and fall back to INT#A with one fp
6204          */
6205         if (req_irq) {
6206
6207                 if (use_inta) {
6208                         bp->num_queues = 1;
6209                 } else {
6210                         if (use_multi > 1 && use_multi <= 16)
6211                                 /* user requested number */
6212                                 bp->num_queues = use_multi;
6213                         else if (use_multi == 1)
6214                                 bp->num_queues = num_online_cpus();
6215                         else
6216                                 bp->num_queues = 1;
6217
6218                         if (bnx2x_enable_msix(bp)) {
6219                                 /* failed to enable msix */
6220                                 bp->num_queues = 1;
6221                                 if (use_multi)
6222                                         BNX2X_ERR("Multi requested but failed"
6223                                                   " to enable MSI-X\n");
6224                         }
6225                 }
6226         }
6227
6228         if (bnx2x_alloc_mem(bp))
6229                 return -ENOMEM;
6230
6231         if (req_irq) {
6232                 if (bp->flags & USING_MSIX_FLAG) {
6233                         if (bnx2x_req_msix_irqs(bp)) {
6234                                 pci_disable_msix(bp->pdev);
6235                                 goto out_error;
6236                         }
6237
6238                 } else {
6239                         if (bnx2x_req_irq(bp)) {
6240                                 BNX2X_ERR("IRQ request failed, aborting\n");
6241                                 goto out_error;
6242                         }
6243                 }
6244         }
6245
6246         for_each_queue(bp, i)
6247                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6248                                bnx2x_poll, 128);
6249
6251         /* Initialize HW */
6252         if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
6253                 BNX2X_ERR("HW init failed, aborting\n");
6254                 goto out_error;
6255         }
6256
6258         atomic_set(&bp->intr_sem, 0);
6259
6260         /* Reenable SP tasklet */
6261         /*if (bp->sp_task_en) {                */
6262         /*        tasklet_enable(&bp->sp_task);*/
6263         /*} else {                             */
6264         /*        bp->sp_task_en = 1;          */
6265         /*}                                    */
6266
6267         /* Setup NIC internals and enable interrupts */
6268         bnx2x_nic_init(bp);
6269
6270         /* Send LOAD_DONE command to MCP */
6271         if (!nomcp) {
6272                 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6273                 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc);
6274                 if (!rc) {
6275                         BNX2X_ERR("MCP response failure, unloading\n");
6276                         goto int_disable;
6277                 }
6278         }
6279
6280         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6281
6282         /* Enable Rx interrupt handling before sending the ramrod
6283            as its completion arrives on the Rx FP queue */
6284         for_each_queue(bp, i)
6285                 napi_enable(&bnx2x_fp(bp, i, napi));
6286
6287         if (bnx2x_setup_leading(bp))
6288                 goto stop_netif;
6289
6290         for_each_nondefault_queue(bp, i)
6291                 if (bnx2x_setup_multi(bp, i))
6292                         goto stop_netif;
6293
6294         bnx2x_set_mac_addr(bp);
6295
6296         bnx2x_phy_init(bp);
6297
6298         /* Start fast path */
6299         if (req_irq) { /* IRQ is only requested from bnx2x_open */
6300                 netif_start_queue(bp->dev);
6301                 if (bp->flags & USING_MSIX_FLAG)
6302                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6303                                bp->dev->name);
6304
6305         /* Otherwise the Tx queue should only be re-enabled */
6306         } else if (netif_running(bp->dev)) {
6307                 netif_wake_queue(bp->dev);
6308                 bnx2x_set_rx_mode(bp->dev);
6309         }
6310
6311         /* start the timer */
6312         mod_timer(&bp->timer, jiffies + bp->current_interval);
6313
6314         return 0;
6315
6316 stop_netif:
6317         for_each_queue(bp, i)
6318                 napi_disable(&bnx2x_fp(bp, i, napi));
6319
6320 int_disable:
6321         bnx2x_disable_int_sync(bp);
6322
6323         bnx2x_free_skbs(bp);
6324         bnx2x_free_irq(bp);
6325
6326 out_error:
6327         bnx2x_free_mem(bp);
6328
6329         /* TBD we really need to reset the chip
6330            if we want to recover from this */
6331         return rc;
6332 }
6333
6334 static void bnx2x_netif_stop(struct bnx2x *bp)
6335 {
6336         int i;
6337
6338         bp->rx_mode = BNX2X_RX_MODE_NONE;
6339         bnx2x_set_storm_rx_mode(bp);
6340
6341         bnx2x_disable_int_sync(bp);
6342         bnx2x_link_reset(bp);
6343
6344         for_each_queue(bp, i)
6345                 napi_disable(&bnx2x_fp(bp, i, napi));
6346
6347         if (netif_running(bp->dev)) {
6348                 netif_tx_disable(bp->dev);
6349                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6350         }
6351 }
6352
6353 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6354 {
6355         int port = bp->port;
6356 #ifdef USE_DMAE
6357         u32 wb_write[2];
6358 #endif
6359         int base, i;
6360
6361         DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
6362
6363         /* Do not rcv packets to BRB */
6364         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6365         /* Do not direct rcv packets that are not for MCP to the BRB */
6366         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6367                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6368
6369         /* Configure IGU and AEU */
6370         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6371         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6372
6373         /* TODO: Close Doorbell port? */
6374
6375         /* Clear ILT */
6376 #ifdef USE_DMAE
6377         wb_write[0] = 0;
6378         wb_write[1] = 0;
6379 #endif
6380         base = port * RQ_ONCHIP_AT_PORT_SIZE;
6381         for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
6382 #ifdef USE_DMAE
6383                 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6384 #else
6385                 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
6386                 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
6387 #endif
6388         }
6389
6390         if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6391                 /* reset_common */
6392                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6393                        0xd3ffff7f);
6394                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6395                        0x1403);
6396         }
6397 }
6398
6399 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6400 {
6401
6402         int rc;
6403
6404         /* halt the connection */
6405         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6406         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6407
6409         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6410                                        &(bp->fp[index].state), 1);
6411         if (rc) /* timeout */
6412                 return rc;
6413
6414         /* delete cfc entry */
6415         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6416
6417         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_DELETED, index,
6418                                  &(bp->fp[index].state), 1);
6419
6420 }
6421
6422
6423 static void bnx2x_stop_leading(struct bnx2x *bp)
6424 {
6425
6426         /* if the other port is handling traffic,
6427            this can take a lot of time */
6428         int timeout = 500;
6429
6430         might_sleep();
6431
6432         /* Send HALT ramrod */
6433         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6434         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
6435
6436         if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6437                                &(bp->fp[0].state), 1))
6438                 return;
6439
6440         bp->dsb_sp_prod_idx = *bp->dsb_sp_prod;
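        /* snapshot the slowpath producer index; the CFC_DELETE completion
         * will advance it, which is what the polling loop below detects
         */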
6441
6442         /* Send CFC_DELETE ramrod */
6443         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6444
6445         /*
6446            Wait for completion:
6447            we are going to reset the chip anyway
6448            so there is not much to do if this times out
6449          */
6450         while (bp->dsb_sp_prod_idx == *bp->dsb_sp_prod && timeout) {
6451                 timeout--;
6452                 msleep(1);
6453         }
6454
6455 }
6456
6457 static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
6458 {
6459         u32 reset_code = 0;
6460         int rc;
6461         int i;
6462
6463         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6464
6465         /* Calling flush_scheduled_work() may deadlock because
6466          * linkwatch_event() may be on the workqueue and it will try to get
6467          * the rtnl_lock which we are holding.
6468          */
6469
6470         while (bp->in_reset_task)
6471                 msleep(1);
6472
6473         /* Delete the timer: do it before disabling interrupts, as there
6474            may still be a STAT_QUERY ramrod pending after stopping the timer */
6475         del_timer_sync(&bp->timer);
6476
6477         /* Wait until stat ramrod returns and all SP tasks complete */
6478         while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING))
6479                 msleep(1);
6480
6481         /* Stop fast path, disable MAC, disable interrupts, disable napi */
6482         bnx2x_netif_stop(bp);
6483
6484         if (bp->flags & NO_WOL_FLAG)
6485                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6486         else if (bp->wol) {
6487                 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6488                 u8 *mac_addr = bp->dev->dev_addr;
6489                 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
6490                            EMAC_MODE_ACPI_RCVD);
6491
6492                 EMAC_WR(EMAC_REG_EMAC_MODE, val);
6493
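                /* program the MAC-match filter so magic/ACPI wake packets
                 * addressed to us are recognized: bytes 0-1 go into
                 * MAC_MATCH, bytes 2-5 into MAC_MATCH + 4
                 */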
6494                 val = (mac_addr[0] << 8) | mac_addr[1];
6495                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
6496
6497                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6498                       (mac_addr[4] << 8) | mac_addr[5];
6499                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
6500
6501                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6502         } else
6503                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6504
6505         for_each_nondefault_queue(bp, i)
6506                 if (bnx2x_stop_multi(bp, i))
6507                         goto error;
6508
6510         bnx2x_stop_leading(bp);
6511
6512 error:
6513         if (!nomcp)
6514                 rc = bnx2x_fw_command(bp, reset_code);
6515         else
6516                 rc = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6517
6518         /* Release IRQs */
6519         if (free_irq)
6520                 bnx2x_free_irq(bp);
6521
6522         /* Reset the chip */
6523         bnx2x_reset_chip(bp, rc);
6524
6525         /* Report UNLOAD_DONE to MCP */
6526         if (!nomcp)
6527                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6528
6529         /* Free SKBs and driver internals */
6530         bnx2x_free_skbs(bp);
6531         bnx2x_free_mem(bp);
6532
6533         bp->state = BNX2X_STATE_CLOSED;
6534         /* Set link down */
6535         bp->link_up = 0;
6536         netif_carrier_off(bp->dev);
6537
6538         return 0;
6539 }
6540
6541 /* end of nic load/unload */
6542
6543 /* ethtool_ops */
6544
6545 /*
6546  * Init service functions
6547  */
6548
6549 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6550 {
6551         int port = bp->port;
6552         u32 ext_phy_type;
6553
6554         bp->phy_flags = 0;
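        /* switch_cfg from NVRAM selects the internal PHY path: SerDes
         * for 1G configurations, XGXS for 10G; each has its own external
         * PHY table and MDIO address register, handled below
         */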
6555
6556         switch (switch_cfg) {
6557         case SWITCH_CFG_1G:
6558                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6559
6560                 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
6561                 switch (ext_phy_type) {
6562                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6563                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6564                                        ext_phy_type);
6565
6566                         bp->supported |= (SUPPORTED_10baseT_Half |
6567                                           SUPPORTED_10baseT_Full |
6568                                           SUPPORTED_100baseT_Half |
6569                                           SUPPORTED_100baseT_Full |
6570                                           SUPPORTED_1000baseT_Full |
6571                                           SUPPORTED_2500baseT_Full |
6572                                           SUPPORTED_TP | SUPPORTED_FIBRE |
6573                                           SUPPORTED_Autoneg |
6574                                           SUPPORTED_Pause |
6575                                           SUPPORTED_Asym_Pause);
6576                         break;
6577
6578                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6579                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6580                                        ext_phy_type);
6581
6582                         bp->phy_flags |= PHY_SGMII_FLAG;
6583
6584                         bp->supported |= (/* SUPPORTED_10baseT_Half |
6585                                              SUPPORTED_10baseT_Full |
6586                                              SUPPORTED_100baseT_Half |
6587                                              SUPPORTED_100baseT_Full |*/
6588                                           SUPPORTED_1000baseT_Full |
6589                                           SUPPORTED_TP | SUPPORTED_FIBRE |
6590                                           SUPPORTED_Autoneg |
6591                                           SUPPORTED_Pause |
6592                                           SUPPORTED_Asym_Pause);
6593                         break;
6594
6595                 default:
6596                         BNX2X_ERR("NVRAM config error. "
6597                                   "BAD SerDes ext_phy_config 0x%x\n",
6598                                   bp->ext_phy_config);
6599                         return;
6600                 }
6601
6602                 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6603                                       port*0x10);
6604                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
6605                 break;
6606
6607         case SWITCH_CFG_10G:
6608                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6609
6610                 bp->phy_flags |= PHY_XGXS_FLAG;
6611
6612                 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
6613                 switch (ext_phy_type) {
6614                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6615                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6616                                        ext_phy_type);
6617
6618                         bp->supported |= (SUPPORTED_10baseT_Half |
6619                                           SUPPORTED_10baseT_Full |
6620                                           SUPPORTED_100baseT_Half |
6621                                           SUPPORTED_100baseT_Full |
6622                                           SUPPORTED_1000baseT_Full |
6623                                           SUPPORTED_2500baseT_Full |
6624                                           SUPPORTED_10000baseT_Full |
6625                                           SUPPORTED_TP | SUPPORTED_FIBRE |
6626                                           SUPPORTED_Autoneg |
6627                                           SUPPORTED_Pause |
6628                                           SUPPORTED_Asym_Pause);
6629                         break;
6630
6631                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6632                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6633                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705/6)\n",
6634                                        ext_phy_type);
6635
6636                         bp->supported |= (SUPPORTED_10000baseT_Full |
6637                                           SUPPORTED_FIBRE |
6638                                           SUPPORTED_Pause |
6639                                           SUPPORTED_Asym_Pause);
6640                         break;
6641
6642                 default:
6643                         BNX2X_ERR("NVRAM config error. "
6644                                   "BAD XGXS ext_phy_config 0x%x\n",
6645                                   bp->ext_phy_config);
6646                         return;
6647                 }
6648
6649                 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6650                                       port*0x18);
6651                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
6652
6653                 bp->ser_lane = ((bp->lane_config &
6654                                  PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
6655                                 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
6656                 bp->rx_lane_swap = ((bp->lane_config &
6657                                      PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
6658                                     PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
6659                 bp->tx_lane_swap = ((bp->lane_config &
6660                                      PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
6661                                     PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
6662                 BNX2X_DEV_INFO("rx_lane_swap 0x%x  tx_lane_swap 0x%x\n",
6663                                bp->rx_lane_swap, bp->tx_lane_swap);
6664                 break;
6665
6666         default:
6667                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6668                           bp->link_config);
6669                 return;
6670         }
6671
6672         /* mask what we support according to speed_cap_mask */
6673         if (!(bp->speed_cap_mask &
6674               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6675                 bp->supported &= ~SUPPORTED_10baseT_Half;
6676
6677         if (!(bp->speed_cap_mask &
6678               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6679                 bp->supported &= ~SUPPORTED_10baseT_Full;
6680
6681         if (!(bp->speed_cap_mask &
6682               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6683                 bp->supported &= ~SUPPORTED_100baseT_Half;
6684
6685         if (!(bp->speed_cap_mask &
6686               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6687                 bp->supported &= ~SUPPORTED_100baseT_Full;
6688
6689         if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6690                 bp->supported &= ~(SUPPORTED_1000baseT_Half |
6691                                    SUPPORTED_1000baseT_Full);
6692
6693         if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6694                 bp->supported &= ~SUPPORTED_2500baseT_Full;
6695
6696         if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6697                 bp->supported &= ~SUPPORTED_10000baseT_Full;
6698
6699         BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
6700 }
6701
6702 static void bnx2x_link_settings_requested(struct bnx2x *bp)
6703 {
6704         bp->req_autoneg = 0;
6705         bp->req_duplex = DUPLEX_FULL;
6706
6707         switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6708         case PORT_FEATURE_LINK_SPEED_AUTO:
6709                 if (bp->supported & SUPPORTED_Autoneg) {
6710                         bp->req_autoneg |= AUTONEG_SPEED;
6711                         bp->req_line_speed = 0;
6712                         bp->advertising = bp->supported;
6713                 } else {
6714                         u32 ext_phy_type;
6715
6716                         ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
6717                         if ((ext_phy_type ==
6718                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6719                             (ext_phy_type ==
6720                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6721                                 /* force 10G, no AN */
6722                                 bp->req_line_speed = SPEED_10000;
6723                                 bp->advertising =
6724                                                 (ADVERTISED_10000baseT_Full |
6725                                                  ADVERTISED_FIBRE);
6726                                 break;
6727                         }
6728                         BNX2X_ERR("NVRAM config error. "
6729                                   "Invalid link_config 0x%x"
6730                                   "  Autoneg not supported\n",
6731                                   bp->link_config);
6732                         return;
6733                 }
6734                 break;
6735
6736         case PORT_FEATURE_LINK_SPEED_10M_FULL:
6737                 if (bp->speed_cap_mask &
6738                     PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
6739                         bp->req_line_speed = SPEED_10;
6740                         bp->advertising = (ADVERTISED_10baseT_Full |
6741                                            ADVERTISED_TP);
6742                 } else {
6743                         BNX2X_ERR("NVRAM config error. "
6744                                   "Invalid link_config 0x%x"
6745                                   "  speed_cap_mask 0x%x\n",
6746                                   bp->link_config, bp->speed_cap_mask);
6747                         return;
6748                 }
6749                 break;
6750
6751         case PORT_FEATURE_LINK_SPEED_10M_HALF:
6752                 if (bp->speed_cap_mask &
6753                     PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
6754                         bp->req_line_speed = SPEED_10;
6755                         bp->req_duplex = DUPLEX_HALF;
6756                         bp->advertising = (ADVERTISED_10baseT_Half |
6757                                            ADVERTISED_TP);
6758                 } else {
6759                         BNX2X_ERR("NVRAM config error. "
6760                                   "Invalid link_config 0x%x"
6761                                   "  speed_cap_mask 0x%x\n",
6762                                   bp->link_config, bp->speed_cap_mask);
6763                         return;
6764                 }
6765                 break;
6766
6767         case PORT_FEATURE_LINK_SPEED_100M_FULL:
6768                 if (bp->speed_cap_mask &
6769                     PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
6770                         bp->req_line_speed = SPEED_100;
6771                         bp->advertising = (ADVERTISED_100baseT_Full |
6772                                            ADVERTISED_TP);
6773                 } else {
6774                         BNX2X_ERR("NVRAM config error. "
6775                                   "Invalid link_config 0x%x"
6776                                   "  speed_cap_mask 0x%x\n",
6777                                   bp->link_config, bp->speed_cap_mask);
6778                         return;
6779                 }
6780                 break;
6781
6782         case PORT_FEATURE_LINK_SPEED_100M_HALF:
6783                 if (bp->speed_cap_mask &
6784                     PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
6785                         bp->req_line_speed = SPEED_100;
6786                         bp->req_duplex = DUPLEX_HALF;
6787                         bp->advertising = (ADVERTISED_100baseT_Half |
6788                                            ADVERTISED_TP);
6789                 } else {
6790                         BNX2X_ERR("NVRAM config error. "
6791                                   "Invalid link_config 0x%x"
6792                                   "  speed_cap_mask 0x%x\n",
6793                                   bp->link_config, bp->speed_cap_mask);
6794                         return;
6795                 }
6796                 break;
6797
6798         case PORT_FEATURE_LINK_SPEED_1G:
6799                 if (bp->speed_cap_mask &
6800                     PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
6801                         bp->req_line_speed = SPEED_1000;
6802                         bp->advertising = (ADVERTISED_1000baseT_Full |
6803                                            ADVERTISED_TP);
6804                 } else {
6805                         BNX2X_ERR("NVRAM config error. "
6806                                   "Invalid link_config 0x%x"
6807                                   "  speed_cap_mask 0x%x\n",
6808                                   bp->link_config, bp->speed_cap_mask);
6809                         return;
6810                 }
6811                 break;
6812
6813         case PORT_FEATURE_LINK_SPEED_2_5G:
6814                 if (bp->speed_cap_mask &
6815                     PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) {
6816                         bp->req_line_speed = SPEED_2500;
6817                         bp->advertising = (ADVERTISED_2500baseT_Full |
6818                                            ADVERTISED_TP);
6819                 } else {
6820                         BNX2X_ERR("NVRAM config error. "
6821                                   "Invalid link_config 0x%x"
6822                                   "  speed_cap_mask 0x%x\n",
6823                                   bp->link_config, bp->speed_cap_mask);
6824                         return;
6825                 }
6826                 break;
6827
6828         case PORT_FEATURE_LINK_SPEED_10G_CX4:
6829         case PORT_FEATURE_LINK_SPEED_10G_KX4:
6830         case PORT_FEATURE_LINK_SPEED_10G_KR:
6831                 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
6832                         BNX2X_ERR("NVRAM config error. "
6833                                   "Invalid link_config 0x%x"
6834                                   "  phy_flags 0x%x\n",
6835                                   bp->link_config, bp->phy_flags);
6836                         return;
6837                 }
6838                 if (bp->speed_cap_mask &
6839                     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
6840                         bp->req_line_speed = SPEED_10000;
6841                         bp->advertising = (ADVERTISED_10000baseT_Full |
6842                                            ADVERTISED_FIBRE);
6843                 } else {
6844                         BNX2X_ERR("NVRAM config error. "
6845                                   "Invalid link_config 0x%x"
6846                                   "  speed_cap_mask 0x%x\n",
6847                                   bp->link_config, bp->speed_cap_mask);
6848                         return;
6849                 }
6850                 break;
6851
6852         default:
6853                 BNX2X_ERR("NVRAM config error. "
6854                           "BAD link speed link_config 0x%x\n",
6855                           bp->link_config);
6856                 bp->req_autoneg |= AUTONEG_SPEED;
6857                 bp->req_line_speed = 0;
6858                 bp->advertising = bp->supported;
6859                 break;
6860         }
6861         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d\n",
6862                        bp->req_line_speed, bp->req_duplex);
6863
6864         bp->req_flow_ctrl = (bp->link_config &
6865                              PORT_FEATURE_FLOW_CONTROL_MASK);
6866         /* Please refer to Table 28B-3 of the 802.3ab-1999 spec */
6867         switch (bp->req_flow_ctrl) {
6868         case FLOW_CTRL_AUTO:
6869                 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
6870                 if (bp->dev->mtu <= 4500) {
6871                         bp->pause_mode = PAUSE_BOTH;
6872                         bp->advertising |= (ADVERTISED_Pause |
6873                                             ADVERTISED_Asym_Pause);
6874                 } else {
6875                         bp->pause_mode = PAUSE_ASYMMETRIC;
6876                         bp->advertising |= ADVERTISED_Asym_Pause;
6877                 }
6878                 break;
6879
6880         case FLOW_CTRL_TX:
6881                 bp->pause_mode = PAUSE_ASYMMETRIC;
6882                 bp->advertising |= ADVERTISED_Asym_Pause;
6883                 break;
6884
6885         case FLOW_CTRL_RX:
6886         case FLOW_CTRL_BOTH:
6887                 bp->pause_mode = PAUSE_BOTH;
6888                 bp->advertising |= (ADVERTISED_Pause |
6889                                     ADVERTISED_Asym_Pause);
6890                 break;
6891
6892         case FLOW_CTRL_NONE:
6893         default:
6894                 bp->pause_mode = PAUSE_NONE;
6895                 bp->advertising &= ~(ADVERTISED_Pause |
6896                                      ADVERTISED_Asym_Pause);
6897                 break;
6898         }
6899         BNX2X_DEV_INFO("req_autoneg 0x%x  req_flow_ctrl 0x%x\n"
6900              KERN_INFO "  pause_mode %d  advertising 0x%x\n",
6901                        bp->req_autoneg, bp->req_flow_ctrl,
6902                        bp->pause_mode, bp->advertising);
6903 }
6904
6905 static void bnx2x_get_hwinfo(struct bnx2x *bp)
6906 {
6907         u32 val, val2, val3, val4, id;
6908         int port = bp->port;
6909         u32 switch_cfg;
6910
6911         bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6912         BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
6913
6914         /* Get the chip revision id and number. */
6915         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6916         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6917         id = ((val & 0xffff) << 16);
6918         val = REG_RD(bp, MISC_REG_CHIP_REV);
6919         id |= ((val & 0xf) << 12);
6920         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6921         id |= ((val & 0xff) << 4);
6922         val = REG_RD(bp, MISC_REG_BOND_ID);
6923         id |= (val & 0xf);
6924         bp->chip_id = id;
6925         BNX2X_DEV_INFO("chip ID is %x\n", id);
6926
6927         if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
6928                 BNX2X_DEV_INFO("MCP not active\n");
6929                 nomcp = 1;
6930                 goto set_mac;
6931         }
6932
6933         val = SHMEM_RD(bp, validity_map[port]);
6934         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6935                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6936                 BNX2X_ERR("MCP validity signature bad\n");
6937
6938         bp->fw_seq = (SHMEM_RD(bp, drv_fw_mb[port].drv_mb_header) &
6939                       DRV_MSG_SEQ_NUMBER_MASK);
6940
6941         bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6942
6943         bp->serdes_config =
6944                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
6945         bp->lane_config =
6946                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6947         bp->ext_phy_config =
6948                 SHMEM_RD(bp,
6949                          dev_info.port_hw_config[port].external_phy_config);
6950         bp->speed_cap_mask =
6951                 SHMEM_RD(bp,
6952                          dev_info.port_hw_config[port].speed_capability_mask);
6953
6954         bp->link_config =
6955                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6956
6957         BNX2X_DEV_INFO("hw_config (%08x)  serdes_config (%08x)\n"
6958              KERN_INFO "  lane_config (%08x)  ext_phy_config (%08x)\n"
6959              KERN_INFO "  speed_cap_mask (%08x)  link_config (%08x)"
6960                        "  fw_seq (%08x)\n",
6961                        bp->hw_config, bp->serdes_config, bp->lane_config,
6962                        bp->ext_phy_config, bp->speed_cap_mask,
6963                        bp->link_config, bp->fw_seq);
6964
6965         switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
6966         bnx2x_link_settings_supported(bp, switch_cfg);
6967
6968         bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
6969         /* for now disable cl73 */
6970         bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
6971         BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
6972
6973         bnx2x_link_settings_requested(bp);
6974
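             /* the station address is stored in shmem as two words:
              * mac_upper holds bytes 0-1, mac_lower holds bytes 2-5
              */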
6975         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6976         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6977         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6978         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6979         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6980         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6981         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
6982         bp->dev->dev_addr[5] = (u8)(val & 0xff);
6983
6984         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
6985
6986
6987         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6988         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6989         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6990         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6991
6992         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6993                val, val2, val3, val4);
6994
6995         /* bc ver */
6996         if (!nomcp) {
6997                 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
6998                 BNX2X_DEV_INFO("bc_ver %X\n", val);
6999                 if (val < BNX2X_BC_VER) {
7000                         /* for now only warn;
7001                          * later we might need to enforce this */
7002                         BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7003                                   " please upgrade BC\n", BNX2X_BC_VER, val);
7004                 }
7005         } else {
7006                 bp->bc_ver = 0;
7007         }
7008
7009         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7010         bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
7011         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7012                        bp->flash_size, bp->flash_size);
7013
7014         return;
7015
7016 set_mac: /* only supposed to happen on emulation/FPGA */
7017         BNX2X_ERR("warning: constant MAC workaround active\n");
7018         bp->dev->dev_addr[0] = 0;
7019         bp->dev->dev_addr[1] = 0x50;
7020         bp->dev->dev_addr[2] = 0xc2;
7021         bp->dev->dev_addr[3] = 0x2c;
7022         bp->dev->dev_addr[4] = 0x71;
7023         bp->dev->dev_addr[5] = port ? 0x0d : 0x0e;
7024
7025         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7026
7027 }
7028
7029 /*
7030  * ethtool service functions
7031  */
7032
7033 /* All ethtool functions called with rtnl_lock */
7034
7035 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7036 {
7037         struct bnx2x *bp = netdev_priv(dev);
7038
7039         cmd->supported = bp->supported;
7040         cmd->advertising = bp->advertising;
7041
7042         if (netif_carrier_ok(dev)) {
7043                 cmd->speed = bp->line_speed;
7044                 cmd->duplex = bp->duplex;
7045         } else {
7046                 cmd->speed = bp->req_line_speed;
7047                 cmd->duplex = bp->req_duplex;
7048         }
7049
7050         if (bp->phy_flags & PHY_XGXS_FLAG) {
7051                 cmd->port = PORT_FIBRE;
7052         } else {
7053                 cmd->port = PORT_TP;
7054         }
7055
7056         cmd->phy_address = bp->phy_addr;
7057         cmd->transceiver = XCVR_INTERNAL;
7058
7059         if (bp->req_autoneg & AUTONEG_SPEED) {
7060                 cmd->autoneg = AUTONEG_ENABLE;
7061         } else {
7062                 cmd->autoneg = AUTONEG_DISABLE;
7063         }
7064
7065         cmd->maxtxpkt = 0;
7066         cmd->maxrxpkt = 0;
7067
7068         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7069            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7070            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7071            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7072            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7073            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7074            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7075
7076         return 0;
7077 }
7078
7079 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7080 {
7081         struct bnx2x *bp = netdev_priv(dev);
7082         u32 advertising;
7083
7084         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7085            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7086            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7087            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7088            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7089            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7090            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7091
7092         switch (cmd->port) {
7093         case PORT_TP:
7094                 if (!(bp->supported & SUPPORTED_TP))
7095                         return -EINVAL;
7096
7097                 if (bp->phy_flags & PHY_XGXS_FLAG) {
7098                         bnx2x_link_reset(bp);
7099                         bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
7100                         bnx2x_phy_deassert(bp);
7101                 }
7102                 break;
7103
7104         case PORT_FIBRE:
7105                 if (!(bp->supported & SUPPORTED_FIBRE))
7106                         return -EINVAL;
7107
7108                 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
7109                         bnx2x_link_reset(bp);
7110                         bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
7111                         bnx2x_phy_deassert(bp);
7112                 }
7113                 break;
7114
7115         default:
7116                 return -EINVAL;
7117         }
7118
7119         if (cmd->autoneg == AUTONEG_ENABLE) {
7120                 if (!(bp->supported & SUPPORTED_Autoneg))
7121                         return -EINVAL;
7122
7123                 /* advertise the requested speed and duplex if supported */
7124                 cmd->advertising &= bp->supported;
7125
7126                 bp->req_autoneg |= AUTONEG_SPEED;
7127                 bp->req_line_speed = 0;
7128                 bp->req_duplex = DUPLEX_FULL;
7129                 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
7130
7131         } else { /* forced speed */
7132                 /* advertise the requested speed and duplex if supported */
7133                 switch (cmd->speed) {
7134                 case SPEED_10:
7135                         if (cmd->duplex == DUPLEX_FULL) {
7136                                 if (!(bp->supported & SUPPORTED_10baseT_Full))
7137                                         return -EINVAL;
7138
7139                                 advertising = (ADVERTISED_10baseT_Full |
7140                                                ADVERTISED_TP);
7141                         } else {
7142                                 if (!(bp->supported & SUPPORTED_10baseT_Half))
7143                                         return -EINVAL;
7144
7145                                 advertising = (ADVERTISED_10baseT_Half |
7146                                                ADVERTISED_TP);
7147                         }
7148                         break;
7149
7150                 case SPEED_100:
7151                         if (cmd->duplex == DUPLEX_FULL) {
7152                                 if (!(bp->supported &
7153                                                 SUPPORTED_100baseT_Full))
7154                                         return -EINVAL;
7155
7156                                 advertising = (ADVERTISED_100baseT_Full |
7157                                                ADVERTISED_TP);
7158                         } else {
7159                                 if (!(bp->supported &
7160                                                 SUPPORTED_100baseT_Half))
7161                                         return -EINVAL;
7162
7163                                 advertising = (ADVERTISED_100baseT_Half |
7164                                                ADVERTISED_TP);
7165                         }
7166                         break;
7167
7168                 case SPEED_1000:
7169                         if (cmd->duplex != DUPLEX_FULL)
7170                                 return -EINVAL;
7171
7172                         if (!(bp->supported & SUPPORTED_1000baseT_Full))
7173                                 return -EINVAL;
7174
7175                         advertising = (ADVERTISED_1000baseT_Full |
7176                                        ADVERTISED_TP);
7177                         break;
7178
7179                 case SPEED_2500:
7180                         if (cmd->duplex != DUPLEX_FULL)
7181                                 return -EINVAL;
7182
7183                         if (!(bp->supported & SUPPORTED_2500baseT_Full))
7184                                 return -EINVAL;
7185
7186                         advertising = (ADVERTISED_2500baseT_Full |
7187                                        ADVERTISED_TP);
7188                         break;
7189
7190                 case SPEED_10000:
7191                         if (cmd->duplex != DUPLEX_FULL)
7192                                 return -EINVAL;
7193
7194                         if (!(bp->supported & SUPPORTED_10000baseT_Full))
7195                                 return -EINVAL;
7196
7197                         advertising = (ADVERTISED_10000baseT_Full |
7198                                        ADVERTISED_FIBRE);
7199                         break;
7200
7201                 default:
7202                         return -EINVAL;
7203                 }
7204
7205                 bp->req_autoneg &= ~AUTONEG_SPEED;
7206                 bp->req_line_speed = cmd->speed;
7207                 bp->req_duplex = cmd->duplex;
7208                 bp->advertising = advertising;
7209         }
7210
7211         DP(NETIF_MSG_LINK, "req_autoneg 0x%x  req_line_speed %d\n"
7212            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7213            bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
7214            bp->advertising);
7215
7216         bnx2x_stop_stats(bp);
7217         bnx2x_link_initialize(bp);
7218
7219         return 0;
7220 }
7221
7222 static void bnx2x_get_drvinfo(struct net_device *dev,
7223                               struct ethtool_drvinfo *info)
7224 {
7225         struct bnx2x *bp = netdev_priv(dev);
7226
7227         strcpy(info->driver, DRV_MODULE_NAME);
7228         strcpy(info->version, DRV_MODULE_VERSION);
7229         snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
7230                  BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7231                  BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
7232                  bp->bc_ver);
7233         strcpy(info->bus_info, pci_name(bp->pdev));
7234         info->n_stats = BNX2X_NUM_STATS;
7235         info->testinfo_len = BNX2X_NUM_TESTS;
7236         info->eedump_len = bp->flash_size;
7237         info->regdump_len = 0;
7238 }
7239
7240 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7241 {
7242         struct bnx2x *bp = netdev_priv(dev);
7243
7244         if (bp->flags & NO_WOL_FLAG) {
7245                 wol->supported = 0;
7246                 wol->wolopts = 0;
7247         } else {
7248                 wol->supported = WAKE_MAGIC;
7249                 if (bp->wol)
7250                         wol->wolopts = WAKE_MAGIC;
7251                 else
7252                         wol->wolopts = 0;
7253         }
7254         memset(&wol->sopass, 0, sizeof(wol->sopass));
7255 }
7256
7257 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7258 {
7259         struct bnx2x *bp = netdev_priv(dev);
7260
7261         if (wol->wolopts & ~WAKE_MAGIC)
7262                 return -EINVAL;
7263
7264         if (wol->wolopts & WAKE_MAGIC) {
7265                 if (bp->flags & NO_WOL_FLAG)
7266                         return -EINVAL;
7267
7268                 bp->wol = 1;
7269         } else {
7270                 bp->wol = 0;
7271         }
7272         return 0;
7273 }
7274
7275 static u32 bnx2x_get_msglevel(struct net_device *dev)
7276 {
7277         struct bnx2x *bp = netdev_priv(dev);
7278
7279         return bp->msglevel;
7280 }
7281
7282 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7283 {
7284         struct bnx2x *bp = netdev_priv(dev);
7285
7286         if (capable(CAP_NET_ADMIN))
7287                 bp->msglevel = level;
7288 }
7289
7290 static int bnx2x_nway_reset(struct net_device *dev)
7291 {
7292         struct bnx2x *bp = netdev_priv(dev);
7293
7294         if (bp->state != BNX2X_STATE_OPEN) {
7295                 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
7296                 return -EAGAIN;
7297         }
7298
7299         bnx2x_stop_stats(bp);
7300         bnx2x_link_initialize(bp);
7301
7302         return 0;
7303 }
7304
7305 static int bnx2x_get_eeprom_len(struct net_device *dev)
7306 {
7307         struct bnx2x *bp = netdev_priv(dev);
7308
7309         return bp->flash_size;
7310 }
7311
7312 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7313 {
7314         int port = bp->port;
7315         int count, i;
7316         u32 val = 0;
7317
7318         /* adjust timeout for emulation/FPGA */
7319         count = NVRAM_TIMEOUT_COUNT;
7320         if (CHIP_REV_IS_SLOW(bp))
7321                 count *= 100;
7322
7323         /* request access to nvram interface */
7324         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7325                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7326
7327         for (i = 0; i < count*10; i++) {
7328                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7329                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7330                         break;
7331
7332                 udelay(5);
7333         }
7334
7335         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7336                 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
7337                 return -EBUSY;
7338         }
7339
7340         return 0;
7341 }
7342
7343 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7344 {
7345         int port = bp->port;
7346         int count, i;
7347         u32 val = 0;
7348
7349         /* adjust timeout for emulation/FPGA */
7350         count = NVRAM_TIMEOUT_COUNT;
7351         if (CHIP_REV_IS_SLOW(bp))
7352                 count *= 100;
7353
7354         /* relinquish nvram interface */
7355         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7356                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7357
7358         for (i = 0; i < count*10; i++) {
7359                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7360                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7361                         break;
7362
7363                 udelay(5);
7364         }
7365
7366         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7367                 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
7368                 return -EBUSY;
7369         }
7370
7371         return 0;
7372 }
7373
7374 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7375 {
7376         u32 val;
7377
7378         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7379
7380         /* enable both bits, even on read */
7381         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7382                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7383                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7384 }
7385
7386 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7387 {
7388         u32 val;
7389
7390         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7391
7392         /* disable both bits, even after read */
7393         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7394                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7395                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7396 }
7397
7398 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7399                                   u32 cmd_flags)
7400 {
7401         int rc;
7402         int count, i;
7403         u32 val;
7404
7405         /* build the command word */
7406         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7407
7408         /* need to clear DONE bit separately */
7409         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7410
7411         /* address of the NVRAM to read from */
7412         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7413                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7414
7415         /* issue a read command */
7416         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7417
7418         /* adjust timeout for emulation/FPGA */
7419         count = NVRAM_TIMEOUT_COUNT;
7420         if (CHIP_REV_IS_SLOW(bp))
7421                 count *= 100;
7422
7423         /* wait for completion */
7424         *ret_val = 0;
7425         rc = -EBUSY;
7426         for (i = 0; i < count; i++) {
7427                 udelay(5);
7428                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7429
7430                 if (val & MCPR_NVM_COMMAND_DONE) {
7431                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7432                         DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
7433                         /* we read nvram data in cpu order,
7434                          * but ethtool sees it as an array of bytes;
7435                          * converting to big-endian gives that layout */
7436                         val = cpu_to_be32(val);
7437                         *ret_val = val;
7438                         rc = 0;
7439                         break;
7440                 }
7441         }
7442
7443         return rc;
7444 }
7445
7446 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7447                             int buf_size)
7448 {
7449         int rc;
7450         u32 cmd_flags;
7451         u32 val;
7452
7453         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7454                 DP(NETIF_MSG_NVM,
7455                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
7456                    offset, buf_size);
7457                 return -EINVAL;
7458         }
7459
7460         if (offset + buf_size > bp->flash_size) {
7461                 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7462                                   " buf_size (0x%x) > flash_size (0x%x)\n",
7463                    offset, buf_size, bp->flash_size);
7464                 return -EINVAL;
7465         }
7466
7467         /* request access to nvram interface */
7468         rc = bnx2x_acquire_nvram_lock(bp);
7469         if (rc)
7470                 return rc;
7471
7472         /* enable access to nvram interface */
7473         bnx2x_enable_nvram_access(bp);
7474
7475         /* read the first word(s) */
7476         cmd_flags = MCPR_NVM_COMMAND_FIRST;
7477         while ((buf_size > sizeof(u32)) && (rc == 0)) {
7478                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7479                 memcpy(ret_buf, &val, 4);
7480
7481                 /* advance to the next dword */
7482                 offset += sizeof(u32);
7483                 ret_buf += sizeof(u32);
7484                 buf_size -= sizeof(u32);
7485                 cmd_flags = 0;
7486         }
7487
7488         if (rc == 0) {
7489                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7490                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7491                 memcpy(ret_buf, &val, 4);
7492         }
7493
7494         /* disable access to nvram interface */
7495         bnx2x_disable_nvram_access(bp);
7496         bnx2x_release_nvram_lock(bp);
7497
7498         return rc;
7499 }
7500
7501 static int bnx2x_get_eeprom(struct net_device *dev,
7502                             struct ethtool_eeprom *eeprom, u8 *eebuf)
7503 {
7504         struct bnx2x *bp = netdev_priv(dev);
7505         int rc;
7506
7507         DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7508            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
7509            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7510            eeprom->len, eeprom->len);
7511
7512         /* parameters already validated in ethtool_get_eeprom */
7513
7514         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7515
7516         return rc;
7517 }
7518
7519 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7520                                    u32 cmd_flags)
7521 {
7522         int rc;
7523         int count, i;
7524
7525         /* build the command word */
7526         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7527
7528         /* need to clear DONE bit separately */
7529         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7530
7531         /* write the data */
7532         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7533
7534         /* address of the NVRAM to write to */
7535         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7536                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7537
7538         /* issue the write command */
7539         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7540
7541         /* adjust timeout for emulation/FPGA */
7542         count = NVRAM_TIMEOUT_COUNT;
7543         if (CHIP_REV_IS_SLOW(bp))
7544                 count *= 100;
7545
7546         /* wait for completion */
7547         rc = -EBUSY;
7548         for (i = 0; i < count; i++) {
7549                 udelay(5);
7550                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7551                 if (val & MCPR_NVM_COMMAND_DONE) {
7552                         rc = 0;
7553                         break;
7554                 }
7555         }
7556
7557         return rc;
7558 }
7559
7560 #define BYTE_OFFSET(offset)             (8 * ((offset) & 0x03))
7561
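     /* write a single byte by reading the containing dword, patching
      * the byte in place and writing the dword back
      */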
7562 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7563                               int buf_size)
7564 {
7565         int rc;
7566         u32 cmd_flags;
7567         u32 align_offset;
7568         u32 val;
7569
7570         if (offset + buf_size > bp->flash_size) {
7571                 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7572                                   " buf_size (0x%x) > flash_size (0x%x)\n",
7573                    offset, buf_size, bp->flash_size);
7574                 return -EINVAL;
7575         }
7576
7577         /* request access to nvram interface */
7578         rc = bnx2x_acquire_nvram_lock(bp);
7579         if (rc)
7580                 return rc;
7581
7582         /* enable access to nvram interface */
7583         bnx2x_enable_nvram_access(bp);
7584
7585         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
7586         align_offset = (offset & ~0x03);
7587         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
7588
7589         if (rc == 0) {
7590                 val &= ~(0xff << BYTE_OFFSET(offset));
7591                 val |= (*data_buf << BYTE_OFFSET(offset));
7592
7593                 /* nvram data is returned as an array of bytes;
7594                  * convert it back to cpu order */
7595                 val = be32_to_cpu(val);
7596
7597                 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
7598
7599                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
7600                                              cmd_flags);
7601         }
7602
7603         /* disable access to nvram interface */
7604         bnx2x_disable_nvram_access(bp);
7605         bnx2x_release_nvram_lock(bp);
7606
7607         return rc;
7608 }
7609
7610 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
7611                              int buf_size)
7612 {
7613         int rc;
7614         u32 cmd_flags;
7615         u32 val;
7616         u32 written_so_far;
7617
7618         if (buf_size == 1) {    /* ethtool */
7619                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
7620         }
7621
7622         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7623                 DP(NETIF_MSG_NVM,
7624                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
7625                    offset, buf_size);
7626                 return -EINVAL;
7627         }
7628
7629         if (offset + buf_size > bp->flash_size) {
7630                 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7631                                   " buf_size (0x%x) > flash_size (0x%x)\n",
7632                    offset, buf_size, bp->flash_size);
7633                 return -EINVAL;
7634         }
7635
7636         /* request access to nvram interface */
7637         rc = bnx2x_acquire_nvram_lock(bp);
7638         if (rc)
7639                 return rc;
7640
7641         /* enable access to nvram interface */
7642         bnx2x_enable_nvram_access(bp);
7643
7644         written_so_far = 0;
7645         cmd_flags = MCPR_NVM_COMMAND_FIRST;
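             /* flag the last dword of the buffer and of every NVRAM page
              * as LAST, and the first dword of each new page as FIRST
              */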
7646         while ((written_so_far < buf_size) && (rc == 0)) {
7647                 if (written_so_far == (buf_size - sizeof(u32)))
7648                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
7649                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
7650                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
7651                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
7652                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
7653
7654                 memcpy(&val, data_buf, 4);
7655                 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
7656
7657                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
7658
7659                 /* advance to the next dword */
7660                 offset += sizeof(u32);
7661                 data_buf += sizeof(u32);
7662                 written_so_far += sizeof(u32);
7663                 cmd_flags = 0;
7664         }
7665
7666         /* disable access to nvram interface */
7667         bnx2x_disable_nvram_access(bp);
7668         bnx2x_release_nvram_lock(bp);
7669
7670         return rc;
7671 }
7672
7673 static int bnx2x_set_eeprom(struct net_device *dev,
7674                             struct ethtool_eeprom *eeprom, u8 *eebuf)
7675 {
7676         struct bnx2x *bp = netdev_priv(dev);
7677         int rc;
7678
7679         DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7680            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
7681            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7682            eeprom->len, eeprom->len);
7683
7684         /* parameters already validated in ethtool_set_eeprom */
7685
7686         rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7687
7688         return rc;
7689 }
7690
7691 static int bnx2x_get_coalesce(struct net_device *dev,
7692                               struct ethtool_coalesce *coal)
7693 {
7694         struct bnx2x *bp = netdev_priv(dev);
7695
7696         memset(coal, 0, sizeof(struct ethtool_coalesce));
7697
7698         coal->rx_coalesce_usecs = bp->rx_ticks;
7699         coal->tx_coalesce_usecs = bp->tx_ticks;
7700         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7701
7702         return 0;
7703 }
7704
7705 static int bnx2x_set_coalesce(struct net_device *dev,
7706                               struct ethtool_coalesce *coal)
7707 {
7708         struct bnx2x *bp = netdev_priv(dev);
7709
7710         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7711         if (bp->rx_ticks > 3000)
7712                 bp->rx_ticks = 3000;
7713
7714         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7715         if (bp->tx_ticks > 0x3000)
7716                 bp->tx_ticks = 0x3000;
7717
7718         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7719         if (bp->stats_ticks > 0xffff00)
7720                 bp->stats_ticks = 0xffff00;
7721         bp->stats_ticks &= 0xffff00;
7722
7723         if (netif_running(bp->dev))
7724                 bnx2x_update_coalesce(bp);
7725
7726         return 0;
7727 }
7728
7729 static void bnx2x_get_ringparam(struct net_device *dev,
7730                                 struct ethtool_ringparam *ering)
7731 {
7732         struct bnx2x *bp = netdev_priv(dev);
7733
7734         ering->rx_max_pending = MAX_RX_AVAIL;
7735         ering->rx_mini_max_pending = 0;
7736         ering->rx_jumbo_max_pending = 0;
7737
7738         ering->rx_pending = bp->rx_ring_size;
7739         ering->rx_mini_pending = 0;
7740         ering->rx_jumbo_pending = 0;
7741
7742         ering->tx_max_pending = MAX_TX_AVAIL;
7743         ering->tx_pending = bp->tx_ring_size;
7744 }
7745
7746 static int bnx2x_set_ringparam(struct net_device *dev,
7747                                struct ethtool_ringparam *ering)
7748 {
7749         struct bnx2x *bp = netdev_priv(dev);
7750
7751         if ((ering->rx_pending > MAX_RX_AVAIL) ||
7752             (ering->tx_pending > MAX_TX_AVAIL) ||
7753             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
7754                 return -EINVAL;
7755
7756         bp->rx_ring_size = ering->rx_pending;
7757         bp->tx_ring_size = ering->tx_pending;
7758
7759         if (netif_running(bp->dev)) {
7760                 bnx2x_nic_unload(bp, 0);
7761                 bnx2x_nic_load(bp, 0);
7762         }
7763
7764         return 0;
7765 }
7766
7767 static void bnx2x_get_pauseparam(struct net_device *dev,
7768                                  struct ethtool_pauseparam *epause)
7769 {
7770         struct bnx2x *bp = netdev_priv(dev);
7771
7772         epause->autoneg =
7773                 ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
7774         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
7775         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
7776
7777         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
7778            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
7779            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
7780 }
7781
7782 static int bnx2x_set_pauseparam(struct net_device *dev,
7783                                 struct ethtool_pauseparam *epause)
7784 {
7785         struct bnx2x *bp = netdev_priv(dev);
7786
7787         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
7788            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
7789            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
7790
7791         bp->req_flow_ctrl = FLOW_CTRL_AUTO;
7792         if (epause->autoneg) {
7793                 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
7794                 if (bp->dev->mtu <= 4500) {
7795                         bp->pause_mode = PAUSE_BOTH;
7796                         bp->advertising |= (ADVERTISED_Pause |
7797                                             ADVERTISED_Asym_Pause);
7798                 } else {
7799                         bp->pause_mode = PAUSE_ASYMMETRIC;
7800                         bp->advertising |= ADVERTISED_Asym_Pause;
7801                 }
7802
7803         } else {
7804                 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
7805
7806                 if (epause->rx_pause)
7807                         bp->req_flow_ctrl |= FLOW_CTRL_RX;
7808                 if (epause->tx_pause)
7809                         bp->req_flow_ctrl |= FLOW_CTRL_TX;
7810
7811                 switch (bp->req_flow_ctrl) {
7812                 case FLOW_CTRL_AUTO:
7813                         bp->req_flow_ctrl = FLOW_CTRL_NONE;
7814                         bp->pause_mode = PAUSE_NONE;
7815                         bp->advertising &= ~(ADVERTISED_Pause |
7816                                              ADVERTISED_Asym_Pause);
7817                         break;
7818
7819                 case FLOW_CTRL_TX:
7820                         bp->pause_mode = PAUSE_ASYMMETRIC;
7821                         bp->advertising |= ADVERTISED_Asym_Pause;
7822                         break;
7823
7824                 case FLOW_CTRL_RX:
7825                 case FLOW_CTRL_BOTH:
7826                         bp->pause_mode = PAUSE_BOTH;
7827                         bp->advertising |= (ADVERTISED_Pause |
7828                                             ADVERTISED_Asym_Pause);
7829                         break;
7830                 }
7831         }
7832
7833         DP(NETIF_MSG_LINK, "req_autoneg 0x%x  req_flow_ctrl 0x%x\n"
7834            DP_LEVEL "  pause_mode %d  advertising 0x%x\n",
7835            bp->req_autoneg, bp->req_flow_ctrl, bp->pause_mode,
7836            bp->advertising);
7837
7838         bnx2x_stop_stats(bp);
7839         bnx2x_link_initialize(bp);
7840
7841         return 0;
7842 }
7843
7844 static u32 bnx2x_get_rx_csum(struct net_device *dev)
7845 {
7846         struct bnx2x *bp = netdev_priv(dev);
7847
7848         return bp->rx_csum;
7849 }
7850
7851 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
7852 {
7853         struct bnx2x *bp = netdev_priv(dev);
7854
7855         bp->rx_csum = data;
7856         return 0;
7857 }
7858
7859 static int bnx2x_set_tso(struct net_device *dev, u32 data)
7860 {
7861         if (data)
7862                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7863         else
7864                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
7865         return 0;
7866 }
7867
7868 static struct {
7869         char string[ETH_GSTRING_LEN];
7870 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
7871         { "MC Errors  (online)" }
7872 };
7873
7874 static int bnx2x_self_test_count(struct net_device *dev)
7875 {
7876         return BNX2X_NUM_TESTS;
7877 }
7878
7879 static void bnx2x_self_test(struct net_device *dev,
7880                             struct ethtool_test *etest, u64 *buf)
7881 {
7882         struct bnx2x *bp = netdev_priv(dev);
7883         int stats_state;
7884
7885         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
7886
7887         if (bp->state != BNX2X_STATE_OPEN) {
7888                 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
7889                 return;
7890         }
7891
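             /* pause statistics while the test runs;
              * the previous state is restored below
              */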
7892         stats_state = bp->stats_state;
7893         bnx2x_stop_stats(bp);
7894
7895         if (bnx2x_mc_assert(bp) != 0) {
7896                 buf[0] = 1;
7897                 etest->flags |= ETH_TEST_FL_FAILED;
7898         }
7899
7900 #ifdef BNX2X_EXTRA_DEBUG
7901         bnx2x_panic_dump(bp);
7902 #endif
7903         bp->stats_state = stats_state;
7904 }
7905
7906 static struct {
7907         char string[ETH_GSTRING_LEN];
7908 } bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
7909         { "rx_bytes"},                           /*  0 */
7910         { "rx_error_bytes"},                     /*  1 */
7911         { "tx_bytes"},                           /*  2 */
7912         { "tx_error_bytes"},                     /*  3 */
7913         { "rx_ucast_packets"},                   /*  4 */
7914         { "rx_mcast_packets"},                   /*  5 */
7915         { "rx_bcast_packets"},                   /*  6 */
7916         { "tx_ucast_packets"},                   /*  7 */
7917         { "tx_mcast_packets"},                   /*  8 */
7918         { "tx_bcast_packets"},                   /*  9 */
7919         { "tx_mac_errors"},                      /* 10 */
7920         { "tx_carrier_errors"},                  /* 11 */
7921         { "rx_crc_errors"},                      /* 12 */
7922         { "rx_align_errors"},                    /* 13 */
7923         { "tx_single_collisions"},               /* 14 */
7924         { "tx_multi_collisions"},                /* 15 */
7925         { "tx_deferred"},                        /* 16 */
7926         { "tx_excess_collisions"},               /* 17 */
7927         { "tx_late_collisions"},                 /* 18 */
7928         { "tx_total_collisions"},                /* 19 */
7929         { "rx_fragments"},                       /* 20 */
7930         { "rx_jabbers"},                         /* 21 */
7931         { "rx_undersize_packets"},               /* 22 */
7932         { "rx_oversize_packets"},                /* 23 */
7933         { "rx_xon_frames"},                      /* 24 */
7934         { "rx_xoff_frames"},                     /* 25 */
7935         { "tx_xon_frames"},                      /* 26 */
7936         { "tx_xoff_frames"},                     /* 27 */
7937         { "rx_mac_ctrl_frames"},                 /* 28 */
7938         { "rx_filtered_packets"},                /* 29 */
7939         { "rx_discards"},                        /* 30 */
7940 };
7941
7942 #define STATS_OFFSET32(offset_name) \
7943         (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
7944
7945 static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
7946         STATS_OFFSET32(total_bytes_received_hi),                     /*  0 */
7947         STATS_OFFSET32(stat_IfHCInBadOctets_hi),                     /*  1 */
7948         STATS_OFFSET32(total_bytes_transmitted_hi),                  /*  2 */
7949         STATS_OFFSET32(stat_IfHCOutBadOctets_hi),                    /*  3 */
7950         STATS_OFFSET32(total_unicast_packets_received_hi),           /*  4 */
7951         STATS_OFFSET32(total_multicast_packets_received_hi),         /*  5 */
7952         STATS_OFFSET32(total_broadcast_packets_received_hi),         /*  6 */
7953         STATS_OFFSET32(total_unicast_packets_transmitted_hi),        /*  7 */
7954         STATS_OFFSET32(total_multicast_packets_transmitted_hi),      /*  8 */
7955         STATS_OFFSET32(total_broadcast_packets_transmitted_hi),      /*  9 */
7956         STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors),     /* 10 */
7957         STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),            /* 11 */
7958         STATS_OFFSET32(crc_receive_errors),                          /* 12 */
7959         STATS_OFFSET32(alignment_errors),                            /* 13 */
7960         STATS_OFFSET32(single_collision_transmit_frames),            /* 14 */
7961         STATS_OFFSET32(multiple_collision_transmit_frames),          /* 15 */
7962         STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),         /* 16 */
7963         STATS_OFFSET32(excessive_collision_frames),                  /* 17 */
7964         STATS_OFFSET32(late_collision_frames),                       /* 18 */
7965         STATS_OFFSET32(number_of_bugs_found_in_stats_spec),          /* 19 */
7966         STATS_OFFSET32(runt_packets_received),                       /* 20 */
7967         STATS_OFFSET32(jabber_packets_received),                     /* 21 */
7968         STATS_OFFSET32(error_runt_packets_received),                 /* 22 */
7969         STATS_OFFSET32(error_jabber_packets_received),               /* 23 */
7970         STATS_OFFSET32(pause_xon_frames_received),                   /* 24 */
7971         STATS_OFFSET32(pause_xoff_frames_received),                  /* 25 */
7972         STATS_OFFSET32(pause_xon_frames_transmitted),                /* 26 */
7973         STATS_OFFSET32(pause_xoff_frames_transmitted),               /* 27 */
7974         STATS_OFFSET32(control_frames_received),                     /* 28 */
7975         STATS_OFFSET32(mac_filter_discard),                          /* 29 */
7976         STATS_OFFSET32(no_buff_discard),                             /* 30 */
7977 };
7978
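     /* counter width in bytes: 8 selects a hi/lo pair combined via
      * HILO_U64, 4 a single 32-bit word, 0 skips the counter
      */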
7979 static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
7980         8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
7981         4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
7982         4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
7983         4,
7984 };
7985
7986 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7987 {
7988         switch (stringset) {
7989         case ETH_SS_STATS:
7990                 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
7991                 break;
7992
7993         case ETH_SS_TEST:
7994                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
7995                 break;
7996         }
7997 }
7998
7999 static int bnx2x_get_stats_count(struct net_device *dev)
8000 {
8001         return BNX2X_NUM_STATS;
8002 }
8003
8004 static void bnx2x_get_ethtool_stats(struct net_device *dev,
8005                                     struct ethtool_stats *stats, u64 *buf)
8006 {
8007         struct bnx2x *bp = netdev_priv(dev);
8008         u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
8009         int i;
8010
8011         for (i = 0; i < BNX2X_NUM_STATS; i++) {
8012                 if (bnx2x_stats_len_arr[i] == 0) {
8013                         /* skip this counter */
8014                         buf[i] = 0;
8015                         continue;
8016                 }
8017                 if (!hw_stats) {
8018                         buf[i] = 0;
8019                         continue;
8020                 }
8021                 if (bnx2x_stats_len_arr[i] == 4) {
8022                         /* 4-byte counter */
8023                        buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
8024                         continue;
8025                 }
8026                 /* 8-byte counter */
8027                 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
8028                                  *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
8029         }
8030 }
8031
8032 static int bnx2x_phys_id(struct net_device *dev, u32 data)
8033 {
8034         struct bnx2x *bp = netdev_priv(dev);
8035         int i;
8036
8037         if (data == 0)
8038                 data = 2;
8039
8040         for (i = 0; i < (data * 2); i++) {
8041                 if ((i % 2) == 0) {
8042                         bnx2x_leds_set(bp, SPEED_1000);
8043                 } else {
8044                         bnx2x_leds_unset(bp);
8045                 }
8046                 msleep_interruptible(500);
8047                 if (signal_pending(current))
8048                         break;
8049         }
8050
8051         if (bp->link_up)
8052                 bnx2x_leds_set(bp, bp->line_speed);
8053
8054         return 0;
8055 }
8056
8057 static struct ethtool_ops bnx2x_ethtool_ops = {
8058         .get_settings           = bnx2x_get_settings,
8059         .set_settings           = bnx2x_set_settings,
8060         .get_drvinfo            = bnx2x_get_drvinfo,
8061         .get_wol                = bnx2x_get_wol,
8062         .set_wol                = bnx2x_set_wol,
8063         .get_msglevel           = bnx2x_get_msglevel,
8064         .set_msglevel           = bnx2x_set_msglevel,
8065         .nway_reset             = bnx2x_nway_reset,
8066         .get_link               = ethtool_op_get_link,
8067         .get_eeprom_len         = bnx2x_get_eeprom_len,
8068         .get_eeprom             = bnx2x_get_eeprom,
8069         .set_eeprom             = bnx2x_set_eeprom,
8070         .get_coalesce           = bnx2x_get_coalesce,
8071         .set_coalesce           = bnx2x_set_coalesce,
8072         .get_ringparam          = bnx2x_get_ringparam,
8073         .set_ringparam          = bnx2x_set_ringparam,
8074         .get_pauseparam         = bnx2x_get_pauseparam,
8075         .set_pauseparam         = bnx2x_set_pauseparam,
8076         .get_rx_csum            = bnx2x_get_rx_csum,
8077         .set_rx_csum            = bnx2x_set_rx_csum,
8078         .get_tx_csum            = ethtool_op_get_tx_csum,
8079         .set_tx_csum            = ethtool_op_set_tx_csum,
8080         .get_sg                 = ethtool_op_get_sg,
8081         .set_sg                 = ethtool_op_set_sg,
8082         .get_tso                = ethtool_op_get_tso,
8083         .set_tso                = bnx2x_set_tso,
8084         .self_test_count        = bnx2x_self_test_count,
8085         .self_test              = bnx2x_self_test,
8086         .get_strings            = bnx2x_get_strings,
8087         .phys_id                = bnx2x_phys_id,
8088         .get_stats_count        = bnx2x_get_stats_count,
8089         .get_ethtool_stats      = bnx2x_get_ethtool_stats
8090 };
8091
8092 /* end of ethtool_ops */
8093
8094 /****************************************************************************
8095 * General service functions
8096 ****************************************************************************/
8097
8098 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
8099 {
8100         u16 pmcsr;
8101
8102         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
8103
8104         switch (state) {
8105         case PCI_D0:
8106                 pci_write_config_word(bp->pdev,
8107                                       bp->pm_cap + PCI_PM_CTRL,
8108                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
8109                                        PCI_PM_CTRL_PME_STATUS));
8110
8111                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
8112                         /* delay required during transition out of D3hot */
8113                         msleep(20);
8114                 break;
8115
8116         case PCI_D3hot:
8117                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
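                     /* a value of 3 in the PM state field selects D3hot */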
8118                 pmcsr |= 3;
8119
8120                 if (bp->wol)
8121                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
8122
8123                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
8124                                       pmcsr);
8125
8126                 /* No more memory access after this point until
8127                  * device is brought back to D0.
8128                  */
8129                 break;
8130
8131         default:
8132                 return -EINVAL;
8133         }
8134         return 0;
8135 }
8136
8137 /*
8138  * net_device service functions
8139  */
8140
8141 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
8142  * from set_multicast.
8143  */
8144 static void bnx2x_set_rx_mode(struct net_device *dev)
8145 {
8146         struct bnx2x *bp = netdev_priv(dev);
8147         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8148
8149         DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
8150
8151         if (dev->flags & IFF_PROMISC)
8152                 rx_mode = BNX2X_RX_MODE_PROMISC;
8153
8154         else if ((dev->flags & IFF_ALLMULTI) ||
8155                  (dev->mc_count > BNX2X_MAX_MULTICAST))
8156                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8157
8158         else { /* some multicasts */
8159                 int i, old, offset;
8160                 struct dev_mc_list *mclist;
8161                 struct mac_configuration_cmd *config =
8162                                                 bnx2x_sp(bp, mcast_config);
8163
8164                 for (i = 0, mclist = dev->mc_list;
8165                      mclist && (i < dev->mc_count);
8166                      i++, mclist = mclist->next) {
8167
8168                         config->config_table[i].cam_entry.msb_mac_addr =
8169                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
8170                         config->config_table[i].cam_entry.middle_mac_addr =
8171                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
8172                         config->config_table[i].cam_entry.lsb_mac_addr =
8173                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
8174                         config->config_table[i].cam_entry.flags =
8175                                                         cpu_to_le16(bp->port);
8176                         config->config_table[i].target_table_entry.flags = 0;
8177                         config->config_table[i].target_table_entry.
8178                                                                 client_id = 0;
8179                         config->config_table[i].target_table_entry.
8180                                                                 vlan_id = 0;
8181
8182                         DP(NETIF_MSG_IFUP,
8183                            "setting MCAST[%d] (%04x:%04x:%04x)\n",
8184                            i, config->config_table[i].cam_entry.msb_mac_addr,
8185                            config->config_table[i].cam_entry.middle_mac_addr,
8186                            config->config_table[i].cam_entry.lsb_mac_addr);
8187                 }
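                     /* invalidate any CAM entries left over from a
                      * previous, longer multicast list
                      */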
8188                 old = config->hdr.length_6b;
8189                 if (old > i) {
8190                         for (; i < old; i++) {
8191                                 if (CAM_IS_INVALID(config->config_table[i])) {
8192                                         i--; /* already invalidated */
8193                                         break;
8194                                 }
8195                                 /* invalidate */
8196                                 CAM_INVALIDATE(config->config_table[i]);
8197                         }
8198                 }
8199
8200                 if (CHIP_REV_IS_SLOW(bp))
8201                         offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
8202                 else
8203                         offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
8204
8205                 config->hdr.length_6b = i;
8206                 config->hdr.offset = offset;
8207                 config->hdr.reserved0 = 0;
8208                 config->hdr.reserved1 = 0;
8209
8210                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8211                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8212                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8213         }
8214
8215         bp->rx_mode = rx_mode;
8216         bnx2x_set_storm_rx_mode(bp);
8217 }
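
/* Illustrative sketch (not driver code): how bnx2x_set_rx_mode() splits a
 * multicast MAC into the three 16-bit CAM fields.  On a little-endian host,
 * reading two bytes as a u16 and swab16()-ing them yields the big-endian
 * byte pair; e.g. for the mDNS group address 01:00:5e:00:00:fb:
 *
 *      msb_mac_addr    = swab16(*(u16 *)&addr[0]);     -> 0x0100
 *      middle_mac_addr = swab16(*(u16 *)&addr[2]);     -> 0x5e00
 *      lsb_mac_addr    = swab16(*(u16 *)&addr[4]);     -> 0x00fb
 */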
8218
8219 static int bnx2x_poll(struct napi_struct *napi, int budget)
8220 {
8221         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
8222                                                  napi);
8223         struct bnx2x *bp = fp->bp;
8224         int work_done = 0;
8225
8226 #ifdef BNX2X_STOP_ON_ERROR
8227         if (unlikely(bp->panic))
8228                 goto out_panic;
8229 #endif
8230
8231         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
8232         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
8233         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
8234
8235         bnx2x_update_fpsb_idx(fp);
8236
8237         if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
8238                 bnx2x_tx_int(fp, budget);
8239
8241         if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
8242                 work_done = bnx2x_rx_int(fp, budget);
8243
8245         rmb(); /* bnx2x_has_work() reads the status block */
8246
8247         /* must not complete if we consumed full budget */
8248         if ((work_done < budget) && !bnx2x_has_work(fp)) {
8249
8250 #ifdef BNX2X_STOP_ON_ERROR
8251 out_panic:
8252 #endif
8253                 netif_rx_complete(bp->dev, napi);
8254
8255                 bnx2x_ack_sb(bp, fp->index, USTORM_ID,
8256                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
8257                 bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
8258                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
8259         }
8260
8261         return work_done;
8262 }
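
/* A minimal sketch of the NAPI contract bnx2x_poll() implements (illustrative
 * only; my_dev, my_rx_work and my_has_work are hypothetical): the handler may
 * consume at most 'budget' packets, and must complete and re-enable the
 * device's interrupt only when it did less than a full budget AND no further
 * work is pending:
 *
 *      static int my_poll(struct napi_struct *napi, int budget)
 *      {
 *              int done = my_rx_work(napi, budget);
 *
 *              if (done < budget && !my_has_work(napi)) {
 *                      netif_rx_complete(my_dev, napi);
 *                      my_enable_device_irq();
 *              }
 *              return done;
 *      }
 */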
8263
8264 /* Called with netif_tx_lock.
8265  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
8266  * netif_wake_queue().
8267  */
8268 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8269 {
8270         struct bnx2x *bp = netdev_priv(dev);
8271         struct bnx2x_fastpath *fp;
8272         struct sw_tx_bd *tx_buf;
8273         struct eth_tx_bd *tx_bd;
8274         struct eth_tx_parse_bd *pbd = NULL;
8275         u16 pkt_prod, bd_prod;
8276         int nbd, fp_index = 0;
8277         dma_addr_t mapping;
8278
8279 #ifdef BNX2X_STOP_ON_ERROR
8280         if (unlikely(bp->panic))
8281                 return NETDEV_TX_BUSY;
8282 #endif
8283
8284         fp_index = smp_processor_id() % (bp->num_queues);
8285
8286         fp = &bp->fp[fp_index];
8287         if (unlikely(bnx2x_tx_avail(fp) <
8288                                         (skb_shinfo(skb)->nr_frags + 3))) {
8289                 bp->slowpath->eth_stats.driver_xoff++;
8290                 netif_stop_queue(dev);
8291                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
8292                 return NETDEV_TX_BUSY;
8293         }
8294
8295         /* This is a bit ugly: first we use one BD which we mark as start,
8296          * then for TSO or checksum offload we get a parsing-info BD,
8297          * and only then the rest of the TSO BDs.
8298          * (Don't forget to mark the last one as last,
8299          * and to unmap only AFTER you write to the BD ...)
8300          * I would like to thank DovH for this mess.
8301          */
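
        /* For example (illustrative only), a TSO skb with two page frags
         * occupies:
         *
         *      start BD (linear data) -> parsing-info BD -> frag BD -> frag BD
         *
         * i.e. nbd = nr_frags + 2; if the linear part is later split into a
         * header BD and a data BD (see below), one more BD is consumed.
         */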
8303
8304         pkt_prod = fp->tx_pkt_prod++;
8305         bd_prod = fp->tx_bd_prod;
8306         bd_prod = TX_BD(bd_prod);
8307
8308         /* get a tx_buff and first bd */
8309         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8310         tx_bd = &fp->tx_desc_ring[bd_prod];
8311
8312         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
8313         tx_bd->general_data = (UNICAST_ADDRESS <<
8314                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
8315         tx_bd->general_data |= 1; /* header nbd */
8316
8317         /* remember the first BD of the packet */
8318         tx_buf->first_bd = bd_prod;
8319
8320         DP(NETIF_MSG_TX_QUEUED,
8321            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
8322            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
8323
8324         if (skb->ip_summed == CHECKSUM_PARTIAL) {
8325                 struct iphdr *iph = ip_hdr(skb);
8326                 u8 len;
8327
8328                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
8329
8330                 /* turn on parsing and get a bd */
8331                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8332                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
8333                 len = ((u8 *)iph - (u8 *)skb->data) / 2;
8334
8335                 /* for now NS flag is not used in Linux */
8336                 pbd->global_data = (len |
8337                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
8338                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
8339                 pbd->ip_hlen = ip_hdrlen(skb) / 2;
8340                 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
8341                 if (iph->protocol == IPPROTO_TCP) {
8342                         struct tcphdr *th = tcp_hdr(skb);
8343
8344                         tx_bd->bd_flags.as_bitfield |=
8345                                                 ETH_TX_BD_FLAGS_TCP_CSUM;
8346                         pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
8347                         pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
8348                         pbd->tcp_pseudo_csum = swab16(th->check);
8349
8350                 } else if (iph->protocol == IPPROTO_UDP) {
8351                         struct udphdr *uh = udp_hdr(skb);
8352
8353                         tx_bd->bd_flags.as_bitfield |=
8354                                                 ETH_TX_BD_FLAGS_TCP_CSUM;
8355                         pbd->total_hlen += cpu_to_le16(4);
8356                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
8357                         pbd->cs_offset = 5; /* 10 >> 1 */
8358                         pbd->tcp_pseudo_csum = 0;
8359                         /* HW bug workaround: subtract the checksum of the
8360                          * 10 bytes preceding the UDP header from uh->check
8361                          */
8362                         uh->check = (u16) ~csum_fold(csum_sub(uh->check,
8363                                 csum_partial(((u8 *)(uh)-10), 10, 0)));
8364                 }
8365         }
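
        /* A sketch of the arithmetic behind the UDP workaround above
         * (illustrative only): in 1's-complement terms the driver computes
         *
         *      fixed = ~csum_fold(csum_sub(uh->check,
         *                                  csum_partial((u8 *)uh - 10, 10, 0)));
         *
         * i.e. it pre-subtracts the 10 bytes preceding the UDP header,
         * presumably because the hardware sums those bytes back in when it
         * writes the final checksum to the wire.
         */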
8366
8367         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
8368                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
8369                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
8370         } else {
8371                 tx_bd->vlan = cpu_to_le16(pkt_prod);
8372         }
8373
8374         mapping = pci_map_single(bp->pdev, skb->data,
8375                                  skb->len, PCI_DMA_TODEVICE);
8376
8377         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8378         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8379         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
8380         tx_bd->nbd = cpu_to_le16(nbd);
8381         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8382
8383         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
8384            "  nbytes %d  flags %x  vlan %u\n",
8385            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
8386            tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
8387
8388         if (skb_shinfo(skb)->gso_size &&
8389             (skb->len > (bp->dev->mtu + ETH_HLEN))) {
8390                 int hlen = 2 * le16_to_cpu(pbd->total_hlen);
8391
8392                 DP(NETIF_MSG_TX_QUEUED,
8393                    "TSO packet len %d  hlen %d  headlen %d  tso size %d\n",
8394                    skb->len, hlen, skb_headlen(skb),
8395                    skb_shinfo(skb)->gso_size);
8396
8397                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
8398
8399                 if (skb_headlen(skb) > hlen) {
8400                         /* we split the first BD into header and data BDs
8401                          * to ease the pain of our fellow microcode
8402                          * engineers; we use one mapping for both BDs.
8403                          * So far this has only been observed to happen
8404                          * in Other Operating Systems (TM)
8405                          */
8406
8407                         /* first fix first bd */
8408                         nbd++;
8409                         tx_bd->nbd = cpu_to_le16(nbd);
8410                         tx_bd->nbytes = cpu_to_le16(hlen);
8411
8412                         /* we only print this as an error
8413                          * because we don't think this will ever happen.
8414                          */
8415                         BNX2X_ERR("TSO split header size is %d (%x:%x)"
8416                                   "  nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
8417                                   tx_bd->addr_lo, tx_bd->nbd);
8418
8419                         /* now get a new data bd
8420                          * (after the pbd) and fill it */
8421                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8422                         tx_bd = &fp->tx_desc_ring[bd_prod];
8423
8424                         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8425                         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
8426                         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
8427                         tx_bd->vlan = cpu_to_le16(pkt_prod);
8428                         /* this marks the BD as one that has
8429                          * no individual mapping;
8430                          * the FW ignores this flag in a BD not marked start
8431                          */
8432                         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
8433                         DP(NETIF_MSG_TX_QUEUED,
8434                            "TSO split data size is %d (%x:%x)\n",
8435                            tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
8436                 }
8437
8438                 if (!pbd) {
8439                         /* this should be unreachable
8440                          * (and is therefore not handled gracefully)
8441                          */
8442                         BNX2X_ERR("LSO with no PBD\n");
8443                         BUG();
8444                 }
8445
8446                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
8447                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
8448                 pbd->ip_id = swab16(ip_hdr(skb)->id);
8449                 pbd->tcp_pseudo_csum =
8450                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
8451                                                           ip_hdr(skb)->daddr,
8452                                                           0, IPPROTO_TCP, 0));
8453                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
8454         }
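
        /* Note on the pseudo checksum above (an inference from the flag
         * name, not documented here): csum_tcpudp_magic(saddr, daddr, 0,
         * IPPROTO_TCP, 0) is the TCP pseudo-header sum with a zero length
         * field, and ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN presumably tells
         * the firmware to add each segment's own length, which differs from
         * segment to segment under LSO.
         */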
8455
8456         {
8457                 int i;
8458
8459                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
8460                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8461
8462                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8463                         tx_bd = &fp->tx_desc_ring[bd_prod];
8464
8465                         mapping = pci_map_page(bp->pdev, frag->page,
8466                                                frag->page_offset,
8467                                                frag->size, PCI_DMA_TODEVICE);
8468
8469                         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8470                         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8471                         tx_bd->nbytes = cpu_to_le16(frag->size);
8472                         tx_bd->vlan = cpu_to_le16(pkt_prod);
8473                         tx_bd->bd_flags.as_bitfield = 0;
8474                         DP(NETIF_MSG_TX_QUEUED, "frag %d  bd @%p"
8475                            "  addr (%x:%x)  nbytes %d  flags %x\n",
8476                            i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
8477                            tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
8478                 } /* for */
8479         }
8480
8481         /* now at last mark the bd as the last bd */
8482         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
8483
8484         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
8485            tx_bd, tx_bd->bd_flags.as_bitfield);
8486
8487         tx_buf->skb = skb;
8488
8489         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8490
8491         /* now send a tx doorbell, counting the next bd
8492          * if the packet contains or ends with it
8493          */
8494         if (TX_BD_POFF(bd_prod) < nbd)
8495                 nbd++;
8496
8497         if (pbd)
8498                 DP(NETIF_MSG_TX_QUEUED,
8499                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
8500                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
8501                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
8502                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
8503                    pbd->tcp_send_seq, pbd->total_hlen);
8504
8505         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u  bd %d\n", nbd, bd_prod);
8506
8507         fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd);
8508         mb(); /* FW restriction: must not reorder writing nbd and packets */
8509         fp->hw_tx_prods->packets_prod += cpu_to_le32(1);
8510         DOORBELL(bp, fp_index, 0);
8511
8512         mmiowb();
8513
8514         fp->tx_bd_prod = bd_prod;
8515         dev->trans_start = jiffies;
8516
8517         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
8518                 netif_stop_queue(dev);
8519                 bp->slowpath->eth_stats.driver_xoff++;
                     smp_mb(); /* make the stop visible before re-checking
                                * availability against a racing bnx2x_tx_int()
                                */
8520                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
8521                         netif_wake_queue(dev);
8522         }
8523         fp->tx_pkt++;
8524
8525         return NETDEV_TX_OK;
8526 }
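
/* Worked example of the BD accounting in bnx2x_start_xmit() (illustrative
 * only): a non-TSO skb with one page frag and hardware checksum uses a
 * start BD, a parsing-info BD and one frag BD, so nbd = nr_frags + 2 = 3.
 * If TX_BD_POFF(bd_prod) < nbd the packet wrapped the end of a BD page, and
 * nbd is bumped to also count what is presumably the chain's next-page BD
 * before being added to hw_tx_prods->bds_prod.
 */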
8527
8528 static struct net_device_stats *bnx2x_get_stats(struct net_device *dev)
8529 {
8530         return &dev->stats;
8531 }
8532
8533 /* Called with rtnl_lock */
8534 static int bnx2x_open(struct net_device *dev)
8535 {
8536         struct bnx2x *bp = netdev_priv(dev);
8537
8538         bnx2x_set_power_state(bp, PCI_D0);
8539
8540         return bnx2x_nic_load(bp, 1);
8541 }
8542
8543 /* Called with rtnl_lock */
8544 static int bnx2x_close(struct net_device *dev)
8545 {
8546         int rc;
8547         struct bnx2x *bp = netdev_priv(dev);
8548
8549         /* Unload the driver, release IRQs */
8550         rc = bnx2x_nic_unload(bp, 1);
8551         if (rc) {
8552                 BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);
8553                 return rc;
8554         }
8555         bnx2x_set_power_state(bp, PCI_D3hot);
8556
8557         return 0;
8558 }
8559
8560 /* Called with rtnl_lock */
8561 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
8562 {
8563         struct sockaddr *addr = p;
8564         struct bnx2x *bp = netdev_priv(dev);
8565
8566         if (!is_valid_ether_addr(addr->sa_data))
8567                 return -EINVAL;
8568
8569         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8570         if (netif_running(dev))
8571                 bnx2x_set_mac_addr(bp);
8572
8573         return 0;
8574 }
8575
8576 /* Called with rtnl_lock */
8577 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8578 {
8579         struct mii_ioctl_data *data = if_mii(ifr);
8580         struct bnx2x *bp = netdev_priv(dev);
8581         int err;
8582
8583         switch (cmd) {
8584         case SIOCGMIIPHY:
8585                 data->phy_id = bp->phy_addr;
8586
8587                 /* fallthru */
8588         case SIOCGMIIREG: {
8589                 u32 mii_regval;
8590
8591                 spin_lock_bh(&bp->phy_lock);
8592                 if (bp->state == BNX2X_STATE_OPEN) {
8593                         err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
8594                                                 &mii_regval);
8595
8596                         data->val_out = mii_regval;
8597                 } else {
8598                         err = -EAGAIN;
8599                 }
8600                 spin_unlock_bh(&bp->phy_lock);
8601                 return err;
8602         }
8603
8604         case SIOCSMIIREG:
8605                 if (!capable(CAP_NET_ADMIN))
8606                         return -EPERM;
8607
8608                 spin_lock_bh(&bp->phy_lock);
8609                 if (bp->state == BNX2X_STATE_OPEN) {
8610                         err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
8611                                                  data->val_in);
8612                 } else {
8613                         err = -EAGAIN;
8614                 }
8615                 spin_unlock_bh(&bp->phy_lock);
8616                 return err;
8617
8618         default:
8619                 /* do nothing */
8620                 break;
8621         }
8622
8623         return -EOPNOTSUPP;
8624 }
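
/* A minimal userspace sketch of the MII ioctls handled above (illustrative
 * only; error handling omitted), reading PHY register 1 (MII_BMSR):
 *
 *      struct ifreq ifr = {};
 *      struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ioctl(fd, SIOCGMIIPHY, &ifr);   // fills mii->phy_id
 *      mii->reg_num = MII_BMSR;
 *      ioctl(fd, SIOCGMIIREG, &ifr);   // result in mii->val_out
 */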
8625
8626 /* Called with rtnl_lock */
8627 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
8628 {
8629         struct bnx2x *bp = netdev_priv(dev);
8630
8631         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
8632             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
8633                 return -EINVAL;
8634
8635         /* This does not race with packet allocation
8636          * because the actual alloc size is
8637          * only updated as part of load
8638          */
8639         dev->mtu = new_mtu;
8640
8641         if (netif_running(dev)) {
8642                 bnx2x_nic_unload(bp, 0);
8643                 bnx2x_nic_load(bp, 0);
8644         }
8645         return 0;
8646 }
8647
8648 static void bnx2x_tx_timeout(struct net_device *dev)
8649 {
8650         struct bnx2x *bp = netdev_priv(dev);
8651
8652 #ifdef BNX2X_STOP_ON_ERROR
8653         if (!bp->panic)
8654                 bnx2x_panic();
8655 #endif
8656         /* This allows the netif to be shutdown gracefully before resetting */
8657         schedule_work(&bp->reset_task);
8658 }
8659
8660 #ifdef BCM_VLAN
8661 /* Called with rtnl_lock */
8662 static void bnx2x_vlan_rx_register(struct net_device *dev,
8663                                    struct vlan_group *vlgrp)
8664 {
8665         struct bnx2x *bp = netdev_priv(dev);
8666
8667         bp->vlgrp = vlgrp;
8668         if (netif_running(dev))
8669                 bnx2x_set_rx_mode(dev);
8670 }
8671 #endif
8672
8673 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8674 static void poll_bnx2x(struct net_device *dev)
8675 {
8676         struct bnx2x *bp = netdev_priv(dev);
8677
8678         disable_irq(bp->pdev->irq);
8679         bnx2x_interrupt(bp->pdev->irq, dev);
8680         enable_irq(bp->pdev->irq);
8681 }
8682 #endif
8683
8684 static void bnx2x_reset_task(struct work_struct *work)
8685 {
8686         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8687
8688 #ifdef BNX2X_STOP_ON_ERROR
8689         BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
8690                   " so the reset was not done to allow a debug dump;\n"
8691          KERN_ERR " you will need to reboot when done\n");
8692         return;
8693 #endif
8694
8695         if (!netif_running(bp->dev))
8696                 return;
8697
8698         bp->in_reset_task = 1;
8699
8700         bnx2x_netif_stop(bp);
8701
8702         bnx2x_nic_unload(bp, 0);
8703         bnx2x_nic_load(bp, 0);
8704
8705         bp->in_reset_task = 0;
8706 }
8707
8708 static int __devinit bnx2x_init_board(struct pci_dev *pdev,
8709                                       struct net_device *dev)
8710 {
8711         struct bnx2x *bp;
8712         int rc;
8713
8714         SET_NETDEV_DEV(dev, &pdev->dev);
8715         bp = netdev_priv(dev);
8716
8717         bp->flags = 0;
8718         bp->port = PCI_FUNC(pdev->devfn);
8719
8720         rc = pci_enable_device(pdev);
8721         if (rc) {
8722                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
8723                 goto err_out;
8724         }
8725
8726         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8727                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
8728                        " aborting\n");
8729                 rc = -ENODEV;
8730                 goto err_out_disable;
8731         }
8732
8733         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8734                 printk(KERN_ERR PFX "Cannot find second PCI device"
8735                        " base address, aborting\n");
8736                 rc = -ENODEV;
8737                 goto err_out_disable;
8738         }
8739
8740         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8741         if (rc) {
8742                 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
8743                        " aborting\n");
8744                 goto err_out_disable;
8745         }
8746
8747         pci_set_master(pdev);
8748
8749         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8750         if (bp->pm_cap == 0) {
8751                 printk(KERN_ERR PFX "Cannot find power management"
8752                        " capability, aborting\n");
8753                 rc = -EIO;
8754                 goto err_out_release;
8755         }
8756
8757         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8758         if (bp->pcie_cap == 0) {
8759                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
8760                        " aborting\n");
8761                 rc = -EIO;
8762                 goto err_out_release;
8763         }
8764
8765         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
8766                 bp->flags |= USING_DAC_FLAG;
8767                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
8768                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
8769                                " failed, aborting\n");
8770                         rc = -EIO;
8771                         goto err_out_release;
8772                 }
8773
8774         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
8775                 printk(KERN_ERR PFX "System does not support DMA,"
8776                        " aborting\n");
8777                 rc = -EIO;
8778                 goto err_out_release;
8779         }
8780
8781         bp->dev = dev;
8782         bp->pdev = pdev;
8783
8784         spin_lock_init(&bp->phy_lock);
8785
8786         bp->in_reset_task = 0;
8787
8788         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8789         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
8790
8791         dev->base_addr = pci_resource_start(pdev, 0);
8792
8793         dev->irq = pdev->irq;
8794
8795         bp->regview = ioremap_nocache(dev->base_addr,
8796                                       pci_resource_len(pdev, 0));
8797         if (!bp->regview) {
8798                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
8799                 rc = -ENOMEM;
8800                 goto err_out_release;
8801         }
8802
8803         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
8804                                         pci_resource_len(pdev, 2));
8805         if (!bp->doorbells) {
8806                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
8807                 rc = -ENOMEM;
8808                 goto err_out_unmap;
8809         }
8810
8811         bnx2x_set_power_state(bp, PCI_D0);
8812
8813         bnx2x_get_hwinfo(bp);
8814
8815         if (CHIP_REV(bp) == CHIP_REV_FPGA) {
8816                 printk(KERN_ERR PFX "FPGA detacted. MCP disabled,"
8817                        " will only init first device\n");
8818                 onefunc = 1;
8819                 nomcp = 1;
8820         }
8821
8822         if (nomcp) {
8823                 printk(KERN_ERR PFX "MCP disabled, will only"
8824                        " init first device\n");
8825                 onefunc = 1;
8826         }
8827
8828         if (onefunc && bp->port) {
8829                 printk(KERN_ERR PFX "Second device disabled, exiting\n");
8830                 rc = -ENODEV;
8831                 goto err_out_unmap;
8832         }
8833
8834         bp->tx_ring_size = MAX_TX_AVAIL;
8835         bp->rx_ring_size = MAX_RX_AVAIL;
8836
8837         bp->rx_csum = 1;
8838
8839         bp->rx_offset = 0;
8840
8841         bp->tx_quick_cons_trip_int = 0xff;
8842         bp->tx_quick_cons_trip = 0xff;
8843         bp->tx_ticks_int = 50;
8844         bp->tx_ticks = 50;
8845
8846         bp->rx_quick_cons_trip_int = 0xff;
8847         bp->rx_quick_cons_trip = 0xff;
8848         bp->rx_ticks_int = 25;
8849         bp->rx_ticks = 25;
8850
8851         bp->stats_ticks = 1000000 & 0xffff00;
8852
8853         bp->timer_interval = HZ;
8854         bp->current_interval = (poll ? poll : HZ);
8855
8856         init_timer(&bp->timer);
8857         bp->timer.expires = jiffies + bp->current_interval;
8858         bp->timer.data = (unsigned long) bp;
8859         bp->timer.function = bnx2x_timer;
8860
8861         return 0;
8862
8863 err_out_unmap:
8864         if (bp->regview) {
8865                 iounmap(bp->regview);
8866                 bp->regview = NULL;
8867         }
8868
8869         if (bp->doorbells) {
8870                 iounmap(bp->doorbells);
8871                 bp->doorbells = NULL;
8872         }
8873
8874 err_out_release:
8875         pci_release_regions(pdev);
8876
8877 err_out_disable:
8878         pci_disable_device(pdev);
8879         pci_set_drvdata(pdev, NULL);
8880
8881 err_out:
8882         return rc;
8883 }
8884
8885 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8886                                     const struct pci_device_id *ent)
8887 {
8888         static int version_printed;
8889         struct net_device *dev = NULL;
8890         struct bnx2x *bp;
8891         int rc, i;
8892         int port = PCI_FUNC(pdev->devfn);
8893
8894         if (version_printed++ == 0)
8895                 printk(KERN_INFO "%s", version);
8896
8897         /* dev zeroed in alloc_etherdev */
8898         dev = alloc_etherdev(sizeof(*bp));
8899         if (!dev)
8900                 return -ENOMEM;
8901
8902         netif_carrier_off(dev);
8903
8904         bp = netdev_priv(dev);
8905         bp->msglevel = debug;
8906
8907         if (port && onefunc) {
8908                 printk(KERN_ERR PFX "Second function disabled, exiting\n");
                     free_netdev(dev); /* don't leak the netdev allocated above */
8909                 return 0;
8910         }
8911
8912         rc = bnx2x_init_board(pdev, dev);
8913         if (rc < 0) {
8914                 free_netdev(dev);
8915                 return rc;
8916         }
8917
8918         dev->hard_start_xmit = bnx2x_start_xmit;
8919         dev->watchdog_timeo = TX_TIMEOUT;
8920
8921         dev->get_stats = bnx2x_get_stats;
8922         dev->ethtool_ops = &bnx2x_ethtool_ops;
8923         dev->open = bnx2x_open;
8924         dev->stop = bnx2x_close;
8925         dev->set_multicast_list = bnx2x_set_rx_mode;
8926         dev->set_mac_address = bnx2x_change_mac_addr;
8927         dev->do_ioctl = bnx2x_ioctl;
8928         dev->change_mtu = bnx2x_change_mtu;
8929         dev->tx_timeout = bnx2x_tx_timeout;
8930 #ifdef BCM_VLAN
8931         dev->vlan_rx_register = bnx2x_vlan_rx_register;
8932 #endif
8933 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8934         dev->poll_controller = poll_bnx2x;
8935 #endif
8936         dev->features |= NETIF_F_SG;
8937         if (bp->flags & USING_DAC_FLAG)
8938                 dev->features |= NETIF_F_HIGHDMA;
8939         dev->features |= NETIF_F_IP_CSUM;
8940 #ifdef BCM_VLAN
8941         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8942 #endif
8943         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8944
8945         rc = register_netdev(dev);
8946         if (rc) {
8947                 printk(KERN_ERR PFX "Cannot register net device\n");
8948                 if (bp->regview)
8949                         iounmap(bp->regview);
8950                 if (bp->doorbells)
8951                         iounmap(bp->doorbells);
8952                 pci_release_regions(pdev);
8953                 pci_disable_device(pdev);
8954                 pci_set_drvdata(pdev, NULL);
8955                 free_netdev(dev);
8956                 return rc;
8957         }
8958
8959         pci_set_drvdata(pdev, dev);
8960
8961         bp->name = board_info[ent->driver_data].name;
8962         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz "
8963                "found at mem %lx, IRQ %d, ",
8964                dev->name, bp->name,
8965                ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8966                ((CHIP_ID(bp) & 0x0ff0) >> 4),
8967                ((bp->flags & PCIX_FLAG) ? "-X" : ""),
8968                ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
8969                bp->bus_speed_mhz,
8970                dev->base_addr,
8971                bp->pdev->irq);
8972
8973         printk("node addr ");
8974         for (i = 0; i < 6; i++)
8975                 printk("%2.2x", dev->dev_addr[i]);
8976         printk("\n");
8977
8978         return 0;
8979 }
8980
8981 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
8982 {
8983         struct net_device *dev = pci_get_drvdata(pdev);
8984         struct bnx2x *bp = netdev_priv(dev);
8985
8986         flush_scheduled_work();
8988         unregister_netdev(dev);
8989
8990         if (bp->regview)
8991                 iounmap(bp->regview);
8992
8993         if (bp->doorbells)
8994                 iounmap(bp->doorbells);
8995
8996         free_netdev(dev);
8997         pci_release_regions(pdev);
8998         pci_disable_device(pdev);
8999         pci_set_drvdata(pdev, NULL);
9000 }
9001
9002 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
9003 {
9004         struct net_device *dev = pci_get_drvdata(pdev);
9005         struct bnx2x *bp = netdev_priv(dev);
9006         int rc;
9007
9008         if (!netif_running(dev))
9009                 return 0;
9010
9011         rc = bnx2x_nic_unload(bp, 0);
9012         if (rc)
9013                 return rc;
9014
9015         netif_device_detach(dev);
9016         pci_save_state(pdev);
9017
9018         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
9019         return 0;
9020 }
9021
9022 static int bnx2x_resume(struct pci_dev *pdev)
9023 {
9024         struct net_device *dev = pci_get_drvdata(pdev);
9025         struct bnx2x *bp = netdev_priv(dev);
9026         int rc;
9027
9028         if (!netif_running(dev))
9029                 return 0;
9030
9031         pci_restore_state(pdev);
9032
9033         bnx2x_set_power_state(bp, PCI_D0);
9034         netif_device_attach(dev);
9035
9036         rc = bnx2x_nic_load(bp, 0);
9037         if (rc)
9038                 return rc;
9039
9040         return 0;
9041 }
9042
9043 static struct pci_driver bnx2x_pci_driver = {
9044         .name       = DRV_MODULE_NAME,
9045         .id_table   = bnx2x_pci_tbl,
9046         .probe      = bnx2x_init_one,
9047         .remove     = __devexit_p(bnx2x_remove_one),
9048         .suspend    = bnx2x_suspend,
9049         .resume     = bnx2x_resume,
9050 };
9051
9052 static int __init bnx2x_init(void)
9053 {
9054         return pci_register_driver(&bnx2x_pci_driver);
9055 }
9056
9057 static void __exit bnx2x_cleanup(void)
9058 {
9059         pci_unregister_driver(&bnx2x_pci_driver);
9060 }
9061
9062 module_init(bnx2x_init);
9063 module_exit(bnx2x_cleanup);
9064