/* bnx2x.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 */
/* error/debug prints */

#define DRV_MODULE_NAME "bnx2x"
#define PFX DRV_MODULE_NAME ": "

/* for messages that are currently off */
#define BNX2X_MSG_OFF 0
#define BNX2X_MSG_MCP 0x10000 /* was: NETIF_MSG_HW */
#define BNX2X_MSG_STATS 0x20000 /* was: NETIF_MSG_TIMER */
#define NETIF_MSG_NVM 0x40000 /* was: NETIF_MSG_HW */
#define NETIF_MSG_DMAE 0x80000 /* was: NETIF_MSG_HW */
#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */

#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */
/* regular debug print */
#define DP(__mask, __fmt, __args...) do { \
	if (bp->msglevel & (__mask)) \
		printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __FUNCTION__, \
		       __LINE__, bp->dev ? (bp->dev->name) : "?", ##__args); \
	} while (0)

/* for errors (never masked) */
#define BNX2X_ERR(__fmt, __args...) do { \
	printk(KERN_ERR "[%s:%d(%s)]" __fmt, __FUNCTION__, \
	       __LINE__, bp->dev ? (bp->dev->name) : "?", ##__args); \
	} while (0)

/* for logging (never masked) */
#define BNX2X_LOG(__fmt, __args...) do { \
	printk(KERN_NOTICE "[%s:%d(%s)]" __fmt, __FUNCTION__, \
	       __LINE__, bp->dev ? (bp->dev->name) : "?", ##__args); \
	} while (0)

/* before we have a dev->name use dev_info() */
#define BNX2X_DEV_INFO(__fmt, __args...) do { \
	if (bp->msglevel & NETIF_MSG_PROBE) \
		dev_info(&bp->pdev->dev, __fmt, ##__args); \
	} while (0)
#ifdef BNX2X_STOP_ON_ERROR
#define bnx2x_panic() do { \
		BNX2X_ERR("driver assert\n"); \
		bnx2x_disable_int(bp); \
		bnx2x_panic_dump(bp); \
	} while (0)
#else
#define bnx2x_panic() do { \
		BNX2X_ERR("driver assert\n"); \
		bnx2x_panic_dump(bp); \
	} while (0)
#endif
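/*
 * Usage sketch (illustrative, not taken from the driver): the print and
 * panic macros all assume a local 'struct bnx2x *bp' at the call site;
 * 'event' below is a hypothetical variable.
 *
 *	DP(BNX2X_MSG_SP, "got slow path event %d\n", event);
 *	BNX2X_ERR("timed out waiting for firmware response\n");
 */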
#define U64_LO(x) (((u64)x) & 0xffffffff)
#define U64_HI(x) (((u64)x) >> 32)
#define HILO_U64(hi, lo) (((u64)hi << 32) + lo)
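/*
 * Usage sketch (illustrative): splitting a 64 bit DMA mapping into the two
 * 32 bit halves a buffer descriptor stores, and recombining them; 'mapping'
 * and 'tx_bd' are hypothetical locals.
 *
 *	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 *	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 *	mapping = HILO_U64(le32_to_cpu(tx_bd->addr_hi),
 *			   le32_to_cpu(tx_bd->addr_lo));
 */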
#define REG_ADDR(bp, offset) (bp->regview + offset)

#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
#define REG_RD64(bp, offset) readq(REG_ADDR(bp, offset))

#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
#define REG_WR32(bp, offset, val) REG_WR(bp, offset, val)

#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset)
#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val)
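/*
 * Usage sketch (illustrative): 'SOME_REG_OFFSET' stands for any GRC register
 * offset; direct accesses go through the BAR mapping in bp->regview, the
 * _IND variants go through PCI configuration cycles instead.
 *
 *	u32 val = REG_RD(bp, SOME_REG_OFFSET);
 *	REG_WR(bp, SOME_REG_OFFSET, val | 0x1);
 */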
#define REG_RD_DMAE(bp, offset, valp, len32) \
	do { \
		bnx2x_read_dmae(bp, offset, len32); \
		memcpy(valp, bnx2x_sp(bp, wb_data[0]), len32 * 4); \
	} while (0)

#define REG_WR_DMAE(bp, offset, val, len32) \
	do { \
		memcpy(bnx2x_sp(bp, wb_data[0]), val, len32 * 4); \
		bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
				 offset, len32); \
	} while (0)
#define SHMEM_RD(bp, type) \
	REG_RD(bp, bp->shmem_base + offsetof(struct shmem_region, type))
#define SHMEM_WR(bp, type, val) \
	REG_WR(bp, bp->shmem_base + offsetof(struct shmem_region, type), val)

#define NIG_WR(reg, val) REG_WR(bp, reg, val)
#define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val)
#define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val)
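/*
 * Usage sketch (illustrative): 'some_field' stands for any member of
 * struct shmem_region; the macros turn the member name into an offset
 * from the shared memory base in bp->shmem_base.
 *
 *	u32 val = SHMEM_RD(bp, some_field);
 *	SHMEM_WR(bp, some_field, val);
 */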
#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)

#define for_each_nondefault_queue(bp, var) \
		for (var = 1; var < bp->num_queues; var++)
#define is_multi(bp) (bp->num_queues > 1)
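/*
 * Usage sketch (illustrative): iterating over all the device's queues;
 * 'i' is a hypothetical loop variable.
 *
 *	for_each_queue(bp, i)
 *		napi_enable(&bnx2x_fp(bp, i, napi));
 */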
struct regp tx_gtpkt;
struct regp tx_gtxpf;
struct regp tx_gtfcs;
struct regp tx_gtmca;
struct regp tx_gtgca;
struct regp tx_gtfrg;
struct regp tx_gtovr;
struct regp tx_gt127;
struct regp tx_gt255;	/* 10 */
struct regp tx_gt511;
struct regp tx_gt1023;
struct regp tx_gt1518;
struct regp tx_gt2047;
struct regp tx_gt4095;
struct regp tx_gt9216;
struct regp tx_gt16383;
struct regp tx_gtmax;
struct regp tx_gtufl;
struct regp tx_gterr;	/* 20 */
struct regp tx_gtbyt;

struct regp rx_gr127;
struct regp rx_gr255;
struct regp rx_gr511;
struct regp rx_gr1023;
struct regp rx_gr1518;
struct regp rx_gr2047;
struct regp rx_gr4095;
struct regp rx_gr9216;	/* 30 */
struct regp rx_gr16383;
struct regp rx_grmax;
struct regp rx_grpkt;
struct regp rx_grfcs;
struct regp rx_grmca;
struct regp rx_grbca;
struct regp rx_grxcf;
struct regp rx_grxpf;
struct regp rx_grxuo;
struct regp rx_grjbr;	/* 40 */
struct regp rx_grovr;
struct regp rx_grflr;
struct regp rx_grmeg;
struct regp rx_grmeb;
struct regp rx_grbyt;
struct regp rx_grund;
struct regp rx_grfrg;
struct regp rx_grerb;
struct regp rx_grfre;
struct regp rx_gripj;	/* 50 */
u32 rx_ifhcinoctets;
u32 rx_ifhcinbadoctets;
u32 rx_etherstatsfragments;
u32 rx_ifhcinucastpkts;
u32 rx_ifhcinmulticastpkts;
u32 rx_ifhcinbroadcastpkts;
u32 rx_dot3statsfcserrors;
u32 rx_dot3statsalignmenterrors;
u32 rx_dot3statscarriersenseerrors;
u32 rx_xonpauseframesreceived;	/* 10 */
u32 rx_xoffpauseframesreceived;
u32 rx_maccontrolframesreceived;
u32 rx_xoffstateentered;
u32 rx_dot3statsframestoolong;
u32 rx_etherstatsjabbers;
u32 rx_etherstatsundersizepkts;
u32 rx_etherstatspkts64octets;
u32 rx_etherstatspkts65octetsto127octets;
u32 rx_etherstatspkts128octetsto255octets;
u32 rx_etherstatspkts256octetsto511octets;	/* 20 */
u32 rx_etherstatspkts512octetsto1023octets;
u32 rx_etherstatspkts1024octetsto1522octets;
u32 rx_etherstatspktsover1522octets;

u32 rx_falsecarriererrors;

u32 tx_ifhcoutoctets;
u32 tx_ifhcoutbadoctets;
u32 tx_etherstatscollisions;

u32 tx_flowcontroldone;	/* 30 */
u32 tx_dot3statssinglecollisionframes;
u32 tx_dot3statsmultiplecollisionframes;
u32 tx_dot3statsdeferredtransmissions;
u32 tx_dot3statsexcessivecollisions;
u32 tx_dot3statslatecollisions;
u32 tx_ifhcoutucastpkts;
u32 tx_ifhcoutmulticastpkts;
u32 tx_ifhcoutbroadcastpkts;
u32 tx_etherstatspkts64octets;
u32 tx_etherstatspkts65octetsto127octets;	/* 40 */
u32 tx_etherstatspkts128octetsto255octets;
u32 tx_etherstatspkts256octetsto511octets;
u32 tx_etherstatspkts512octetsto1023octets;
u32 tx_etherstatspkts1024octetsto1522octet;
u32 tx_etherstatspktsover1522octets;
u32 tx_dot3statsinternalmactransmiterrors;	/* 46 */
union mac_stats {
	struct emac_stats emac;
	struct bmac_stats bmac;
};
u32 flow_ctrl_discard;
u32 flow_ctrl_octets;
u32 flow_ctrl_packet;
struct bnx2x_eth_stats {
	u32 pad;	/* to make long counters u64 aligned */

	u32 total_bytes_received_hi;
	u32 total_bytes_received_lo;
	u32 total_bytes_transmitted_hi;
	u32 total_bytes_transmitted_lo;
	u32 total_unicast_packets_received_hi;
	u32 total_unicast_packets_received_lo;
	u32 total_multicast_packets_received_hi;
	u32 total_multicast_packets_received_lo;
	u32 total_broadcast_packets_received_hi;
	u32 total_broadcast_packets_received_lo;
	u32 total_unicast_packets_transmitted_hi;
	u32 total_unicast_packets_transmitted_lo;
	u32 total_multicast_packets_transmitted_hi;
	u32 total_multicast_packets_transmitted_lo;
	u32 total_broadcast_packets_transmitted_hi;
	u32 total_broadcast_packets_transmitted_lo;
	u32 crc_receive_errors;
	u32 alignment_errors;
	u32 false_carrier_detections;
	u32 runt_packets_received;
	u32 jabber_packets_received;
	u32 pause_xon_frames_received;
	u32 pause_xoff_frames_received;
	u32 pause_xon_frames_transmitted;
	u32 pause_xoff_frames_transmitted;
	u32 single_collision_transmit_frames;
	u32 multiple_collision_transmit_frames;
	u32 late_collision_frames;
	u32 excessive_collision_frames;
	u32 control_frames_received;
	u32 frames_received_64_bytes;
	u32 frames_received_65_127_bytes;
	u32 frames_received_128_255_bytes;
	u32 frames_received_256_511_bytes;
	u32 frames_received_512_1023_bytes;
	u32 frames_received_1024_1522_bytes;
	u32 frames_received_1523_9022_bytes;
	u32 frames_transmitted_64_bytes;
	u32 frames_transmitted_65_127_bytes;
	u32 frames_transmitted_128_255_bytes;
	u32 frames_transmitted_256_511_bytes;
	u32 frames_transmitted_512_1023_bytes;
	u32 frames_transmitted_1024_1522_bytes;
	u32 frames_transmitted_1523_9022_bytes;
	u32 valid_bytes_received_hi;
	u32 valid_bytes_received_lo;
	u32 error_runt_packets_received;
	u32 error_jabber_packets_received;

	u32 stat_IfHCInBadOctets_hi;
	u32 stat_IfHCInBadOctets_lo;
	u32 stat_IfHCOutBadOctets_hi;
	u32 stat_IfHCOutBadOctets_lo;
	u32 stat_Dot3statsFramesTooLong;
	u32 stat_Dot3statsInternalMacTransmitErrors;
	u32 stat_Dot3StatsCarrierSenseErrors;
	u32 stat_Dot3StatsDeferredTransmissions;
	u32 stat_FlowControlDone;
	u32 stat_XoffStateEntered;

	u32 x_total_sent_bytes_hi;
	u32 x_total_sent_bytes_lo;
	u32 x_total_sent_pkts;

	u32 t_rcv_unicast_bytes_hi;
	u32 t_rcv_unicast_bytes_lo;
	u32 t_rcv_broadcast_bytes_hi;
	u32 t_rcv_broadcast_bytes_lo;
	u32 t_rcv_multicast_bytes_hi;
	u32 t_rcv_multicast_bytes_lo;

	u32 checksum_discard;
	u32 packets_too_big_discard;

	u32 mac_filter_discard;
	u32 xxoverflow_discard;
	u32 brb_truncate_discard;

	u32 flow_ctrl_discard;
	u32 flow_ctrl_octets;
	u32 flow_ctrl_packet;

	u32 number_of_bugs_found_in_stats_spec; /* just kidding */
};
#define MAC_STX_NA 0xffffffff

#define MAX_CONTEXT 16

#define MAX_CONTEXT 1
struct eth_context eth;
/* DMA memory not used in fastpath */
struct bnx2x_slowpath {
	union cdu_context context[MAX_CONTEXT];
	struct eth_stats_query fw_stats;
	struct mac_configuration_cmd mac_config;
	struct mac_configuration_cmd mcast_config;

	/* used by dmae command executer */
	struct dmae_command dmae[MAX_DMAE_C];

	union mac_stats mac_stats;
	struct nig_stats nig;
	struct bnx2x_eth_stats eth_stats;
#define BNX2X_WB_COMP_VAL 0xe0d0d0ae

#define bnx2x_sp(bp, var) (&bp->slowpath->var)
#define bnx2x_sp_check(bp, var) ((bp->slowpath) ? (&bp->slowpath->var) : NULL)
#define bnx2x_sp_mapping(bp, var) \
		(bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
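/*
 * Usage sketch (illustrative): the slowpath area is a single DMA-coherent
 * allocation, so a member's virtual address and its bus address are derived
 * the same way from the member name.
 *
 *	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
 *	dma_addr_t stats_mapping = bnx2x_sp_mapping(bp, fw_stats);
 */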
DECLARE_PCI_UNMAP_ADDR(mapping)
struct bnx2x_fastpath {

	struct napi_struct napi;

	struct host_status_block *status_blk;
	dma_addr_t status_blk_mapping;

	struct eth_tx_db_data *hw_tx_prods;
	dma_addr_t tx_prods_mapping;

	struct sw_tx_bd *tx_buf_ring;

	struct eth_tx_bd *tx_desc_ring;
	dma_addr_t tx_desc_mapping;

	struct sw_rx_bd *rx_buf_ring;

	struct eth_rx_bd *rx_desc_ring;
	dma_addr_t rx_desc_mapping;

	union eth_rx_cqe *rx_comp_ring;
	dma_addr_t rx_comp_mapping;

#define BNX2X_FP_STATE_CLOSED 0
#define BNX2X_FP_STATE_IRQ 0x80000
#define BNX2X_FP_STATE_OPENING 0x90000
#define BNX2X_FP_STATE_OPEN 0xa0000
#define BNX2X_FP_STATE_HALTING 0xb0000
#define BNX2X_FP_STATE_HALTED 0xc0000

	unsigned long tx_pkt,

	struct bnx2x *bp; /* parent */
};
#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)

/* attn group wiring */
#define MAX_DYNAMIC_ATTN_GRPS 8
struct bnx2x {
	/* Fields used in the tx and intr/napi performance paths
	 * are grouped together in the beginning of the structure
	 */
	struct bnx2x_fastpath *fp;
	void __iomem *regview;
	void __iomem *doorbells;

	struct net_device *dev;
	struct pci_dev *pdev;

	struct msix_entry msix_table[MAX_CONTEXT+1];

	struct vlan_group *vlgrp;

	u32 rx_buf_use_size;	/* useable size */
	u32 rx_buf_size;	/* with alignment */
#define ETH_OVREHEAD (ETH_HLEN + 8)	/* 8 for CRC + VLAN */
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600

	struct host_def_status_block *def_status_blk;

	struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];

	dma_addr_t spq_mapping;

	struct eth_spe *spq_prod_bd;
	struct eth_spe *spq_last_bd;

	u16 spq_left;	/* serialize spq */

	/* Flag for marking that there is either
	 * STAT_QUERY or CFC DELETE ramrod pending
	 */

	/* End of fields used in the performance code paths */
#define PCI_32BIT_FLAG 2
#define ONE_TDMA_FLAG 4	/* no longer used */
#define NO_WOL_FLAG 8
#define USING_DAC_FLAG 0x10
#define USING_MSIX_FLAG 0x20
#define ASF_ENABLE_FLAG 0x40
	struct work_struct sp_task;
	struct work_struct reset_task;

	struct timer_list timer;

	int current_interval;
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
#define CHIP_ID(bp) (bp->chip_id & 0xfffffff0)

#define CHIP_NUM(bp) (bp->chip_id >> 16)
#define CHIP_NUM_57710 0x164e
#define CHIP_NUM_57711 0x164f
#define CHIP_NUM_57711E 0x1650
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
			 CHIP_IS_57711E(bp))
#define IS_E1H_OFFSET CHIP_IS_E1H(bp)

#define CHIP_REV(bp) (bp->chip_id & 0x0000f000)
#define CHIP_REV_Ax 0x00000000
/* assume maximum 5 revisions */
#define CHIP_REV_IS_SLOW(bp) (CHIP_REV(bp) > 0x00005000)
/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \
			      !(CHIP_REV(bp) & 0x00001000))
/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \
			      (CHIP_REV(bp) & 0x00001000))

#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
		       ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))

#define CHIP_METAL(bp) (bp->chip_id & 0x00000ff0)
#define CHIP_BOND_ID(bp) (bp->chip_id & 0x0000000f)
	u16 fw_drv_pulse_wr_seq;

	struct link_params link_params;

	struct link_vars link_vars;

/* link settings - missing defines */
#define SUPPORTED_2500baseT_Full (1 << 15)

	/* used to synchronize phy accesses */
	struct mutex phy_mutex;

/* link settings - missing defines */
#define ADVERTISED_2500baseT_Full (1 << 15)

#define NVRAM_1MB_SIZE 0x20000	/* 1M bit in bytes */
#define NVRAM_TIMEOUT_COUNT 30000
#define NVRAM_PAGE_SIZE 256

	u16 tx_quick_cons_trip_int;
	u16 tx_quick_cons_trip;

	u16 rx_quick_cons_trip_int;
	u16 rx_quick_cons_trip;
#define BNX2X_STATE_CLOSED 0x0
#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000
#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000
#define BNX2X_STATE_OPEN 0x3000
#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
#define BNX2X_STATE_ERROR 0xF000

#define BNX2X_RX_MODE_NONE 0
#define BNX2X_RX_MODE_NORMAL 1
#define BNX2X_RX_MODE_ALLMULTI 2
#define BNX2X_RX_MODE_PROMISC 3
#define BNX2X_MAX_MULTICAST 64
#define BNX2X_MAX_EMUL_MULTI 16
	dma_addr_t def_status_blk_mapping;

	struct bnx2x_slowpath *slowpath;
	dma_addr_t slowpath_mapping;

	dma_addr_t t1_mapping;

	dma_addr_t t2_mapping;

	dma_addr_t timers_mapping;

	dma_addr_t qm_mapping;
	/* used to synchronize stats collecting */
#define STATS_STATE_DISABLE 0
#define STATS_STATE_ENABLE 1
#define STATS_STATE_STOP 2	/* stop stats on next iteration */

	/* used by dmae command loader */
	struct dmae_command dmae;

	/* used to synchronize dmae accesses */
	struct mutex dmae_mutex;
	struct dmae_command init_dmae;

	struct bmac_stats old_bmac;
	struct tstorm_per_client_stats old_tclient;
	struct z_stream_s *strm;

	dma_addr_t gunzip_mapping;

#define FW_BUF_SIZE 0x8000
/* DMAE command defines */
#define DMAE_CMD_SRC_PCI 0
#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC

#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT)
#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT)

#define DMAE_CMD_C_DST_PCI 0
#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT)

#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE

#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)

#define DMAE_CMD_PORT_0 0
#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT

#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET
#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET

#define DMAE_LEN32_MAX 0x400

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode);
#define RX_COPY_THRESH 92
#define BCM_PAGE_BITS 12
#define BCM_PAGE_SIZE (1 << BCM_PAGE_BITS)
#define NUM_TX_RINGS 16
#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_tx_bd))
#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
#define MAX_TX_BD (NUM_TX_BD - 1)
#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
			 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
#define TX_BD(x) ((x) & MAX_TX_BD)
#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
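/*
 * Worked example (illustrative, assuming 4KB pages and a 16 byte
 * struct eth_tx_bd): TX_DESC_CNT is 256 and MAX_TX_DESC_CNT is 255.
 * The last BD of every page is used as a link to the next page, so
 * NEXT_TX_IDX(254) returns 256 (skipping BD 255), while any other index
 * simply advances by one; TX_BD() then wraps over all NUM_TX_RINGS pages.
 */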
/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
#define NUM_RX_RINGS 8
#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
#define RX_DESC_MASK (RX_DESC_CNT - 1)
#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
#define MAX_RX_BD (NUM_RX_BD - 1)
#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
			 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
#define RX_BD(x) ((x) & MAX_RX_BD)
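/*
 * Worked example (illustrative): the last RX BD of a page is twice the size
 * of a regular one (see the comment above), so each page loses two
 * descriptor slots to the next-page link.  That is why MAX_RX_DESC_CNT is
 * RX_DESC_CNT - 2 and NEXT_RX_IDX() adds 3 when the page offset reaches
 * MAX_RX_DESC_CNT - 1, jumping over both reserved slots.
 */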
#define NUM_RCQ_RINGS (NUM_RX_RINGS * 2)
#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
			  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
/* used on a CID received from the HW */
#define SW_CID(x) (le32_to_cpu(x) & \
		   (COMMON_RAMROD_ETH_RX_CQE_CID >> 1))
#define CQE_CMD(x) (le32_to_cpu(x) >> \
		    COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)

#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \
				   le32_to_cpu((bd)->addr_lo))
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
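/*
 * Usage sketch (illustrative, 'tx_bd' is a hypothetical completed BD):
 * unmapping the buffer described by a BD once the hardware is done with it.
 *
 *	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
 *			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
 */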
#define STROM_ASSERT_ARRAY_SIZE 50

/* must be used on a CID before placing it on a HW ring */
#define HW_CID(bp, x) (x | (bp->port << 23))

#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
#define ATTN_NIG_FOR_FUNC (1L << 8)
#define ATTN_SW_TIMER_4_FUNC (1L << 9)
#define GPIO_2_FUNC (1L << 10)
#define GPIO_3_FUNC (1L << 11)
#define GPIO_4_FUNC (1L << 12)
#define ATTN_GENERAL_ATTN_1 (1L << 13)
#define ATTN_GENERAL_ATTN_2 (1L << 14)
#define ATTN_GENERAL_ATTN_3 (1L << 15)
#define ATTN_GENERAL_ATTN_4 (1L << 13)
#define ATTN_GENERAL_ATTN_5 (1L << 14)
#define ATTN_GENERAL_ATTN_6 (1L << 15)

#define ATTN_HARD_WIRED_MASK 0xff00
#define ATTENTION_ID 4
#define MAX_SPQ_PENDING 8

#define BNX2X_NUM_STATS 34
#define BNX2X_NUM_TESTS 1

#define DPM_TRIGER_TYPE 0x40
#define DOORBELL(bp, cid, val) \
	do { \
		writel((u32)val, (bp)->doorbells + (BCM_PAGE_SIZE * cid) + \
		       DPM_TRIGER_TYPE); \
	} while (0)
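/*
 * Usage sketch (illustrative): ringing the per-connection doorbell after
 * updating a producer; 'cid' and 'prod' are hypothetical locals.
 *
 *	DOORBELL(bp, cid, prod);
 */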
/* DMAE command defines */
#define DMAE_CMD_SRC_PCI 0
#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC

#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT)
#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT)

#define DMAE_CMD_C_DST_PCI 0
#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT)

#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE

#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)

#define DMAE_CMD_PORT_0 0
#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT

#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET
#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT

#define DMAE_LEN32_RD_MAX 0x80
#define DMAE_LEN32_WR_MAX 0x400

#define DMAE_COMP_VAL 0xe0d0d0ae

#define MAX_DMAE_C_PER_PORT 8
#define INIT_DMAE_C(bp) (BP_PORT(bp)*MAX_DMAE_C_PER_PORT + \

#define PMF_DMAE_C(bp) (BP_PORT(bp)*MAX_DMAE_C_PER_PORT + \
/* PCIE link and speed */
#define PCICFG_LINK_WIDTH 0x1f00000
#define PCICFG_LINK_WIDTH_SHIFT 20
#define PCICFG_LINK_SPEED 0xf0000
#define PCICFG_LINK_SPEED_SHIFT 16

#define BMAC_CONTROL_RX_ENABLE 2

#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb))) >> 16 & 0xff)
/* stuff added to make the code fit 80Col */

#define TPA_TYPE_START ETH_FAST_PATH_RX_CQE_START_FLG
#define TPA_TYPE_END ETH_FAST_PATH_RX_CQE_END_FLG
#define TPA_TYPE(cqe) (cqe->fast_path_cqe.error_type_flags & \
		       (TPA_TYPE_START | TPA_TYPE_END))

#define BNX2X_RX_SUM_OK(cqe) \
	(!(cqe->fast_path_cqe.status_flags & \
	   (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)))

#define BNX2X_RX_SUM_FIX(cqe) \
	((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
	  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
	 (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
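/*
 * Usage sketch (illustrative): in the RX completion path a frame whose IP
 * and L4 checksums were validated by the hardware can skip the software
 * check; 'skb' and 'cqe' are hypothetical locals.
 *
 *	if (likely(BNX2X_RX_SUM_OK(cqe)))
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 */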
#define BNX2X_MC_ASSERT_BITS \
	(GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
	 GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
	 GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
	 GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))

#define BNX2X_MCP_ASSERT \
	GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)

#define BNX2X_DOORQ_ASSERT \
	AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT
#define HW_INTERRUT_ASSERT_SET_0 \
	(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT)
#define HW_PRTY_ASSERT_SET_0 \
	(AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR)
#define HW_INTERRUT_ASSERT_SET_1 \
	(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
#define HW_PRTY_ASSERT_SET_1 \
	(AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR)
#define HW_INTERRUT_ASSERT_SET_2 \
	(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT | \
	 AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
#define HW_PRTY_ASSERT_SET_2 \
	(AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
	 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
#define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \
			    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \
			    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)

#define MULTI_FLAGS \
	(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
	 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
	 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
	 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
	 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_ENABLE)

#define MULTI_MASK 0x7f
#define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS
#define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS
#define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH

#define BNX2X_RX_SB_INDEX \
	&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX]

#define BNX2X_TX_SB_INDEX \
	&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX]

#define BNX2X_SP_DSB_INDEX \
	&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX]
#define CAM_IS_INVALID(x) \
	(x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)

#define CAM_INVALIDATE(x) \
	x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE

/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */