/* bnx2x_init.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 */

#ifndef BNX2X_INIT_H
#define BNX2X_INIT_H
#define INIT_EMULATION			0x1
#define INIT_HARDWARE			0x7

#define TSTORM_INTMEM_ADDR		TSEM_REG_FAST_MEMORY
#define CSTORM_INTMEM_ADDR		CSEM_REG_FAST_MEMORY
#define XSTORM_INTMEM_ADDR		XSEM_REG_FAST_MEMORY
#define USTORM_INTMEM_ADDR		USEM_REG_FAST_MEMORY
/* RAM0 size in bytes */
#define STORM_INTMEM_SIZE_E1		0x5800
#define STORM_INTMEM_SIZE_E1H		0x10000
#define STORM_INTMEM_SIZE(bp)	((CHIP_IS_E1H(bp) ? STORM_INTMEM_SIZE_E1H : \
					STORM_INTMEM_SIZE_E1) / 4)
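/*
 * Worked example (added for illustration, not from the original source):
 * STORM_INTMEM_SIZE() expresses the RAM0 size in 32-bit words, so it
 * evaluates to 0x5800 / 4 = 0x1600 dwords on E1 and to
 * 0x10000 / 4 = 0x4000 dwords on E1H.
 */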

/* Init operation types and structures */
/* Common for both E1 and E1H */
#define OP_RD			0x1 /* read single register */
#define OP_WR			0x2 /* write single register */
#define OP_IW			0x3 /* write single register using mailbox */
#define OP_SW			0x4 /* copy a string to the device */
#define OP_SI			0x5 /* copy a string using mailbox */
#define OP_ZR			0x6 /* clear memory */
#define OP_ZP			0x7 /* unzip then copy with DMAE */
#define OP_WR_64		0x8 /* write 64 bit pattern */
#define OP_WB			0x9 /* copy a string using DMAE */

/* Operation specific for E1 */
#define OP_RD_E1		0xa /* read single register */
#define OP_WR_E1		0xb /* write single register */
#define OP_IW_E1		0xc /* write single register using mailbox */
#define OP_SW_E1		0xd /* copy a string to the device */
#define OP_SI_E1		0xe /* copy a string using mailbox */
#define OP_ZR_E1		0xf /* clear memory */
#define OP_ZP_E1		0x10 /* unzip then copy with DMAE */
#define OP_WR_64_E1		0x11 /* write 64 bit pattern on E1 */
#define OP_WB_E1		0x12 /* copy a string using DMAE */

/* Operation specific for E1H */
#define OP_RD_E1H		0x13 /* read single register */
#define OP_WR_E1H		0x14 /* write single register */
#define OP_IW_E1H		0x15 /* write single register using mailbox */
#define OP_SW_E1H		0x16 /* copy a string to the device */
#define OP_SI_E1H		0x17 /* copy a string using mailbox */
#define OP_ZR_E1H		0x18 /* clear memory */
#define OP_ZP_E1H		0x19 /* unzip then copy with DMAE */
#define OP_WR_64_E1H		0x1a /* write 64 bit pattern on E1H */
#define OP_WB_E1H		0x1b /* copy a string using DMAE */

/* FPGA and EMUL specific operations */
#define OP_WR_EMUL_E1H		0x1c /* write single register on E1H Emul */
#define OP_WR_EMUL		0x1d /* write single register on Emulation */
#define OP_WR_FPGA		0x1e /* write single register on FPGA */
#define OP_WR_ASIC		0x1f /* write single register on ASIC */
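
/*
 * Illustrative sketch (not part of the original file): because the three
 * opcode groups above are laid out in the same order, a chip-specific
 * opcode can be mapped back to its common equivalent by subtracting a
 * constant, exactly as bnx2x_init_block() does further down:
 *
 *	OP_WR_E1  - (OP_RD_E1  - OP_RD) = 0xb  - 0x9  = 0x2 = OP_WR
 *	OP_ZP_E1H - (OP_RD_E1H - OP_RD) = 0x19 - 0x12 = 0x7 = OP_ZP
 *
 * This is why the comment in bnx2x_init_block() warns that the values
 * must stay in this exact order.
 */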

struct op_string_write {
#ifdef __LITTLE_ENDIAN
#else /* __BIG_ENDIAN */

	struct op_write		write;
	struct op_string_write	str_wr;

#include "bnx2x_init_values.h"

static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len);

static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
	for (i = 0; i < len; i++) {
		REG_WR(bp, addr + i*4, data[i]);
			touch_softlockup_watchdog();

static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
	for (i = 0; i < len; i++) {
		REG_WR_IND(bp, addr + i*4, data[i]);
			touch_softlockup_watchdog();

static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
	if (bp->dmae_ready) {
		while (len > DMAE_LEN32_WR_MAX) {
			bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
					 addr + offset, DMAE_LEN32_WR_MAX);
			offset += DMAE_LEN32_WR_MAX * 4;
			len -= DMAE_LEN32_WR_MAX;
		bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
		bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);

static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
	u32 buf_len = (((len * 4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len * 4));
	u32 buf_len32 = buf_len / 4;

	memset(bp->gunzip_buf, fill, buf_len);

	for (i = 0; i < len; i += buf_len32) {
		u32 cur_len = min(buf_len32, len - i);

		bnx2x_write_big_buf(bp, addr + i * 4, cur_len);

static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
	u32 buf_len32 = FW_BUF_SIZE / 4;

	/* 64 bit value is in a blob: first low DWORD, then high DWORD */
	data64 = HILO_U64((*(data + 1)), (*data));
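	/*
	 * Illustration (added comment, assuming HILO_U64(hi, lo) packs its
	 * arguments as ((u64)hi << 32) | lo): for data[] = { 0x11111111,
	 * 0x22222222 } the resulting data64 is 0x2222222211111111.
	 */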
	len64 = min((u32)(FW_BUF_SIZE/8), len64);
	for (i = 0; i < len64; i++) {
		u64 *pdata = ((u64 *)(bp->gunzip_buf)) + i;

	for (i = 0; i < len; i += buf_len32) {
		u32 cur_len = min(buf_len32, len - i);

		bnx2x_write_big_buf(bp, addr + i * 4, cur_len);

/*********************************************************
   There are different blobs for each PRAM section.
   In addition, each blob write operation is divided into a few operations
   in order to decrease the amount of physically contiguous buffer needed.
   Thus, when we select a blob, the address may carry some offset
   from the beginning of the PRAM section.
   The same holds for the INT_TABLE sections.
**********************************************************/
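/*
 * Illustration (assumption, added for clarity): an init operation that
 * targets, say, TSEM_REG_PRAM + 0x1000 still falls inside the TSEM PRAM
 * window checked by IF_IS_PRAM_ADDR() below, so bnx2x_sel_blob() picks
 * tsem_pram_data_e1/e1h; the operation's data offset (blob_off) then skips
 * the part of the blob already written by earlier operations.
 */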

#define IF_IS_INT_TABLE_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))

#define IF_IS_PRAM_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))

static const u32 *bnx2x_sel_blob(u32 addr, const u32 *data, int is_e1)
	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
		data = is_e1 ? tsem_int_table_data_e1 :
			       tsem_int_table_data_e1h;

	IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
		data = is_e1 ? csem_int_table_data_e1 :
			       csem_int_table_data_e1h;

	IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
		data = is_e1 ? usem_int_table_data_e1 :
			       usem_int_table_data_e1h;

	IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
		data = is_e1 ? xsem_int_table_data_e1 :
			       xsem_int_table_data_e1h;

	IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
		data = is_e1 ? tsem_pram_data_e1 : tsem_pram_data_e1h;

	IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
		data = is_e1 ? csem_pram_data_e1 : csem_pram_data_e1h;

	IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
		data = is_e1 ? usem_pram_data_e1 : usem_pram_data_e1h;

	IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
		data = is_e1 ? xsem_pram_data_e1 : xsem_pram_data_e1h;

static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
			     u32 len, int gunzip, int is_e1, u32 blob_off)
	data = bnx2x_sel_blob(addr, data, is_e1) + blob_off;

		temp = kmalloc(len, GFP_KERNEL);
		size = (len / 4) + ((len % 4) ? 1 : 0);
		for (i = 0; i < size; i++)
			temp[i] = swab32(data[i]);

		rc = bnx2x_gunzip(bp, (u8 *)data, len);
			BNX2X_ERR("gunzip failed ! rc %d\n", rc);

		len = bp->gunzip_outlen;

		for (i = 0; i < len; i++)
			((u32 *)bp->gunzip_buf)[i] =
				swab32(((u32 *)bp->gunzip_buf)[i]);

		if ((len * 4) > FW_BUF_SIZE) {
			BNX2X_ERR("LARGE DMAE OPERATION ! "
				  "addr 0x%x len 0x%x\n", addr, len*4);
		memcpy(bp->gunzip_buf, data, len * 4);

	if (bp->dmae_ready) {
		while (len > DMAE_LEN32_WR_MAX) {
			bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
					 addr + offset, DMAE_LEN32_WR_MAX);
			offset += DMAE_LEN32_WR_MAX * 4;
			len -= DMAE_LEN32_WR_MAX;
		bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
		bnx2x_init_ind_wr(bp, addr, bp->gunzip_buf, len);

static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
	int is_e1 = CHIP_IS_E1(bp);
	int is_e1h = CHIP_IS_E1H(bp);
	int is_emul_e1h = (CHIP_REV_IS_EMUL(bp) && is_e1h);
	u32 op_type, addr, len;
	const u32 *data, *data_base;

	if (CHIP_REV_IS_FPGA(bp))
	else if (CHIP_REV_IS_EMUL(bp))

		data_base = init_data_e1;
	else /* CHIP_IS_E1H(bp) */
		data_base = init_data_e1h;

	for (i = op_start; i < op_end; i++) {

		op = (union init_op *)&(init_ops[i]);

		op_type = op->str_wr.op;
		addr = op->str_wr.offset;
		len = op->str_wr.data_len;
		data = data_base + op->str_wr.data_off;

		/* careful! it must be in order */
		if (unlikely(op_type > OP_WB)) {

			if (op_type <= OP_WB_E1) {
				op_type -= (OP_RD_E1 - OP_RD);

			} else if (op_type <= OP_WB_E1H) {
				op_type -= (OP_RD_E1H - OP_RD);

		/* HW/EMUL specific */
		if (op_type == hw_wr)

		/* EMUL on E1H is special */
		if ((op_type == OP_WR_EMUL_E1H) && is_emul_e1h)

			REG_WR(bp, addr, op->write.val);

			bnx2x_init_str_wr(bp, addr, data, len);

			bnx2x_init_wr_wb(bp, addr, data, len, 0, is_e1, 0);

			bnx2x_init_ind_wr(bp, addr, data, len);

			bnx2x_init_fill(bp, addr, 0, op->zero.len);

			bnx2x_init_wr_wb(bp, addr, data, len, 1, is_e1,
					 op->str_wr.data_off);

			bnx2x_init_wr_64(bp, addr, data, len);
			/* happens whenever an op is of a different HW */
			DP(NETIF_MSG_HW, "skipping init operation "
			   "index %d[%d:%d]: type %d addr 0x%x "
			   "len 0x%x (%d)\n",
			   i, op_start, op_end, op_type, addr, len, len);

/****************************************************************************
* PXP
****************************************************************************/

/*
 * This code configures the PCI read/write arbiter
 * which implements a weighted round robin
 * between the virtual queues in the chip.
 *
 * The values were derived for each PCI max payload and max request size.
 * Since max payload and max request size are only known at run time,
 * this is done as a separate init stage.
 */
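
/*
 * Background note (standard PCIe encoding, added for clarity): the device
 * control register encodes max payload size in bits [7:5] and max read
 * request size in bits [14:12], both as 128 << value bytes. The w_order
 * and r_order values extracted in bnx2x_init_pxp() below therefore index
 * the arbiter tables by size: order 0 -> 128B, 1 -> 256B, 2 -> 512B,
 * 3 -> 1024B.
 */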

/* configuration for one arbiter queue */

/* derived configuration for each read queue for each max request size */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
/* 1 */	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
	{ {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
	{ {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
};

/* derived configuration for each write queue for each max request size */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
/* 1 */	{ {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
	{ {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
	{ {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
	{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
	{ {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
};

/* register addresses for read queues */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
/* 1 */	{PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
		PXP2_REG_RQ_BW_RD_UBOUND0},
	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
		PXP2_REG_RQ_BW_RD_UBOUND4},
	{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
		PXP2_REG_RQ_BW_RD_UBOUND5},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
	{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
		PXP2_REG_RQ_BW_RD_UBOUND12},
	{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
		PXP2_REG_RQ_BW_RD_UBOUND13},
	{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
		PXP2_REG_RQ_BW_RD_UBOUND14},
	{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
		PXP2_REG_RQ_BW_RD_UBOUND15},
	{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
		PXP2_REG_RQ_BW_RD_UBOUND16},
	{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
		PXP2_REG_RQ_BW_RD_UBOUND17},
	{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
		PXP2_REG_RQ_BW_RD_UBOUND18},
/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
		PXP2_REG_RQ_BW_RD_UBOUND19},
	{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
		PXP2_REG_RQ_BW_RD_UBOUND20},
	{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
		PXP2_REG_RQ_BW_RD_UBOUND22},
	{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
		PXP2_REG_RQ_BW_RD_UBOUND23},
	{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
		PXP2_REG_RQ_BW_RD_UBOUND24},
	{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
		PXP2_REG_RQ_BW_RD_UBOUND25},
	{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
		PXP2_REG_RQ_BW_RD_UBOUND26},
	{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
		PXP2_REG_RQ_BW_RD_UBOUND27},
	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28}
};

/* register addresses for write queues */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
/* 1 */	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28},
	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
		PXP2_REG_RQ_BW_WR_UBOUND29},
	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
		PXP2_REG_RQ_BW_WR_UBOUND30}
};

static void bnx2x_init_pxp(struct bnx2x *bp)
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);

	if (r_order > MAX_RD_ORD) {
		DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
		   r_order, MAX_RD_ORD);
		r_order = MAX_RD_ORD;
	if (w_order > MAX_WR_ORD) {
		DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
		   w_order, MAX_WR_ORD);
		w_order = MAX_WR_ORD;
	if (CHIP_REV_IS_FPGA(bp)) {
		DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");

	DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);

	for (i = 0; i < NUM_RD_Q-1; i++) {
		REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
		REG_WR(bp, read_arb_addr[i].add,
		       read_arb_data[i][r_order].add);
		REG_WR(bp, read_arb_addr[i].ubound,
		       read_arb_data[i][r_order].ubound);

	for (i = 0; i < NUM_WR_Q-1; i++) {
		if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
		    (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {

			REG_WR(bp, write_arb_addr[i].l,
			       write_arb_data[i][w_order].l);

			REG_WR(bp, write_arb_addr[i].add,
			       write_arb_data[i][w_order].add);

			REG_WR(bp, write_arb_addr[i].ubound,
			       write_arb_data[i][w_order].ubound);

			val = REG_RD(bp, write_arb_addr[i].l);
			REG_WR(bp, write_arb_addr[i].l,
			       val | (write_arb_data[i][w_order].l << 10));

			val = REG_RD(bp, write_arb_addr[i].add);
			REG_WR(bp, write_arb_addr[i].add,
			       val | (write_arb_data[i][w_order].add << 10));

			val = REG_RD(bp, write_arb_addr[i].ubound);
			REG_WR(bp, write_arb_addr[i].ubound,
			       val | (write_arb_data[i][w_order].ubound << 7));

	val = write_arb_data[NUM_WR_Q-1][w_order].add;
	val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
	val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);

	val = read_arb_data[NUM_RD_Q-1][r_order].add;
	val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
	val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);

	REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
	REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);

	if (r_order == MAX_RD_ORD)
		REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);

	REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));

	if (CHIP_IS_E1H(bp)) {
		val = ((w_order == 0) ? 2 : 3);
		REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
		REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
		REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
		REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
		REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);

/****************************************************************************
* CDU
****************************************************************************/

#define CDU_REGION_NUMBER_XCM_AG	2
#define CDU_REGION_NUMBER_UCM_AG	4

/*
 * String-to-compress [31:8] = CID (all 24 bits)
 * String-to-compress [7:4]  = Region
 * String-to-compress [3:0]  = Type
 */
#define CDU_VALID_DATA(_cid, _region, _type) \
	(((_cid) << 8) | (((_region) & 0xf) << 4) | (((_type) & 0xf)))
#define CDU_CRC8(_cid, _region, _type) \
	calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff)
#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \
	(0x80 | (CDU_CRC8(_cid, _region, _type) & 0x7f))
#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type) \
	(0x80 | (((_type) & 0xf) << 3) | ((_crc) & 0x7))
#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
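
/*
 * Worked example (added for illustration, values are hypothetical): for a
 * CID of 0x17, region CDU_REGION_NUMBER_XCM_AG (2) and a type value of 1,
 * CDU_VALID_DATA(0x17, 2, 1) = (0x17 << 8) | (2 << 4) | 1 = 0x1721, and
 * CDU_RSRVD_VALUE_TYPE_A() sets bit 7 on top of the low 7 bits of the
 * CRC-8 of that value.
 */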

/*****************************************************************************
 * Calculates crc 8 on a word value: polynomial 0-1-2-8
 * Code was translated from Verilog.
 ****************************************************************************/
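/*
 * Note (added for clarity): "polynomial 0-1-2-8" denotes x^8 + x^2 + x + 1,
 * the same polynomial used by the ATM HEC CRC-8. CDU_CRC8() above seeds the
 * calculation with an initial CRC value of 0xff.
 */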

static u8 calc_crc8(u32 data, u8 crc)

	/* split the data into 32 bits */
	for (i = 0; i < 32; i++) {

	/* split the crc into 8 bits */
	for (i = 0; i < 8; i++) {

	NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
		    D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
	NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
		    D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
		    D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
	NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
		    D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
		    C[0] ^ C[1] ^ C[4] ^ C[5];
	NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
		    D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
		    C[1] ^ C[2] ^ C[5] ^ C[6];
	NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
		    D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
		    C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
	NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
		    D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
	NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
		    D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
	NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
		    D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^

	for (i = 0; i < 8; i++)
		crc_res |= (NewCRC[i] << i);

/* register addresses are not in order,
   so these arrays help simplify the code */
static const int cm_start[E1H_FUNC_MAX][9] = {
	{MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
	 XCM_FUNC0_START, TSEM_FUNC0_START, USEM_FUNC0_START, CSEM_FUNC0_START,
	{MISC_FUNC1_START, TCM_FUNC1_START, UCM_FUNC1_START, CCM_FUNC1_START,
	 XCM_FUNC1_START, TSEM_FUNC1_START, USEM_FUNC1_START, CSEM_FUNC1_START,
	{MISC_FUNC2_START, TCM_FUNC2_START, UCM_FUNC2_START, CCM_FUNC2_START,
	 XCM_FUNC2_START, TSEM_FUNC2_START, USEM_FUNC2_START, CSEM_FUNC2_START,
	{MISC_FUNC3_START, TCM_FUNC3_START, UCM_FUNC3_START, CCM_FUNC3_START,
	 XCM_FUNC3_START, TSEM_FUNC3_START, USEM_FUNC3_START, CSEM_FUNC3_START,
	{MISC_FUNC4_START, TCM_FUNC4_START, UCM_FUNC4_START, CCM_FUNC4_START,
	 XCM_FUNC4_START, TSEM_FUNC4_START, USEM_FUNC4_START, CSEM_FUNC4_START,
	{MISC_FUNC5_START, TCM_FUNC5_START, UCM_FUNC5_START, CCM_FUNC5_START,
	 XCM_FUNC5_START, TSEM_FUNC5_START, USEM_FUNC5_START, CSEM_FUNC5_START,
	{MISC_FUNC6_START, TCM_FUNC6_START, UCM_FUNC6_START, CCM_FUNC6_START,
	 XCM_FUNC6_START, TSEM_FUNC6_START, USEM_FUNC6_START, CSEM_FUNC6_START,
	{MISC_FUNC7_START, TCM_FUNC7_START, UCM_FUNC7_START, CCM_FUNC7_START,
	 XCM_FUNC7_START, TSEM_FUNC7_START, USEM_FUNC7_START, CSEM_FUNC7_START,

static const int cm_end[E1H_FUNC_MAX][9] = {
	{MISC_FUNC0_END, TCM_FUNC0_END, UCM_FUNC0_END, CCM_FUNC0_END,
	 XCM_FUNC0_END, TSEM_FUNC0_END, USEM_FUNC0_END, CSEM_FUNC0_END,
	{MISC_FUNC1_END, TCM_FUNC1_END, UCM_FUNC1_END, CCM_FUNC1_END,
	 XCM_FUNC1_END, TSEM_FUNC1_END, USEM_FUNC1_END, CSEM_FUNC1_END,
	{MISC_FUNC2_END, TCM_FUNC2_END, UCM_FUNC2_END, CCM_FUNC2_END,
	 XCM_FUNC2_END, TSEM_FUNC2_END, USEM_FUNC2_END, CSEM_FUNC2_END,
	{MISC_FUNC3_END, TCM_FUNC3_END, UCM_FUNC3_END, CCM_FUNC3_END,
	 XCM_FUNC3_END, TSEM_FUNC3_END, USEM_FUNC3_END, CSEM_FUNC3_END,
	{MISC_FUNC4_END, TCM_FUNC4_END, UCM_FUNC4_END, CCM_FUNC4_END,
	 XCM_FUNC4_END, TSEM_FUNC4_END, USEM_FUNC4_END, CSEM_FUNC4_END,
	{MISC_FUNC5_END, TCM_FUNC5_END, UCM_FUNC5_END, CCM_FUNC5_END,
	 XCM_FUNC5_END, TSEM_FUNC5_END, USEM_FUNC5_END, CSEM_FUNC5_END,
	{MISC_FUNC6_END, TCM_FUNC6_END, UCM_FUNC6_END, CCM_FUNC6_END,
	 XCM_FUNC6_END, TSEM_FUNC6_END, USEM_FUNC6_END, CSEM_FUNC6_END,
	{MISC_FUNC7_END, TCM_FUNC7_END, UCM_FUNC7_END, CCM_FUNC7_END,
	 XCM_FUNC7_END, TSEM_FUNC7_END, USEM_FUNC7_END, CSEM_FUNC7_END,

static const int hc_limits[E1H_FUNC_MAX][2] = {
	{HC_FUNC0_START, HC_FUNC0_END},
	{HC_FUNC1_START, HC_FUNC1_END},
	{HC_FUNC2_START, HC_FUNC2_END},
	{HC_FUNC3_START, HC_FUNC3_END},
	{HC_FUNC4_START, HC_FUNC4_END},
	{HC_FUNC5_START, HC_FUNC5_END},
	{HC_FUNC6_START, HC_FUNC6_END},
	{HC_FUNC7_START, HC_FUNC7_END}
};

#endif /* BNX2X_INIT_H */