/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/timer.h>
#include "firmware_exports.h"

#define T3_MAX_INLINE 64
#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
#define T3_STAG0_PAGE_SHIFT 15
#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
#define Q_FULL(rptr,wptr,size_log2)  ( (((wptr)-(rptr))>>(size_log2)) && \
				       ((wptr)!=(rptr)) )
#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
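
/*
 * Illustrative sketch (not part of the driver): how the ring macros above
 * cooperate.  rptr and wptr are free-running 32-bit counters; only the low
 * size_log2 bits index the array, and bit size_log2 of a pointer drives the
 * generation bit.  For a queue with size_log2 == 2 (4 entries):
 *
 *	u32 rptr = 0, wptr = 0;		// Q_EMPTY(rptr, wptr) == 1
 *
 *	wptr += 4;			// producer posts four entries
 *	// Q_COUNT(rptr, wptr) == 4, Q_FREECNT(rptr, wptr, 2) == 0,
 *	// Q_FULL(rptr, wptr, 2) == 1
 *
 *	rptr += 1;			// consumer retires one entry
 *	// Q_PTR2IDX(rptr, 2) == 1; the next WR, posted at wptr == 4, lands
 *	// in slot Q_PTR2IDX(4, 2) == 0 where Q_GENBIT(4, 2) == 0, i.e. the
 *	// generation bit has flipped relative to Q_GENBIT(0, 2) == 1.
 */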
static inline void ring_doorbell(void __iomem *doorbell, u32 qpid)
{
	writel(((1<<31) | qpid), doorbell);
}
#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
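
/*
 * SEQ32_GE() is a serial-number style comparison: the unsigned difference
 * is tested for a clear sign bit, so the result stays correct across 32-bit
 * wrap.  For example, SEQ32_GE(0x00000002, 0xfffffffe) is true because
 * 0x00000002 - 0xfffffffe == 0x00000004 has its top bit clear.
 */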
enum t3_wr_flags {
	T3_COMPLETION_FLAG = 0x01,
	T3_NOTIFY_FLAG = 0x02,
	T3_SOLICITED_EVENT_FLAG = 0x04,
	T3_READ_FENCE_FLAG = 0x08,
	T3_LOCAL_FENCE_FLAG = 0x10
} __attribute__ ((packed));
enum t3_wr_opcode {
	T3_WR_BP = FW_WROPCODE_RI_BYPASS,
	T3_WR_SEND = FW_WROPCODE_RI_SEND,
	T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
	T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
	T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
	T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
	T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
	T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
	T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
	T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
} __attribute__ ((packed));
	T3_RDMA_WRITE,			/* IETF RDMAP v1.0 ... */
	T3_RDMA_INIT,			/* CHELSIO RI specific ... */
	T3_RDMA_READ_REQ_WITH_INV,
} __attribute__ ((packed));
static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
{
	switch (wrop) {
	case T3_WR_BP: return T3_BYPASS;
	case T3_WR_SEND: return T3_SEND;
	case T3_WR_WRITE: return T3_RDMA_WRITE;
	case T3_WR_READ: return T3_READ_REQ;
	case T3_WR_INV_STAG: return T3_LOCAL_INV;
	case T3_WR_BIND: return T3_BIND_MW;
	case T3_WR_INIT: return T3_RDMA_INIT;
	case T3_WR_QP_MOD: return T3_QP_MOD;
	case T3_WR_FASTREG: return T3_FAST_REGISTER;
	default: break;
	}
	return -1;
}
/* Work request id */
#define WRID(wrid) (wrid.id1)
#define WRID_GEN(wrid) (wrid.id0.wr_gen)
#define WRID_IDX(wrid) (wrid.id0.wr_idx)
#define WRID_LO(wrid) (wrid.id0.wr_lo)
struct fw_riwrh {
	__be32 op_seop_flags;
	__be32 gen_tid_len;
};
#define S_FW_RIWR_OP 24
#define M_FW_RIWR_OP 0xff
#define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
#define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)

#define S_FW_RIWR_SOPEOP 22
#define M_FW_RIWR_SOPEOP 0x3
#define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)

#define S_FW_RIWR_FLAGS 8
#define M_FW_RIWR_FLAGS 0x3fffff
#define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
#define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)

#define S_FW_RIWR_TID 8
#define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)

#define S_FW_RIWR_LEN 0
#define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)

#define S_FW_RIWR_GEN 31
#define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)
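
/*
 * Illustrative sketch (not part of the driver): packing and re-reading the
 * big-endian header words of a struct fw_riwrh with the field macros above
 * (this is essentially what build_fw_riwrh() further down does).  wqe,
 * genbit, tid and len are placeholders.
 *
 *	wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
 *					 V_FW_RIWR_SOPEOP(T3_SOPEOP) |
 *					 V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG));
 *	wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
 *				       V_FW_RIWR_TID(tid) |
 *				       V_FW_RIWR_LEN(len));
 *
 *	// and on the read side:
 *	u8 op = G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
 */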
/* If num_sgle is zero, flit 5+ contains immediate data. */
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	struct t3_sge sgl[T3_MAX_SGE];	/* 4+ */
#define T3_MAX_FASTREG_DEPTH 24
#define T3_MAX_FASTREG_FRAG 10

struct t3_fastreg_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 va_base_hi;	/* 3 */
	__be32 va_base_lo_fbo;
	__be32 page_type_perms;	/* 4 */
	__be64 pbl_addrs[0];	/* 5+ */
/*
 * If a fastreg wr spans multiple wqes, then the 2nd fragment looks like this.
 */
struct t3_pbl_frag {
	struct fw_riwrh wrh;	/* 0 */
	__be64 pbl_addrs[14];	/* 1..14 */
};
#define S_FR_PAGE_COUNT 24
#define M_FR_PAGE_COUNT 0xff
#define V_FR_PAGE_COUNT(x) ((x) << S_FR_PAGE_COUNT)
#define G_FR_PAGE_COUNT(x) ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)

#define S_FR_PAGE_SIZE 16
#define M_FR_PAGE_SIZE 0x1f
#define V_FR_PAGE_SIZE(x) ((x) << S_FR_PAGE_SIZE)
#define G_FR_PAGE_SIZE(x) ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)

#define M_FR_TYPE 0x1
#define V_FR_TYPE(x) ((x) << S_FR_TYPE)
#define G_FR_TYPE(x) ((((x) >> S_FR_TYPE)) & M_FR_TYPE)

#define M_FR_PERMS 0xff
#define V_FR_PERMS(x) ((x) << S_FR_PERMS)
#define G_FR_PERMS(x) ((((x) >> S_FR_PERMS)) & M_FR_PERMS)
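
/*
 * Illustrative sketch (not part of the driver): composing the
 * page_type_perms word of a struct t3_fastreg_wr from the fields above.
 * npages, page_size, type and perms are placeholders for values the caller
 * derives from the memory registration request.
 *
 *	wqe->page_type_perms = cpu_to_be32(V_FR_PAGE_COUNT(npages) |
 *					   V_FR_PAGE_SIZE(page_size) |
 *					   V_FR_TYPE(type) |
 *					   V_FR_PERMS(perms));
 */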
struct t3_local_inv_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */

struct t3_rdma_write_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be64 to_sink;		/* 3 */
	struct t3_sge sgl[T3_MAX_SGE];	/* 5+ */

struct t3_rdma_read_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be64 rem_to;		/* 3 */
	__be32 local_stag;	/* 4 */
	__be64 local_to;	/* 5 */

struct t3_bind_mw_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u16 reserved;		/* 2 */
	__be32 mw_stag;		/* 3 */
	__be64 mw_va;		/* 4 */
	__be32 mr_pbl_addr;	/* 5 */

struct t3_receive_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u8 pagesz[T3_MAX_SGE];
	__be32 num_sgle;	/* 2 */
	struct t3_sge sgl[T3_MAX_SGE];	/* 3+ */
	__be32 pbl_addr[T3_MAX_SGE];

struct t3_bypass_wr {
	union t3_wrid wrid;	/* 1 */

struct t3_modify_qp_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 flags;		/* 2 */
	__be32 quiesce;		/* 2 */
	__be32 max_ird;		/* 3 */
	__be32 max_ord;		/* 3 */
	__be64 sge_cmd;		/* 4 */
enum t3_modify_qp_flags {
	MODQP_QUIESCE = 0x01,
	MODQP_MAX_IRD = 0x02,
	MODQP_MAX_ORD = 0x04,
	MODQP_WRITE_EC = 0x08,
	MODQP_READ_EC = 0x10,
};

enum t3_mpa_attrs {
	uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
	uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
	uP_RI_MPA_CRC_ENABLE = 0x4,
	uP_RI_MPA_IETF_ENABLE = 0x8
} __attribute__ ((packed));
enum t3_qp_caps {
	uP_RI_QP_RDMA_READ_ENABLE = 0x01,
	uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
	uP_RI_QP_BIND_ENABLE = 0x04,
	uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
	uP_RI_QP_STAG0_ENABLE = 0x10
} __attribute__ ((packed));
enum rdma_init_rtr_types {

#define M_RTR_TYPE 0x3
#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)

struct t3_rdma_init_attr {
	enum t3_mpa_attrs mpaattrs;
	enum t3_qp_caps qpcaps;
	enum rdma_init_rtr_types rtr_type;

struct t3_rdma_init_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 scqid;		/* 3 */
	__be32 rq_addr;		/* 4 */
	__be16 flags_rtr_type;
	__be64 qp_dma_addr;	/* 7 */
	__be32 qp_dma_size;	/* 8 */

struct t3_wq_in_err {

enum rdma_init_wr_flags {
	MPA_INITIATOR = (1<<0),

	struct t3_send_wr send;
	struct t3_rdma_write_wr write;
	struct t3_rdma_read_wr read;
	struct t3_receive_wr recv;
	struct t3_fastreg_wr fastreg;
	struct t3_pbl_frag pbl_frag;
	struct t3_local_inv_wr local_inv;
	struct t3_bind_mw_wr bind;
	struct t3_bypass_wr bypass;
	struct t3_rdma_init_wr init;
	struct t3_modify_qp_wr qp_mod;
	struct t3_genbit genbit;
	struct t3_wq_in_err wq_in_err;

#define T3_SQ_CQE_FLIT 13
#define T3_SQ_COOKIE_FLIT 14

#define T3_RQ_COOKIE_FLIT 13
#define T3_RQ_CQE_FLIT 14
static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
{
	return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
}
enum t3_wr_hdr_bits {
	T3_SOPEOP = T3_EOP|T3_SOP,
static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
				  enum t3_wr_flags flags, u8 genbit, u32 tid,
				  unsigned int len, u8 sopeop)
{
	wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
					 V_FW_RIWR_SOPEOP(sopeop) |
					 V_FW_RIWR_FLAGS(flags));
	wmb();
	wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
				       V_FW_RIWR_TID(tid) |
				       V_FW_RIWR_LEN(len));
	/* 2nd gen bit... */
	((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);
}
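
/*
 * Illustrative sketch (not part of the driver): a caller normally fills in
 * the opcode-specific flits of the WQE first and stamps the header last,
 * since writing the generation bits is what publishes the WQE; the doorbell
 * then tells the adapter to look at the queue.  wqe, wq, len, qpid and
 * doorbell are placeholders.
 *
 *	build_fw_riwrh((struct fw_riwrh *)wqe, T3_WR_SEND, T3_COMPLETION_FLAG,
 *		       Q_GENBIT(wq->wptr, wq->size_log2), 0, len, T3_SOPEOP);
 *	ring_doorbell(doorbell, qpid);
 */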
/*
 * T3 ULP2_TX commands
 */
/* T3 MC7 RDMA TPT entry format */

	TPT_NON_SHARED_MR = 0x0,
	TPT_MW_RELAXED_PROTECTION = 0x3

	TPT_LOCAL_READ = 0x8,
	TPT_LOCAL_WRITE = 0x4,
	TPT_REMOTE_READ = 0x2,
	TPT_REMOTE_WRITE = 0x1

	__be32 valid_stag_pdid;
	__be32 flags_pagesize_qpid;
	__be32 rsvd_pbl_addr;
	__be32 va_low_or_fbo;
	__be32 rsvd_bind_cnt_or_pstag;
	__be32 rsvd_pbl_size;

#define S_TPT_VALID 31
#define V_TPT_VALID(x) ((x) << S_TPT_VALID)
#define F_TPT_VALID V_TPT_VALID(1U)

#define S_TPT_STAG_KEY 23
#define M_TPT_STAG_KEY 0xFF
#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)

#define S_TPT_STAG_STATE 22
#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)

#define S_TPT_STAG_TYPE 20
#define M_TPT_STAG_TYPE 0x3
#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)

#define M_TPT_PDID 0xFFFFF
#define V_TPT_PDID(x) ((x) << S_TPT_PDID)
#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)

#define S_TPT_PERM 28
#define M_TPT_PERM 0xF
#define V_TPT_PERM(x) ((x) << S_TPT_PERM)
#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)

#define S_TPT_REM_INV_DIS 27
#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)

#define S_TPT_ADDR_TYPE 26
#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)

#define S_TPT_MW_BIND_ENABLE 25
#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)

#define S_TPT_PAGE_SIZE 20
#define M_TPT_PAGE_SIZE 0x1F
#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)

#define S_TPT_PBL_ADDR 0
#define M_TPT_PBL_ADDR 0x1FFFFFFF
#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)

#define M_TPT_QPID 0xFFFFF
#define V_TPT_QPID(x) ((x) << S_TPT_QPID)
#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)

#define S_TPT_PSTAG 0
#define M_TPT_PSTAG 0xFFFFFF
#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)

#define S_TPT_PBL_SIZE 0
#define M_TPT_PBL_SIZE 0xFFFFF
#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
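
/*
 * Illustrative sketch (not part of the driver): building the first word of a
 * TPT entry for an ordinary non-shared MR with the field macros above.
 * stag_key and pdid are placeholders.
 *
 *	tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
 *					  V_TPT_STAG_KEY(stag_key) |
 *					  V_TPT_STAG_STATE(1) |
 *					  V_TPT_STAG_TYPE(TPT_NON_SHARED_MR) |
 *					  V_TPT_PDID(pdid));
 */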
#define M_CQE_OOO 0x1
#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
#define V_CQE_OOO(x) ((x)<<S_CQE_OOO)
#define S_CQE_QPID 12
#define M_CQE_QPID 0x7FFFF
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)

#define S_CQE_SWCQE 11
#define M_CQE_SWCQE 0x1
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)

#define S_CQE_GENBIT 10
#define M_CQE_GENBIT 0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)

#define S_CQE_STATUS 5
#define M_CQE_STATUS 0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)

#define M_CQE_TYPE 0x1
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)

#define S_CQE_OPCODE 0
#define M_CQE_OPCODE 0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x).header)))
#define CQE_OOO(x) (G_CQE_OOO(be32_to_cpu((x).header)))
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x).header)))
#define CQE_GENBIT(x) (G_CQE_GENBIT(be32_to_cpu((x).header)))
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x).header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x).header)))
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x).header)))

#define CQE_LEN(x) (be32_to_cpu((x).len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x).u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x).u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)
#define CQE_WRID_WPTR(x) ((x).u.scqe.wrid_low)

/* generic accessor macros */
#define CQE_WRID_HI(x) ((x).u.scqe.wrid_hi)
#define CQE_WRID_LOW(x) ((x).u.scqe.wrid_low)
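
/*
 * Illustrative sketch (not part of the driver): decoding a completion with
 * the accessors above.  cqe is a struct t3_cqe pulled off the CQ;
 * handle_error(), complete_sq_wr() and complete_rq_wr() are placeholder
 * helpers, not driver functions.
 *
 *	if (CQE_STATUS(cqe) != TPT_ERR_SUCCESS)
 *		handle_error(CQE_STATUS(cqe));
 *	else if (SQ_TYPE(cqe))
 *		complete_sq_wr(CQE_WRID_SQ_WPTR(cqe));	// SQ completion
 *	else
 *		complete_rq_wr(CQE_WRID_MSN(cqe));	// RQ completion
 */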
#define TPT_ERR_SUCCESS 0x0
#define TPT_ERR_STAG 0x1		/* STAG invalid: either the */
					/* STAG is off limit, being 0, */
					/* or STAG_key mismatch */
#define TPT_ERR_PDID 0x2		/* PDID mismatch */
#define TPT_ERR_QPID 0x3		/* QPID mismatch */
#define TPT_ERR_ACCESS 0x4		/* Invalid access right */
#define TPT_ERR_WRAP 0x5		/* Wrap error */
#define TPT_ERR_BOUND 0x6		/* base and bounds violation */
#define TPT_ERR_INVALIDATE_SHARED_MR 0x7	/* attempt to invalidate a */
						/* shared memory region */
#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8	/* attempt to invalidate an */
						/* MR with MWs bound to it */
#define TPT_ERR_ECC 0x9			/* ECC error detected */
#define TPT_ERR_ECC_PSTAG 0xA		/* ECC error detected when */
					/* reading PSTAG for a MW */
#define TPT_ERR_PBL_ADDR_BOUND 0xB	/* pbl addr out of bounds: */
#define TPT_ERR_SWFLUSH 0xC		/* SW FLUSHED */
#define TPT_ERR_CRC 0x10		/* CRC error */
#define TPT_ERR_MARKER 0x11		/* Marker error */
#define TPT_ERR_PDU_LEN_ERR 0x12	/* invalid PDU length */
#define TPT_ERR_OUT_OF_RQE 0x13		/* out of RQE */
#define TPT_ERR_DDP_VERSION 0x14	/* wrong DDP version */
#define TPT_ERR_RDMA_VERSION 0x15	/* wrong RDMA version */
#define TPT_ERR_OPCODE 0x16		/* invalid rdma opcode */
#define TPT_ERR_DDP_QUEUE_NUM 0x17	/* invalid ddp queue number */
#define TPT_ERR_MSN 0x18		/* MSN error */
#define TPT_ERR_TBIT 0x19		/* tag bit not set correctly */
#define TPT_ERR_MO 0x1A			/* MO not 0 for TERMINATE */

#define TPT_ERR_MSN_GAP 0x1B
#define TPT_ERR_MSN_RANGE 0x1C
#define TPT_ERR_IRD_OVERFLOW 0x1D
#define TPT_ERR_RQE_ADDR_BOUND 0x1E	/* RQE addr out of bounds: */
#define TPT_ERR_INTERNAL_ERR 0x1F	/* internal error (opcode mismatch) */
/*
 * A T3 WQ implements both the SQ and RQ.
 */
	union t3_wr *queue;		/* DMA accessible memory */
	dma_addr_t dma_addr;		/* DMA address for HW */
	DECLARE_PCI_UNMAP_ADDR(mapping)	/* unmap cruft */
	u32 error;			/* 1 once we go to ERROR */
	u32 wptr;			/* idx to next available WR slot */
	u32 size_log2;			/* total wq size */
	struct t3_swsq *sq;		/* SW SQ */
	struct t3_swsq *oldest_read;	/* tracks oldest pending read */
	u32 sq_wptr;			/* sq_wptr - sq_rptr == count of */
	u32 sq_rptr;			/* pending wrs */
	u32 sq_size_log2;		/* sq size */
	struct t3_swrq *rq;		/* SW RQ (holds consumer wr_ids) */
	u32 rq_wptr;			/* rq_wptr - rq_rptr == count of */
	u32 rq_rptr;			/* pending wrs */
	struct t3_swrq *rq_oldest_wr;	/* oldest wr on the SW RQ */
	u32 rq_size_log2;		/* rq size */
	u32 rq_addr;			/* rq adapter address */
	void __iomem *doorbell;		/* kernel db */
	u64 udb;			/* user db if any */
	struct cxio_rdev *rdev;

	DECLARE_PCI_UNMAP_ADDR(mapping)
	struct t3_cqe *queue;
	struct t3_cqe *sw_queue;
#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
					 CQE_GENBIT(*cqe))
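
/*
 * Illustrative sketch (not part of the driver): the generation-bit check.
 * Hardware writes each CQE with the genbit for its current pass over the
 * ring, so the slot at rptr is valid only while its CQE genbit still equals
 * Q_GENBIT(rptr, size_log2), which flips every time rptr wraps.  A minimal
 * polling loop over the helpers below is therefore:
 *
 *	while ((cqe = cxio_next_hw_cqe(cq)) != NULL) {
 *		// ... consume *cqe ...
 *		cq->rptr++;
 *	}
 */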
static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{
	wq->queue->wq_in_err.err = 1;
}
static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
{
	struct t3_cqe *cqe;

	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
	if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
		return cqe;
	return NULL;
}
static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
{
	struct t3_cqe *cqe;

	if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
		return cqe;
	}
	return NULL;
}
static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
{
	struct t3_cqe *cqe;

	if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
		return cqe;
	}
	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
	if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))