/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"
/*
 * Get one cq entry from cxio and map it to openib.
 *
 * Returns:
 *	0			cq empty
 *	1			cqe returned
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			    struct ib_wc *wc)
{
	struct iwch_qp *qhp = NULL;
	struct t3_cqe cqe, *rd_cqe;
	struct t3_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

	rd_cqe = cxio_next_cqe(&chp->cq);
	if (!rd_cqe)
		return 0;
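
	/*
	 * Find the QP this CQE belongs to and hold its lock so the WQ
	 * state cannot change while cxio consumes the CQE.  The QP may
	 * already be gone, in which case we poll without a WQ.
	 */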
	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
			   &credit);
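
	/* Only T3A parts need CQ credits returned explicitly by the driver. */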
	if (t3a_device(chp->rhp) && credit) {
		PDBG("%s updating %d cq credits on id %d\n", __FUNCTION__,
		     credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}

	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	wc->wr_id = cookie;
	wc->vendor_err = CQE_STATUS(cqe);

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __FUNCTION__,
	     CQE_QPID(cqe), CQE_TYPE(cqe),
	     CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
	     CQE_WRID_LOW(cqe), (unsigned long long) cookie);
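
	/* CQE type 0 is an RQ (receive) completion; otherwise map the SQ opcode. */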
	if (CQE_TYPE(cqe) == 0) {
		wc->byte_len = CQE_LEN(cqe);
		wc->opcode = IB_WC_RECV;
	} else {
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		/* these aren't supported yet */
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
		case T3_FAST_REGISTER:
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}
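
	/* A CQE flushed by software always completes in error. */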
	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected cqe_status 0x%x for "
			       "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}
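
/*
 * iwch_poll_cq() is this provider's poll_cq verb: it fills in up to
 * num_entries work completions and returns how many were filled, or a
 * negative errno on a fatal error.
 */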
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
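
	/* Serialize pollers of this CQ; each loop iteration fills one wc slot. */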
	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		/*
		 * Because T3 can post CQEs that are _not_ associated
		 * with a WR, we might have to poll again after removing
		 * one of these.
		 */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);

	if (err < 0)
		return err;
	else
		return npolled;
}
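
/*
 * For illustration only (not part of this driver): a consumer would
 * typically drain this CQ through the generic ib_poll_cq() verb, which
 * dispatches to iwch_poll_cq() above.  handle_completion() here is a
 * hypothetical consumer-side helper.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_completion(&wc[i]);
 *	if (n < 0)
 *		printk(KERN_ERR "poll_cq failed: %d\n", n);
 */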