/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"
#include "cxio_resource.h"

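/*
 * Translate an ib_send_wr into a T3 SEND work request: fill the
 * send-specific fields of the WQE and return the flit count (number of
 * 8-byte words) the firmware header must carry.
 */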
static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
			   u8 *flit_cnt)
{
	int i;
	u32 plen;

	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE;
		else
			wqe->send.rdmaop = T3_SEND;
		wqe->send.rem_stag = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
		else
			wqe->send.rdmaop = T3_SEND_WITH_INV;
		wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->send.reserved[0] = 0;
	wqe->send.reserved[1] = 0;
	wqe->send.reserved[2] = 0;
	plen = 0;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) < plen)
			return -EMSGSIZE;

		plen += wr->sg_list[i].length;
		wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
	}
	wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
	*flit_cnt = 4 + ((wr->num_sge) << 1);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
			    u8 *flit_cnt)
{
	int i;
	u32 plen;

	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->write.rdmaop = T3_RDMA_WRITE;
	wqe->write.reserved[0] = 0;
	wqe->write.reserved[1] = 0;
	wqe->write.reserved[2] = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		plen = 4;
		wqe->write.sgl[0].stag = wr->ex.imm_data;
		wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
		wqe->write.num_sgle = __constant_cpu_to_be32(0);
		*flit_cnt = 6;
	} else {
		plen = 0;
		for (i = 0; i < wr->num_sge; i++) {
			if ((plen + wr->sg_list[i].length) < plen)
				return -EMSGSIZE;

			plen += wr->sg_list[i].length;
			wqe->write.sgl[i].stag =
			    cpu_to_be32(wr->sg_list[i].lkey);
			wqe->write.sgl[i].len =
			    cpu_to_be32(wr->sg_list[i].length);
			wqe->write.sgl[i].to =
			    cpu_to_be64(wr->sg_list[i].addr);
		}
		wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
		*flit_cnt = 5 + ((wr->num_sge) << 1);
	}
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
			   u8 *flit_cnt)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	wqe->read.rdmaop = T3_READ_REQ;
	if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
		wqe->read.local_inv = 1;
	else
		wqe->read.local_inv = 0;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
	wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
	wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
	wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
	*flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
	return 0;
}

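/*
 * Build a T3 fast-register WR.  The PBL entries are written directly into
 * the WQE; if the page list does not fit in one WQE, a second WQE slot is
 * consumed for the remaining PBL fragment and *wr_cnt is bumped to 2.
 */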
static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
			 u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
{
	int i;
	__be64 *p;

	if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
		return -EINVAL;
	*wr_cnt = 1;
	wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fastreg.va_base_lo_fbo =
		cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
	wqe->fastreg.page_type_perms = cpu_to_be32(
		V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
		V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) |
		V_FR_TYPE(TPT_VATO) |
		V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
	p = &wqe->fastreg.pbl_addrs[0];
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {

		/* If we need a 2nd WR, then set it up */
		if (i == T3_MAX_FASTREG_FRAG) {
			*wr_cnt = 2;
			wqe = (union t3_wr *)(wq->queue +
				Q_PTR2IDX((wq->wptr+1), wq->size_log2));
			build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
			       Q_GENBIT(wq->wptr + 1, wq->size_log2),
			       0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
			       T3_EOP);

			p = &wqe->pbl_frag.pbl_addrs[0];
		}
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
	}
	*flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
	if (*flit_cnt > 15)
		*flit_cnt = 15;
	return 0;
}

static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
			  u8 *flit_cnt)
{
	wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->local_inv.reserved = 0;
	*flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
	return 0;
}

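/*
 * Validate each SGE against its memory region (MR state, bounds, no
 * zero-based VA) and convert the SGE address into an index into the
 * adapter's PBL, returning the per-SGE PBL address and page size.
 */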
static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
			    u32 num_sgle, u32 *pbl_addr, u8 *page_size)
{
	int i;
	struct iwch_mr *mhp;
	u64 offset;

	for (i = 0; i < num_sgle; i++) {

		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
		if (!mhp) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (!mhp->attr.state) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (mhp->attr.zbva) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}

		if (sg_list[i].addr < mhp->attr.va_fbo) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) <
		    sg_list[i].addr) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) >
		    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		offset = sg_list[i].addr - mhp->attr.va_fbo;
		offset += mhp->attr.va_fbo &
			  ((1UL << (12 + mhp->attr.page_size)) - 1);
		pbl_addr[i] = ((mhp->attr.pbl_addr -
			       rhp->rdev.rnic_info.pbl_base) >> 3) +
			      (offset >> (12 + mhp->attr.page_size));
		page_size[i] = mhp->attr.page_size;
	}
	return 0;
}

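/*
 * Build a T3 receive WR for SGEs that reference non-zero stags.  The SGE
 * addresses are translated to adapter PBL addresses so the hardware can
 * place the payload directly into the registered memory.
 */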
static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
			   struct ib_recv_wr *wr)
{
	int i, err = 0;
	u32 pbl_addr[T3_MAX_SGE];
	u8 page_size[T3_MAX_SGE];

	err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
			       page_size);
	if (err)
		return err;
	wqe->recv.pagesz[0] = page_size[0];
	wqe->recv.pagesz[1] = page_size[1];
	wqe->recv.pagesz[2] = page_size[2];
	wqe->recv.pagesz[3] = page_size[3];
	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
	for (i = 0; i < wr->num_sge; i++) {
		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);

		/* to in the WQE == the offset into the page */
		wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
				((1UL << (12 + page_size[i])) - 1));

		/* pbl_addr is the adapter's address in the PBL */
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].pbl_addr = 0;
	return 0;
}

static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
				struct ib_recv_wr *wr)
{
	int i;
	u32 pbl_addr;
	u32 pbl_offset;

	/*
	 * The T3 HW requires the PBL in the HW recv descriptor to reference
	 * a PBL entry.  So we allocate the max needed PBL memory here and pass
	 * it to the uP in the recv WR.  The uP will build the PBL and setup
	 * the HW recv descriptor.
	 */
	pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
	if (!pbl_addr)
		return -ENOMEM;

	/*
	 * Compute the 8B aligned offset.
	 */
	pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;

	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);

	for (i = 0; i < wr->num_sge; i++) {

		/*
		 * Use a 128MB page size. This and an imposed 128MB
		 * sge length limit allows us to require only a 2-entry HW
		 * PBL for each SGE.  This restriction is acceptable since
		 * it is not possible to allocate 128MB of contiguous
		 * DMA coherent memory!
		 */
		if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
			return -EINVAL;
		wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;

		/*
		 * T3 restricts a recv to all zero-stag or all non-zero-stag.
		 */
		if (wr->sg_list[i].lkey != 0)
			return -EINVAL;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
		pbl_offset += 2;
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.pagesz[i] = 0;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
	return 0;
}

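/*
 * Post a chain of send work requests.  The QP lock is held while the WQEs
 * are built and the write pointers advanced; the doorbell is rung once
 * after the lock is dropped.
 */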
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 uninitialized_var(t3_wr_flit_cnt);
	enum t3_wr_opcode t3_wr_opcode = 0;
	enum t3_wr_flags t3_wr_flags;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;
	struct t3_swsq *sqp;
	int wr_cnt = 1;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if (num_wrs <= 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		t3_wr_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			t3_wr_flags |= T3_COMPLETION_FLAG;
		sqp = qhp->wq.sq +
		      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_READ_FENCE_FLAG;
			t3_wr_opcode = T3_WR_SEND;
			err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			t3_wr_opcode = T3_WR_WRITE;
			err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			t3_wr_opcode = T3_WR_READ;
			t3_wr_flags = 0; /* T3 reads are always signaled */
			err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
			if (err)
				break;
			sqp->read_len = wqe->read.local_len;
			if (!qhp->wq.oldest_read)
				qhp->wq.oldest_read = sqp;
			break;
		case IB_WR_FAST_REG_MR:
			t3_wr_opcode = T3_WR_FASTREG;
			err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
					    &wr_cnt, &qhp->wq);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
			t3_wr_opcode = T3_WR_INV_STAG;
			err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
		sqp->wr_id = wr->wr_id;
		sqp->opcode = wr2opcode(t3_wr_opcode);
		sqp->sq_wptr = qhp->wq.sq_wptr;
		sqp->complete = 0;
		sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);

		build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, t3_wr_flit_cnt,
			       (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
		PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
		     __func__, (unsigned long long) wr->wr_id, idx,
		     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
		     sqp->opcode);
		wr = wr->next;
		num_wrs--;
		qhp->wq.wptr += wr_cnt;
		++(qhp->wq.sq_wptr);
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
	return err;
}

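/*
 * Post a chain of receive work requests.  SGEs with lkey == 0 take the
 * zero-stag path, which allocates PBL memory for the uP; everything else
 * goes through build_rdma_recv().
 */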
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
			    qhp->wq.rq_size_log2) - 1;
	if (!wr) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	while (wr) {
		if (wr->num_sge > T3_MAX_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		if (num_wrs) {
			if (wr->sg_list[0].lkey)
				err = build_rdma_recv(qhp, wqe, wr);
			else
				err = build_zero_stag_recv(qhp, wqe, wr);
		} else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}
		build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
		PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
		     "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
		     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
		++(qhp->wq.rq_wptr);
		++(qhp->wq.wptr);
		wr = wr->next;
		num_wrs--;
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
	return err;
}

int iwch_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	struct iwch_qp *qhp;
	union t3_wr *wqe;
	u32 pbl_addr;
	u8 page_size;
	u32 num_wrs;
	unsigned long flag;
	struct ib_sge sgl;
	int err = 0;
	enum t3_wr_flags t3_wr_flags;
	u32 idx;
	struct t3_swsq *sqp;

	qhp = to_iwch_qp(qp);
	mhp = to_iwch_mw(mw);
	rhp = qhp->rhp;

	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if ((num_wrs) <= 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
	     mw, mw_bind);
	wqe = (union t3_wr *) (qhp->wq.queue + idx);

	t3_wr_flags = 0;
	if (mw_bind->send_flags & IB_SEND_SIGNALED)
		t3_wr_flags = T3_COMPLETION_FLAG;

	sgl.addr = mw_bind->addr;
	sgl.lkey = mw_bind->mr->lkey;
	sgl.length = mw_bind->length;
	wqe->bind.reserved = 0;
	wqe->bind.type = TPT_VATO;

	/* TBD: check perms */
	wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
	wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
	err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
	if (err) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return err;
	}
	wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
	sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
	sqp->wr_id = mw_bind->wr_id;
	sqp->opcode = T3_BIND_MW;
	sqp->sq_wptr = qhp->wq.sq_wptr;
	sqp->complete = 0;
	sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
	wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
	wqe->bind.mr_pagesz = page_size;
	build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
		       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
		       sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
	++(qhp->wq.wptr);
	++(qhp->wq.sq_wptr);
	spin_unlock_irqrestore(&qhp->lock, flag);

	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

	return err;
}

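/*
 * Map a CQE error status onto the TERMINATE layer/etype and error code to
 * report to the peer.  With no response message we default to an internal
 * (catastrophic) error.
 */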
static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
				    u8 *layer_type, u8 *ecode)
{
	int status = TPT_ERR_INTERNAL_ERR;
	int tagged = 0;
	int opcode = -1;
	int rqtype = 0;
	int send_inv = 0;

	if (rsp_msg) {
		status = CQE_STATUS(rsp_msg->cqe);
		opcode = CQE_OPCODE(rsp_msg->cqe);
		rqtype = RQ_TYPE(rsp_msg->cqe);
		send_inv = (opcode == T3_SEND_WITH_INV) ||
			   (opcode == T3_SEND_WITH_SE_INV);
		tagged = (opcode == T3_RDMA_WRITE) ||
			 (rqtype && (opcode == T3_READ_RESP));
	}

	switch (status) {
	case TPT_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case TPT_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == T3_SEND_WITH_INV) ||
		    (opcode == T3_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case TPT_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case TPT_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case TPT_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case TPT_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case TPT_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case TPT_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case TPT_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case TPT_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case TPT_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case TPT_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case TPT_ERR_MSN:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case TPT_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

int iwch_post_zb_read(struct iwch_qp *qhp)
{
	union t3_wr *wqe;
	struct sk_buff *skb;
	u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;

	PDBG("%s enter\n", __func__);
	skb = alloc_skb(40, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
	memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
	wqe->read.rdmaop = T3_READ_REQ;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(1);
	wqe->read.rem_to = cpu_to_be64(1);
	wqe->read.local_stag = cpu_to_be32(1);
	wqe->read.local_len = cpu_to_be32(0);
	wqe->read.local_to = cpu_to_be64(1);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
						V_FW_RIWR_LEN(flit_cnt));
	skb->priority = CPL_PRIORITY_DATA;
	return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}

/*
 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
 */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
	union t3_wr *wqe;
	struct terminate_message *term;
	struct sk_buff *skb;

	PDBG("%s %d\n", __func__, __LINE__);
	skb = alloc_skb(40, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, 40);
	memset(wqe, 0, 40);
	wqe->send.rdmaop = T3_TERMINATE;

	/* immediate data length */
	wqe->send.plen = htonl(4);

	/* immediate data starts here. */
	term = (struct terminate_message *)wqe->send.sgl;
	build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
			 V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
	skb->priority = CPL_PRIORITY_DATA;
	return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	struct iwch_cq *rchp, *schp;
	int count;
	int flushed;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock_irqrestore(&qhp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&rchp->cq);
	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);
	if (flushed)
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&schp->cq);
	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, *flag);
	if (flushed)
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock_irqsave(&qhp->lock, *flag);
}

static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	if (qhp->ibqp.uobject)
		cxio_set_wq_in_error(&qhp->wq);
	else
		__flush_qp(qhp, flag);
}

/*
 * Return count of RECV WRs posted
 */
u16 iwch_rqes_posted(struct iwch_qp *qhp)
{
	union t3_wr *wqe = qhp->wq.queue;
	u16 count = 0;

	while ((count+1) != 0 && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
		count++;
		wqe++;
	}
	PDBG("%s qhp %p count %u\n", __func__, qhp, count);
	return count;
}

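/*
 * Push the RDMA initialization parameters (queue addresses, MPA attributes,
 * ORD/IRD, posted RQE count, etc.) to the adapter via cxio_rdma_init().
 */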
static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
		     enum iwch_qp_attr_mask mask,
		     struct iwch_qp_attributes *attrs)
{
	struct t3_rdma_init_attr init_attr;
	int ret;

	init_attr.tid = qhp->ep->hwtid;
	init_attr.qpid = qhp->wq.qpid;
	init_attr.pdid = qhp->attr.pd;
	init_attr.scqid = qhp->attr.scq;
	init_attr.rcqid = qhp->attr.rcq;
	init_attr.rq_addr = qhp->wq.rq_addr;
	init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
	init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
		qhp->attr.mpa_attr.recv_marker_enabled |
		(qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
		(qhp->attr.mpa_attr.crc_enabled << 2);

	init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
			   uP_RI_QP_RDMA_WRITE_ENABLE |
			   uP_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
				    uP_RI_QP_FAST_REGISTER_ENABLE;

	init_attr.tcp_emss = qhp->ep->emss;
	init_attr.ord = qhp->attr.max_ord;
	init_attr.ird = qhp->attr.max_ird;
	init_attr.qp_dma_addr = qhp->wq.dma_addr;
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.rqe_count = iwch_rqes_posted(qhp);
	init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
	if (peer2peer) {
		init_attr.rtr_type = RTR_READ;
		if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
			init_attr.ord = 1;
		if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
			init_attr.ird = 1;
	} else
		init_attr.rtr_type = 0;
	init_attr.irs = qhp->ep->rcv_seq;
	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
	     "flags 0x%x qpcaps 0x%x\n", __func__,
	     init_attr.rq_addr, init_attr.rq_size,
	     init_attr.flags, init_attr.qpcaps);
	ret = cxio_rdma_init(&rhp->rdev, &init_attr);
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

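/*
 * QP state machine.  Moves the QP between IDLE, RTS, CLOSING, TERMINATE and
 * ERROR, and performs the side effects each transition requires: rdma_init
 * on IDLE->RTS, flushing on transitions to ERROR, and (outside the lock)
 * posting a TERMINATE, initiating a disconnect, or dropping the EP reference.
 */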
int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
		   enum iwch_qp_attr_mask mask,
		   struct iwch_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct iwch_qp_attributes newattr = qhp->attr;
	unsigned long flag;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct iwch_ep *ep = NULL;

	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
	     (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & IWCH_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord >
			    rhp->attr.max_rdma_read_qp_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & IWCH_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird >
			    rhp->attr.max_rdma_reads_per_qp) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case IWCH_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_RTS:
			if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = IWCH_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp, mask, attrs);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret)
				goto err;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			flush_qp(qhp, &flag);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_RTS:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = IWCH_QP_STATE_CLOSING;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				ep = qhp->ep;
				get_ep(&ep->com);
			}
			break;
		case IWCH_QP_STATE_TERMINATE:
			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
			if (qhp->ibqp.uobject)
				cxio_set_wq_in_error(&qhp->wq);
			if (!internal)
				terminate = 1;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				get_ep(&ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case IWCH_QP_STATE_IDLE:
			flush_qp(qhp, &flag);
			qhp->attr.state = IWCH_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case IWCH_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case IWCH_QP_STATE_ERROR:
		if (attrs->next_state != IWCH_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}

		if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
		    !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
			ret = -EINVAL;
			goto out;
		}
		qhp->attr.state = IWCH_QP_STATE_IDLE;
		memset(&qhp->attr, 0, sizeof(qhp->attr));
		break;
	case IWCH_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.qpid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	ep = qhp->ep;
	qhp->ep = NULL;
	qhp->attr.state = IWCH_QP_STATE_ERROR;
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp, &flag);
out:
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (terminate)
		iwch_post_terminate(qhp, NULL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		iwch_ep_disconnect(ep, abort, GFP_KERNEL);
		put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		put_ep(&ep->com);

	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

static int quiesce_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_quiesce_tid(qhp->ep);
	qhp->flags |= QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}

static int resume_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_resume_tid(qhp->ep);
	qhp->flags &= ~QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}

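/*
 * Quiesce (and below, resume) every QP whose send or receive CQ matches
 * the given CQ.
 */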
int iwch_quiesce_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i = 0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
			quiesce_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
			quiesce_qp(qhp);
	}
	return 0;
}

int iwch_resume_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i = 0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
			resume_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
			resume_qp(qhp);
	}
	return 0;
}