/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  post_send/recv, poll_cq, req_notify
 *
 *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm-powerpc/system.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"
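
/*
 * ehca_write_rwqe() translates one ib_recv_wr into a receive WQE in the
 * hardware queue format: it validates the SG list length against the
 * receive queue's capabilities, clears the WQE header, and copies wr_id
 * and the scatter/gather entries.
 */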
static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
                                  struct ehca_wqe *wqe_p,
                                  struct ib_recv_wr *recv_wr)
{
        u8 cnt_ds;
        if (unlikely((recv_wr->num_sge < 0) ||
                     (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
                ehca_gen_err("Invalid number of WQE SGE. "
                             "num_sge=%x max_nr_of_sg=%x",
                             recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
                return -EINVAL; /* invalid SG list length */
        }

        /* clear wqe header until sglist */
        memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

        wqe_p->work_request_id = recv_wr->wr_id;
        wqe_p->nr_of_data_seg = recv_wr->num_sge;

        for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
                wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
                        recv_wr->sg_list[cnt_ds].addr;
                wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
                        recv_wr->sg_list[cnt_ds].lkey;
                wqe_p->u.all_rcv.sg_list[cnt_ds].length =
                        recv_wr->sg_list[cnt_ds].length;
        }

        if (ehca_debug_level) {
                ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
                             ipz_rqueue);
                ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
        }

        return 0;
}
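
/*
 * Debug-only tracer for GSI send work requests; it is compiled in only
 * when DEBUG_GSI_SEND_WR is defined at build time.
 */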
#if defined(DEBUG_GSI_SEND_WR)

/* need ib_mad struct */
#include <rdma/ib_mad.h>

static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
{
        int idx = 0;
        int j;
        while (send_wr) {
                struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
                struct ib_sge *sge = send_wr->sg_list;
                ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
                             "send_flags=%x opcode=%x", idx, send_wr->wr_id,
                             send_wr->num_sge, send_wr->send_flags,
                             send_wr->opcode);
                if (mad_hdr) {
                        ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
                                     "mgmt_class=%x class_version=%x method=%x "
                                     "status=%x class_specific=%x tid=%lx "
                                     "attr_id=%x resv=%x attr_mod=%x",
                                     idx, mad_hdr->base_version,
                                     mad_hdr->mgmt_class,
                                     mad_hdr->class_version, mad_hdr->method,
                                     mad_hdr->status, mad_hdr->class_specific,
                                     mad_hdr->tid, mad_hdr->attr_id,
                                     mad_hdr->resv,
                                     mad_hdr->attr_mod);
                }
                for (j = 0; j < send_wr->num_sge; j++) {
                        u8 *data = (u8 *)abs_to_virt(sge->addr);
                        ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
                                     "lkey=%x",
                                     idx, j, data, sge->length, sge->lkey);
                        /* assume length is n*16 */
                        ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
                                 idx, j);
                        sge++;
                } /* eof for j */
                idx++;
                send_wr = send_wr->next;
        } /* eof while send_wr */
}

#endif /* DEBUG_GSI_SEND_WR */
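
/*
 * ehca_write_swqe() translates one ib_send_wr into a send WQE: it maps
 * the IB opcode to the hardware optype, sets completion/fence/immediate
 * flags, and fills the QP-type specific part (address vector and qkey for
 * UD/SMI/GSI, remote address and rkey for UC/RC) plus the gather list.
 */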
static inline int ehca_write_swqe(struct ehca_qp *qp,
                                  struct ehca_wqe *wqe_p,
                                  const struct ib_send_wr *send_wr)
{
        u32 idx;
        u64 dma_length;
        struct ehca_av *my_av;
        u32 remote_qkey = send_wr->wr.ud.remote_qkey;

        if (unlikely((send_wr->num_sge < 0) ||
                     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
                ehca_gen_err("Invalid number of WQE SGE. "
                             "num_sge=%x max_nr_of_sg=%x",
                             send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
                return -EINVAL; /* invalid SG list length */
        }

        /* clear wqe header until sglist */
        memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

        wqe_p->work_request_id = send_wr->wr_id;

        switch (send_wr->opcode) {
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_IMM:
                wqe_p->optype = WQE_OPTYPE_SEND;
                break;
        case IB_WR_RDMA_WRITE:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
                break;
        case IB_WR_RDMA_READ:
                wqe_p->optype = WQE_OPTYPE_RDMAREAD;
                break;
        default:
                ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
                return -EINVAL; /* invalid opcode */
        }

        wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;

        wqe_p->wr_flag = 0;

        if (send_wr->send_flags & IB_SEND_SIGNALED)
                wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;

        if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
            send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
                /* this might not work as long as HW does not support it */
                wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
                wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
        }

        wqe_p->nr_of_data_seg = send_wr->num_sge;

        switch (qp->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                /* no break is intentional here */
        case IB_QPT_UD:
                /* IB 1.2 spec C10-15 compliance */
                if (send_wr->wr.ud.remote_qkey & 0x80000000)
                        remote_qkey = qp->qkey;

                wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
                wqe_p->local_ee_context_qkey = remote_qkey;
                if (!send_wr->wr.ud.ah) {
                        ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
                        return -EINVAL;
                }
                my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
                wqe_p->u.ud_av.ud_av = my_av->av;

                /*
                 * omitted check of IB_SEND_INLINE
                 * since HW does not support it
                 */
                for (idx = 0; idx < send_wr->num_sge; idx++) {
                        wqe_p->u.ud_av.sg_list[idx].vaddr =
                                send_wr->sg_list[idx].addr;
                        wqe_p->u.ud_av.sg_list[idx].lkey =
                                send_wr->sg_list[idx].lkey;
                        wqe_p->u.ud_av.sg_list[idx].length =
                                send_wr->sg_list[idx].length;
                } /* eof for idx */
                if (qp->qp_type == IB_QPT_SMI ||
                    qp->qp_type == IB_QPT_GSI)
                        wqe_p->u.ud_av.ud_av.pmtu = 1;
                if (qp->qp_type == IB_QPT_GSI) {
                        wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
#ifdef DEBUG_GSI_SEND_WR
                        trace_send_wr_ud(send_wr);
#endif /* DEBUG_GSI_SEND_WR */
                }
                break;

        case IB_QPT_UC:
                if (send_wr->send_flags & IB_SEND_FENCE)
                        wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
                /* no break is intentional here */
        case IB_QPT_RC:
                /* TODO: atomic not implemented */
                wqe_p->u.nud.remote_virtual_adress =
                        send_wr->wr.rdma.remote_addr;
                wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;

                /*
                 * omitted checking of IB_SEND_INLINE
                 * since HW does not support it
                 */
                dma_length = 0;
                for (idx = 0; idx < send_wr->num_sge; idx++) {
                        wqe_p->u.nud.sg_list[idx].vaddr =
                                send_wr->sg_list[idx].addr;
                        wqe_p->u.nud.sg_list[idx].lkey =
                                send_wr->sg_list[idx].lkey;
                        wqe_p->u.nud.sg_list[idx].length =
                                send_wr->sg_list[idx].length;
                        dma_length += send_wr->sg_list[idx].length;
                } /* eof idx */
                wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;

                break;

        default:
                ehca_gen_err("Invalid qptype=%x", qp->qp_type);
                return -EINVAL;
        }

        if (ehca_debug_level) {
                ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
                ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
        }

        return 0;
}

/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
static inline void map_ib_wc_status(u32 cqe_status,
                                    enum ib_wc_status *wc_status)
{
        if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
                switch (cqe_status & 0x3F) {
                case 0x01:
                case 0x21:
                        *wc_status = IB_WC_LOC_LEN_ERR;
                        break;
                case 0x02:
                case 0x22:
                        *wc_status = IB_WC_LOC_QP_OP_ERR;
                        break;
                case 0x03:
                case 0x23:
                        *wc_status = IB_WC_LOC_EEC_OP_ERR;
                        break;
                case 0x04:
                case 0x24:
                        *wc_status = IB_WC_LOC_PROT_ERR;
                        break;
                case 0x05:
                case 0x25:
                        *wc_status = IB_WC_WR_FLUSH_ERR;
                        break;
                case 0x06:
                        *wc_status = IB_WC_MW_BIND_ERR;
                        break;
                case 0x07: /* remote error - look into bits 20:24 */
                        switch ((cqe_status
                                 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
                        case 0x0:
                                /*
                                 * PSN Sequence Error!
                                 * couldn't find a matching status!
                                 */
                                *wc_status = IB_WC_GENERAL_ERR;
                                break;
                        case 0x1:
                                *wc_status = IB_WC_REM_INV_REQ_ERR;
                                break;
                        case 0x2:
                                *wc_status = IB_WC_REM_ACCESS_ERR;
                                break;
                        case 0x3:
                                *wc_status = IB_WC_REM_OP_ERR;
                                break;
                        case 0x4:
                                *wc_status = IB_WC_REM_INV_RD_REQ_ERR;
                                break;
                        }
                        break;
                case 0x08:
                        *wc_status = IB_WC_RETRY_EXC_ERR;
                        break;
                case 0x09:
                        *wc_status = IB_WC_RNR_RETRY_EXC_ERR;
                        break;
                case 0x0A:
                case 0x2D:
                        *wc_status = IB_WC_REM_ABORT_ERR;
                        break;
                case 0x0B:
                case 0x2E:
                        *wc_status = IB_WC_INV_EECN_ERR;
                        break;
                case 0x0C:
                case 0x2F:
                        *wc_status = IB_WC_INV_EEC_STATE_ERR;
                        break;
                case 0x0D:
                        *wc_status = IB_WC_BAD_RESP_ERR;
                        break;
                case 0x10:
                        /* WQE purged */
                        *wc_status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        *wc_status = IB_WC_FATAL_ERR;
                }
        } else
                *wc_status = IB_WC_SUCCESS;
}
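
/*
 * ehca_post_send() posts a chain of send work requests to the send queue.
 * Usage sketch (illustrative consumer code, not part of this driver; the
 * verbs core dispatches ib_post_send() to this function, and dma_addr,
 * buf_len and lkey stand for a registered buffer):
 *
 *      struct ib_sge sge = { .addr = dma_addr, .length = buf_len,
 *                            .lkey = lkey };
 *      struct ib_send_wr wr = { .wr_id = 1, .opcode = IB_WR_SEND,
 *                               .sg_list = &sge, .num_sge = 1,
 *                               .send_flags = IB_SEND_SIGNALED };
 *      struct ib_send_wr *bad_wr;
 *      int err = ib_post_send(ibqp, &wr, &bad_wr);
 */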
int ehca_post_send(struct ib_qp *qp,
                   struct ib_send_wr *send_wr,
                   struct ib_send_wr **bad_send_wr)
{
        struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
        struct ib_send_wr *cur_send_wr;
        struct ehca_wqe *wqe_p;
        int wqe_cnt = 0;
        int ret = 0;
        unsigned long flags;

        /* LOCK the QUEUE */
        spin_lock_irqsave(&my_qp->spinlock_s, flags);

        /* loop processes list of send reqs */
        for (cur_send_wr = send_wr; cur_send_wr != NULL;
             cur_send_wr = cur_send_wr->next) {
                u64 start_offset = my_qp->ipz_squeue.current_q_offset;
                /* get pointer next to free WQE */
                wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
                if (unlikely(!wqe_p)) {
                        /* too many posted work requests: queue overflow */
                        if (bad_send_wr)
                                *bad_send_wr = cur_send_wr;
                        if (wqe_cnt == 0) {
                                ret = -ENOMEM;
                                ehca_err(qp->device, "Too many posted WQEs "
                                         "qp_num=%x", qp->qp_num);
                        }
                        goto post_send_exit0;
                }
                /* write a SEND WQE into the QUEUE */
                ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
                /*
                 * if something failed,
                 * reset the free entry pointer to the start value
                 */
                if (unlikely(ret)) {
                        my_qp->ipz_squeue.current_q_offset = start_offset;
                        *bad_send_wr = cur_send_wr;
                        if (wqe_cnt == 0) {
                                ret = -EINVAL;
                                ehca_err(qp->device, "Could not write WQE "
                                         "qp_num=%x", qp->qp_num);
                        }
                        goto post_send_exit0;
                }
                wqe_cnt++;
                ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
                         my_qp, qp->qp_num, wqe_cnt);
        } /* eof for cur_send_wr */

post_send_exit0:
        iosync(); /* serialize GAL register access */
        hipz_update_sqa(my_qp, wqe_cnt);
        spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
        return ret;
}
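
/*
 * internal_post_recv() is the shared worker behind ehca_post_recv() and
 * ehca_post_srq_recv(); it mirrors the post_send path above for the
 * receive queue of a QP or SRQ.
 */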
static int internal_post_recv(struct ehca_qp *my_qp,
                              struct ib_device *dev,
                              struct ib_recv_wr *recv_wr,
                              struct ib_recv_wr **bad_recv_wr)
{
        struct ib_recv_wr *cur_recv_wr;
        struct ehca_wqe *wqe_p;
        int wqe_cnt = 0;
        int ret = 0;
        unsigned long flags;

        if (unlikely(!HAS_RQ(my_qp))) {
                ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
                         my_qp, my_qp->real_qp_num, my_qp->ext_type);
                return -ENODEV;
        }

        /* LOCK the QUEUE */
        spin_lock_irqsave(&my_qp->spinlock_r, flags);

        /* loop processes list of recv reqs */
        for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
             cur_recv_wr = cur_recv_wr->next) {
                u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
                /* get pointer next to free WQE */
                wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
                if (unlikely(!wqe_p)) {
                        /* too many posted work requests: queue overflow */
                        if (bad_recv_wr)
                                *bad_recv_wr = cur_recv_wr;
                        if (wqe_cnt == 0) {
                                ret = -ENOMEM;
                                ehca_err(dev, "Too many posted WQEs "
                                         "qp_num=%x", my_qp->real_qp_num);
                        }
                        goto post_recv_exit0;
                }
                /* write a RECV WQE into the QUEUE */
                ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
                /*
                 * if something failed,
                 * reset the free entry pointer to the start value
                 */
                if (unlikely(ret)) {
                        my_qp->ipz_rqueue.current_q_offset = start_offset;
                        *bad_recv_wr = cur_recv_wr;
                        if (wqe_cnt == 0) {
                                ret = -EINVAL;
                                ehca_err(dev, "Could not write WQE "
                                         "qp_num=%x", my_qp->real_qp_num);
                        }
                        goto post_recv_exit0;
                }
                wqe_cnt++;
                ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
                         my_qp, my_qp->real_qp_num, wqe_cnt);
        } /* eof for cur_recv_wr */

post_recv_exit0:
        iosync(); /* serialize GAL register access */
        hipz_update_rqa(my_qp, wqe_cnt);
        spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
        return ret;
}
int ehca_post_recv(struct ib_qp *qp,
                   struct ib_recv_wr *recv_wr,
                   struct ib_recv_wr **bad_recv_wr)
{
        return internal_post_recv(container_of(qp, struct ehca_qp, ib_qp),
                                  qp->device, recv_wr, bad_recv_wr);
}

int ehca_post_srq_recv(struct ib_srq *srq,
                       struct ib_recv_wr *recv_wr,
                       struct ib_recv_wr **bad_recv_wr)
{
        return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
                                  srq->device, recv_wr, bad_recv_wr);
}

/*
 * ib_wc_opcode table converts ehca wc opcode to ib
 * Since we use zero to indicate invalid opcode, the actual ib opcode must
 * be decremented by one
 */
static const u8 ib_wc_opcode[255] = {
        [0x01] = IB_WC_RECV+1,
        [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
        [0x04] = IB_WC_BIND_MW+1,
        [0x08] = IB_WC_FETCH_ADD+1,
        [0x10] = IB_WC_COMP_SWAP+1,
        [0x20] = IB_WC_RDMA_WRITE+1,
        [0x40] = IB_WC_RDMA_READ+1,
        [0x80] = IB_WC_SEND+1
};
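
/*
 * Example: a CQE with optype 0x80 yields IB_WC_SEND+1 from this table, and
 * the -1 in ehca_poll_cq_one() below recovers IB_WC_SEND; an optype with no
 * table entry yields 0 and is rejected as invalid after the decrement.
 */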

/* internal function to poll one entry of cq */
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
        int ret = 0;
        struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
        struct ehca_cqe *cqe;
        struct ehca_qp *my_qp;
        int cqe_count = 0;

poll_cq_one_read_cqe:
        cqe = (struct ehca_cqe *)
                ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
        if (!cqe) {
                ret = -EAGAIN;
                ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
                         "cq_num=%x ret=%x", my_cq, my_cq->cq_number, ret);
                goto poll_cq_one_exit0;
        }

        /* prevents loads being reordered across this point */
        rmb();

        cqe_count++;
        if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
                struct ehca_qp *qp;
                int purgeflag;
                unsigned long flags;

                qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
                if (!qp) {
                        ehca_err(cq->device, "cq_num=%x qp_num=%x "
                                 "could not find qp -> ignore cqe",
                                 my_cq->cq_number, cqe->local_qp_number);
                        ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
                                 my_cq->cq_number, cqe->local_qp_number);
                        /* ignore this purged cqe */
                        goto poll_cq_one_read_cqe;
                }
                spin_lock_irqsave(&qp->spinlock_s, flags);
                purgeflag = qp->sqerr_purgeflag;
                spin_unlock_irqrestore(&qp->spinlock_s, flags);

                if (purgeflag) {
                        ehca_dbg(cq->device,
                                 "Got CQE with purged bit qp_num=%x src_qp=%x",
                                 cqe->local_qp_number, cqe->remote_qp_number);
                        if (ehca_debug_level)
                                ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
                                         cqe->local_qp_number,
                                         cqe->remote_qp_number);
                        /*
                         * ignore this to avoid double cqes of bad wqe
                         * that caused sqe and turn off purge flag
                         */
                        qp->sqerr_purgeflag = 0;
                        goto poll_cq_one_read_cqe;
                }
        }

        /* tracing cqe */
        if (unlikely(ehca_debug_level)) {
                ehca_dbg(cq->device,
                         "Received COMPLETION ehca_cq=%p cq_num=%x -----",
                         my_cq, my_cq->cq_number);
                ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
                         my_cq, my_cq->cq_number);
                ehca_dbg(cq->device,
                         "ehca_cq=%p cq_num=%x -------------------------",
                         my_cq, my_cq->cq_number);
        }

        /* we got a completion! */
        wc->wr_id = cqe->work_request_id;

        /* eval ib_wc_opcode */
        wc->opcode = ib_wc_opcode[cqe->optype]-1;
        if (unlikely(wc->opcode == -1)) {
                ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
                         "ehca_cq=%p cq_num=%x",
                         cqe->optype, cqe->status, my_cq, my_cq->cq_number);
                /* dump cqe for other infos */
                ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
                         my_cq, my_cq->cq_number);
                /* update also queue adder to throw away this entry!!! */
                goto poll_cq_one_exit0;
        }

        /* eval ib_wc_status */
        if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
                /* complete with errors */
                map_ib_wc_status(cqe->status, &wc->status);
                wc->vendor_err = wc->status;
        } else
                wc->status = IB_WC_SUCCESS;

        read_lock(&ehca_qp_idr_lock);
        my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
        wc->qp = &my_qp->ib_qp;
        read_unlock(&ehca_qp_idr_lock);

        wc->byte_len = cqe->nr_bytes_transferred;
        wc->pkey_index = cqe->pkey_index;
        wc->slid = cqe->rlid;
        wc->dlid_path_bits = cqe->dlid;
        wc->src_qp = cqe->remote_qp_number;
        wc->wc_flags = cqe->w_completion_flags;
        wc->imm_data = cpu_to_be32(cqe->immediate_data);
        wc->sl = cqe->service_level;

        if (unlikely(wc->status != IB_WC_SUCCESS))
                ehca_dbg(cq->device,
                         "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
                         "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
                         "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
                         cqe->status, cqe->local_qp_number,
                         cqe->remote_qp_number, cqe->work_request_id, cqe);

poll_cq_one_exit0:
        if (cqe_count > 0)
                hipz_update_feca(my_cq, cqe_count);

        return ret;
}
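
/*
 * ehca_poll_cq() drains up to num_entries completions under the CQ lock.
 * Usage sketch (illustrative consumer code via the verbs wrapper
 * ib_poll_cq(); process_wc is a hypothetical helper):
 *
 *      struct ib_wc wc;
 *      while (ib_poll_cq(ibcq, 1, &wc) > 0)
 *              process_wc(&wc);
 */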
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
        struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
        int nr;
        struct ib_wc *current_wc = wc;
        int ret = 0;
        unsigned long flags;

        if (num_entries < 1) {
                ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
                         "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
                ret = -EINVAL;
                goto poll_cq_exit0;
        }

        spin_lock_irqsave(&my_cq->spinlock, flags);
        for (nr = 0; nr < num_entries; nr++) {
                ret = ehca_poll_cq_one(cq, current_wc);
                if (ret)
                        break;
                current_wc++;
        } /* eof for nr */
        spin_unlock_irqrestore(&my_cq->spinlock, flags);
        if (ret == -EAGAIN || !ret)
                ret = nr;

poll_cq_exit0:
        return ret;
}
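
/*
 * ehca_req_notify_cq() arms the CQ event registers (n0 for solicited-only,
 * n1 for the next completion). With IB_CQ_REPORT_MISSED_EVENTS it returns
 * a positive value if valid CQEs are already queued, so the caller should
 * poll again instead of sleeping. Usage sketch (illustrative; repoll is a
 * hypothetical label in the caller):
 *
 *      if (ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP |
 *                           IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *              goto repoll;
 */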
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
        struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
        int ret = 0;

        switch (notify_flags & IB_CQ_SOLICITED_MASK) {
        case IB_CQ_SOLICITED:
                hipz_set_cqx_n0(my_cq, 1);
                break;
        case IB_CQ_NEXT_COMP:
                hipz_set_cqx_n1(my_cq, 1);
                break;
        default:
                return -EINVAL;
        }

        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
                unsigned long spl_flags;
                spin_lock_irqsave(&my_cq->spinlock, spl_flags);
                ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
                spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
        }

        return ret;
}