/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  post_send/recv, poll_cq, req_notify
 *
 *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/system.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"

/* in RC traffic, insert an empty RDMA READ every this many packets */
#define ACK_CIRC_THRESHOLD 2000000
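
/*
 * See the "unsolicited ack circumvention" logic in ehca_write_swqe() and
 * ehca_post_send() below: once packet_count crosses this threshold, an
 * empty RDMA READ is inserted to force the peer to acknowledge.
 */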

static u64 replace_wr_id(u64 wr_id, u16 idx)
{
	u64 ret;

	ret = wr_id & ~QMAP_IDX_MASK;
	ret |= idx & QMAP_IDX_MASK;

	return ret;
}

static u16 get_app_wr_id(u64 wr_id)
{
	return wr_id & QMAP_IDX_MASK;
}
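
/*
 * Illustration (not in the original source; assumes QMAP_IDX_MASK covers
 * the low 16 bits): posting wr_id 0xCAFE000000001234 at queue-map index
 * 0x0042 stores 0xCAFE000000000042 in the WQE, while the application's
 * low bits (0x1234) are kept in the queue map and restored on completion
 * via replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id).
 */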

static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
				  struct ehca_wqe *wqe_p,
				  struct ib_recv_wr *recv_wr,
				  u32 rq_map_idx)
{
	u8 cnt_ds;

	if (unlikely((recv_wr->num_sge < 0) ||
		     (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			     "num_sge=%x max_nr_of_sg=%x",
			     recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = replace_wr_id(recv_wr->wr_id, rq_map_idx);
	wqe_p->nr_of_data_seg = recv_wr->num_sge;

	for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
		wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
			recv_wr->sg_list[cnt_ds].addr;
		wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
			recv_wr->sg_list[cnt_ds].lkey;
		wqe_p->u.all_rcv.sg_list[cnt_ds].length =
			recv_wr->sg_list[cnt_ds].length;
	}

	if (ehca_debug_level >= 3) {
		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
			     ipz_rqueue);
		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
	}

	return 0;
}
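
/*
 * Added note: the dump length 16 * (6 + nr_of_data_seg) reflects the WQE
 * layout in 16-byte chunks -- a fixed header followed by one chunk per
 * scatter/gather entry.
 */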

#if defined(DEBUG_GSI_SEND_WR)

/* need ib_mad struct */
#include <rdma/ib_mad.h>

static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
{
	int idx = 0;
	int j;

	while (send_wr) {
		struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
		struct ib_sge *sge = send_wr->sg_list;
		ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
			     "send_flags=%x opcode=%x", idx, send_wr->wr_id,
			     send_wr->num_sge, send_wr->send_flags,
			     send_wr->opcode);
		if (mad_hdr) {
			ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
				     "mgmt_class=%x class_version=%x method=%x "
				     "status=%x class_specific=%x tid=%lx "
				     "attr_id=%x resv=%x attr_mod=%x",
				     idx, mad_hdr->base_version,
				     mad_hdr->mgmt_class,
				     mad_hdr->class_version, mad_hdr->method,
				     mad_hdr->status, mad_hdr->class_specific,
				     mad_hdr->tid, mad_hdr->attr_id,
				     mad_hdr->resv, mad_hdr->attr_mod);
		}
		for (j = 0; j < send_wr->num_sge; j++) {
			u8 *data = (u8 *)abs_to_virt(sge->addr);
			ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
				     "lkey=%x",
				     idx, j, data, sge->length, sge->lkey);
			/* assume length is n*16 */
			ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
				 idx, j);
			sge++;
		} /* eof for j */
		idx++;
		send_wr = send_wr->next;
	} /* eof while send_wr */
}

#endif /* DEBUG_GSI_SEND_WR */

static inline int ehca_write_swqe(struct ehca_qp *qp,
				  struct ehca_wqe *wqe_p,
				  const struct ib_send_wr *send_wr,
				  u32 sq_map_idx,
				  int hidden)
{
	u32 idx;
	u64 dma_length;
	struct ehca_av *my_av;
	u32 remote_qkey = send_wr->wr.ud.remote_qkey;
	struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];

	if (unlikely((send_wr->num_sge < 0) ||
		     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			     "num_sge=%x max_nr_of_sg=%x",
			     send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = replace_wr_id(send_wr->wr_id, sq_map_idx);

	qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
	qmap_entry->reported = 0;
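
	/*
	 * Added note: the sq_map entry saved above lets ehca_poll_cq_one()
	 * and generate_flush_cqes() hand the application's original wr_id
	 * back, since the WQE itself carries the queue-map index instead.
	 */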

	switch (send_wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_SEND;
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
		break;
	case IB_WR_RDMA_READ:
		wqe_p->optype = WQE_OPTYPE_RDMAREAD;
		break;
	default:
		ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
		return -EINVAL; /* invalid opcode */
	}

	wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;

	wqe_p->wr_flag = 0;

	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
	    qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
	    && !hidden)
		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;

	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		/* this might not work as long as HW does not support it */
		wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
		wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
	}

	wqe_p->nr_of_data_seg = send_wr->num_sge;

	switch (qp->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		/* no break is intentional here */
	case IB_QPT_UD:
		/* IB 1.2 spec C10-15 compliance */
		if (send_wr->wr.ud.remote_qkey & 0x80000000)
			remote_qkey = qp->qkey;

		wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
		wqe_p->local_ee_context_qkey = remote_qkey;
		if (unlikely(!send_wr->wr.ud.ah)) {
			ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
			return -EINVAL;
		}
		if (unlikely(send_wr->wr.ud.remote_qpn == 0)) {
			ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
			return -EINVAL;
		}
		my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
		wqe_p->u.ud_av.ud_av = my_av->av;

		/*
		 * omitted check of IB_SEND_INLINE
		 * since HW does not support it
		 */
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.ud_av.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.ud_av.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.ud_av.sg_list[idx].length =
				send_wr->sg_list[idx].length;
		} /* eof for idx */
		if (qp->qp_type == IB_QPT_SMI ||
		    qp->qp_type == IB_QPT_GSI)
			wqe_p->u.ud_av.ud_av.pmtu = 1;
		if (qp->qp_type == IB_QPT_GSI) {
			wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
#ifdef DEBUG_GSI_SEND_WR
			trace_send_wr_ud(send_wr);
#endif /* DEBUG_GSI_SEND_WR */
		}
		break;

	case IB_QPT_UC:
		if (send_wr->send_flags & IB_SEND_FENCE)
			wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
		/* no break is intentional here */
	case IB_QPT_RC:
		/* TODO: atomic not implemented */
		wqe_p->u.nud.remote_virtual_adress =
			send_wr->wr.rdma.remote_addr;
		wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;

		/*
		 * omitted checking of IB_SEND_INLINE
		 * since HW does not support it
		 */
		dma_length = 0;
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.nud.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.nud.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.nud.sg_list[idx].length =
				send_wr->sg_list[idx].length;
			dma_length += send_wr->sg_list[idx].length;
		} /* eof for idx */
		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;

		/* unsolicited ack circumvention */
		if (send_wr->opcode == IB_WR_RDMA_READ) {
			/* on RDMA read, switch on and reset counters */
			qp->message_count = qp->packet_count = 0;
			qp->unsol_ack_circ = 1;
		} else
			/* else estimate #packets */
			qp->packet_count += (dma_length >> qp->mtu_shift) + 1;

		break;

	default:
		ehca_gen_err("Invalid qptype=%x", qp->qp_type);
		return -EINVAL;
	}

	if (ehca_debug_level >= 3) {
		ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
	}

	return 0;
}

/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
static inline void map_ib_wc_status(u32 cqe_status,
				    enum ib_wc_status *wc_status)
{
	if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
		switch (cqe_status & 0x3F) {
		case 0x01:
		case 0x21:
			*wc_status = IB_WC_LOC_LEN_ERR;
			break;
		case 0x02:
		case 0x22:
			*wc_status = IB_WC_LOC_QP_OP_ERR;
			break;
		case 0x03:
		case 0x23:
			*wc_status = IB_WC_LOC_EEC_OP_ERR;
			break;
		case 0x04:
		case 0x24:
			*wc_status = IB_WC_LOC_PROT_ERR;
			break;
		case 0x05:
		case 0x25:
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		case 0x06:
			*wc_status = IB_WC_MW_BIND_ERR;
			break;
		case 0x07: /* remote error - look into bits 20:24 */
			switch ((cqe_status
				 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
			case 0x0:
				/*
				 * PSN Sequence Error!
				 * couldn't find a matching status!
				 */
				*wc_status = IB_WC_GENERAL_ERR;
				break;
			case 0x1:
				*wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case 0x2:
				*wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case 0x3:
				*wc_status = IB_WC_REM_OP_ERR;
				break;
			case 0x4:
				*wc_status = IB_WC_REM_INV_RD_REQ_ERR;
				break;
			}
			break;
		case 0x08:
			*wc_status = IB_WC_RETRY_EXC_ERR;
			break;
		case 0x09:
			*wc_status = IB_WC_RNR_RETRY_EXC_ERR;
			break;
		case 0x0A:
		case 0x2D:
			*wc_status = IB_WC_REM_ABORT_ERR;
			break;
		case 0x0B:
		case 0x2E:
			*wc_status = IB_WC_INV_EECN_ERR;
			break;
		case 0x0C:
		case 0x2F:
			*wc_status = IB_WC_INV_EEC_STATE_ERR;
			break;
		case 0x0D:
			*wc_status = IB_WC_BAD_RESP_ERR;
			break;
		case 0x10:
			/* WQE purged */
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			*wc_status = IB_WC_FATAL_ERR;
		}
	} else
		*wc_status = IB_WC_SUCCESS;
}
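
/*
 * Example (added, illustrative): any cqe_status with WC_STATUS_ERROR_BIT
 * set and (cqe_status & 0x3F) == 0x07 is a remote error; the sub-switch
 * above then selects IB_WC_REM_INV_REQ_ERR, IB_WC_REM_ACCESS_ERR, etc.
 * from the WC_STATUS_REMOTE_ERROR_FLAGS bits.
 */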

static inline int post_one_send(struct ehca_qp *my_qp,
				struct ib_send_wr *cur_send_wr,
				struct ib_send_wr **bad_send_wr,
				int hidden)
{
	struct ehca_wqe *wqe_p;
	int ret;
	u32 sq_map_idx;
	u64 start_offset = my_qp->ipz_squeue.current_q_offset;

	/* get pointer next to free WQE */
	wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
	if (unlikely(!wqe_p)) {
		/* too many posted work requests: queue overflow */
		if (bad_send_wr)
			*bad_send_wr = cur_send_wr;
		ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
			 "qp_num=%x", my_qp->ib_qp.qp_num);
		return -ENOMEM;
	}

	/*
	 * Get the index of the WQE in the send queue. The same index is used
	 * for writing into the sq_map.
	 */
	sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;

	/* write a SEND WQE into the QUEUE */
	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
	/*
	 * if something failed,
	 * reset the free entry pointer to the start value
	 */
	if (unlikely(ret)) {
		my_qp->ipz_squeue.current_q_offset = start_offset;
		if (bad_send_wr)
			*bad_send_wr = cur_send_wr;
		ehca_err(my_qp->ib_qp.device, "Could not write WQE "
			 "qp_num=%x", my_qp->ib_qp.qp_num);
		return -EINVAL;
	}

	return 0;
}

int ehca_post_send(struct ib_qp *qp,
		   struct ib_send_wr *send_wr,
		   struct ib_send_wr **bad_send_wr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ib_send_wr *cur_send_wr;
	int wqe_cnt = 0;
	int ret = 0;
	unsigned long flags;

	/* Reject WR if QP is in RESET, INIT or RTR state */
	if (unlikely(my_qp->state < IB_QPS_RTS)) {
		ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
			 my_qp->state, qp->qp_num);
		return -EINVAL;
	}

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_s, flags);

	/* Send an empty extra RDMA read if:
	 *  1) there has been an RDMA read on this connection before
	 *  2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
	 *  3) we can be sure that any previous extra RDMA read has been
	 *     processed so we don't overflow the SQ
	 */
	if (unlikely(my_qp->unsol_ack_circ &&
		     my_qp->packet_count > ACK_CIRC_THRESHOLD &&
		     my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
		/* insert an empty RDMA READ to fix up the remote QP state */
		struct ib_send_wr circ_wr;
		memset(&circ_wr, 0, sizeof(circ_wr));
		circ_wr.opcode = IB_WR_RDMA_READ;
		post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
		wqe_cnt++;
		ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
		my_qp->message_count = my_qp->packet_count = 0;
	}

	/* loop processes list of send reqs */
	for (cur_send_wr = send_wr; cur_send_wr != NULL;
	     cur_send_wr = cur_send_wr->next) {
		ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
		if (unlikely(ret)) {
			/* if one or more WQEs were successful, don't fail */
			if (wqe_cnt)
				ret = 0;
			goto post_send_exit0;
		}
		wqe_cnt++;
	} /* eof for cur_send_wr */

post_send_exit0:
	iosync(); /* serialize GAL register access */
	hipz_update_sqa(my_qp, wqe_cnt);
	if (unlikely(ret || ehca_debug_level >= 2))
		ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
			 my_qp, qp->qp_num, wqe_cnt, ret);
	my_qp->message_count += wqe_cnt;
	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
	return ret;
}
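
/*
 * Usage sketch (added; not part of this driver): consumers reach
 * ehca_post_send() through the generic verbs entry point. A minimal
 * signaled send with one SGE might look like the following, where
 * dma_addr, len and app_cookie are placeholders for values the consumer
 * obtains from its own registered MR and bookkeeping:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = app_cookie,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	}, *bad_wr;
 *	int err = ib_post_send(qp, &wr, &bad_wr);
 */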

static int internal_post_recv(struct ehca_qp *my_qp,
			      struct ib_device *dev,
			      struct ib_recv_wr *recv_wr,
			      struct ib_recv_wr **bad_recv_wr)
{
	struct ib_recv_wr *cur_recv_wr;
	struct ehca_wqe *wqe_p;
	int wqe_cnt = 0;
	int ret = 0;
	u32 rq_map_idx;
	unsigned long flags;
	struct ehca_qmap_entry *qmap_entry;

	if (unlikely(!HAS_RQ(my_qp))) {
		ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
			 my_qp, my_qp->real_qp_num, my_qp->ext_type);
		return -ENODEV;
	}

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_r, flags);

	/* loop processes list of recv reqs */
	for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
	     cur_recv_wr = cur_recv_wr->next) {
		u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
		/* get pointer next to free WQE */
		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
		if (unlikely(!wqe_p)) {
			/* too many posted work requests: queue overflow */
			if (bad_recv_wr)
				*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -ENOMEM;
				ehca_err(dev, "Too many posted WQEs "
					 "qp_num=%x", my_qp->real_qp_num);
			}
			goto post_recv_exit0;
		}
		/*
		 * Get the index of the WQE in the recv queue. The same index
		 * is used for writing into the rq_map.
		 */
		rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;

		/* write a RECV WQE into the QUEUE */
		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr,
				      rq_map_idx);
		/*
		 * if something failed,
		 * reset the free entry pointer to the start value
		 */
		if (unlikely(ret)) {
			my_qp->ipz_rqueue.current_q_offset = start_offset;
			*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -EINVAL;
				ehca_err(dev, "Could not write WQE "
					 "qp_num=%x", my_qp->real_qp_num);
			}
			goto post_recv_exit0;
		}

		qmap_entry = &my_qp->rq_map.map[rq_map_idx];
		qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
		qmap_entry->reported = 0;

		wqe_cnt++;
	} /* eof for cur_recv_wr */

post_recv_exit0:
	iosync(); /* serialize GAL register access */
	hipz_update_rqa(my_qp, wqe_cnt);
	if (unlikely(ret || ehca_debug_level >= 2))
		ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
			 my_qp, my_qp->real_qp_num, wqe_cnt, ret);
	spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
	return ret;
}

int ehca_post_recv(struct ib_qp *qp,
		   struct ib_recv_wr *recv_wr,
		   struct ib_recv_wr **bad_recv_wr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);

	/* Reject WR if QP is in RESET state */
	if (unlikely(my_qp->state == IB_QPS_RESET)) {
		ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
			 my_qp->state, qp->qp_num);
		return -EINVAL;
	}

	return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr);
}

int ehca_post_srq_recv(struct ib_srq *srq,
		       struct ib_recv_wr *recv_wr,
		       struct ib_recv_wr **bad_recv_wr)
{
	return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
				  srq->device, recv_wr, bad_recv_wr);
}

/*
 * ib_wc_opcode table converts ehca wc opcode to ib
 * Since we use zero to indicate an invalid opcode, the actual ib opcode must
 * be decremented after the table lookup.
 */
static const u8 ib_wc_opcode[255] = {
	[0x01] = IB_WC_RECV+1,
	[0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
	[0x04] = IB_WC_BIND_MW+1,
	[0x08] = IB_WC_FETCH_ADD+1,
	[0x10] = IB_WC_COMP_SWAP+1,
	[0x20] = IB_WC_RDMA_WRITE+1,
	[0x40] = IB_WC_RDMA_READ+1,
	[0x80] = IB_WC_SEND+1
};
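
/*
 * Example (added): ib_wc_opcode[0x80] - 1 yields IB_WC_SEND; an opcode
 * the hardware never produces, e.g. 0x03, hits an unset (zero) entry,
 * so the lookup in ehca_poll_cq_one() below sees 0 - 1 and reports the
 * CQE as invalid.
 */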

/* internal function to poll one entry of cq */
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
	int ret = 0, qmap_tail_idx;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	struct ehca_cqe *cqe;
	struct ehca_qp *my_qp;
	struct ehca_qmap_entry *qmap_entry;
	struct ehca_queue_map *qmap;
	int cqe_count = 0, is_error;

repoll:
	cqe = (struct ehca_cqe *)
		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
	if (!cqe) {
		ret = -EAGAIN;
		if (ehca_debug_level >= 3)
			ehca_dbg(cq->device, "Completion queue is empty "
				 "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
		goto poll_cq_one_exit0;
	}

	/* prevents loads being reordered across this point */
	rmb();

	cqe_count++;
	if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
		struct ehca_qp *qp;
		int purgeflag;
		unsigned long flags;

		qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
		if (!qp) {
			ehca_err(cq->device, "cq_num=%x qp_num=%x "
				 "could not find qp -> ignore cqe",
				 my_cq->cq_number, cqe->local_qp_number);
			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
				 my_cq->cq_number, cqe->local_qp_number);
			/* ignore this purged cqe */
			goto repoll;
		}
		spin_lock_irqsave(&qp->spinlock_s, flags);
		purgeflag = qp->sqerr_purgeflag;
		spin_unlock_irqrestore(&qp->spinlock_s, flags);

		if (purgeflag) {
			ehca_dbg(cq->device,
				 "Got CQE with purged bit qp_num=%x src_qp=%x",
				 cqe->local_qp_number, cqe->remote_qp_number);
			if (ehca_debug_level >= 2)
				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
					 cqe->local_qp_number,
					 cqe->remote_qp_number);
			/*
			 * Ignore this CQE to avoid duplicate CQEs for the
			 * bad WQE that caused the SQ error; turn off the
			 * purge flag.
			 */
			qp->sqerr_purgeflag = 0;
			goto repoll;
		}
	}

	is_error = cqe->status & WC_STATUS_ERROR_BIT;

	/* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
	if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
		ehca_dbg(cq->device,
			 "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
			 is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x -------------------------",
			 my_cq, my_cq->cq_number);
	}

	read_lock(&ehca_qp_idr_lock);
	my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
	read_unlock(&ehca_qp_idr_lock);
	if (!my_qp)
		goto repoll;
	wc->qp = &my_qp->ib_qp;

	if (is_error) {
		/*
		 * set left_to_poll to 0 because in error state, we will not
		 * get any additional CQEs
		 */
		ehca_add_to_err_list(my_qp, 1);
		my_qp->sq_map.left_to_poll = 0;

		if (HAS_RQ(my_qp))
			ehca_add_to_err_list(my_qp, 0);
		my_qp->rq_map.left_to_poll = 0;
	}

	qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
		/* We got a send completion. */
		qmap = &my_qp->sq_map;
	else
		/* We got a receive completion. */
		qmap = &my_qp->rq_map;

	qmap_entry = &qmap->map[qmap_tail_idx];
	if (qmap_entry->reported) {
		ehca_warn(cq->device, "Double cqe on qp_num=%#x",
			  my_qp->real_qp_num);
		/* found a double cqe, discard it and read next one */
		goto repoll;
	}

	wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
	qmap_entry->reported = 1;

	/* this is a proper completion, we need to advance the tail pointer */
	if (++qmap->tail == qmap->entries)
		qmap->tail = 0;

	/* if left_to_poll is decremented to 0, add the QP to the error list */
	if (qmap->left_to_poll > 0) {
		qmap->left_to_poll--;
		if ((my_qp->sq_map.left_to_poll == 0) &&
		    (my_qp->rq_map.left_to_poll == 0)) {
			ehca_add_to_err_list(my_qp, 1);
			if (HAS_RQ(my_qp))
				ehca_add_to_err_list(my_qp, 0);
		}
	}

	/* eval ib_wc_opcode */
	wc->opcode = ib_wc_opcode[cqe->optype]-1;
	if (unlikely(wc->opcode == -1)) {
		ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
			 "ehca_cq=%p cq_num=%x",
			 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
		/* dump cqe for other infos */
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		/* the queue pointer was already advanced, so just discard */
		goto repoll;
	}

	/* eval ib_wc_status */
	if (unlikely(is_error)) {
		/* complete with errors */
		map_ib_wc_status(cqe->status, &wc->status);
		wc->vendor_err = wc->status;
	} else
		wc->status = IB_WC_SUCCESS;

	wc->byte_len = cqe->nr_bytes_transferred;
	wc->pkey_index = cqe->pkey_index;
	wc->slid = cqe->rlid;
	wc->dlid_path_bits = cqe->dlid;
	wc->src_qp = cqe->remote_qp_number;
	wc->wc_flags = cqe->w_completion_flags;
	wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
	wc->sl = cqe->service_level;

poll_cq_one_exit0:
	if (cqe_count > 0)
		hipz_update_feca(my_cq, cqe_count);

	return ret;
}
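
/*
 * Added note: generate_flush_cqes() synthesizes IB_WC_WR_FLUSH_ERR work
 * completions for WQEs that were posted but will never complete in
 * hardware because the QP entered the error state. It walks the queue
 * map from the tail until it finds an entry already reported or runs
 * out of caller-supplied WC slots, and returns the number generated.
 */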
static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
			       struct ib_wc *wc, int num_entries,
			       struct ipz_queue *ipz_queue, int on_sq)
{
	int nr = 0;
	struct ehca_wqe *wqe;
	u64 offset;
	struct ehca_queue_map *qmap;
	struct ehca_qmap_entry *qmap_entry;

	if (on_sq)
		qmap = &my_qp->sq_map;
	else
		qmap = &my_qp->rq_map;

	qmap_entry = &qmap->map[qmap->tail];

	while ((nr < num_entries) && (qmap_entry->reported == 0)) {
		/* generate flush CQE */
		memset(wc, 0, sizeof(*wc));

		offset = qmap->tail * ipz_queue->qe_size;
		wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
		if (!wqe) {
			ehca_err(cq->device, "Invalid wqe offset=%#lx on "
				 "qp_num=%#x", offset, my_qp->real_qp_num);
			return nr;
		}

		wc->wr_id = replace_wr_id(wqe->work_request_id,
					  qmap_entry->app_wr_id);

		if (on_sq) {
			switch (wqe->optype) {
			case WQE_OPTYPE_SEND:
				wc->opcode = IB_WC_SEND;
				break;
			case WQE_OPTYPE_RDMAWRITE:
				wc->opcode = IB_WC_RDMA_WRITE;
				break;
			case WQE_OPTYPE_RDMAREAD:
				wc->opcode = IB_WC_RDMA_READ;
				break;
			default:
				ehca_err(cq->device, "Invalid optype=%x",
					 wqe->optype);
				return nr;
			}
		} else
			wc->opcode = IB_WC_RECV;

		if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {
			wc->ex.imm_data = wqe->immediate_data;
			wc->wc_flags |= IB_WC_WITH_IMM;
		}

		wc->status = IB_WC_WR_FLUSH_ERR;

		wc->qp = &my_qp->ib_qp;

		/* mark as reported and advance tail pointer */
		qmap_entry->reported = 1;
		if (++qmap->tail == qmap->entries)
			qmap->tail = 0;
		qmap_entry = &qmap->map[qmap->tail];

		wc++;
		nr++;
	}

	return nr;
}
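
/*
 * Added note: ehca_poll_cq() first drains synthesized flush completions
 * for any QPs on this CQ's send/receive error lists, then polls real
 * CQEs from hardware for the remaining WC slots.
 */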
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int nr;
	struct ehca_qp *err_qp;
	struct ib_wc *current_wc = wc;
	int ret = 0;
	unsigned long flags;
	int entries_left = num_entries;

	if (num_entries < 1) {
		ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
			 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
		ret = -EINVAL;
		goto poll_cq_exit0;
	}

	spin_lock_irqsave(&my_cq->spinlock, flags);

	/* generate flush cqes for send queues */
	list_for_each_entry(err_qp, &my_cq->sqp_err_list, sq_err_node) {
		nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
					 &err_qp->ipz_squeue, 1);
		entries_left -= nr;
		current_wc += nr;

		if (entries_left == 0)
			break;
	}

	/* generate flush cqes for receive queues */
	list_for_each_entry(err_qp, &my_cq->rqp_err_list, rq_err_node) {
		nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
					 &err_qp->ipz_rqueue, 0);
		entries_left -= nr;
		current_wc += nr;

		if (entries_left == 0)
			break;
	}

	for (nr = 0; nr < entries_left; nr++) {
		ret = ehca_poll_cq_one(cq, current_wc);
		if (ret)
			break;
		current_wc++;
	} /* eof for nr */
	entries_left -= nr;

	spin_unlock_irqrestore(&my_cq->spinlock, flags);
	if (ret == -EAGAIN || !ret)
		ret = num_entries - entries_left;

poll_cq_exit0:
	return ret;
}

int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int ret = 0;

	switch (notify_flags & IB_CQ_SOLICITED_MASK) {
	case IB_CQ_SOLICITED:
		hipz_set_cqx_n0(my_cq, 1);
		break;
	case IB_CQ_NEXT_COMP:
		hipz_set_cqx_n1(my_cq, 1);
		break;
	default:
		return -EINVAL;
	}

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		unsigned long spl_flags;
		spin_lock_irqsave(&my_cq->spinlock, spl_flags);
		ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
		spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
	}

	return ret;
}
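
/*
 * Added note: with IB_CQ_REPORT_MISSED_EVENTS, a return value > 0 tells
 * the consumer that a completion is already pending in the CQ, so it
 * should poll again instead of waiting for the next event.
 */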