/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  post_send/recv, poll_cq, req_notify
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/system.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"

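/*
 * Write one receive WQE, built from an ib_recv_wr, into the receive
 * queue slot wqe_p.  Returns 0 on success or -EINVAL if the SG list
 * is longer than the queue supports.
 */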
static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
                                  struct ehca_wqe *wqe_p,
                                  struct ib_recv_wr *recv_wr)
{
        u8 cnt_ds;
        if (unlikely((recv_wr->num_sge < 0) ||
                     (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
                ehca_gen_err("Invalid number of WQE SGE. "
                         "num_sge=%x max_nr_of_sg=%x",
                         recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
                return -EINVAL; /* invalid SG list length */
        }

        /* clear wqe header until sglist */
        memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

        wqe_p->work_request_id = recv_wr->wr_id;
        wqe_p->nr_of_data_seg = recv_wr->num_sge;

        for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
                wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
                        recv_wr->sg_list[cnt_ds].addr;
                wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
                        recv_wr->sg_list[cnt_ds].lkey;
                wqe_p->u.all_rcv.sg_list[cnt_ds].length =
                        recv_wr->sg_list[cnt_ds].length;
        }

        if (ehca_debug_level) {
                ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
                             ipz_rqueue);
                ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
        }

        return 0;
}

#if defined(DEBUG_GSI_SEND_WR)

/* need ib_mad struct */
#include <rdma/ib_mad.h>

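/*
 * Dump a chain of UD send work requests, including the MAD header and
 * the contents of every SGE, to the debug log.  Compiled in only when
 * DEBUG_GSI_SEND_WR is defined.
 */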
static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
{
        int idx = 0;
        int j;
        while (send_wr) {
                struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
                struct ib_sge *sge = send_wr->sg_list;
                ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
                             "send_flags=%x opcode=%x", idx, send_wr->wr_id,
                             send_wr->num_sge, send_wr->send_flags,
                             send_wr->opcode);
                if (mad_hdr) {
                        ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
                                     "mgmt_class=%x class_version=%x method=%x "
                                     "status=%x class_specific=%x tid=%lx "
                                     "attr_id=%x resv=%x attr_mod=%x",
                                     idx, mad_hdr->base_version,
                                     mad_hdr->mgmt_class,
                                     mad_hdr->class_version, mad_hdr->method,
                                     mad_hdr->status, mad_hdr->class_specific,
                                     mad_hdr->tid, mad_hdr->attr_id,
                                     mad_hdr->resv,
                                     mad_hdr->attr_mod);
                }
                for (j = 0; j < send_wr->num_sge; j++) {
                        u8 *data = (u8 *) abs_to_virt(sge->addr);
                        ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
                                     "lkey=%x",
                                     idx, j, data, sge->length, sge->lkey);
                        /* assume length is n*16 */
                        ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
                                 idx, j);
                        sge++;
                } /* eof for j */
                idx++;
                send_wr = send_wr->next;
        } /* eof while send_wr */
}

#endif /* DEBUG_GSI_SEND_WR */

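/*
 * Write one send WQE, built from an ib_send_wr, into the send queue
 * slot wqe_p.  Fills in the opcode, flags and SG list, then applies the
 * UD/UC/RC specifics for the QP type.  Returns 0 on success or -EINVAL
 * on a bad SG list length, opcode or QP type.
 */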
static inline int ehca_write_swqe(struct ehca_qp *qp,
                                  struct ehca_wqe *wqe_p,
                                  const struct ib_send_wr *send_wr)
{
        u32 idx;
        u64 dma_length;
        struct ehca_av *my_av;
        u32 remote_qkey = send_wr->wr.ud.remote_qkey;

        if (unlikely((send_wr->num_sge < 0) ||
                     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
                ehca_gen_err("Invalid number of WQE SGE. "
                         "num_sge=%x max_nr_of_sg=%x",
                         send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
                return -EINVAL; /* invalid SG list length */
        }

        /* clear wqe header until sglist */
        memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

        wqe_p->work_request_id = send_wr->wr_id;

        switch (send_wr->opcode) {
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_IMM:
                wqe_p->optype = WQE_OPTYPE_SEND;
                break;
        case IB_WR_RDMA_WRITE:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
                break;
        case IB_WR_RDMA_READ:
                wqe_p->optype = WQE_OPTYPE_RDMAREAD;
                break;
        default:
                ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
                return -EINVAL; /* invalid opcode */
        }

        wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;

        wqe_p->wr_flag = 0;

        if (send_wr->send_flags & IB_SEND_SIGNALED)
                wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;

        if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
            send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
                /* this might not work as long as HW does not support it */
                wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
                wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
        }

        wqe_p->nr_of_data_seg = send_wr->num_sge;

        switch (qp->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                /* no break is intentional here */
        case IB_QPT_UD:
                /* IB 1.2 spec C10-15 compliance */
                if (send_wr->wr.ud.remote_qkey & 0x80000000)
                        remote_qkey = qp->qkey;

                wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
                wqe_p->local_ee_context_qkey = remote_qkey;
                if (!send_wr->wr.ud.ah) {
                        ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
                        return -EINVAL;
                }
                my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
                wqe_p->u.ud_av.ud_av = my_av->av;

                /*
                 * omitted check of IB_SEND_INLINE
                 * since HW does not support it
                 */
                for (idx = 0; idx < send_wr->num_sge; idx++) {
                        wqe_p->u.ud_av.sg_list[idx].vaddr =
                                send_wr->sg_list[idx].addr;
                        wqe_p->u.ud_av.sg_list[idx].lkey =
                                send_wr->sg_list[idx].lkey;
                        wqe_p->u.ud_av.sg_list[idx].length =
                                send_wr->sg_list[idx].length;
                } /* eof for idx */
                if (qp->qp_type == IB_QPT_SMI ||
                    qp->qp_type == IB_QPT_GSI)
                        wqe_p->u.ud_av.ud_av.pmtu = 1;
                if (qp->qp_type == IB_QPT_GSI) {
                        wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
#ifdef DEBUG_GSI_SEND_WR
                        trace_send_wr_ud(send_wr);
#endif /* DEBUG_GSI_SEND_WR */
                }
                break;

        case IB_QPT_UC:
                if (send_wr->send_flags & IB_SEND_FENCE)
                        wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
                /* no break is intentional here */
        case IB_QPT_RC:
                /* TODO: atomic not implemented */
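                /* "adress" below matches the field spelling in ehca_qes.h */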
                wqe_p->u.nud.remote_virtual_adress =
                        send_wr->wr.rdma.remote_addr;
                wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;

                /*
                 * omitted checking of IB_SEND_INLINE
                 * since HW does not support it
                 */
                dma_length = 0;
                for (idx = 0; idx < send_wr->num_sge; idx++) {
                        wqe_p->u.nud.sg_list[idx].vaddr =
                                send_wr->sg_list[idx].addr;
                        wqe_p->u.nud.sg_list[idx].lkey =
                                send_wr->sg_list[idx].lkey;
                        wqe_p->u.nud.sg_list[idx].length =
                                send_wr->sg_list[idx].length;
                        dma_length += send_wr->sg_list[idx].length;
                } /* eof idx */
                wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;

                break;

        default:
                ehca_gen_err("Invalid qptype=%x", qp->qp_type);
                return -EINVAL;
        }

        if (ehca_debug_level) {
                ehca_gen_dbg("SEND WQE written into queue qp=%p", qp);
                ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
        }
        return 0;
}

/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
static inline void map_ib_wc_status(u32 cqe_status,
                                    enum ib_wc_status *wc_status)
{
        if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
                switch (cqe_status & 0x3F) {
                case 0x01:
                case 0x21:
                        *wc_status = IB_WC_LOC_LEN_ERR;
                        break;
                case 0x02:
                case 0x22:
                        *wc_status = IB_WC_LOC_QP_OP_ERR;
                        break;
                case 0x03:
                case 0x23:
                        *wc_status = IB_WC_LOC_EEC_OP_ERR;
                        break;
                case 0x04:
                case 0x24:
                        *wc_status = IB_WC_LOC_PROT_ERR;
                        break;
                case 0x05:
                case 0x25:
                        *wc_status = IB_WC_WR_FLUSH_ERR;
                        break;
                case 0x06:
                        *wc_status = IB_WC_MW_BIND_ERR;
                        break;
                case 0x07: /* remote error - look into bits 20:24 */
                        switch ((cqe_status
                                 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
                        case 0x0:
                                /*
                                 * PSN Sequence Error!
                                 * couldn't find a matching status!
                                 */
                                *wc_status = IB_WC_GENERAL_ERR;
                                break;
                        case 0x1:
                                *wc_status = IB_WC_REM_INV_REQ_ERR;
                                break;
                        case 0x2:
                                *wc_status = IB_WC_REM_ACCESS_ERR;
                                break;
                        case 0x3:
                                *wc_status = IB_WC_REM_OP_ERR;
                                break;
                        case 0x4:
                                *wc_status = IB_WC_REM_INV_RD_REQ_ERR;
                                break;
                        }
                        break;
                case 0x08:
                        *wc_status = IB_WC_RETRY_EXC_ERR;
                        break;
                case 0x09:
                        *wc_status = IB_WC_RNR_RETRY_EXC_ERR;
                        break;
                case 0x0A:
                case 0x2D:
                        *wc_status = IB_WC_REM_ABORT_ERR;
                        break;
                case 0x0B:
                case 0x2E:
                        *wc_status = IB_WC_INV_EECN_ERR;
                        break;
                case 0x0C:
                case 0x2F:
                        *wc_status = IB_WC_INV_EEC_STATE_ERR;
                        break;
                case 0x0D:
                        *wc_status = IB_WC_BAD_RESP_ERR;
                        break;
                case 0x10:
                        /* WQE purged */
                        *wc_status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        *wc_status = IB_WC_FATAL_ERR;
                }
        } else
                *wc_status = IB_WC_SUCCESS;
}

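/*
 * Post a chain of send work requests to the QP's send queue.  The
 * queue is locked for the whole chain; on the first failure the chain
 * is cut short, *bad_send_wr points at the offending request, and
 * hipz_update_sqa() reports only the WQEs actually written.
 */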
int ehca_post_send(struct ib_qp *qp,
                   struct ib_send_wr *send_wr,
                   struct ib_send_wr **bad_send_wr)
{
        struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
        struct ib_send_wr *cur_send_wr;
        struct ehca_wqe *wqe_p;
        int wqe_cnt = 0;
        int ret = 0;
        unsigned long spl_flags;

        /* LOCK the QUEUE */
        spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);

        /* loop processes list of send reqs */
        for (cur_send_wr = send_wr; cur_send_wr != NULL;
             cur_send_wr = cur_send_wr->next) {
                u64 start_offset = my_qp->ipz_squeue.current_q_offset;
                /* get pointer next to free WQE */
                wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
                if (unlikely(!wqe_p)) {
                        /* too many posted work requests: queue overflow */
                        if (bad_send_wr)
                                *bad_send_wr = cur_send_wr;
                        if (wqe_cnt == 0) {
                                ret = -ENOMEM;
                                ehca_err(qp->device, "Too many posted WQEs "
                                         "qp_num=%x", qp->qp_num);
                        }
                        goto post_send_exit0;
                }
                /* write a SEND WQE into the QUEUE */
                ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
                /*
                 * if something failed,
                 * reset the free entry pointer to the start value
                 */
                if (unlikely(ret)) {
                        my_qp->ipz_squeue.current_q_offset = start_offset;
                        if (bad_send_wr)
                                *bad_send_wr = cur_send_wr;
                        if (wqe_cnt == 0) {
                                ret = -EINVAL;
                                ehca_err(qp->device, "Could not write WQE "
                                         "qp_num=%x", qp->qp_num);
                        }
                        goto post_send_exit0;
                }
                wqe_cnt++;
                ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
                         my_qp, qp->qp_num, wqe_cnt);
        } /* eof for cur_send_wr */

post_send_exit0:
        /* UNLOCK the QUEUE */
        spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
        iosync(); /* serialize GAL register access */
        hipz_update_sqa(my_qp, wqe_cnt);
        return ret;
}

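/*
 * Post a chain of receive work requests to the QP's receive queue.
 * Mirrors ehca_post_send(): the queue is locked for the whole chain
 * and hipz_update_rqa() reports only the WQEs actually written.
 */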
int ehca_post_recv(struct ib_qp *qp,
                   struct ib_recv_wr *recv_wr,
                   struct ib_recv_wr **bad_recv_wr)
{
        struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
        struct ib_recv_wr *cur_recv_wr;
        struct ehca_wqe *wqe_p;
        int wqe_cnt = 0;
        int ret = 0;
        unsigned long spl_flags;

        /* LOCK the QUEUE */
        spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);

        /* loop processes list of recv reqs */
        for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
             cur_recv_wr = cur_recv_wr->next) {
                u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
                /* get pointer next to free WQE */
                wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
                if (unlikely(!wqe_p)) {
                        /* too many posted work requests: queue overflow */
                        if (bad_recv_wr)
                                *bad_recv_wr = cur_recv_wr;
                        if (wqe_cnt == 0) {
                                ret = -ENOMEM;
                                ehca_err(qp->device, "Too many posted WQEs "
                                         "qp_num=%x", qp->qp_num);
                        }
                        goto post_recv_exit0;
                }
                /* write a RECV WQE into the QUEUE */
                ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
                /*
                 * if something failed,
                 * reset the free entry pointer to the start value
                 */
                if (unlikely(ret)) {
                        my_qp->ipz_rqueue.current_q_offset = start_offset;
                        if (bad_recv_wr)
                                *bad_recv_wr = cur_recv_wr;
                        if (wqe_cnt == 0) {
                                ret = -EINVAL;
                                ehca_err(qp->device, "Could not write WQE "
                                         "qp_num=%x", qp->qp_num);
                        }
                        goto post_recv_exit0;
                }
                wqe_cnt++;
                ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d",
                             my_qp, qp->qp_num, wqe_cnt);
        } /* eof for cur_recv_wr */

post_recv_exit0:
        spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags);
        iosync(); /* serialize GAL register access */
        hipz_update_rqa(my_qp, wqe_cnt);
        return ret;
}

/*
 * ib_wc_opcode table converts ehca wc opcode to ib
 * Since we use zero to indicate invalid opcode, the actual ib opcode must
 * be decremented!!! The table is sized 256 so that any u8 optype indexes
 * it safely.
 */
static const u8 ib_wc_opcode[256] = {
        [0x01] = IB_WC_RECV+1,
        [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
        [0x04] = IB_WC_BIND_MW+1,
        [0x08] = IB_WC_FETCH_ADD+1,
        [0x10] = IB_WC_COMP_SWAP+1,
        [0x20] = IB_WC_RDMA_WRITE+1,
        [0x40] = IB_WC_RDMA_READ+1,
        [0x80] = IB_WC_SEND+1
};

/* internal function to poll one entry of cq */
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
        int ret = 0;
        struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
        struct ehca_cqe *cqe;
        int cqe_count = 0;

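/* CQEs that must be skipped (purged, or no matching QP) jump back here */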
poll_cq_one_read_cqe:
        cqe = (struct ehca_cqe *)
                ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
        if (!cqe) {
                ret = -EAGAIN;
                ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
                         "cq_num=%x ret=%x", my_cq, my_cq->cq_number, ret);
                goto poll_cq_one_exit0;
        }

        /* prevents loads being reordered across this point */
        rmb();

        cqe_count++;
        if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
                struct ehca_qp *qp = ehca_cq_get_qp(my_cq,
                                                    cqe->local_qp_number);
                int purgeflag;
                unsigned long spl_flags;
                if (!qp) {
                        ehca_err(cq->device, "cq_num=%x qp_num=%x "
                                 "could not find qp -> ignore cqe",
                                 my_cq->cq_number, cqe->local_qp_number);
                        ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
                                 my_cq->cq_number, cqe->local_qp_number);
                        /* ignore this purged cqe */
                        goto poll_cq_one_read_cqe;
                }
                spin_lock_irqsave(&qp->spinlock_s, spl_flags);
                purgeflag = qp->sqerr_purgeflag;
                spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);

                if (purgeflag) {
                        ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
                                 "src_qp=%x",
                                 cqe->local_qp_number, cqe->remote_qp_number);
                        if (ehca_debug_level)
                                ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
                                         cqe->local_qp_number,
                                         cqe->remote_qp_number);
                        /*
                         * ignore this CQE to avoid reporting the bad WQE
                         * that caused the SQE twice, and turn off the
                         * purge flag
                         */
                        qp->sqerr_purgeflag = 0;
                        goto poll_cq_one_read_cqe;
                }
        }

        /* tracing cqe */
        if (ehca_debug_level) {
                ehca_dbg(cq->device,
                         "Received COMPLETION ehca_cq=%p cq_num=%x -----",
                         my_cq, my_cq->cq_number);
                ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
                         my_cq, my_cq->cq_number);
                ehca_dbg(cq->device,
                         "ehca_cq=%p cq_num=%x -------------------------",
                         my_cq, my_cq->cq_number);
        }

        /* we got a completion! */
        wc->wr_id = cqe->work_request_id;

        /* eval ib_wc_opcode */
        wc->opcode = ib_wc_opcode[cqe->optype]-1;
        if (unlikely(wc->opcode == -1)) {
                ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
                         "ehca_cq=%p cq_num=%x",
                         cqe->optype, cqe->status, my_cq, my_cq->cq_number);
                /* dump cqe for other infos */
                ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
                         my_cq, my_cq->cq_number);
                /* update also queue adder to throw away this entry!!! */
                goto poll_cq_one_exit0;
        }
        /* eval ib_wc_status */
        if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
                /* complete with errors */
                map_ib_wc_status(cqe->status, &wc->status);
                wc->vendor_err = wc->status;
        } else
                wc->status = IB_WC_SUCCESS;

        wc->qp = NULL;
        wc->byte_len = cqe->nr_bytes_transferred;
        wc->pkey_index = cqe->pkey_index;
        wc->slid = cqe->rlid;
        wc->dlid_path_bits = cqe->dlid;
        wc->src_qp = cqe->remote_qp_number;
        wc->wc_flags = cqe->w_completion_flags;
        wc->imm_data = cpu_to_be32(cqe->immediate_data);
        wc->sl = cqe->service_level;

        if (wc->status != IB_WC_SUCCESS)
                ehca_dbg(cq->device,
                         "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
                         "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
                         "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
                         cqe->status, cqe->local_qp_number,
                         cqe->remote_qp_number, cqe->work_request_id, cqe);

poll_cq_one_exit0:
        if (cqe_count > 0)
                hipz_update_feca(my_cq, cqe_count);

        return ret;
}

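/*
 * Poll up to num_entries completions from the CQ into the wc array.
 * Returns the number of completions found (0 if the CQ is empty) or a
 * negative error code.
 */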
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
        struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
        int nr;
        struct ib_wc *current_wc = wc;
        int ret = 0;
        unsigned long spl_flags;

        if (num_entries < 1) {
                ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
                         "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
                ret = -EINVAL;
                goto poll_cq_exit0;
        }

        spin_lock_irqsave(&my_cq->spinlock, spl_flags);
        for (nr = 0; nr < num_entries; nr++) {
                ret = ehca_poll_cq_one(cq, current_wc);
                if (ret)
                        break;
                current_wc++;
        } /* eof for nr */
        spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
        if (ret == -EAGAIN || !ret)
                ret = nr;

poll_cq_exit0:
        return ret;
}

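/*
 * Arm the CQ for the requested notification type.  With
 * IB_CQ_REPORT_MISSED_EVENTS, returns a positive value if completions
 * are already pending on the CQ, so the caller can poll again.
 */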
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
        struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
        unsigned long spl_flags;
        int ret = 0;

        switch (notify_flags & IB_CQ_SOLICITED_MASK) {
        case IB_CQ_SOLICITED:
                hipz_set_cqx_n0(my_cq, 1);
                break;
        case IB_CQ_NEXT_COMP:
                hipz_set_cqx_n1(my_cq, 1);
                break;
        default:
                return -EINVAL;
        }

        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
                spin_lock_irqsave(&my_cq->spinlock, spl_flags);
                ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
                spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
        }

        return ret;
}