/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */
#include <linux/init.h>
#include <linux/hardirq.h>

#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
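/*
 * CQ buffers up to MTHCA_MAX_DIRECT_CQ_SIZE bytes are kept in a single
 * contiguous ("direct") allocation; larger CQs fall back to a page
 * list, and get_cqe_from_buf() below picks the matching lookup.
 */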
enum {
        MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};
enum {
        MTHCA_CQ_ENTRY_SIZE = 0x20
};
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
        __be32 flags;
        __be64 start;
        __be32 logsize_usrpage;
        __be32 error_eqn;       /* Tavor only */
        __be32 comp_eqn;
        __be32 pd;
        __be32 lkey;
        __be32 last_notified_index;
        __be32 solicit_producer_index;
        __be32 consumer_index;
        __be32 producer_index;
        __be32 cqn;
        __be32 ci_db;           /* Arbel only */
        __be32 state_db;        /* Arbel only */
        u32    reserved;
} __attribute__((packed));
#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)
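/*
 * A completion entry is an error CQE when all the bits of
 * MTHCA_ERROR_CQE_OPCODE_MASK are set in its opcode field; the low
 * opcode bit then distinguishes send from receive (see is_recv_cqe()).
 */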
enum {
        MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};
enum {
        SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
        SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
        SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
        SYNDROME_LOCAL_PROT_ERR          = 0x04,
        SYNDROME_WR_FLUSH_ERR            = 0x05,
        SYNDROME_MW_BIND_ERR             = 0x06,
        SYNDROME_BAD_RESP_ERR            = 0x10,
        SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
        SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
        SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
        SYNDROME_REMOTE_OP_ERR           = 0x14,
        SYNDROME_RETRY_EXC_ERR           = 0x15,
        SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
        SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
        SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
        SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
        SYNDROME_INVAL_EECN_ERR          = 0x23,
        SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};
struct mthca_cqe {
        __be32 my_qpn;
        __be32 my_ee;
        __be32 rqpn;
        __be16 sl_g_mlpath;
        __be16 rlid;
        __be32 imm_etype_pkey_eec;
        __be32 byte_cnt;
        __be32 wqe;
        u8     opcode;
        u8     is_send;
        u8     reserved;
        u8     owner;
};
struct mthca_err_cqe {
        __be32 my_qpn;
        u32    reserved1[3];
        u8     syndrome;
        u8     vendor_err;
        __be16 db_cnt;
        u32    reserved2;
        __be32 wqe;
        u8     opcode;
        u8     reserved3[2];
        u8     owner;
};
#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)
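/*
 * CQE access helpers: translate an entry index into a pointer inside
 * either the direct buffer or the right page of the page list, and use
 * the owner bit to tell software-owned entries from hardware-owned
 * ones.
 */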
static inline struct mthca_cqe *get_cqe_from_buf(struct mthca_cq_buf *buf,
                                                 int entry)
{
        if (buf->is_direct)
                return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
        else
                return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
                        + (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}
static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
        return get_cqe_from_buf(&cq->buf, entry);
}
static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
{
        return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}
static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
        return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
}
static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
        cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}
static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
        __be32 *cqe = cqe_ptr;

        (void) cqe;     /* avoid warning if mthca_dbg compiled away... */
        mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
                  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
                  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
                  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}
/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
                                     int incr)
{
        __be32 doorbell[2];

        if (mthca_is_memfree(dev)) {
                *cq->set_ci_db = cpu_to_be32(cq->cons_index);
                wmb();
        } else {
                doorbell[0] = cpu_to_be32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
                doorbell[1] = cpu_to_be32(incr - 1);

                mthca_write64(doorbell,
                              dev->kar + MTHCA_CQ_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }
}
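/*
 * Completion event dispatch: look the CQ up by the low bits of its CQN
 * and hand it to the consumer's completion handler.
 */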
void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
{
        struct mthca_cq *cq;

        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

        if (!cq) {
                mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
                return;
        }

        ++cq->arm_sn;

        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
                    enum ib_event_type event_type)
{
        struct mthca_cq *cq;
        struct ib_event event;

        spin_lock(&dev->cq_table.lock);

        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

        if (cq)
                atomic_inc(&cq->refcount);
        spin_unlock(&dev->cq_table.lock);

        if (!cq) {
                mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
                return;
        }

        event.device     = &dev->ib_dev;
        event.event      = event_type;
        event.element.cq = &cq->ibcq;
        if (cq->ibcq.event_handler)
                cq->ibcq.event_handler(&event, cq->ibcq.cq_context);

        if (atomic_dec_and_test(&cq->refcount))
                wake_up(&cq->wait);
}
static inline int is_recv_cqe(struct mthca_cqe *cqe)
{
        if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
            MTHCA_ERROR_CQE_OPCODE_MASK)
                return !(cqe->opcode & 0x01);
        else
                return !(cqe->is_send & 0x80);
}
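/*
 * mthca_cq_clean() removes all CQEs belonging to a QP that is being
 * torn down (returning receive WQEs to the SRQ if one is attached) and
 * compacts the remaining entries in place.
 */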
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
                    struct mthca_srq *srq)
{
        struct mthca_cq *cq;
        struct mthca_cqe *cqe;
        u32 prod_index;
        int nfreed = 0;

        spin_lock_irq(&dev->cq_table.lock);
        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
        if (cq)
                atomic_inc(&cq->refcount);
        spin_unlock_irq(&dev->cq_table.lock);

        if (!cq)
                return;

        spin_lock_irq(&cq->lock);

        /*
         * First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->cons_index;
             cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
             ++prod_index)
                if (prod_index == cq->cons_index + cq->ibcq.cqe)
                        break;

        if (0)
                mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
                          qpn, cqn, cq->cons_index, prod_index);

        /*
         * Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                if (cqe->my_qpn == cpu_to_be32(qpn)) {
                        if (srq && is_recv_cqe(cqe))
                                mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
                        ++nfreed;
                } else if (nfreed)
                        memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
                               cqe, MTHCA_CQ_ENTRY_SIZE);
        }

        if (nfreed) {
                wmb();
                cq->cons_index += nfreed;
                update_cons_index(dev, cq, nfreed);
        }

        spin_unlock_irq(&cq->lock);
        if (atomic_dec_and_test(&cq->refcount))
                wake_up(&cq->wait);
}
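/*
 * Copy any completions still outstanding in the old buffer into the
 * new buffer set up by a CQ resize operation.
 */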
void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
{
        int i;

        /*
         * In Tavor mode, the hardware keeps the consumer and producer
         * indices mod the CQ size.  Since we might be making the CQ
         * bigger, we need to deal with the case where the producer
         * index wrapped around before the CQ was resized.
         */
        if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&
            cq->ibcq.cqe < cq->resize_buf->cqe) {
                cq->cons_index &= cq->ibcq.cqe;
                if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
                        cq->cons_index -= cq->ibcq.cqe + 1;
        }

        for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
                memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
                                        i & cq->resize_buf->cqe),
                       get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
}
int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)
{
        int ret;
        int i;

        ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
                              MTHCA_MAX_DIRECT_CQ_SIZE,
                              &buf->queue, &buf->is_direct,
                              &dev->driver_pd, 1, &buf->mr);
        if (ret)
                return ret;

        for (i = 0; i < nent; ++i)
                set_cqe_hw(get_cqe_from_buf(buf, i));

        return 0;
}
void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)
{
        mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
                       buf->is_direct, &buf->mr);
}
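/*
 * Translate an error CQE into an ib_wc: map the hardware syndrome to
 * an IB status code, and in Tavor mode use the doorbell count to
 * decide whether the CQE still covers more flushed WQEs and must be
 * kept for the next poll rather than freed.
 */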
static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
                             struct mthca_qp *qp, int wqe_index, int is_send,
                             struct mthca_err_cqe *cqe,
                             struct ib_wc *entry, int *free_cqe)
{
        int dbd;
        __be32 new_wqe;

        if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
                mthca_dbg(dev, "local QP operation err "
                          "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
                          be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
                          cq->cqn, cq->cons_index);
                dump_cqe(dev, cqe);
        }

        /*
         * For completions in error, only work request ID, status, vendor error
         * (and freed resource count for RD) have to be set.
         */
        switch (cqe->syndrome) {
        case SYNDROME_LOCAL_LENGTH_ERR:
                entry->status = IB_WC_LOC_LEN_ERR;
                break;
        case SYNDROME_LOCAL_QP_OP_ERR:
                entry->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case SYNDROME_LOCAL_EEC_OP_ERR:
                entry->status = IB_WC_LOC_EEC_OP_ERR;
                break;
        case SYNDROME_LOCAL_PROT_ERR:
                entry->status = IB_WC_LOC_PROT_ERR;
                break;
        case SYNDROME_WR_FLUSH_ERR:
                entry->status = IB_WC_WR_FLUSH_ERR;
                break;
        case SYNDROME_MW_BIND_ERR:
                entry->status = IB_WC_MW_BIND_ERR;
                break;
        case SYNDROME_BAD_RESP_ERR:
                entry->status = IB_WC_BAD_RESP_ERR;
                break;
        case SYNDROME_LOCAL_ACCESS_ERR:
                entry->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case SYNDROME_REMOTE_INVAL_REQ_ERR:
                entry->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case SYNDROME_REMOTE_ACCESS_ERR:
                entry->status = IB_WC_REM_ACCESS_ERR;
                break;
        case SYNDROME_REMOTE_OP_ERR:
                entry->status = IB_WC_REM_OP_ERR;
                break;
        case SYNDROME_RETRY_EXC_ERR:
                entry->status = IB_WC_RETRY_EXC_ERR;
                break;
        case SYNDROME_RNR_RETRY_EXC_ERR:
                entry->status = IB_WC_RNR_RETRY_EXC_ERR;
                break;
        case SYNDROME_LOCAL_RDD_VIOL_ERR:
                entry->status = IB_WC_LOC_RDD_VIOL_ERR;
                break;
        case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
                entry->status = IB_WC_REM_INV_RD_REQ_ERR;
                break;
        case SYNDROME_REMOTE_ABORTED_ERR:
                entry->status = IB_WC_REM_ABORT_ERR;
                break;
        case SYNDROME_INVAL_EECN_ERR:
                entry->status = IB_WC_INV_EECN_ERR;
                break;
        case SYNDROME_INVAL_EEC_STATE_ERR:
                entry->status = IB_WC_INV_EEC_STATE_ERR;
                break;
        default:
                entry->status = IB_WC_GENERAL_ERR;
                break;
        }

        entry->vendor_err = cqe->vendor_err;

        /*
         * Mem-free HCAs always generate one CQE per WQE, even in the
         * error case, so we don't have to check the doorbell count, etc.
         */
        if (mthca_is_memfree(dev))
                return;

        mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);

        /*
         * If we're at the end of the WQE chain, or we've used up our
         * doorbell count, free the CQE.  Otherwise just update it for
         * the next poll operation.
         */
        if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
                return;

        cqe->db_cnt   = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
        cqe->wqe      = new_wqe;
        cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

        *free_cqe = 0;
}
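/*
 * Poll a single CQE: find the owning QP, recover the work request ID
 * from the WQE address, update the work queue's tail/last_comp
 * bookkeeping, and fill in the ib_wc (handing error CQEs to
 * handle_error_cqe()).
 */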
static inline int mthca_poll_one(struct mthca_dev *dev,
                                 struct mthca_cq *cq,
                                 struct mthca_qp **cur_qp,
                                 int *freed,
                                 struct ib_wc *entry)
{
        struct mthca_wq *wq;
        struct mthca_cqe *cqe;
        int wqe_index;
        int is_error;
        int is_send;
        int free_cqe = 1;
        int err = 0;

        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        if (0) {
                mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
                          cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
                          be32_to_cpu(cqe->wqe));
                dump_cqe(dev, cqe);
        }

        is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
                MTHCA_ERROR_CQE_OPCODE_MASK;
        is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

        if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
                /*
                 * We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                *cur_qp = mthca_array_get(&dev->qp_table.qp,
                                          be32_to_cpu(cqe->my_qpn) &
                                          (dev->limits.num_qps - 1));
                if (!*cur_qp) {
                        mthca_warn(dev, "CQ entry for unknown QP %06x\n",
                                   be32_to_cpu(cqe->my_qpn) & 0xffffff);
                        err = -EINVAL;
                        goto out;
                }
        }

        entry->qp_num = (*cur_qp)->qpn;

        if (is_send) {
                wq = &(*cur_qp)->sq;
                wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
                             >> wq->wqe_shift);
                entry->wr_id = (*cur_qp)->wrid[wqe_index +
                                               (*cur_qp)->rq.max];
        } else if ((*cur_qp)->ibqp.srq) {
                struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
                u32 wqe = be32_to_cpu(cqe->wqe);
                wq = NULL;
                wqe_index = wqe >> srq->wqe_shift;
                entry->wr_id = srq->wrid[wqe_index];
                mthca_free_srq_wqe(srq, wqe);
        } else {
                wq = &(*cur_qp)->rq;
                wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
                entry->wr_id = (*cur_qp)->wrid[wqe_index];
        }

        if (wq) {
                if (wq->last_comp < wqe_index)
                        wq->tail += wqe_index - wq->last_comp;
                else
                        wq->tail += wqe_index + wq->max - wq->last_comp;

                wq->last_comp = wqe_index;
        }

        if (is_error) {
                handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
                                 (struct mthca_err_cqe *) cqe,
                                 entry, &free_cqe);
                goto out;
        }

        if (is_send) {
                entry->wc_flags = 0;
                switch (cqe->opcode) {
                case MTHCA_OPCODE_RDMA_WRITE:
                        entry->opcode    = IB_WC_RDMA_WRITE;
                        break;
                case MTHCA_OPCODE_RDMA_WRITE_IMM:
                        entry->opcode    = IB_WC_RDMA_WRITE;
                        entry->wc_flags |= IB_WC_WITH_IMM;
                        break;
                case MTHCA_OPCODE_SEND:
                        entry->opcode    = IB_WC_SEND;
                        break;
                case MTHCA_OPCODE_SEND_IMM:
                        entry->opcode    = IB_WC_SEND;
                        entry->wc_flags |= IB_WC_WITH_IMM;
                        break;
                case MTHCA_OPCODE_RDMA_READ:
                        entry->opcode    = IB_WC_RDMA_READ;
                        entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MTHCA_OPCODE_ATOMIC_CS:
                        entry->opcode    = IB_WC_COMP_SWAP;
                        entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MTHCA_OPCODE_ATOMIC_FA:
                        entry->opcode    = IB_WC_FETCH_ADD;
                        entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MTHCA_OPCODE_BIND_MW:
                        entry->opcode    = IB_WC_BIND_MW;
                        break;
                default:
                        entry->opcode    = MTHCA_OPCODE_INVALID;
                        break;
                }
        } else {
                entry->byte_len = be32_to_cpu(cqe->byte_cnt);
                switch (cqe->opcode & 0x1f) {
                case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
                case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
                        entry->wc_flags = IB_WC_WITH_IMM;
                        entry->imm_data = cqe->imm_etype_pkey_eec;
                        entry->opcode = IB_WC_RECV;
                        break;
                case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
                        entry->wc_flags = IB_WC_WITH_IMM;
                        entry->imm_data = cqe->imm_etype_pkey_eec;
                        entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
                        break;
                default:
                        entry->wc_flags = 0;
                        entry->opcode = IB_WC_RECV;
                        break;
                }
                entry->slid           = be16_to_cpu(cqe->rlid);
                entry->sl             = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
                entry->src_qp         = be32_to_cpu(cqe->rqpn) & 0xffffff;
                entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
                entry->pkey_index     = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
                entry->wc_flags      |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
                                        IB_WC_GRH : 0;
        }

        entry->status = IB_WC_SUCCESS;

out:
        if (likely(free_cqe)) {
                set_cqe_hw(cqe);
                ++(*freed);
                ++cq->cons_index;
        }

        return err;
}
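/*
 * mthca_poll_cq() drains up to num_entries completions under the CQ
 * lock, rings the consumer-index doorbell for the entries it freed,
 * and switches over to the resize buffer if one is ready and the old
 * buffer has been emptied.
 */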
int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
                  struct ib_wc *entry)
{
        struct mthca_dev *dev = to_mdev(ibcq->device);
        struct mthca_cq *cq = to_mcq(ibcq);
        struct mthca_qp *qp = NULL;
        unsigned long flags;
        int err = 0;
        int freed = 0;
        int npolled;

        spin_lock_irqsave(&cq->lock, flags);

        npolled = 0;
repoll:
        while (npolled < num_entries) {
                err = mthca_poll_one(dev, cq, &qp,
                                     &freed, entry + npolled);
                if (err)
                        break;
                ++npolled;
        }

        if (freed) {
                wmb();
                update_cons_index(dev, cq, freed);
        }

        /*
         * If a CQ resize is in progress and we discovered that the
         * old buffer is empty, then peek in the new buffer, and if
         * it's not empty, switch to the new buffer and continue
         * polling there.
         */
        if (unlikely(err == -EAGAIN && cq->resize_buf &&
                     cq->resize_buf->state == CQ_RESIZE_READY)) {
                /*
                 * In Tavor mode, the hardware keeps the producer
                 * index modulo the CQ size.  Since we might be making
                 * the CQ bigger, we need to mask our consumer index
                 * using the size of the old CQ buffer before looking
                 * in the new CQ buffer.
                 */
                if (!mthca_is_memfree(dev))
                        cq->cons_index &= cq->ibcq.cqe;

                if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf,
                                            cq->cons_index & cq->resize_buf->cqe))) {
                        struct mthca_cq_buf tbuf;
                        int tcqe;

                        tbuf         = cq->buf;
                        tcqe         = cq->ibcq.cqe;
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        cq->resize_buf->buf   = tbuf;
                        cq->resize_buf->cqe   = tcqe;
                        cq->resize_buf->state = CQ_RESIZE_SWAPPED;

                        goto repoll;
                }
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        return err == 0 || err == -EAGAIN ? npolled : err;
}
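/*
 * Arm a Tavor CQ for the next completion event by writing a request
 * notification doorbell to the HCA.
 */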
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
{
        __be32 doorbell[2];

        doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
                                   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
                                   MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
                                  to_mcq(cq)->cqn);
        doorbell[1] = (__force __be32) 0xffffffff;

        mthca_write64(doorbell,
                      to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

        return 0;
}
int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
        struct mthca_cq *cq = to_mcq(ibcq);
        __be32 doorbell[2];
        u32 sn;
        __be32 ci;

        sn = cq->arm_sn & 3;
        ci = cpu_to_be32(cq->cons_index);

        doorbell[0] = ci;
        doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
                                  (notify == IB_CQ_SOLICITED ? 1 : 2));

        mthca_write_db_rec(doorbell, cq->arm_db);

        /*
         * Make sure that the doorbell record in host memory is
         * written before ringing the doorbell via PCI MMIO.
         */
        wmb();

        doorbell[0] = cpu_to_be32((sn << 28)                       |
                                  (notify == IB_CQ_SOLICITED ?
                                   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
                                   MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
                                  cq->cqn);
        doorbell[1] = ci;

        mthca_write64(doorbell,
                      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

        return 0;
}
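/*
 * CQ creation: allocate a CQN and, in mem-free mode, the ICM table
 * entry and doorbell records; allocate the CQ buffer for kernel CQs;
 * then build a CQ context in a mailbox and pass ownership to the HCA
 * with SW2HW_CQ before publishing the CQ in the device's CQ table.
 */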
int mthca_init_cq(struct mthca_dev *dev, int nent,
                  struct mthca_ucontext *ctx, u32 pdn,
                  struct mthca_cq *cq)
{
        struct mthca_mailbox *mailbox;
        struct mthca_cq_context *cq_context;
        int err = -ENOMEM;
        u8 status;

        cq->ibcq.cqe  = nent - 1;
        cq->is_kernel = !ctx;

        cq->cqn = mthca_alloc(&dev->cq_table.alloc);
        if (cq->cqn == -1)
                return -ENOMEM;

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
                if (err)
                        goto err_out;

                if (cq->is_kernel) {
                        cq->arm_sn = 1;

                        err = -ENOMEM;

                        cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
                                                             cq->cqn, &cq->set_ci_db);
                        if (cq->set_ci_db_index < 0)
                                goto err_out_icm;

                        cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
                                                          cq->cqn, &cq->arm_db);
                        if (cq->arm_db_index < 0)
                                goto err_out_ci;
                }
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                goto err_out_arm;

        cq_context = mailbox->buf;

        if (cq->is_kernel) {
                err = mthca_alloc_cq_buf(dev, &cq->buf, nent);
                if (err)
                        goto err_out_mailbox;
        }

        spin_lock_init(&cq->lock);
        atomic_set(&cq->refcount, 1);
        init_waitqueue_head(&cq->wait);

        memset(cq_context, 0, sizeof *cq_context);
        cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
                                                  MTHCA_CQ_STATE_DISARMED |
                                                  MTHCA_CQ_FLAG_TR);
        cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
        if (ctx)
                cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
        else
                cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
        cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
        cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
        cq_context->pd              = cpu_to_be32(pdn);
        cq_context->lkey            = cpu_to_be32(cq->buf.mr.ibmr.lkey);
        cq_context->cqn             = cpu_to_be32(cq->cqn);

        if (mthca_is_memfree(dev)) {
                cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
                cq_context->state_db = cpu_to_be32(cq->arm_db_index);
        }

        err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
        if (err) {
                mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
                goto err_out_free_mr;
        }

        if (status) {
                mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_mr;
        }

        spin_lock_irq(&dev->cq_table.lock);
        if (mthca_array_set(&dev->cq_table.cq,
                            cq->cqn & (dev->limits.num_cqs - 1),
                            cq)) {
                spin_unlock_irq(&dev->cq_table.lock);
                goto err_out_free_mr;
        }
        spin_unlock_irq(&dev->cq_table.lock);

        cq->cons_index = 0;

        mthca_free_mailbox(dev, mailbox);

        return 0;

err_out_free_mr:
        if (cq->is_kernel)
                mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_arm:
        if (cq->is_kernel && mthca_is_memfree(dev))
                mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
        if (cq->is_kernel && mthca_is_memfree(dev))
                mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
        mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
        mthca_free(&dev->cq_table.alloc, cq->cqn);

        return err;
}
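/*
 * CQ teardown is the reverse: reclaim the CQ from the HCA with
 * HW2SW_CQ, unpublish it from the CQ table, wait for any remaining
 * references to drop, then free the buffer, doorbells and CQN.
 */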
void mthca_free_cq(struct mthca_dev *dev,
                   struct mthca_cq *cq)
{
        struct mthca_mailbox *mailbox;
        int err;
        u8 status;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                mthca_warn(dev, "No memory for mailbox to free CQ.\n");
                return;
        }

        err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

        if (0) {
                __be32 *ctx = mailbox->buf;
                int j;

                printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
                       cq->cqn, cq->cons_index,
                       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
                for (j = 0; j < 16; ++j)
                        printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
        }

        spin_lock_irq(&dev->cq_table.lock);
        mthca_array_clear(&dev->cq_table.cq,
                          cq->cqn & (dev->limits.num_cqs - 1));
        spin_unlock_irq(&dev->cq_table.lock);

        if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
                synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
        else
                synchronize_irq(dev->pdev->irq);

        atomic_dec(&cq->refcount);
        wait_event(cq->wait, !atomic_read(&cq->refcount));

        if (cq->is_kernel) {
                mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
                if (mthca_is_memfree(dev)) {
                        mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
                        mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
                }
        }

        mthca_table_put(dev, dev->cq_table.table, cq->cqn);
        mthca_free(&dev->cq_table.alloc, cq->cqn);
        mthca_free_mailbox(dev, mailbox);
}
int __devinit mthca_init_cq_table(struct mthca_dev *dev)
{
        int err;

        spin_lock_init(&dev->cq_table.lock);

        err = mthca_alloc_init(&dev->cq_table.alloc,
                               dev->limits.num_cqs,
                               (1 << 24) - 1,
                               dev->limits.reserved_cqs);
        if (err)
                return err;

        err = mthca_array_init(&dev->cq_table.cq,
                               dev->limits.num_cqs);
        if (err)
                mthca_alloc_cleanup(&dev->cq_table.alloc);

        return err;
}
void mthca_cleanup_cq_table(struct mthca_dev *dev)
{
        mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
        mthca_alloc_cleanup(&dev->cq_table.alloc);
}