/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */

#include <linux/init.h>
#include <linux/hardirq.h>

#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

enum {
	MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
	MTHCA_CQ_ENTRY_SIZE = 0x20
};

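/*
 * Sizing note (illustrative, assuming 4KB pages): each CQE is
 * 0x20 = 32 bytes, so a direct buffer of MTHCA_MAX_DIRECT_CQ_SIZE
 * (16KB) holds 16384 / 32 = 512 entries; anything larger falls back
 * to the page list built in mthca_alloc_cq_buf() below.
 */
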
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 error_eqn;	/* Tavor only */
	__be32 comp_eqn;
	__be32 pd;
	__be32 lkey;
	__be32 last_notified_index;
	__be32 solicit_producer_index;
	__be32 consumer_index;
	__be32 producer_index;
	__be32 cqn;
	__be32 ci_db;		/* Arbel only */
	__be32 state_db;	/* Arbel only */
	u32    reserved;
} __attribute__((packed));

#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)

enum {
	MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};

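/*
 * An opcode of 0xfe or 0xff marks an error CQE; bit 0 of the opcode
 * then distinguishes send (1) from receive (0) completions (see
 * mthca_poll_one()).
 */
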
enum {
	SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
	SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
	SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
	SYNDROME_LOCAL_PROT_ERR          = 0x04,
	SYNDROME_WR_FLUSH_ERR            = 0x05,
	SYNDROME_MW_BIND_ERR             = 0x06,
	SYNDROME_BAD_RESP_ERR            = 0x10,
	SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
	SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
	SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
	SYNDROME_REMOTE_OP_ERR           = 0x14,
	SYNDROME_RETRY_EXC_ERR           = 0x15,
	SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
	SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
	SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
	SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
	SYNDROME_INVAL_EECN_ERR          = 0x23,
	SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};

struct mthca_cqe {
	__be32 my_qpn;
	__be32 my_ee;
	__be32 rqpn;
	__be16 sl_g_mlpath;
	__be16 rlid;
	__be32 imm_etype_pkey_eec;
	__be32 byte_cnt;
	__be32 wqe;
	u8     opcode;
	u8     is_send;
	u8     reserved;
	u8     owner;
};

struct mthca_err_cqe {
	__be32 my_qpn;
	u32    reserved1[3];
	u8     syndrome;
	u8     reserved2;
	__be16 db_cnt;
	u32    reserved3;
	__be32 wqe;
	u8     opcode;
	u8     reserved4[2];
	u8     owner;
};

#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)

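/*
 * Ownership handoff: software hands a CQE to the device by setting
 * the HW ownership bit (set_cqe_hw()); the device clears it when it
 * writes a completion, and cqe_sw() treats a cleared bit as a valid,
 * software-owned entry.
 */
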
#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)

static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
	if (cq->is_direct)
		return cq->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
	else
		return cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}

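/*
 * Worked example (illustrative, assuming 4KB pages): entry 130 lives
 * at byte offset 130 * 32 = 4160, which the indirect case above
 * resolves to page 1, offset 64.
 */
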
static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
{
	struct mthca_cqe *cqe = get_cqe(cq, i);
	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}

static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
	return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe);
}

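/*
 * cq->ibcq.cqe is the CQ size minus one; since the size is a power of
 * two, ANDing with it is a cheap modulo that wraps cons_index around
 * the ring.
 */
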
static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}

static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
	__be32 *cqe = cqe_ptr;

	(void) cqe;	/* avoid warning if mthca_dbg compiled away... */
	mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
		  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
		  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}

/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
				     int incr)
{
	__be32 doorbell[2];

	if (mthca_is_memfree(dev)) {
		*cq->set_ci_db = cpu_to_be32(cq->cons_index);
		wmb();
	} else {
		doorbell[0] = cpu_to_be32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
		doorbell[1] = cpu_to_be32(incr - 1);

		mthca_write64(doorbell,
			      dev->kar + MTHCA_CQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
}

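/*
 * Note that Tavor encodes the consumer index increment biased by one:
 * a doorbell carrying (incr - 1) in its second word advances the
 * hardware's view of the CI by incr entries.
 */
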
void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
{
	struct mthca_cq *cq;

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

	if (!cq) {
		mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
{
	struct mthca_cq *cq;
	struct mthca_cqe *cqe;
	int prod_index;
	int nfreed = 0;

	spin_lock_irq(&dev->cq_table.lock);
	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);
	spin_unlock_irq(&dev->cq_table.lock);

	if (!cq)
		return;

	spin_lock_irq(&cq->lock);

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index;
	     cqe_sw(cq, prod_index & cq->ibcq.cqe);
	     ++prod_index)
		if (prod_index == cq->cons_index + cq->ibcq.cqe)
			break;

	if (0)
		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
			  qpn, cqn, cq->cons_index, prod_index);

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
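	/*
	 * For example (illustrative): with ci 10 and pi 14, where
	 * entries 11 and 13 belong to the QP being cleaned, entry 12
	 * is copied onto slot 13 and entry 10 onto slot 12;
	 * cons_index then advances by nfreed == 2 and the surviving
	 * completions are still polled in order.
	 */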
	while (prod_index > cq->cons_index) {
		cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
		if (cqe->my_qpn == cpu_to_be32(qpn))
			++nfreed;
		else if (nfreed)
			memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
				       cq->ibcq.cqe),
			       cqe,
			       MTHCA_CQ_ENTRY_SIZE);
		--prod_index;
	}

	if (nfreed) {
		wmb();
		cq->cons_index += nfreed;
		update_cons_index(dev, cq, nfreed);
	}

	spin_unlock_irq(&cq->lock);
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}

static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
			    struct mthca_qp *qp, int wqe_index, int is_send,
			    struct mthca_err_cqe *cqe,
			    struct ib_wc *entry, int *free_cqe)
{
	int err;
	int dbd;
	__be32 new_wqe;

	if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
		mthca_dbg(dev, "local QP operation err "
			  "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
			  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
			  cq->cqn, cq->cons_index);
		dump_cqe(dev, cqe);
	}

	/*
	 * For completions in error, only work request ID, status (and
	 * freed resource count for RD) have to be set.
	 */
	switch (cqe->syndrome) {
	case SYNDROME_LOCAL_LENGTH_ERR:
		entry->status = IB_WC_LOC_LEN_ERR;
		break;
	case SYNDROME_LOCAL_QP_OP_ERR:
		entry->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case SYNDROME_LOCAL_EEC_OP_ERR:
		entry->status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case SYNDROME_LOCAL_PROT_ERR:
		entry->status = IB_WC_LOC_PROT_ERR;
		break;
	case SYNDROME_WR_FLUSH_ERR:
		entry->status = IB_WC_WR_FLUSH_ERR;
		break;
	case SYNDROME_MW_BIND_ERR:
		entry->status = IB_WC_MW_BIND_ERR;
		break;
	case SYNDROME_BAD_RESP_ERR:
		entry->status = IB_WC_BAD_RESP_ERR;
		break;
	case SYNDROME_LOCAL_ACCESS_ERR:
		entry->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_REQ_ERR:
		entry->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ACCESS_ERR:
		entry->status = IB_WC_REM_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_OP_ERR:
		entry->status = IB_WC_REM_OP_ERR;
		break;
	case SYNDROME_RETRY_EXC_ERR:
		entry->status = IB_WC_RETRY_EXC_ERR;
		break;
	case SYNDROME_RNR_RETRY_EXC_ERR:
		entry->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case SYNDROME_LOCAL_RDD_VIOL_ERR:
		entry->status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
		entry->status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ABORTED_ERR:
		entry->status = IB_WC_REM_ABORT_ERR;
		break;
	case SYNDROME_INVAL_EECN_ERR:
		entry->status = IB_WC_INV_EECN_ERR;
		break;
	case SYNDROME_INVAL_EEC_STATE_ERR:
		entry->status = IB_WC_INV_EEC_STATE_ERR;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
		break;
	}

	err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
	if (err)
		return err;

	/*
	 * If we're at the end of the WQE chain, or we've used up our
	 * doorbell count, free the CQE.  Otherwise just update it for
	 * the next poll operation.
	 */
	if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
		return 0;

	cqe->db_cnt   = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
	cqe->wqe      = new_wqe;
	cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

	*free_cqe = 0;

	return 0;
}

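/*
 * Note that a single error CQE can stand for several flushed work
 * requests: as long as the WQE chain and doorbell count allow it, the
 * CQE is rewritten as a WR_FLUSH_ERR completion and left in place for
 * the next poll instead of being freed.
 */
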
static inline int mthca_poll_one(struct mthca_dev *dev,
				 struct mthca_cq *cq,
				 struct mthca_qp **cur_qp,
				 int *freed,
				 struct ib_wc *entry)
{
	struct mthca_wq *wq;
	struct mthca_cqe *cqe;
	int wqe_index;
	int is_error;
	int is_send;
	int free_cqe = 1;
	int err = 0;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	if (0) {
		mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
			  be32_to_cpu(cqe->wqe));
		dump_cqe(dev, cqe);
	}

	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
		MTHCA_ERROR_CQE_OPCODE_MASK;
	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}
	}

	entry->qp_num = (*cur_qp)->qpn;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else {
		wq = &(*cur_qp)->rq;
		wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}

	if (wq->last_comp < wqe_index)
		wq->tail += wqe_index - wq->last_comp;
	else
		wq->tail += wqe_index + wq->max - wq->last_comp;

	wq->last_comp = wqe_index;

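	/*
	 * Tail example (illustrative): with wq->max == 64, a previous
	 * last_comp of 60 and a new completion at wqe_index 2, the
	 * else branch above advances tail by 2 + 64 - 60 == 6
	 * entries, accounting for the wrap past the end of the queue.
	 */
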
	if (0)
		mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
			  is_send ? "Send" : "Receive",
			  (*cur_qp)->qpn, wqe_index, wq->max);

	if (is_error) {
		err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				       (struct mthca_err_cqe *) cqe,
				       entry, &free_cqe);
		goto out;
	}

	if (is_send) {
		entry->wc_flags = 0;
		switch (cqe->opcode) {
		case MTHCA_OPCODE_RDMA_WRITE:
			entry->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MTHCA_OPCODE_RDMA_WRITE_IMM:
			entry->opcode    = IB_WC_RDMA_WRITE;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_SEND:
			entry->opcode    = IB_WC_SEND;
			break;
		case MTHCA_OPCODE_SEND_IMM:
			entry->opcode    = IB_WC_SEND;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_RDMA_READ:
			entry->opcode    = IB_WC_RDMA_READ;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_CS:
			entry->opcode    = IB_WC_COMP_SWAP;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_FA:
			entry->opcode    = IB_WC_FETCH_ADD;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_BIND_MW:
			entry->opcode    = IB_WC_BIND_MW;
			break;
		default:
			entry->opcode    = MTHCA_OPCODE_INVALID;
			break;
		}
	} else {
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode = IB_WC_RECV;
			break;
		}
		entry->slid           = be16_to_cpu(cqe->rlid);
		entry->sl             = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
		entry->src_qp         = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
		entry->pkey_index     = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		entry->wc_flags      |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
					IB_WC_GRH : 0;
	}

	entry->status = IB_WC_SUCCESS;

 out:
	if (likely(free_cqe)) {
		set_cqe_hw(cqe);
		++(*freed);
		++cq->cons_index;
	}

	return err;
}

int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
	}

	if (freed) {
		wmb();
		update_cons_index(dev, cq, freed);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return err == 0 || err == -EAGAIN ? npolled : err;
}

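/*
 * Consumer index updates are batched: freed CQEs are counted up and
 * the doorbell (or Arbel CI record) is written once per
 * mthca_poll_cq() call rather than once per polled entry.
 */
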
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
{
	__be32 doorbell[2];

	doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
				   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
				   MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
				  to_mcq(cq)->cqn);
	doorbell[1] = 0xffffffff;

	mthca_write64(doorbell,
		      to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

	return 0;
}

int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	__be32 doorbell[2];
	u32 sn;
	__be32 ci;

	sn = cq->arm_sn & 3;
	ci = cpu_to_be32(cq->cons_index);

	doorbell[0] = ci;
	doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
				  (notify == IB_CQ_SOLICITED ? 1 : 2));

	mthca_write_db_rec(doorbell, cq->arm_db);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32((sn << 28)                       |
				  (notify == IB_CQ_SOLICITED ?
				   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
				   MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
				  cq->cqn);
	doorbell[1] = ci;

	mthca_write64(doorbell,
		      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

	return 0;
}

static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
{
	int i;
	int size;

	if (cq->is_direct)
		dma_free_coherent(&dev->pdev->dev,
				  (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
				  cq->queue.direct.buf,
				  pci_unmap_addr(&cq->queue.direct,
						 mapping));
	else {
		size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
			if (cq->queue.page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  cq->queue.page_list[i].buf,
						  pci_unmap_addr(&cq->queue.page_list[i],
								 mapping));

		kfree(cq->queue.page_list);
	}
}

static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
			      struct mthca_cq *cq)
{
	int err = -ENOMEM;
	int npages, shift;
	u64 *dma_list = NULL;
	dma_addr_t t;
	int i;

	if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
		cq->is_direct = 1;
		npages        = 1;
		shift         = get_order(size) + PAGE_SHIFT;

		cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
							  size, &t, GFP_KERNEL);
		if (!cq->queue.direct.buf)
			return -ENOMEM;

		pci_unmap_addr_set(&cq->queue.direct, mapping, t);

		memset(cq->queue.direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		cq->is_direct = 0;
		npages        = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift         = PAGE_SHIFT;

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			return -ENOMEM;

		cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
					      GFP_KERNEL);
		if (!cq->queue.page_list)
			goto err_out;

		for (i = 0; i < npages; ++i)
			cq->queue.page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			cq->queue.page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!cq->queue.page_list[i].buf)
				goto err_free;

			dma_list[i] = t;
			pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);

			memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
		}
	}

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &cq->mr);
	if (err)
		goto err_free;

	kfree(dma_list);

	return 0;

err_free:
	mthca_free_cq_buf(dev, cq);

err_out:
	kfree(dma_list);

	return err;
}

int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_ucontext *ctx, u32 pdn,
		  struct mthca_cq *cq)
{
	int size = nent * MTHCA_CQ_ENTRY_SIZE;
	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;
	u8 status;
	int i;

	might_sleep();

	cq->ibcq.cqe  = nent - 1;
	cq->is_kernel = !ctx;

	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
	if (cq->cqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
		if (err)
			goto err_out;

		if (cq->is_kernel) {
			cq->arm_sn = 1;

			err = -ENOMEM;

			cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
							     cq->cqn, &cq->set_ci_db);
			if (cq->set_ci_db_index < 0)
				goto err_out_icm;

			cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
							  cq->cqn, &cq->arm_db);
			if (cq->arm_db_index < 0)
				goto err_out_ci;
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_arm;

	cq_context = mailbox->buf;

	if (cq->is_kernel) {
		err = mthca_alloc_cq_buf(dev, size, cq);
		if (err)
			goto err_out_mailbox;

		for (i = 0; i < nent; ++i)
			set_cqe_hw(get_cqe(cq, i));
	}

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
	cq_context->start           = cpu_to_be64(0);
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
	if (ctx)
		cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
	else
		cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
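	/*
	 * nent is assumed to be a power of two; ffs(nent) - 1 above
	 * is then exactly log2(nent), which is how the hardware
	 * expects the CQ size to be encoded.
	 */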
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(pdn);
	cq_context->lkey            = cpu_to_be32(cq->mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	if (mthca_is_memfree(dev)) {
		cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	if (status) {
		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);
	cq->cons_index = 0;

	mthca_free_mailbox(dev, mailbox);

	return 0;

err_out_free_mr:
	if (cq->is_kernel) {
		mthca_free_mr(dev, &cq->mr);
		mthca_free_cq_buf(dev, cq);
	}

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_arm:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
	mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

	return err;
}

void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	might_sleep();

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

	if (0) {
		__be32 *ctx = mailbox->buf;
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index,
		       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	spin_unlock_irq(&dev->cq_table.lock);

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
		synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
	else
		synchronize_irq(dev->pdev->irq);

	atomic_dec(&cq->refcount);
	wait_event(cq->wait, !atomic_read(&cq->refcount));

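	/*
	 * At this point no completion handler can still be running:
	 * the CQ is gone from the table, interrupts have been
	 * synchronized, and every mthca_cq_clean() caller has dropped
	 * its reference.
	 */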
	if (cq->is_kernel) {
		mthca_free_mr(dev, &cq->mr);
		mthca_free_cq_buf(dev, cq);
		if (mthca_is_memfree(dev)) {
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
		}
	}

	mthca_table_put(dev, dev->cq_table.table, cq->cqn);
	mthca_free(&dev->cq_table.alloc, cq->cqn);
	mthca_free_mailbox(dev, mailbox);
}

int __devinit mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}

void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}