/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"
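
/* Each CQ message slot holds one c2wr_ce, rounded up to a 32-byte boundary. */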
#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
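
/*
 * Look up a CQ by its adapter MQ index and take a reference on it.
 * Returns NULL if the CQ is no longer in the device's qptr_array.
 */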
static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
{
	struct c2_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);
	cq = c2dev->qptr_array[cqn];
	if (!cq) {
		spin_unlock_irqrestore(&c2dev->lock, flags);
		return NULL;
	}
	atomic_inc(&cq->refcount);
	spin_unlock_irqrestore(&c2dev->lock, flags);
	return cq;
}
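
/* Drop a reference taken with c2_cq_get(); wake waiters on the last put. */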
static void c2_cq_put(struct c2_cq *cq)
{
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}
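
/*
 * Completion event notification, invoked when the adapter signals
 * activity on the MQ backing this CQ; dispatches to the consumer's
 * completion handler.
 */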
void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
{
	struct c2_cq *cq;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq) {
		printk(KERN_WARNING "discarding events on destroyed CQN=%d\n",
		       mq_index);
		return;
	}

	(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
	c2_cq_put(cq);
}
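
/*
 * Called while destroying a QP: walk the unconsumed CQEs in this CQ
 * and clear qp_user_context in any entry still pointing at the dying
 * QP, so that c2_poll_one() will skip them.
 */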
void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
{
	struct c2_cq *cq;
	struct c2_mq *q;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	q = &cq->mq;
	if (q && !c2_mq_empty(q)) {
		u16 priv = q->priv;
		struct c2wr_ce *msg;

		while (priv != be16_to_cpu(*q->shared)) {
			msg = (struct c2wr_ce *)
				(q->msg_pool.host + priv * q->msg_size);
			if (msg->qp_user_context == (u64) (unsigned long) qp)
				msg->qp_user_context = (u64) 0;
			priv = (priv + 1) % q->q_size;
		}
	}
	spin_unlock_irq(&cq->lock);
	c2_cq_put(cq);
}
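
/* Map adapter CQE status codes onto IB work completion status values. */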
static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
{
	switch (status) {
	case C2_OK:
		return IB_WC_SUCCESS;
	case CCERR_FLUSHED:
		return IB_WC_WR_FLUSH_ERR;
	case CCERR_BASE_AND_BOUNDS_VIOLATION:
		return IB_WC_LOC_PROT_ERR;
	case CCERR_ACCESS_VIOLATION:
		return IB_WC_LOC_ACCESS_ERR;
	case CCERR_TOTAL_LENGTH_TOO_BIG:
		return IB_WC_LOC_LEN_ERR;
	case CCERR_INVALID_WINDOW:
		return IB_WC_MW_BIND_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}
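
/*
 * Pull one completion message off the CQ's message queue and translate
 * it into the given ib_wc entry.  Returns -EAGAIN when the queue is empty.
 */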
static inline int c2_poll_one(struct c2_dev *c2dev,
			      struct c2_cq *cq, struct ib_wc *entry)
{
	struct c2wr_ce *ce;
	struct c2_qp *qp;
	int is_recv = 0;

	ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
	if (!ce)
		return -EAGAIN;

	/*
	 * If the QP returned is NULL, then this QP has already
	 * been freed and we are unable to process the completion.
	 * Try pulling the next message.
	 */
	while ((qp =
		(struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
		c2_mq_free(&cq->mq);
		ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
		if (!ce)
			return -EAGAIN;
	}

	entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
	entry->wr_id = ce->hdr.context;
	entry->qp_num = ce->handle;
	entry->src_qp = 0;
	entry->wc_flags = 0;
	entry->slid = 0;
	entry->sl = 0;
	entry->dlid_path_bits = 0;
	entry->pkey_index = 0;

	switch (c2_wr_get_id(ce)) {
	case C2_WR_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case C2_WR_TYPE_RDMA_WRITE:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case C2_WR_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case C2_WR_TYPE_BIND_MW:
		entry->opcode = IB_WC_BIND_MW;
		break;
	case C2_WR_TYPE_RECV:
		entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
		entry->opcode = IB_WC_RECV;
		is_recv = 1;
		break;
	default:
		break;
	}

	/* consume the WQEs */
	if (is_recv)
		c2_mq_lconsume(&qp->rq_mq, 1);
	else
		c2_mq_lconsume(&qp->sq_mq,
			       be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);

	/* free the message */
	c2_mq_free(&cq->mq);

	return 0;
}
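
/* ib_poll_cq() handler: reap up to num_entries completions from the CQ. */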
int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct c2_dev *c2dev = to_c2dev(ibcq->device);
	struct c2_cq *cq = to_c2cq(ibcq);
	unsigned long flags;
	int npolled, err;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = c2_poll_one(c2dev, cq, entry + npolled);
		if (err)
			break;
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}
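
/*
 * ib_req_notify_cq() handler: arm the CQ by writing the requested
 * notification type to the adapter's shared page for this MQ.
 */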
int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct c2_mq_shared __iomem *shared;
	struct c2_cq *cq;

	cq = to_c2cq(ibcq);
	shared = cq->mq.peer;

	if (notify == IB_CQ_NEXT_COMP)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
	else if (notify == IB_CQ_SOLICITED)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
	else
		return -EINVAL;

	writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);

	/*
	 * Now read back shared->armed to make the PCI
	 * write synchronous.  This is necessary for
	 * correct cq notification semantics.
	 */
	readb(&shared->armed);

	return 0;
}
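
/* Unmap and free the message pool allocated by c2_alloc_cq_buf(). */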
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
	dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping),
			 mq->q_size * mq->msg_size, DMA_FROM_DEVICE);
	free_pages((unsigned long) mq->msg_pool.host,
		   get_order(mq->q_size * mq->msg_size));
}
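
/*
 * Allocate and DMA-map the host-side message pool for a CQ; the
 * adapter DMAs completion messages into this pool (DMA_FROM_DEVICE).
 */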
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
			   int msg_size)
{
	unsigned long pool_start;

	pool_start = __get_free_pages(GFP_KERNEL,
				      get_order(q_size * msg_size));
	if (!pool_start)
		return -ENOMEM;

	c2_mq_rep_init(mq,
		       0,		/* index (currently unknown) */
		       q_size,
		       msg_size,
		       (u8 *) pool_start,
		       NULL,		/* peer (currently unknown) */
		       C2_MQ_HOST_TARGET);

	mq->host_dma = dma_map_single(c2dev->ibdev.dma_device,
				      (void *) pool_start,
				      q_size * msg_size, DMA_FROM_DEVICE);
	pci_unmap_addr_set(mq, mapping, mq->host_dma);

	return 0;
}
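
/*
 * Create a CQ: set up the host MQ, then issue a CCWR_CQ_CREATE request
 * to the adapter over the verbs queue and finish initialization from
 * the adapter's reply.
 */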
int c2_init_cq(struct c2_dev *c2dev, int entries,
	       struct c2_ucontext *ctx, struct c2_cq *cq)
{
	struct c2wr_cq_create_req wr;
	struct c2wr_cq_create_rep *reply;
	unsigned long peer_pa;
	struct c2_vq_req *vq_req;
	int err;

	might_sleep();

	cq->ibcq.cqe = entries - 1;
	cq->is_kernel = !ctx;

	/* Allocate a shared pointer */
	cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
				      &cq->mq.shared_dma, GFP_KERNEL);
	if (!cq->mq.shared)
		return -ENOMEM;

	/* Allocate pages for the message pool */
	err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
	if (err)
		goto bail0;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.msg_size = cpu_to_be32(cq->mq.msg_size);
	wr.depth = cpu_to_be32(cq->mq.q_size);
	wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
	wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
	wr.user_context = (u64) (unsigned long) cq;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail2;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail2;

	reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail2;
	}

	err = c2_errno(reply);
	if (err)
		goto bail3;

	cq->adapter_handle = reply->cq_handle;
	cq->mq.index = be32_to_cpu(reply->mq_index);

	peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
	cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
	if (!cq->mq.peer) {
		err = -ENOMEM;
		goto bail3;
	}

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	/*
	 * Use the MQ index allocated by the adapter to
	 * store the CQ in the qptr_array
	 */
	cq->cqn = cq->mq.index;
	c2dev->qptr_array[cq->cqn] = cq;

	return 0;

bail3:
	vq_repbuf_free(c2dev, reply);
bail2:
	vq_req_free(c2dev, vq_req);
bail1:
	c2_free_cq_buf(c2dev, &cq->mq);
bail0:
	c2_free_mqsp(cq->mq.shared);

	return err;
}
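
/*
 * Destroy a CQ: unpublish it from qptr_array, wait for outstanding
 * references to drain, then issue CCWR_CQ_DESTROY to the adapter and
 * release host resources.
 */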
void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
{
	int err;
	struct c2_vq_req *vq_req;
	struct c2wr_cq_destroy_req wr;
	struct c2wr_cq_destroy_rep *reply;

	might_sleep();

	/* Clear CQ from the qptr array */
	spin_lock_irq(&c2dev->lock);
	c2dev->qptr_array[cq->mq.index] = NULL;
	atomic_dec(&cq->refcount);
	spin_unlock_irq(&c2dev->lock);

	wait_event(cq->wait, !atomic_read(&cq->refcount));

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		goto bail0;

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.cq_handle = cq->adapter_handle;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);

	vq_repbuf_free(c2dev, reply);
bail1:
	vq_req_free(c2dev, vq_req);
bail0:
	if (cq->is_kernel)
		c2_free_cq_buf(c2dev, &cq->mq);
}