/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"
#define C2_MAX_ORD_PER_QP 128
#define C2_MAX_IRD_PER_QP 128
#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | (hint_count))
#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
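
/*
 * For example (illustrative values): C2_HINT_MAKE(5, 3) == 0x00050003, i.e.
 * the MQ index lands in bits 31:16 and the hint (shared) count in bits 15:0.
 * C2_HINT_GET_INDEX() masks with 0x7FFF0000, so bit 31 is never part of the
 * recovered index.
 */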
#define NO_SUPPORT -1
static const u8 c2_opcode[] = {
	[IB_WR_SEND] = C2_WR_TYPE_SEND,
	[IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
	[IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
	[IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
};
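
/*
 * Translate between IB verbs QP states and the adapter's iWARP QP states.
 * Note that both IB_QPS_SQD and IB_QPS_SQE collapse onto
 * C2_QP_STATE_CLOSING, and any state the adapter cannot represent maps
 * to -1.
 */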
static int to_c2_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:	return C2_QP_STATE_IDLE;
	case IB_QPS_RTS:	return C2_QP_STATE_RTS;
	case IB_QPS_SQD:	return C2_QP_STATE_CLOSING;
	case IB_QPS_SQE:	return C2_QP_STATE_CLOSING;
	case IB_QPS_ERR:	return C2_QP_STATE_ERROR;
	default:		return -1;
	}
}
static int to_ib_state(enum c2_qp_state c2_state)
{
	switch (c2_state) {
	case C2_QP_STATE_IDLE:		return IB_QPS_RESET;
	case C2_QP_STATE_CONNECTING:	return IB_QPS_RTR;
	case C2_QP_STATE_RTS:		return IB_QPS_RTS;
	case C2_QP_STATE_CLOSING:	return IB_QPS_SQD;
	case C2_QP_STATE_ERROR:		return IB_QPS_ERR;
	case C2_QP_STATE_TERMINATE:	return IB_QPS_SQE;
	default:			return -1;
	}
}
static const char *to_ib_state_str(int ib_state)
{
	static const char *state_str[] = {
		"IB_QPS_RESET", "IB_QPS_INIT", "IB_QPS_RTR", "IB_QPS_RTS",
		"IB_QPS_SQD", "IB_QPS_SQE", "IB_QPS_ERR"
	};

	if (ib_state < IB_QPS_RESET || ib_state > IB_QPS_ERR)
		return "<invalid IB QP state>";

	ib_state -= IB_QPS_RESET;
	return state_str[ib_state];
}
void c2_set_qp_state(struct c2_qp *qp, int c2_state)
{
	int new_state = to_ib_state(c2_state);

	pr_debug("%s: qp[%p] state modify %s --> %s\n",
		 __func__, qp,
		 to_ib_state_str(qp->state),
		 to_ib_state_str(new_state));
	qp->state = new_state;
}
#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
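
/*
 * C2_QP_NO_ATTR_CHANGE marks attribute fields in a CCWR_QP_MODIFY request
 * that should be left as-is; presumably the adapter only applies the fields
 * carrying real values.
 */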
int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
		 struct ib_qp_attr *attr, int attr_mask)
{
	struct c2wr_qp_modify_req wr;
	struct c2wr_qp_modify_rep *reply;
	struct c2_vq_req *vq_req;
	unsigned long flags;
	u8 next_state;
	int err;

	pr_debug("%s:%d qp=%p, %s --> %s\n",
		 __func__, __LINE__,
		 qp,
		 to_ib_state_str(qp->state),
		 to_ib_state_str(attr->qp_state));

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	c2_wr_set_id(&wr, CCWR_QP_MODIFY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;
	wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);

	if (attr_mask & IB_QP_STATE) {
		/* Ensure the state is valid */
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
			err = -EINVAL;
			goto bail0;
		}

		wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));

		if (attr->qp_state == IB_QPS_ERR) {
			spin_lock_irqsave(&qp->lock, flags);
			if (qp->cm_id && qp->state == IB_QPS_RTS) {
				pr_debug("Generating CLOSE event for QP-->ERR, "
					 "qp=%p, cm_id=%p\n", qp, qp->cm_id);
				/* Generate a CLOSE event */
				vq_req->cm_id = qp->cm_id;
				vq_req->event = IW_CM_EVENT_CLOSE;
			}
			spin_unlock_irqrestore(&qp->lock, flags);
		}
		next_state = attr->qp_state;

	} else if (attr_mask & IB_QP_CUR_STATE) {

		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE) {
			err = -EINVAL;
			goto bail0;
		}
		wr.next_qp_state =
		    cpu_to_be32(to_c2_state(attr->cur_qp_state));

		next_state = attr->cur_qp_state;
	} else {
		err = 0;
		goto bail0;
	}

	/* reference the request struct */
	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);
	if (!err)
		qp->state = next_state;
	else
		pr_debug("%s: c2_errno=%d\n", __func__, err);

	/*
	 * If we're going to error and generate the event here, then
	 * we need to remove the reference because there will be no
	 * close event generated by the adapter.
	 */
	spin_lock_irqsave(&qp->lock, flags);
	if (vq_req->event == IW_CM_EVENT_CLOSE && qp->cm_id) {
		qp->cm_id->rem_ref(qp->cm_id);
		qp->cm_id = NULL;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);

	pr_debug("%s:%d qp=%p, cur_state=%s\n",
		 __func__, __LINE__,
		 qp,
		 to_ib_state_str(qp->state));
	return err;
}
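
/*
 * c2_qp_set_read_limits() issues the same CCWR_QP_MODIFY verb as
 * c2_qp_modify(), but only to program the RDMA-read limits: ORD (the
 * number of outbound RDMA reads this QP may have outstanding) and IRD
 * (the number of inbound RDMA reads it will accept).  Every other
 * attribute is sent as C2_QP_NO_ATTR_CHANGE.
 */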
int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
			  int ord, int ird)
{
	struct c2wr_qp_modify_req wr;
	struct c2wr_qp_modify_rep *reply;
	struct c2_vq_req *vq_req;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	c2_wr_set_id(&wr, CCWR_QP_MODIFY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;
	wr.ord = cpu_to_be32(ord);
	wr.ird = cpu_to_be32(ird);
	wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);

	/* reference the request struct */
	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	reply = (struct c2wr_qp_modify_rep *) (unsigned long)
	    vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);
	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
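
/*
 * destroy_qp() tears the QP down on the adapter via CCWR_QP_DESTROY.
 * If the QP is still connected (cm_id set and state RTS), the pending
 * verbs-queue request is tagged so that an IW_CM_EVENT_CLOSE is delivered
 * to the connection manager, and the cm_id reference is dropped once the
 * reply arrives.
 */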
static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
	struct c2_vq_req *vq_req;
	struct c2wr_qp_destroy_req wr;
	struct c2wr_qp_destroy_rep *reply;
	unsigned long flags;
	int err;

	/* Allocate a verb request message */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/* Initialize the WR */
	c2_wr_set_id(&wr, CCWR_QP_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;

	/* reference the request struct.  dereferenced in the int handler. */
	vq_req_get(c2dev, vq_req);

	spin_lock_irqsave(&qp->lock, flags);
	if (qp->cm_id && qp->state == IB_QPS_RTS) {
		pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
			 "qp=%p, cm_id=%p\n", qp, qp->cm_id);
		/* Generate a CLOSE event */
		vq_req->cm_id = qp->cm_id;
		vq_req->event = IW_CM_EVENT_CLOSE;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	/* Send WR to adapter */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	/* Wait for reply from adapter */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	/* Process reply */
	reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	spin_lock_irqsave(&qp->lock, flags);
	if (qp->cm_id) {
		qp->cm_id->rem_ref(qp->cm_id);
		qp->cm_id = NULL;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
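
/*
 * QP numbers are handed out from an idr so that other parts of the driver
 * can map a QPN coming back from the adapter to its struct c2_qp with
 * c2_find_qpn().  qp_table.last acts as a rotor so recently freed QPNs are
 * not immediately reused.
 */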
static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
{
	int ret;

	do {
		spin_lock_irq(&c2dev->qp_table.lock);
		ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
					c2dev->qp_table.last++, &qp->qpn);
		spin_unlock_irq(&c2dev->qp_table.lock);
	} while ((ret == -EAGAIN) &&
		 idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
	return ret;
}

static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
{
	spin_lock_irq(&c2dev->qp_table.lock);
	idr_remove(&c2dev->qp_table.idr, qpn);
	spin_unlock_irq(&c2dev->qp_table.lock);
}

struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
{
	struct c2_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->qp_table.lock, flags);
	qp = idr_find(&c2dev->qp_table.idr, qpn);
	spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
	return qp;
}
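
/*
 * c2_alloc_qp() builds a QP in four steps: allocate a QPN, carve the SQ/RQ
 * shared-pointer slots out of the kernel MQSP pool, send a CCWR_QP_CREATE
 * verb to the adapter, and finally ioremap the adapter-resident SQ and RQ
 * message queues that the reply describes.  The bail0..bail6 labels unwind
 * those steps in reverse order on error.
 */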
int c2_alloc_qp(struct c2_dev *c2dev,
		struct c2_pd *pd,
		struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
{
	struct c2wr_qp_create_req wr;
	struct c2wr_qp_create_rep *reply;
	struct c2_vq_req *vq_req;
	struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
	struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
	unsigned long peer_pa;
	u32 q_size, msg_size, mmap_size;
	void __iomem *mmap;
	int err;

	err = c2_alloc_qpn(c2dev, qp);
	if (err)
		return err;
	qp->ibqp.qp_num = qp->qpn;
	qp->ibqp.qp_type = IB_QPT_RC;

	/* Allocate the SQ and RQ shared pointers */
	qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					 &qp->sq_mq.shared_dma, GFP_KERNEL);
	if (!qp->sq_mq.shared) {
		err = -ENOMEM;
		goto bail0;
	}

	qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					 &qp->rq_mq.shared_dma, GFP_KERNEL);
	if (!qp->rq_mq.shared) {
		err = -ENOMEM;
		goto bail1;
	}

	/* Allocate the verbs request */
	vq_req = vq_req_alloc(c2dev);
	if (vq_req == NULL) {
		err = -ENOMEM;
		goto bail2;
	}

	/* Initialize the work request */
	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_QP_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.sq_cq_handle = send_cq->adapter_handle;
	wr.rq_cq_handle = recv_cq->adapter_handle;
	wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
	wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);

	wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
			       QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
	wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
	wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
	wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
	wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
	wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
	wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
	wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
	wr.pd_id = pd->pd_id;
	wr.user_context = (unsigned long) qp;

	vq_req_get(c2dev, vq_req);

	/* Send the WR to the adapter */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail3;
	}

	/* Wait for the verb reply */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail3;

	/* Process the reply */
	reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail3;
	}

	if ((err = c2_wr_get_result(reply)) != 0) {
		goto bail4;
	}

	/* Fill in the kernel QP struct */
	atomic_set(&qp->refcount, 1);
	qp->adapter_handle = reply->qp_handle;
	qp->state = IB_QPS_RESET;
	qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
	qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
	qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;

	/* Initialize the SQ MQ */
	q_size = be32_to_cpu(reply->sq_depth);
	msg_size = be32_to_cpu(reply->sq_msg_size);
	peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
	mmap = ioremap_nocache(peer_pa, mmap_size);
	if (!mmap) {
		err = -ENOMEM;
		goto bail5;
	}

	c2_mq_req_init(&qp->sq_mq,
		       be32_to_cpu(reply->sq_mq_index),
		       q_size,
		       msg_size,
		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
		       mmap,					/* peer */
		       C2_MQ_ADAPTER_TARGET);

	/* Initialize the RQ mq */
	q_size = be32_to_cpu(reply->rq_depth);
	msg_size = be32_to_cpu(reply->rq_msg_size);
	peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
	mmap = ioremap_nocache(peer_pa, mmap_size);
	if (!mmap) {
		err = -ENOMEM;
		goto bail6;
	}

	c2_mq_req_init(&qp->rq_mq,
		       be32_to_cpu(reply->rq_mq_index),
		       q_size,
		       msg_size,
		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
		       mmap,					/* peer */
		       C2_MQ_ADAPTER_TARGET);

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	return 0;

bail6:
	iounmap(qp->sq_mq.peer);
bail5:
	destroy_qp(c2dev, qp);
bail4:
	vq_repbuf_free(c2dev, reply);
bail3:
	vq_req_free(c2dev, vq_req);
bail2:
	c2_free_mqsp(qp->rq_mq.shared);
bail1:
	c2_free_mqsp(qp->sq_mq.shared);
bail0:
	c2_free_qpn(c2dev, qp->qpn);
	return err;
}
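
/*
 * Teardown order in c2_free_qp() matters: the QPN is removed from the idr
 * while both CQ locks are held so the poll path cannot look the QP up
 * again, the QP is then destroyed in the RNIC, any CQEs it left behind are
 * voided, and only after the MQs are unmapped does the function wait for
 * the reference count to drop to zero.
 */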
void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
	struct c2_cq *send_cq;
	struct c2_cq *recv_cq;

	send_cq = to_c2cq(qp->ibqp.send_cq);
	recv_cq = to_c2cq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	c2_free_qpn(c2dev, qp->qpn);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	/*
	 * Destroy qp in the rnic...
	 */
	destroy_qp(c2dev, qp);

	/*
	 * Mark any unreaped CQEs as null and void.
	 */
	c2_cq_clean(c2dev, qp, send_cq->cqn);
	if (send_cq != recv_cq)
		c2_cq_clean(c2dev, qp, recv_cq->cqn);
	/*
	 * Unmap the MQs and return the shared pointers
	 * to the message pool.
	 */
	iounmap(qp->sq_mq.peer);
	iounmap(qp->rq_mq.peer);
	c2_free_mqsp(qp->sq_mq.shared);
	c2_free_mqsp(qp->rq_mq.shared);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));
}
/*
 * Function: move_sgl
 *
 * Move an SGL from the user's work request struct into a CCIL Work Request
 * message, swapping to WR byte order and ensuring the total length doesn't
 * overflow.
 *
 * IN:
 * dst	- ptr to CCIL Work Request message SGL memory.
 * src	- ptr to the consumer's SGL memory.
 *
 * Return: CCIL status codes.
 */
static int
move_sgl(struct c2_data_addr *dst, struct ib_sge *src, int count, u32 *p_len,
	 u8 *actual_count)
{
	u32 tot = 0;		/* running total */
	u8 acount = 0;		/* running total non-0 len sge's */

	while (count > 0) {
		/*
		 * If the addition of this SGE causes the
		 * total SGL length to exceed 2^32-1, then
		 * fail-n-bail.
		 *
		 * If the current total plus the next element length
		 * wraps, then it will go negative and be less than the
		 * current total...
		 */
		if ((tot + src->length) < tot)
			return -EINVAL;
		/*
		 * Bug: 1456 (as well as 1498 & 1643)
		 * Skip over any sge's supplied with len=0
		 */
		if (src->length) {
			tot += src->length;
			dst->stag = cpu_to_be32(src->lkey);
			dst->to = cpu_to_be64(src->addr);
			dst->length = cpu_to_be32(src->length);
			dst++;
			acount++;
		}
		src++;
		count--;
	}

	if (acount == 0) {
		/*
		 * Bug: 1476 (as well as 1498, 1456 and 1643)
		 * Set up the SGL in the WR to make it easier for the RNIC.
		 * This way, the FW doesn't have to deal with special cases.
		 * Setting length=0 should be sufficient.
		 */
		dst->stag = 0;
		dst->to = 0;
		dst->length = 0;
	}

	*p_len = tot;
	*actual_count = acount;
	return 0;
}
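
/*
 * The wrap check above relies on unsigned arithmetic: for example, with
 * tot == 0xFFFFFFF0 and src->length == 0x20, tot + src->length wraps to
 * 0x10, which is less than tot, so the SGL is rejected with -EINVAL.
 */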
/*
 * Function: c2_activity (private function)
 *
 * Description:
 * Post an mq index to the host->adapter activity fifo.
 *
 * IN:
 * c2dev	- ptr to c2dev structure
 * mq_index	- mq index to post
 * shared	- value most recently written to shared
 *
 * Return: none
 */
static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
{
	/*
	 * First read the register to see if the FIFO is full, and if so,
	 * spin until it's not.  This isn't perfect -- there is no
	 * synchronization among the clients of the register, but in
	 * practice it prevents multiple CPUs from hammering the bus
	 * with PCI RETRY. Note that when this does happen, the card
	 * cannot get on the bus and the card and system hang in a
	 * deadlock -- thus the need for this code. [TOT]
	 */
	while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(0);
	}

	__raw_writel(C2_HINT_MAKE(mq_index, shared),
		     c2dev->regs + PCI_BAR0_ADAPTER_HINT);
}
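
/*
 * The hint value written above uses the layout the C2_HINT_* macros
 * describe: the MQ index in the upper half-word and the latest shared
 * (hint) count in the lower half-word.  On the read side, bit 31 of the
 * same register appears to report that the activity FIFO is full, which
 * is what the busy-wait above polls.
 */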
/*
 * Function: qp_wr_post
 *
 * Description:
 * This in-line function allocates a MQ msg, then moves the host-copy of
 * the completed WR into msg.  Then it posts the message.
 *
 * IN:
 * q	- ptr to user MQ.
 * wr	- ptr to host-copy of the WR.
 * qp	- ptr to user qp
 * size	- Number of bytes to post.  Assumed to be divisible by 4.
 *
 * Return: CCIL status codes.
 */
static int qp_wr_post(struct c2_mq *q, union c2wr *wr, struct c2_qp *qp, u32 size)
{
	union c2wr *msg;

	msg = c2_mq_alloc(q);
	if (msg == NULL)
		return -EINVAL;

	((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);

	/*
	 * Since all header fields in the WR are the same as the
	 * CQE, set the following so the adapter need not.
	 */
	c2_wr_set_result(wr, CCERR_PENDING);

	/* Copy the wr down to the adapter */
	memcpy((void *) msg, (void *) wr, size);

	c2_mq_produce(q);
	return 0;
}
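
/*
 * c2_post_send() walks the chain of work requests, building a host copy of
 * each one, pushing it into the SQ message queue with qp_wr_post(), and
 * then ringing the adapter's activity FIFO.  On the first failure the loop
 * stops and *bad_wr points at the WR that could not be posted.
 */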
int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
		 struct ib_send_wr **bad_wr)
{
	struct c2_dev *c2dev = to_c2dev(ibqp->device);
	struct c2_qp *qp = to_c2qp(ibqp);
	union c2wr wr;
	int err = 0;

	u32 flags;
	u32 tot_len;
	u8 actual_sge_count;
	u32 msg_size;

	if (qp->state > IB_QPS_RTS)
		return -EINVAL;

	while (ib_wr) {

		flags = 0;
		wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
		if (ib_wr->send_flags & IB_SEND_SIGNALED) {
			flags |= SQ_SIGNALED;
		}

		switch (ib_wr->opcode) {
		case IB_WR_SEND:
			if (ib_wr->send_flags & IB_SEND_SOLICITED) {
				c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
				msg_size = sizeof(struct c2wr_send_req);
			} else {
				c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
				msg_size = sizeof(struct c2wr_send_req);
			}

			wr.sqwr.send.remote_stag = 0;
			msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge;
			if (ib_wr->num_sge > qp->send_sgl_depth) {
				err = -EINVAL;
				break;
			}
			if (ib_wr->send_flags & IB_SEND_FENCE) {
				flags |= SQ_READ_FENCE;
			}
			err = move_sgl((struct c2_data_addr *) &(wr.sqwr.send.data),
				       ib_wr->sg_list,
				       ib_wr->num_sge,
				       &tot_len, &actual_sge_count);
			wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
			c2_wr_set_sge_count(&wr, actual_sge_count);
			break;
		case IB_WR_RDMA_WRITE:
			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
			msg_size = sizeof(struct c2wr_rdma_write_req) +
			    (sizeof(struct c2_data_addr) * ib_wr->num_sge);
			if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
				err = -EINVAL;
				break;
			}
			if (ib_wr->send_flags & IB_SEND_FENCE) {
				flags |= SQ_READ_FENCE;
			}
			wr.sqwr.rdma_write.remote_stag =
			    cpu_to_be32(ib_wr->wr.rdma.rkey);
			wr.sqwr.rdma_write.remote_to =
			    cpu_to_be64(ib_wr->wr.rdma.remote_addr);
			err = move_sgl((struct c2_data_addr *)
				       &(wr.sqwr.rdma_write.data),
				       ib_wr->sg_list,
				       ib_wr->num_sge,
				       &tot_len, &actual_sge_count);
			wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
			c2_wr_set_sge_count(&wr, actual_sge_count);
			break;
		case IB_WR_RDMA_READ:
			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
			msg_size = sizeof(struct c2wr_rdma_read_req);

			/* iWARP only supports 1 SGE for RDMA reads */
			if (ib_wr->num_sge > 1) {
				err = -EINVAL;
				break;
			}

			/*
			 * Move the local and remote stag/to/len into the WR.
			 */
			wr.sqwr.rdma_read.local_stag =
			    cpu_to_be32(ib_wr->sg_list->lkey);
			wr.sqwr.rdma_read.local_to =
			    cpu_to_be64(ib_wr->sg_list->addr);
			wr.sqwr.rdma_read.remote_stag =
			    cpu_to_be32(ib_wr->wr.rdma.rkey);
			wr.sqwr.rdma_read.remote_to =
			    cpu_to_be64(ib_wr->wr.rdma.remote_addr);
			wr.sqwr.rdma_read.length =
			    cpu_to_be32(ib_wr->sg_list->length);
			break;
		default:
			/* error */
			msg_size = 0;
			err = -EINVAL;
			break;
		}

		/*
		 * If we had an error on the last wr build, then
		 * break out.  Possible errors include a bogus WR
		 * type and a bogus SGL length...
		 */
		if (err)
			break;

		/* Store flags */
		c2_wr_set_flags(&wr, flags);

		/* Post the WR to the SQ message queue */
		err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
		if (err)
			break;

		/*
		 * Enqueue mq index to activity FIFO.
		 */
		c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);

		ib_wr = ib_wr->next;
	}

	if (err)
		*bad_wr = ib_wr;
	return err;
}
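
/*
 * c2_post_receive() mirrors the send path for the receive queue.  RQ WRs
 * are fixed-size (qp->rq_mq.msg_size), so only the SGL has to be
 * marshalled before the message is posted and the activity FIFO poked.
 */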
int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
		    struct ib_recv_wr **bad_wr)
{
	struct c2_dev *c2dev = to_c2dev(ibqp->device);
	struct c2_qp *qp = to_c2qp(ibqp);
	union c2wr wr;
	int err = 0;

	if (qp->state > IB_QPS_RTS)
		return -EINVAL;

	/*
	 * Try and post each work request
	 */
	while (ib_wr) {
		u32 tot_len;
		u8 actual_sge_count;

		if (ib_wr->num_sge > qp->recv_sgl_depth) {
			err = -EINVAL;
			break;
		}

		/*
		 * Create local host-copy of the WR
		 */
		wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
		c2_wr_set_id(&wr, CCWR_RECV);
		c2_wr_set_flags(&wr, 0);

		/* sge_count is limited to eight bits. */
		BUG_ON(ib_wr->num_sge >= 256);
		err = move_sgl((struct c2_data_addr *) &(wr.rqwr.data),
			       ib_wr->sg_list,
			       ib_wr->num_sge, &tot_len, &actual_sge_count);
		c2_wr_set_sge_count(&wr, actual_sge_count);

		/*
		 * If we had an error on the last wr build, then
		 * break out.  Possible errors include a bogus WR
		 * type and a bogus SGL length...
		 */
		if (err)
			break;

		err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
		if (err)
			break;

		/*
		 * Enqueue mq index to activity FIFO
		 */
		c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);

		ib_wr = ib_wr->next;
	}

	if (err)
		*bad_wr = ib_wr;
	return err;
}
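
/*
 * The per-device QP table is just the idr plus its lock; it is set up at
 * probe time and torn down at remove time by the two helpers below.
 */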
void __devinit c2_init_qp_table(struct c2_dev *c2dev)
{
	spin_lock_init(&c2dev->qp_table.lock);
	idr_init(&c2dev->qp_table.idr);
}

void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev)
{
	idr_destroy(&c2dev->qp_table.idr);
}