/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"
#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						       BITS_PER_PAGE, off)
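
/*
 * Illustrative example (hypothetical numbers, assuming 4 KB pages so that
 * BITS_PER_PAGE == 32768): QPN 40000 is tracked in qpt->map[40000 / 32768]
 * == map[1] at bit offset 40000 & (32768 - 1) == 7232, and mk_qpn()
 * inverts the mapping: 1 * 32768 + 7232 == 40000.
 */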

/*
 * Convert the AETH credit code into the number of credits.
 */
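/*
 * Background note (an assumption from the IBA AETH definition, not text
 * recovered from this file): the credit field is only 5 bits wide, so the
 * 31 table entries grow roughly geometrically rather than linearly, and
 * the remaining all-ones code (IPATH_AETH_CREDIT_INVAL) means "no credit
 * information".
 */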
static u32 credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};
static u32 alloc_qpn(struct ipath_qp_table *qpt)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	unsigned long flags;

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			/*
			 * Free the page if someone raced with us
			 * installing it.
			 */
			spin_lock_irqsave(&qpt->lock, flags);
			if (map->page)
				free_page(page);
			else
				map->page = (void *)page;
			spin_unlock_irqrestore(&qpt->lock, flags);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					return qpn;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all of the existing pages before
		 * increasing the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	return 0;	/* no free QPN available */
}
static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}
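
/*
 * Note for the allocator below: the InfiniBand spec reserves QPN 0 for the
 * SMI and QPN 1 for the GSI on each port, which is why ipath_alloc_qpn()
 * gives those two QP types fixed numbers and never calls alloc_qpn() for
 * them.
 */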
/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	unsigned long flags;
	u32 qpn;

	if (type == IB_QPT_SMI)
		qpn = 0;
	else if (type == IB_QPT_GSI)
		qpn = 1;
	else {
		/* Allocate the next available QPN */
		qpn = alloc_qpn(qpt);
		if (!qpn)
			return -ENOMEM;
	}
	qp->ibqp.qp_num = qpn;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	qpn %= qpt->max;
	qp->next = qpt->table[qpn];
	qpt->table[qpn] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);

	return 0;
}
/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;
	int fnd = 0;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			fnd = 1;
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);

	if (!fnd)
		return;

	/* If QPN is not reserved, mark QPN free in the bitmap. */
	if (qp->ibqp.qp_num > 1)
		free_qpn(qpt, qp->ibqp.qp_num);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
}
/**
 * ipath_free_all_qps - remove all QPs from the table
 * @qpt: the QP table to empty
 */
void ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp, *nqp;
	u32 n;

	for (n = 0; n < qpt->max; n++) {
		spin_lock_irqsave(&qpt->lock, flags);
		qp = qpt->table[n];
		qpt->table[n] = NULL;
		spin_unlock_irqrestore(&qpt->lock, flags);

		while (qp) {
			nqp = qp->next;
			if (qp->ibqp.qp_num > 1)
				free_qpn(qpt, qp->ibqp.qp_num);
			if (!atomic_dec_and_test(&qp->refcount) ||
			    !ipath_destroy_qp(&qp->ibqp))
				ipath_dbg(KERN_INFO "QP memory leak!\n");
			qp = nqp;
		}
	}

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
		if (qpt->map[n].page)
			free_page((unsigned long)qpt->map[n].page);
	}
}
/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}
/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 */
static void ipath_reset_qp(struct ipath_qp *qp)
{
	qp->qp_access_flags = 0;
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_wrid_valid = 0;
	qp->s_rnr_timeout = 0;
	qp->s_wait_credit = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
}
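
/*
 * Note on ipath_reset_qp() above: the receive queue head/tail are only
 * cleared when qp->r_rq.wq is non-NULL because QPs attached to an SRQ have
 * no receive queue of their own (ipath_create_qp() leaves r_rq.wq NULL in
 * that case).
 */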
/**
 * ipath_error_qp - put a QP into an error state
 * @qp: the QP to put into an error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * The QP s_lock should be held and interrupts disabled.
 */
void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;

	ipath_dbg(KERN_INFO "QP%d/%d in error state\n",
		  qp->ibqp.qp_num, qp->remote_qpn);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	wc.qp_num = qp->ibqp.qp_num;
	wc.dlid_path_bits = 0;

	if (qp->r_wrid_valid) {
		qp->r_wrid_valid = 0;
		wc.status = err;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

		wc.wr_id = wqe->wr.wr_id;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
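		/*
		 * Note: the RWQ, including these head and tail indices, may
		 * be mmap()ed into userspace (see ipath_create_qp()), so the
		 * values are clamped before use in case the user process
		 * stored garbage in them.
		 */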
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		wc.opcode = IB_WC_RECV;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	}
}
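
/*
 * Usage sketch for ipath_modify_qp() below (illustrative, not from the
 * original source): a ULP normally walks an RC QP through
 * RESET -> INIT -> RTR -> RTS with successive ib_modify_qp() calls, e.g.
 * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS for
 * INIT, adding IB_QP_AV, IB_QP_PATH_MTU, IB_QP_DEST_QPN, IB_QP_RQ_PSN,
 * IB_QP_MAX_DEST_RD_ATOMIC and IB_QP_MIN_RNR_TIMER for RTR, then
 * IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
 * IB_QP_TIMEOUT | IB_QP_MAX_QP_RD_ATOMIC for RTS; ib_modify_qp_is_ok()
 * checks that the mask matches the requested transition.
 */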
/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&qp->s_lock, flags);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_PATH_MTU)
		if (attr->path_mtu > IB_MTU_4096)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > 1)
			goto inval;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		if (attr->max_rd_atomic > 1)
			goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		ipath_reset_qp(qp);
		break;

	case IB_QPS_ERR:
		ipath_error_qp(qp, IB_WC_GENERAL_ERR);
		break;

	default:
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV)
		qp->remote_ah_attr = attr->ah_attr;

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	qp->state = new_state;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ret = 0;
	goto bail;

inval:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	ret = -EINVAL;

bail:
	return ret;
}
int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = 0;
	attr->max_rd_atomic = 1;
	attr->max_dest_rd_atomic = 1;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;

	return 0;
}
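
/*
 * Note on ipath_query_qp() above: max_rd_atomic and max_dest_rd_atomic are
 * reported as 1 because ipath_modify_qp() rejects any request for more
 * than one outstanding RDMA read/atomic operation.
 */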
/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = qp->r_msn & IPATH_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct ipath_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
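		/*
		 * Worked example (hypothetical values): with qp->r_rq.size
		 * == 64, head == 3 and tail == 60, head - tail is -57, which
		 * the wrap-around above corrects to -57 + 64 == 7 available
		 * RWQEs.
		 */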
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits) {
				max = x;
			} else {
				if (min == x)
					break;
				min = x;
			}
		}
		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
	}

	return cpu_to_be32(aeth);
}
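
/*
 * Note for ipath_create_qp() below: three separate allocations back a QP -
 * the send work queue (ipath_swqe entries plus their SGEs) is vmalloc()ed,
 * the QP structure with the receive scatter/gather list appended is
 * kmalloc()ed, and the receive work queue is vmalloc_user()ed so that it
 * can later be mmap()ed into the owning user process.
 */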
/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
	    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	if (init_attr->cap.max_send_sge +
	    init_attr->cap.max_recv_sge +
	    init_attr->cap.max_send_wr +
	    init_attr->cap.max_recv_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		/* FALLTHROUGH */
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(*qp);
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			sz += sizeof(*qp->r_sg_list) *
				srq->rq.max_sge;
		} else
			sz += sizeof(*qp->r_sg_list) *
				init_attr->cap.max_recv_sge;
		qp = kmalloc(sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (init_attr->srq) {
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		tasklet_init(&qp->s_task, ipath_do_ruc_send,
			     (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			goto bail_rwq;
		}
		qp->ip = NULL;
		ipath_reset_qp(qp);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
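	/*
	 * Note (assumption based on the comment above): the offset copied
	 * back through udata is what the userspace verbs library later
	 * passes to mmap() on the device file, and ipath_mmap() finds the
	 * RWQ by searching the dev->pending_mmaps list this block appends
	 * to.
	 */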
	if (udata && udata->outlen >= sizeof(__u64)) {
		struct ipath_mmap_info *ip;
		__u64 offset = (__u64) qp->r_rq.wq;

		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_rwq;
		}

		/* Allocate info for ipath_mmap(). */
		ip = kmalloc(sizeof(*ip), GFP_KERNEL);
		if (!ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_rwq;
		}
		qp->ip = ip;
		ip->context = ibpd->uobject->context;
		ip->obj = qp->r_rq.wq;
		kref_init(&ip->ref);
		ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
				      qp->r_rq.size * sz);
		spin_lock_irq(&dev->pending_lock);
		ip->next = dev->pending_mmaps;
		dev->pending_mmaps = ip;
		spin_unlock_irq(&dev->pending_lock);
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	kfree(qp->ip);
bail_rwq:
	vfree(qp->r_rq.wq);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
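
/*
 * Note for ipath_destroy_qp() below: teardown is ordered so that no new
 * work can be started (the QP is moved to the error state and the send
 * tasklet killed) before the QP is pulled off the timeout/PIO wait lists
 * and unhashed from the QPN table; ipath_free_qp() then waits for the
 * reference count to drop before the memory is freed.
 */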
/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	spin_unlock_irqrestore(&qp->s_lock, flags);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	/* Stop the sending tasklet. */
	tasklet_kill(&qp->s_task);

	/* Make sure the QP isn't on the timeout list. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/*
	 * Make sure that the QP is not in the QPN table so receive
	 * interrupts will discard packets for this QP.  XXX Also remove QP
	 * from multicast table.
	 */
	if (atomic_read(&qp->refcount) != 0)
		ipath_free_qp(&dev->qp_table, qp);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}
/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}

	ret = 0;

bail:
	return ret;
}
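
/*
 * Note on ipath_init_qp_table() above: only the hash table and bookkeeping
 * fields are allocated here; the QPN bitmap pages (qp_table.map[i].page)
 * start out NULL and are allocated lazily by alloc_qpn() the first time a
 * QPN in that range is needed.
 */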
/**
 * ipath_sqerror_qp - put a QP's send queue into an error state
 * @qp: the QP whose send queue will be put into an error state
 * @wc: the WC responsible for putting the QP in this state
 *
 * Flushes the send work queue.
 * The QP s_lock should be held.
 */
void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

	ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n",
		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
	if (++qp->s_last >= qp->s_size)
		qp->s_last = 0;

	wc->status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		wc->wr_id = wqe->wr.wr_id;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		wqe = get_swqe_ptr(qp, qp->s_last);
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->state = IB_QPS_SQE;
}
/**
 * ipath_get_credit - update the QP's send credit limit from an AETH
 * @qp: the QP to which the AETH credit applies
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
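	/*
	 * Worked example (hypothetical values): if the AETH carries MSN 100
	 * and its credit code decodes to 5 RWQEs via credit_table[], the
	 * candidate limit is (100 + 5) & IPATH_MSN_MASK == 105, and s_lsn is
	 * only advanced if 105 is newer than the current s_lsn in 24-bit
	 * modular arithmetic (ipath_cmp24() > 0).
	 */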
	if (credit == IPATH_AETH_CREDIT_INVAL)
		qp->s_lsn = (u32) -1;
	else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}

	/* Restart sending if it was blocked due to lack of credits. */
	if (qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		tasklet_hi_schedule(&qp->s_task);
}