/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};
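/*
 * The structures below mirror the hardware SRQ context layouts: they
 * are built in a mailbox buffer and handed to the HCA firmware with
 * the SW2HW_SRQ command.  Tavor and the mem-free (Arbel) HCAs use
 * different layouts, so each gets its own definition.
 */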
struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};
struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};
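/*
 * Small SRQs fit in one contiguous ("direct") buffer; larger ones are
 * split across pages, so a WQE's address must be computed through the
 * page list.  get_wqe() hides that difference from the rest of the
 * code.
 */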
static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}
/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}
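/*
 * Both context-init routines below pick the UAR (doorbell page) to
 * associate with the SRQ: a userspace SRQ uses the UAR mapped into the
 * owning process's context, while a kernel SRQ uses the driver's UAR.
 */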
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}
static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize, max;

	memset(context, 0, sizeof *context);

	/*
	 * Put max in a temporary variable to work around gcc bug
	 * triggered by ilog2() on sparc64.
	 */
	max = srq->max;
	logsize = ilog2(max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}
static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}
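/*
 * A buffer is only allocated here for kernel SRQs; a userspace SRQ
 * (pd->ibpd.uobject set) allocates and registers its own queue buffer,
 * so in that case there is nothing for the kernel to do.
 */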
static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		wqe = get_wqe(srq, i);

		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}
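/*
 * Note that srq->max ends up one larger than the caller's max_wr: the
 * free-list convention above always leaves one WQE unusable, so one
 * extra is allocated and max_wr is reported back as srq->max - 1.
 * Mem-free HCAs additionally require the queue size to be a power of
 * two.
 */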
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);
err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);
err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	return err;
}
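/*
 * srq->refcount is protected by the SRQ table lock rather than being
 * an atomic: the async event path bumps it under that lock, and
 * mthca_free_srq() below waits for it to drop before tearing the SRQ
 * down.
 */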
static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}
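/*
 * Only the SRQ limit can be modified: arming the SRQ with ARM_SRQ sets
 * the limit watermark, and the HCA raises an SRQ limit reached event
 * once the number of posted receive WQEs drops below that limit.
 * Resizing (IB_SRQ_MAX_WR) is not supported.
 */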
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}
int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	u8 status;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}
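/*
 * The async event path looks the SRQ up and takes a reference under
 * the table lock, so the SRQ cannot be freed out from under the event
 * handler; mthca_free_srq() blocks until the reference is dropped.
 */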
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}
/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	if (likely(srq->first_free >= 0))
		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
	else
		srq->first_free = ind;

	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}
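/*
 * On Tavor, receive WQEs are chained through their next segments, and
 * the doorbell's count field is only 8 bits wide (srqn << 8 | nreq),
 * so at most MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs can be announced
 * per doorbell.  The loop below rings the doorbell and starts a new
 * chain whenever that limit is reached.
 */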
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind = srq->first_free;
		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);
		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
			doorbell[1] = cpu_to_be32(srq->srqn << 8);

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(doorbell,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);

		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}
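/*
 * Mem-free (Arbel) HCAs have no MMIO receive doorbell for SRQs:
 * instead the driver keeps a doorbell record in memory that the HCA
 * reads, so posting just links the WQEs and updates the counter in
 * that record.
 */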
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;
		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);
		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->nda_op =
			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}
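/*
 * Worked example for the Tavor calculation below (values illustrative):
 * with max_desc_sz == 512, the largest power of 2 that fits is 512,
 * and each WQE needs a 16-byte next segment plus 16 bytes per data
 * segment, giving (512 - 16) / 16 = 31 scatter/gather entries.
 */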
int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}
int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}
void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}