/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with either the qp->s_lock or the qp->r_rq.lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
        unsigned long flags;
        u32 next;

        spin_lock_irqsave(&cq->lock, flags);

        /* Compute the next head index, wrapping at the end of the ring. */
        if (cq->head == cq->ibcq.cqe)
                next = 0;
        else
                next = cq->head + 1;
        if (unlikely(next == cq->tail)) {
                /* The queue is full; report an overflow to the consumer. */
                spin_unlock_irqrestore(&cq->lock, flags);
                if (cq->ibcq.event_handler) {
                        struct ib_event ev;

                        ev.device = cq->ibcq.device;
                        ev.element.cq = &cq->ibcq;
                        ev.event = IB_EVENT_CQ_ERR;
                        cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
                }
                return;
        }
        cq->queue[cq->head] = *entry;
        cq->head = next;

        if (cq->notify == IB_CQ_NEXT_COMP ||
            (cq->notify == IB_CQ_SOLICITED && solicited)) {
                cq->notify = IB_CQ_NONE;
                cq->triggered++;
                /*
                 * This will cause send_complete() to be called in
                 * another thread.
                 */
                tasklet_hi_schedule(&cq->comptask);
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        if (entry->status != IB_WC_SUCCESS)
                to_idev(cq->ibcq.device)->n_wqe_errs++;
}
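
/*
 * Editorial note (not in the original source): the queue is a ring of
 * cq->ibcq.cqe + 1 slots indexed by head (producer) and tail (consumer).
 * Worked example: with cqe == 3 there are 4 slots; if head == 2 and
 * tail == 3, the next insert computes next == 3 == tail, so the ring is
 * treated as full and IB_EVENT_CQ_ERR is reported.  One slot is always
 * left empty so that head == tail unambiguously means "empty".
 */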

/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to the array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
        struct ipath_cq *cq = to_icq(ibcq);
        unsigned long flags;
        int npolled;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
                if (cq->tail == cq->head)
                        break;
                *entry = cq->queue[cq->tail];
                /* Advance the tail index, wrapping at the end of the ring. */
                if (cq->tail == cq->ibcq.cqe)
                        cq->tail = 0;
                else
                        cq->tail++;
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        return npolled;
}
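
/*
 * Usage sketch (editorial; the array size and the process_wc() helper are
 * hypothetical): a consumer typically drains the CQ through the generic
 * verbs wrapper, e.g.
 *
 *      struct ib_wc wc[8];
 *      int i, n;
 *
 *      while ((n = ib_poll_cq(ibcq, 8, wc)) > 0)
 *              for (i = 0; i < n; i++)
 *                      process_wc(&wc[i]);
 */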

static void send_complete(unsigned long data)
{
        struct ipath_cq *cq = (struct ipath_cq *)data;

        /*
         * The completion handler will most likely rearm the notification
         * and poll for all pending entries.  If a new completion entry
         * is added while we are in this routine, tasklet_hi_schedule()
         * won't call us again until we return so we check triggered to
         * see if we need to call the handler again.
         */
        for (;;) {
                u8 triggered = cq->triggered;

                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

                if (cq->triggered == triggered)
                        return;
        }
}
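
/*
 * Editorial note: the loop above closes a race.  If ipath_cq_enter() runs
 * while the handler is executing, it increments cq->triggered and
 * schedules the tasklet; re-checking triggered lets this invocation call
 * the handler again for the new entry rather than relying solely on the
 * rescheduled tasklet run.
 */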

/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @context: unused by the InfiniPath driver
 * @udata: unused by the InfiniPath driver
 *
 * Returns a pointer to the completion queue or a negative errno value
 * on failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
{
        struct ipath_cq *cq;
        struct ib_wc *wc;
        struct ib_cq *ret;

        /* The completion queue structure itself is small and fixed size. */
        cq = kmalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }
        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        wc = vmalloc(sizeof(*wc) * (entries + 1));
        if (!wc) {
                kfree(cq);
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }
        /*
         * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
         * The number of entries should be >= the number requested or return
         * an error.
         */
        cq->ibcq.cqe = entries;
        cq->notify = IB_CQ_NONE;
        cq->triggered = 0;
        spin_lock_init(&cq->lock);
        tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
        cq->head = 0;
        cq->tail = 0;
        cq->queue = wc;
        ret = &cq->ibcq;

bail:
        return ret;
}
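
/*
 * Usage sketch (editorial; the handler names, context, and entry count are
 * hypothetical, and the signature is the five-argument form of this kernel
 * generation): the generic verbs code reaches this function through
 * ib_create_cq(), e.g.
 *
 *      struct ib_cq *cq;
 *
 *      cq = ib_create_cq(ibdev, my_comp_handler, my_event_handler,
 *                        my_context, 128);
 *      if (IS_ERR(cq))
 *              return PTR_ERR(cq);
 */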

/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
        struct ipath_cq *cq = to_icq(ibcq);

        tasklet_kill(&cq->comptask);
        vfree(cq->queue);
        kfree(cq);

        return 0;
}
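
/*
 * Editorial note: tasklet_kill() waits for any scheduled send_complete()
 * to finish before returning, so the tasklet cannot dereference cq after
 * the queue memory is freed.  Freeing in this order avoids a use-after-free.
 */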

/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
        struct ipath_cq *cq = to_icq(ibcq);
        unsigned long flags;

        spin_lock_irqsave(&cq->lock, flags);
        /*
         * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
         * any other transitions.
         */
        if (cq->notify != IB_CQ_NEXT_COMP)
                cq->notify = notify;
        spin_unlock_irqrestore(&cq->lock, flags);

        return 0;
}
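
/**
 * ipath_resize_cq - change the size of the completion queue
 * @ibcq: the completion queue to resize
 * @cqe: the new number of entries the CQ must be able to hold
 * @udata: unused by the InfiniPath driver
 *
 * Returns 0 for success, or a negative errno value on failure.
 *
 * Called by ib_resize_cq() in the generic verbs code.
 */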
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        struct ipath_cq *cq = to_icq(ibcq);
        struct ib_wc *wc, *old_wc;
        u32 n;
        int ret;

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        wc = vmalloc(sizeof(*wc) * (cqe + 1));
        if (!wc) {
                ret = -ENOMEM;
                goto bail;
        }

        spin_lock_irq(&cq->lock);
        /* Count the number of entries currently queued. */
        if (cq->head < cq->tail)
                n = cq->ibcq.cqe + 1 + cq->head - cq->tail;
        else
                n = cq->head - cq->tail;
        if (unlikely((u32)cqe < n)) {
                /* The new queue is too small to hold the pending entries. */
                spin_unlock_irq(&cq->lock);
                vfree(wc);
                ret = -EOVERFLOW;
                goto bail;
        }
        /* Copy the pending entries to the start of the new queue. */
        for (n = 0; cq->tail != cq->head; n++) {
                wc[n] = cq->queue[cq->tail];
                if (cq->tail == cq->ibcq.cqe)
                        cq->tail = 0;
                else
                        cq->tail++;
        }
        cq->ibcq.cqe = cqe;
        cq->head = n;
        cq->tail = 0;
        old_wc = cq->queue;
        cq->queue = wc;
        spin_unlock_irq(&cq->lock);

        vfree(old_wc);
        ret = 0;

bail:
        return ret;
}
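
/*
 * Editorial worked example for the occupancy computation above: with
 * cq->ibcq.cqe == 7 (8 slots), head == 2 and tail == 6, head < tail so
 * n = 7 + 1 + 2 - 6 = 4 entries are queued (slots 6, 7, 0, 1).  Resizing
 * to any cqe >= 4 succeeds; the copy loop relocates those 4 entries to
 * slots 0..3 of the new buffer, leaving head == 4 and tail == 0.
 */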