/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with either the qp->s_lock or qp->r_rq.lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
        unsigned long flags;
        u32 next;

        spin_lock_irqsave(&cq->lock, flags);

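        /*
         * The queue is a circular buffer of cq->ibcq.cqe + 1 slots; if
         * advancing head would run into tail, the queue is full and the
         * new completion cannot be stored, so a CQ error event is
         * reported instead.
         */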
        if (cq->head == cq->ibcq.cqe)
                next = 0;
        else
                next = cq->head + 1;
        if (unlikely(next == cq->tail)) {
                spin_unlock_irqrestore(&cq->lock, flags);
                if (cq->ibcq.event_handler) {
                        struct ib_event ev;

                        ev.device = cq->ibcq.device;
                        ev.element.cq = &cq->ibcq;
                        ev.event = IB_EVENT_CQ_ERR;
                        cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
                }
                return;
        }
        cq->queue[cq->head] = *entry;
        cq->head = next;

        if (cq->notify == IB_CQ_NEXT_COMP ||
            (cq->notify == IB_CQ_SOLICITED && solicited)) {
                cq->notify = IB_CQ_NONE;
                cq->triggered++;
                /*
                 * This will cause send_complete() to be called in
                 * another thread.
                 */
                tasklet_hi_schedule(&cq->comptask);
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        if (entry->status != IB_WC_SUCCESS)
                to_idev(cq->ibcq.device)->n_wqe_errs++;
}
/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
        struct ipath_cq *cq = to_icq(ibcq);
        unsigned long flags;
        int npolled;

        spin_lock_irqsave(&cq->lock, flags);

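        /*
         * Copy completed entries out of the circular buffer, starting at
         * tail and stopping either when num_entries have been returned or
         * when tail catches up with head (the queue is empty).
         */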
        for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
                if (cq->tail == cq->head)
                        break;
                *entry = cq->queue[cq->tail];
                if (cq->tail == cq->ibcq.cqe)
                        cq->tail = 0;
                else
                        cq->tail++;
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        return npolled;
}
static void send_complete(unsigned long data)
{
        struct ipath_cq *cq = (struct ipath_cq *)data;

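        /*
         * This runs as a tasklet; @data is the struct ipath_cq pointer
         * registered with tasklet_init() in ipath_create_cq().
         */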
        /*
         * The completion handler will most likely rearm the notification
         * and poll for all pending entries.  If a new completion entry
         * is added while we are in this routine, tasklet_hi_schedule()
         * won't call us again until we return so we check triggered to
         * see if we need to call the handler again.
         */
        for (;;) {
                u8 triggered = cq->triggered;

                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

                if (cq->triggered == triggered)
                        return;
        }
}
/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @context: unused by the InfiniPath driver
 * @udata: unused by the InfiniPath driver
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
{
        struct ipath_cq *cq;
        struct ib_wc *wc;
        struct ib_cq *ret;

        /*
         * Need to use vmalloc() if we want to support large #s of
         * entries.
         */
        cq = kmalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        wc = vmalloc(sizeof(*wc) * (entries + 1));
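        /*
         * One extra slot is allocated so that head == tail always means
         * "empty" while the queue can still hold the requested number of
         * completions.
         */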
        if (!wc) {
                kfree(cq);
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        /*
         * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
         * The number of entries should be >= the number requested or return
         * an error.
         */
        cq->ibcq.cqe = entries;
        cq->notify = IB_CQ_NONE;
        cq->triggered = 0;
        spin_lock_init(&cq->lock);
        tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
        cq->head = 0;
        cq->tail = 0;
        cq->queue = wc;

        ret = &cq->ibcq;

bail:
        return ret;
}
/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
        struct ipath_cq *cq = to_icq(ibcq);

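        /*
         * Make sure any send_complete() tasklet that is still scheduled
         * has finished before the queue memory is freed.
         */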
        tasklet_kill(&cq->comptask);
        vfree(cq->queue);
        kfree(cq);

        return 0;
}
/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
        struct ipath_cq *cq = to_icq(ibcq);
        unsigned long flags;

        spin_lock_irqsave(&cq->lock, flags);
        /*
         * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
         * any other transitions.
         */
        if (cq->notify != IB_CQ_NEXT_COMP)
                cq->notify = notify;
        spin_unlock_irqrestore(&cq->lock, flags);
        return 0;
}
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        struct ipath_cq *cq = to_icq(ibcq);
        struct ib_wc *wc, *old_wc;
        u32 n;
        int ret;

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        wc = vmalloc(sizeof(*wc) * (cqe + 1));
        if (!wc) {
                ret = -ENOMEM;
                goto bail;
        }

        spin_lock_irq(&cq->lock);
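        /*
         * Count how many completions are currently queued; all of them
         * must fit in the new buffer or the resize fails.
         */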
        if (cq->head < cq->tail)
                n = cq->ibcq.cqe + 1 + cq->head - cq->tail;
        else
                n = cq->head - cq->tail;
        if (unlikely((u32)cqe < n)) {
                spin_unlock_irq(&cq->lock);
                vfree(wc);
                ret = -EOVERFLOW;
                goto bail;
        }
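        /*
         * Copy the pending completions to the start of the new buffer,
         * unwrapping the circular indices as we go.
         */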
        for (n = 0; cq->tail != cq->head; n++) {
                wc[n] = cq->queue[cq->tail];
                if (cq->tail == cq->ibcq.cqe)
                        cq->tail = 0;
                else
                        cq->tail++;
        }
        cq->ibcq.cqe = cqe;
        cq->head = n;
        cq->tail = 0;
        old_wc = cq->queue;
        cq->queue = wc;
        spin_unlock_irq(&cq->lock);