/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
	struct ipath_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;
	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned) cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else
		next = head + 1;
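	/*
	 * The ring has cq->ibcq.cqe + 1 slots so that a full queue
	 * (next == tail) can be distinguished from an empty one
	 * (head == tail); overflow is reported as a CQ error event below.
	 */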
	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
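	/*
	 * cq->ip is set when the completion queue is mmapped into user
	 * space; such queues store entries in the uverbs ABI format
	 * (struct ib_uverbs_wc) while kernel queues store struct ib_wc.
	 */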
	if (cq->ip) {
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].imm_data = (__u32 __force)entry->imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = entry->slid;
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else
		wc->kqueue[head] = *entry;
	wc->head = next;
	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		tasklet_hi_schedule(&cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	if (entry->status != IB_WC_SUCCESS)
		to_idev(cq->ibcq.device)->n_wqe_errs++;
}
/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;
	/* The kernel can only poll a kernel completion queue */
	if (cq->ip) {
		npolled = -EINVAL;
		goto bail;
	}
	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
	tail = wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

bail:
	return npolled;
}
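/*
 * send_complete - tasklet handler that runs the CQ's completion callback.
 *
 * Scheduled from ipath_cq_enter() whenever a completion arrives while
 * notification has been requested.
 */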
static void send_complete(unsigned long data)
{
	struct ipath_cq *cq = (struct ipath_cq *)data;

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, tasklet_hi_schedule()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

		if (cq->triggered == triggered)
			return;
	}
}
/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @comp_vector: unused by the InfiniPath driver
 * @context: the user context, used when the CQ is mapped into user space
 * @udata: user data; when present, the CQ is created in the user-mapped format
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cq *cq;
	struct ipath_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;
	if (entries < 1 || entries > ib_ipath_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}
	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
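	/*
	 * Note: one extra slot (entries + 1) is allocated so that a
	 * completely full ring can be told apart from an empty one.
	 */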
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}
	/*
	 * Return the address of the WC as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;
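	/* Enforce the per-device limit on the number of CQs. */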
	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}
	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}
	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
	struct ipath_ibdev *dev = to_idev(ibcq->device);
	struct ipath_cq *cq = to_icq(ibcq);

	tasklet_kill(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
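	/*
	 * A user mapping may still hold a reference to the queue memory,
	 * so drop it via the kref; kernel-only queues are freed directly.
	 */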
	if (cq->ip)
		kref_put(&cq->ip->ref, ipath_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}
/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success, or 1 if IB_CQ_REPORT_MISSED_EVENTS was requested
 * and completions are already pending on the queue.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}
/**
 * ipath_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new minimum size of the CQ
 * @udata: user data; determines whether the queue uses the user-mapped format
 *
 * Returns 0 for success.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *old_wc;
	struct ipath_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > ib_ipath_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}
	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}
	/*
	 * Return the address of the WC as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = (__u64) wc;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail;
	}
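	/*
	 * Check, under the lock, that the new queue is large enough to
	 * hold the entries currently pending on the old queue.
	 */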
	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		spin_unlock_irq(&cq->lock);
		vfree(wc);
		ret = -EOVERFLOW;
		goto bail;
	}
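	/*
	 * Copy any pending entries to the head of the new queue,
	 * re-indexing them from zero.
	 */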
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);
	if (cq->ip) {
		struct ipath_ibdev *dev = to_idev(ibcq->device);
		struct ipath_mmap_info *ip = cq->ip;

		ipath_update_mmap_info(dev, ip, sz, wc);
		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;

bail:
	return ret;
}