drivers/infiniband/hw/ipath/ipath_srq.c
/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                           struct ib_recv_wr **bad_wr)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        struct ipath_ibdev *dev = to_idev(ibsrq->device);
        unsigned long flags;
        int ret;

        for (; wr; wr = wr->next) {
                struct ipath_rwqe *wqe;
                u32 next;
                int i, j;

                if (wr->num_sge > srq->rq.max_sge) {
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
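                /*
                 * The receive ring keeps one slot unused so that an
                 * empty ring (head == tail) can be told apart from a
                 * full one (advancing head would land on tail).
                 */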
                next = srq->rq.head + 1;
                if (next >= srq->rq.size)
                        next = 0;
                if (next == srq->rq.tail) {
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                wqe = get_rwqe_ptr(&srq->rq, srq->rq.head);
                wqe->wr_id = wr->wr_id;
                wqe->sg_list[0].mr = NULL;
                wqe->sg_list[0].vaddr = NULL;
                wqe->sg_list[0].length = 0;
                wqe->sg_list[0].sge_length = 0;
                wqe->length = 0;
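                /*
                 * Validate and translate each scatter/gather entry:
                 * user SRQs must supply a non-zero lkey, zero-length
                 * entries are skipped, and ipath_lkey_ok() turns the
                 * lkey into an ipath_sge in the WQE.
                 */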
                for (i = 0, j = 0; i < wr->num_sge; i++) {
                        /* Check LKEY */
                        if (to_ipd(srq->ibsrq.pd)->user &&
                            wr->sg_list[i].lkey == 0) {
                                spin_unlock_irqrestore(&srq->rq.lock,
                                                       flags);
                                *bad_wr = wr;
                                ret = -EINVAL;
                                goto bail;
                        }
                        if (wr->sg_list[i].length == 0)
                                continue;
                        if (!ipath_lkey_ok(&dev->lk_table,
                                           &wqe->sg_list[j],
                                           &wr->sg_list[i],
                                           IB_ACCESS_LOCAL_WRITE)) {
                                spin_unlock_irqrestore(&srq->rq.lock,
                                                       flags);
                                *bad_wr = wr;
                                ret = -EINVAL;
                                goto bail;
                        }
                        wqe->length += wr->sg_list[i].length;
                        j++;
                }
                wqe->num_sge = j;
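                /* Publish the fully initialized WQE by advancing head. */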
                srq->rq.head = next;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}

/**
 * ipath_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: not used by the InfiniPath verbs driver
 */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
                                struct ib_srq_init_attr *srq_init_attr,
                                struct ib_udata *udata)
{
        struct ipath_srq *srq;
        u32 sz;
        struct ib_srq *ret;

        if (srq_init_attr->attr.max_sge < 1) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        srq = kmalloc(sizeof(*srq), GFP_KERNEL);
        if (!srq) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
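        /*
         * Allocate one more entry than max_wr: the extra slot stays
         * unused so the ring can distinguish full from empty.
         */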
        srq->rq.size = srq_init_attr->attr.max_wr + 1;
        sz = sizeof(struct ipath_sge) * srq_init_attr->attr.max_sge +
                sizeof(struct ipath_rwqe);
        srq->rq.wq = vmalloc(srq->rq.size * sz);
        if (!srq->rq.wq) {
                kfree(srq);
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        /*
         * ib_create_srq() will initialize srq->ibsrq.
         */
        spin_lock_init(&srq->rq.lock);
        srq->rq.head = 0;
        srq->rq.tail = 0;
        srq->rq.max_sge = srq_init_attr->attr.max_sge;
        srq->limit = srq_init_attr->attr.srq_limit;

        ret = &srq->ibsrq;

bail:
        return ret;
}

/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        unsigned long flags;
        int ret;

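        /*
         * Updating the limit only records the new value; the SRQ limit
         * event is generated later, when the receive queue drains below
         * the limit.
         */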
        if (attr_mask & IB_SRQ_LIMIT) {
                spin_lock_irqsave(&srq->rq.lock, flags);
                srq->limit = attr->srq_limit;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        if (attr_mask & IB_SRQ_MAX_WR) {
                u32 size = attr->max_wr + 1;
                struct ipath_rwqe *wq, *p;
                u32 n;
                u32 sz;

                if (attr->max_sge < srq->rq.max_sge) {
                        ret = -EINVAL;
                        goto bail;
                }

                sz = sizeof(struct ipath_rwqe) +
                        attr->max_sge * sizeof(struct ipath_sge);
                wq = vmalloc(size * sz);
                if (!wq) {
                        ret = -ENOMEM;
                        goto bail;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
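                /*
                 * Count the receive WQEs still queued; the resized ring
                 * must be able to hold all of them and must still be
                 * larger than the SRQ limit.
                 */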
                if (srq->rq.head < srq->rq.tail)
                        n = srq->rq.size + srq->rq.head - srq->rq.tail;
                else
                        n = srq->rq.head - srq->rq.tail;
                if (size <= n || size <= srq->limit) {
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        vfree(wq);
                        ret = -EINVAL;
                        goto bail;
                }
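                /*
                 * Copy the outstanding WQEs from the old ring into the
                 * new one, compacting them at the front so the new tail
                 * starts at 0 and the new head at n.
                 */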
                n = 0;
                p = wq;
                while (srq->rq.tail != srq->rq.head) {
                        struct ipath_rwqe *wqe;
                        int i;

                        wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail);
                        p->wr_id = wqe->wr_id;
                        p->length = wqe->length;
                        p->num_sge = wqe->num_sge;
                        for (i = 0; i < wqe->num_sge; i++)
                                p->sg_list[i] = wqe->sg_list[i];
                        n++;
                        p = (struct ipath_rwqe *)((char *) p + sz);
                        if (++srq->rq.tail >= srq->rq.size)
                                srq->rq.tail = 0;
                }
                vfree(srq->rq.wq);
                srq->rq.wq = wq;
                srq->rq.size = size;
                srq->rq.head = n;
                srq->rq.tail = 0;
                srq->rq.max_sge = attr->max_sge;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }

        ret = 0;

bail:
        return ret;
}

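/**
 * ipath_query_srq - return the attributes of a shared receive queue
 * @ibsrq: the SRQ to query
 * @attr: filled in with the current SRQ attributes
 *
 * max_wr is one less than the allocated ring size because one entry
 * is always left unused.
 */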
int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct ipath_srq *srq = to_isrq(ibsrq);

        attr->max_wr = srq->rq.size - 1;
        attr->max_sge = srq->rq.max_sge;
        attr->srq_limit = srq->limit;
        return 0;
}

/**
 * ipath_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int ipath_destroy_srq(struct ib_srq *ibsrq)
{
        struct ipath_srq *srq = to_isrq(ibsrq);

        vfree(srq->rq.wq);
        kfree(srq);

        return 0;
}