drivers/infiniband/hw/ehca/ehca_cq.c
/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Completion queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Khadija Souissi <souissi@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/current.h>

#include "ehca_iverbs.h"
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "hcp_if.h"

static struct kmem_cache *cq_cache;

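/*
 * Add the queue pair to this CQ's QP hash table so that events arriving
 * for the CQ can be matched back to their QP (see ehca_cq_get_qp()).
 */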
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
{
	unsigned int qp_num = qp->real_qp_num;
	unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
	unsigned long spl_flags;

	spin_lock_irqsave(&cq->spinlock, spl_flags);
	hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
	spin_unlock_irqrestore(&cq->spinlock, spl_flags);

	ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
		 cq->cq_number, qp_num);

	return 0;
}

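/*
 * Remove the queue pair identified by real_qp_num from this CQ's QP
 * hash table. Returns -EINVAL if no matching QP is found.
 */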
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
{
	int ret = -EINVAL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
	struct hlist_node *iter;
	struct ehca_qp *qp;
	unsigned long spl_flags;

	spin_lock_irqsave(&cq->spinlock, spl_flags);
	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			hlist_del(iter);
			ehca_dbg(cq->ib_cq.device,
				 "removed qp from cq, cq_num=%x real_qp_num=%x",
				 cq->cq_number, real_qp_num);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&cq->spinlock, spl_flags);
	if (ret)
		ehca_err(cq->ib_cq.device,
			 "qp not found cq_num=%x real_qp_num=%x",
			 cq->cq_number, real_qp_num);

	return ret;
}

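/*
 * Look up the QP with the given real_qp_num in this CQ's QP hash table.
 * No locking is done here; callers must serialize against
 * ehca_cq_assign_qp()/ehca_cq_unassign_qp() as required.
 */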
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
{
	struct ehca_qp *ret = NULL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
	struct hlist_node *iter;
	struct ehca_qp *qp;

	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			ret = qp;
			break;
		}
	}
	return ret;
}

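/*
 * Create a completion queue: allocate the ehca_cq structure, reserve an
 * idr token, allocate the CQ resource from the hypervisor, register its
 * queue pages and, for userspace callers, map the queue and firmware
 * galpas into the caller's address space.
 */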
struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
			     struct ib_ucontext *context,
			     struct ib_udata *udata)
{
	static const u32 additional_cqe = 20;
	struct ib_cq *cq;
	struct ehca_cq *my_cq;
	struct ehca_shca *shca =
		container_of(device, struct ehca_shca, ib_device);
	struct ipz_adapter_handle adapter_handle;
	struct ehca_alloc_cq_parms param; /* h_call's out parameters */
	struct h_galpa gal;
	void *vpage;
	u32 counter;
	u64 rpage, cqx_fec, h_ret;
	int ipz_rc, ret, i;
	unsigned long flags;

	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
		return ERR_PTR(-EINVAL);

	my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL);
	if (!my_cq) {
		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
			 device);
		return ERR_PTR(-ENOMEM);
	}

	memset(my_cq, 0, sizeof(struct ehca_cq));
	memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));

	spin_lock_init(&my_cq->spinlock);
	spin_lock_init(&my_cq->cb_lock);
	spin_lock_init(&my_cq->task_lock);
	my_cq->ownpid = current->tgid;

	cq = &my_cq->ib_cq;

	adapter_handle = shca->ipz_hca_handle;
	param.eq_handle = shca->eq.ipz_eq_handle;

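	/*
	 * Reserve an idr token for this CQ. With the idr_pre_get() /
	 * idr_get_new() API the allocation can race and return -EAGAIN,
	 * so retry until a token is obtained or a real error occurs.
	 */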
	do {
		if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
			cq = ERR_PTR(-ENOMEM);
			ehca_err(device, "Can't reserve idr nr. device=%p",
				 device);
			goto create_cq_exit1;
		}

		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
		ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

	} while (ret == -EAGAIN);

	if (ret) {
		cq = ERR_PTR(-ENOMEM);
		ehca_err(device, "Can't allocate new idr entry. device=%p",
			 device);
		goto create_cq_exit1;
	}

	/*
	 * The CQ's maximum depth is 4GB-64, but we need an additional 20
	 * entries as a buffer for receiving error CQEs.
	 */
	param.nr_cqe = cqe + additional_cqe;
	h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);

	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_alloc_resource_cq() failed "
			 "h_ret=%lx device=%p", h_ret, device);
		cq = ERR_PTR(ehca2ib_return_code(h_ret));
		goto create_cq_exit2;
	}

	ipz_rc = ipz_queue_ctor(&my_cq->ipz_queue, param.act_pages,
				EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0);
	if (!ipz_rc) {
		ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%x device=%p",
			 ipz_rc, device);
		cq = ERR_PTR(-EINVAL);
		goto create_cq_exit3;
	}

	for (counter = 0; counter < param.act_pages; counter++) {
		vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
		if (!vpage) {
			ehca_err(device, "ipz_qpageit_get_inc() "
				 "returns NULL device=%p", device);
			cq = ERR_PTR(-EAGAIN);
			goto create_cq_exit4;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_cq(adapter_handle,
						 my_cq->ipz_cq_handle,
						 &my_cq->pf,
						 0,
						 0,
						 rpage,
						 1,
						 my_cq->galpas.kernel);

		if (h_ret < H_SUCCESS) {
			ehca_err(device, "hipz_h_register_rpage_cq() failed "
				 "ehca_cq=%p cq_num=%x h_ret=%lx counter=%i "
				 "act_pages=%i", my_cq, my_cq->cq_number,
				 h_ret, counter, param.act_pages);
			cq = ERR_PTR(-EINVAL);
			goto create_cq_exit4;
		}

		if (counter == (param.act_pages - 1)) {
			vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
			if ((h_ret != H_SUCCESS) || vpage) {
				ehca_err(device, "Registration of pages not "
					 "complete ehca_cq=%p cq_num=%x "
					 "h_ret=%lx", my_cq, my_cq->cq_number,
					 h_ret);
				cq = ERR_PTR(-EAGAIN);
				goto create_cq_exit4;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(device, "Registration of page failed "
					 "ehca_cq=%p cq_num=%x h_ret=%lx "
					 "counter=%i act_pages=%i",
					 my_cq, my_cq->cq_number,
					 h_ret, counter, param.act_pages);
				cq = ERR_PTR(-ENOMEM);
				goto create_cq_exit4;
			}
		}
	}

	ipz_qeit_reset(&my_cq->ipz_queue);

	gal = my_cq->galpas.kernel;
	cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
		 my_cq, my_cq->cq_number, cqx_fec);

	my_cq->ib_cq.cqe = my_cq->nr_of_entries =
		param.act_nr_of_entries - additional_cqe;
	my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;

	for (i = 0; i < QP_HASHTAB_LEN; i++)
		INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);

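	/*
	 * For CQs created on behalf of userspace, map the CQ queue pages
	 * and the firmware galpas into the caller's address space and
	 * return the mapping details via udata.
	 */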
	if (context) {
		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
		struct ehca_create_cq_resp resp;
		struct vm_area_struct *vma;
		memset(&resp, 0, sizeof(resp));
		resp.cq_number = my_cq->cq_number;
		resp.token = my_cq->token;
		resp.ipz_queue.qe_size = ipz_queue->qe_size;
		resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
		resp.ipz_queue.queue_length = ipz_queue->queue_length;
		resp.ipz_queue.pagesize = ipz_queue->pagesize;
		resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
		ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
				       ipz_queue->queue_length,
				       (void **)&resp.ipz_queue.queue,
				       &vma);
		if (ret) {
			ehca_err(device, "Could not mmap queue pages");
			cq = ERR_PTR(ret);
			goto create_cq_exit4;
		}
		my_cq->uspace_queue = resp.ipz_queue.queue;
		resp.galpas = my_cq->galpas;
		ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
					 (void **)&resp.galpas.kernel.fw_handle,
					 &vma);
		if (ret) {
			ehca_err(device, "Could not mmap fw_handle");
			cq = ERR_PTR(ret);
			goto create_cq_exit5;
		}
		my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			ehca_err(device, "Copy to udata failed.");
			cq = ERR_PTR(-EFAULT);
			goto create_cq_exit6;
		}
	}

	return cq;

create_cq_exit6:
	ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);

create_cq_exit5:
	ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);

create_cq_exit4:
	ipz_queue_dtor(&my_cq->ipz_queue);

create_cq_exit3:
	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
	if (h_ret != H_SUCCESS)
		ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
			 "cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret);

create_cq_exit2:
	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

create_cq_exit1:
	kmem_cache_free(cq_cache, my_cq);

	return cq;
}

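/*
 * Destroy a completion queue: wait for outstanding event callbacks to
 * finish, drop the idr token, unmap any userspace mappings, release the
 * CQ resource from the hypervisor (forcibly if it is in an error state),
 * and free the queue and the ehca_cq structure.
 */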
int ehca_destroy_cq(struct ib_cq *cq)
{
	u64 h_ret;
	int ret;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int cq_num = my_cq->cq_number;
	struct ib_device *device = cq->device;
	struct ehca_shca *shca = container_of(device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	u32 cur_pid = current->tgid;
	unsigned long flags;

	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
	while (my_cq->nr_callbacks) {
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
		yield();
		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
	}

	idr_remove(&ehca_cq_idr, my_cq->token);
	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

	if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
		ehca_err(device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_cq->ownpid);
		return -EINVAL;
	}

	/* unmap userspace mappings if the queue was mmapped */
	if (my_cq->uspace_queue) {
		ret = ehca_munmap(my_cq->uspace_queue,
				  my_cq->ipz_queue.queue_length);
		if (ret)
			ehca_err(device, "Could not munmap queue ehca_cq=%p "
				 "cq_num=%x", my_cq, cq_num);
		ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
		if (ret)
			ehca_err(device, "Could not munmap fwh ehca_cq=%p "
				 "cq_num=%x", my_cq, cq_num);
	}

	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
	if (h_ret == H_R_STATE) {
		/* CQ in error state: read error data and destroy it forcibly */
		ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%lx in err "
			 "state. Try to delete it forcibly.",
			 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
		ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
		h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
		if (h_ret == H_SUCCESS)
			ehca_dbg(device, "cq_num=%x deleted successfully.",
				 cq_num);
	}
	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lx "
			 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
		return ehca2ib_return_code(h_ret);
	}
	ipz_queue_dtor(&my_cq->ipz_queue);
	kmem_cache_free(cq_cache, my_cq);

	return 0;
}

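/*
 * CQ resize is not implemented; only the ownership check is performed
 * before returning an error.
 */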
int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	u32 cur_pid = current->tgid;

	if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
		ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_cq->ownpid);
		return -EINVAL;
	}

	/* TODO: proper resize needs to be done */
	ehca_err(cq->device, "not implemented yet");

	return -EFAULT;
}

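/*
 * Create the slab cache used for ehca_cq allocations, called at driver
 * initialization.
 */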
int ehca_init_cq_cache(void)
{
	cq_cache = kmem_cache_create("ehca_cache_cq",
				     sizeof(struct ehca_cq), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!cq_cache)
		return -ENOMEM;
	return 0;
}

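/*
 * Destroy the ehca_cq slab cache, if it was created.
 */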
void ehca_cleanup_cq_cache(void)
{
	if (cq_cache)
		kmem_cache_destroy(cq_cache);
}