/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  userspace support verbs
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/err.h>

#include <asm/current.h>

#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "ehca_tools.h"
#include "hcp_if.h"

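/*
 * The file offset passed to mmap() encodes which hardware resource
 * userspace wants to see:
 *
 *   bits 63..32  idr handle of the CQ or QP
 *   bits 31..28  queue type    (1 = CQ, 2 = QP)
 *   bits 27..24  resource type (1 = firmware galpa page,
 *                               2 = CQ queue / QP recv queue,
 *                               3 = QP send queue)
 */
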
struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
					struct ib_udata *udata)
{
	struct ehca_ucontext *my_context;

	my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
	if (!my_context) {
		ehca_err(device, "Out of memory device=%p", device);
		return ERR_PTR(-ENOMEM);
	}

	return &my_context->ib_ucontext;
}

int ehca_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
	return 0;
}

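/*
 * nopage handler for the queue areas mapped in ehca_mmap(): look up the
 * CQ or QP behind the encoded file offset, verify that the faulting
 * process owns it, and hand back the ipz queue page backing the
 * faulting address with an elevated reference count.
 */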
struct page *ehca_nopage(struct vm_area_struct *vma,
			 unsigned long address, int *type)
{
	struct page *mypage = NULL;
	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
	u32 idr_handle = fileoffset >> 32;
	u32 q_type = (fileoffset >> 28) & 0xF;    /* CQ, QP,...        */
	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
	u32 cur_pid = current->tgid;
	unsigned long flags;
	struct ehca_cq *cq;
	struct ehca_qp *qp;
	struct ehca_pd *pd;
	u64 offset;
	void *vaddr;

	switch (q_type) {
	case 1: /* CQ */
		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
		cq = idr_find(&ehca_cq_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!cq) {
			ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS");
			return NOPAGE_SIGBUS;
		}

		if (cq->ownpid != cur_pid) {
			ehca_err(cq->ib_cq.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, cq->ownpid);
			return NOPAGE_SIGBUS;
		}

		if (rsrc_type == 2) {
			ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq);
			offset = address - vma->vm_start;
			vaddr = ipz_qeit_calc(&cq->ipz_queue, offset);
			ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p",
				 offset, vaddr);
			mypage = virt_to_page(vaddr);
		}
		break;

	case 2: /* QP */
		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
		qp = idr_find(&ehca_qp_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!qp) {
			ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS");
			return NOPAGE_SIGBUS;
		}

		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
		if (pd->ownpid != cur_pid) {
			ehca_err(qp->ib_qp.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, pd->ownpid);
			return NOPAGE_SIGBUS;
		}

		if (rsrc_type == 2) {	/* rqueue */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp);
			offset = address - vma->vm_start;
			vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset);
			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
				 offset, vaddr);
			mypage = virt_to_page(vaddr);
		} else if (rsrc_type == 3) {	/* squeue */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp);
			offset = address - vma->vm_start;
			vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
				 offset, vaddr);
			mypage = virt_to_page(vaddr);
		}
		break;

	default:
		ehca_gen_err("bad queue type %x", q_type);
		return NOPAGE_SIGBUS;
	}

	if (!mypage) {
		ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
		return NOPAGE_SIGBUS;
	}
	get_page(mypage);

	return mypage;
}

static struct vm_operations_struct ehcau_vm_ops = {
	.nopage = ehca_nopage,
};

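/*
 * mmap entry point for userspace: depending on the resource type,
 * either remap the firmware register page directly or install
 * ehcau_vm_ops so that the queue memory is faulted in via
 * ehca_nopage().  Ownership and ucontext checks guard against a
 * process mapping somebody else's resources.
 */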
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
	u32 idr_handle = fileoffset >> 32;
	u32 q_type = (fileoffset >> 28) & 0xF;    /* CQ, QP,...        */
	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
	u32 cur_pid = current->tgid;
	u32 ret;
	u64 vsize, physical;
	unsigned long flags;
	struct ehca_cq *cq;
	struct ehca_qp *qp;
	struct ehca_pd *pd;

	switch (q_type) {
	case 1: /* CQ */
		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
		cq = idr_find(&ehca_cq_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!cq)
			return -EINVAL;

		if (cq->ownpid != cur_pid) {
			ehca_err(cq->ib_cq.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, cq->ownpid);
			return -ENOMEM;
		}

		if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
			return -EINVAL;

		switch (rsrc_type) {
		case 1: /* galpa fw handle */
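			/*
			 * The galpa fw handle is the physical address of
			 * the firmware-provided trigger page for this CQ;
			 * it is I/O memory, so map exactly one page,
			 * uncached.
			 */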
			ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq);
			vma->vm_flags |= VM_RESERVED;
			vsize = vma->vm_end - vma->vm_start;
			if (vsize != EHCA_PAGESIZE) {
				ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
					 vma->vm_end - vma->vm_start);
				return -EINVAL;
			}

			physical = cq->galpas.user.fw_handle;
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
			vma->vm_flags |= VM_IO | VM_RESERVED;

			ehca_dbg(cq->ib_cq.device,
				 "vsize=%lx physical=%lx", vsize, physical);
			ret = remap_pfn_range(vma, vma->vm_start,
					      physical >> PAGE_SHIFT, vsize,
					      vma->vm_page_prot);
			if (ret) {
				ehca_err(cq->ib_cq.device,
					 "remap_pfn_range() failed ret=%x",
					 ret);
				return -ENOMEM;
			}
			break;

		case 2: /* cq queue_addr */
			ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
			vma->vm_flags |= VM_RESERVED;
			vma->vm_ops = &ehcau_vm_ops;
			break;

		default:
			ehca_err(cq->ib_cq.device, "bad resource type %x",
				 rsrc_type);
			return -EINVAL;
		}
		break;

	case 2: /* QP */
		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
		qp = idr_find(&ehca_qp_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!qp)
			return -EINVAL;

		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
		if (pd->ownpid != cur_pid) {
			ehca_err(qp->ib_qp.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, pd->ownpid);
			return -ENOMEM;
		}

		if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
			return -EINVAL;

		switch (rsrc_type) {
		case 1: /* galpa fw handle */
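			/*
			 * As for the CQ above: the QP's trigger page is
			 * I/O memory and must be mapped uncached.
			 */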
			ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp);
			vma->vm_flags |= VM_RESERVED;
			vsize = vma->vm_end - vma->vm_start;
			if (vsize != EHCA_PAGESIZE) {
				ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
					 vma->vm_end - vma->vm_start);
				return -EINVAL;
			}

			physical = qp->galpas.user.fw_handle;
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
			vma->vm_flags |= VM_IO | VM_RESERVED;

			ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
				 vsize, physical);
			ret = remap_pfn_range(vma, vma->vm_start,
					      physical >> PAGE_SHIFT, vsize,
					      vma->vm_page_prot);
			if (ret) {
				ehca_err(qp->ib_qp.device,
					 "remap_pfn_range() failed ret=%x",
					 ret);
				return -ENOMEM;
			}
			break;

		case 2: /* qp rqueue_addr */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
			vma->vm_flags |= VM_RESERVED;
			vma->vm_ops = &ehcau_vm_ops;
			break;

		case 3: /* qp squeue_addr */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
			vma->vm_flags |= VM_RESERVED;
			vma->vm_ops = &ehcau_vm_ops;
			break;

		default:
			ehca_err(qp->ib_qp.device, "bad resource type %x",
				 rsrc_type);
			return -EINVAL;
		}
		break;

	default:
		ehca_gen_err("bad queue type %x", q_type);
		return -EINVAL;
	}

	return 0;
}

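/*
 * Create an anonymous, shared mapping of 'length' bytes in the current
 * process at a kernel-chosen address and attach ehcau_vm_ops to it, so
 * that faults are resolved through ehca_nopage().  The encoded foffset
 * ends up in vm_pgoff, where ehca_nopage() decodes it.
 */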
int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
		     struct vm_area_struct **vma)
{
	down_write(&current->mm->mmap_sem);
	*mapped = (void *)do_mmap(NULL, 0, length, PROT_WRITE,
				  MAP_SHARED | MAP_ANONYMOUS,
				  foffset);
	up_write(&current->mm->mmap_sem);
	/* do_mmap() reports failure as a negative errno, not NULL */
	if (!(*mapped) || IS_ERR_VALUE((unsigned long)*mapped)) {
		ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
			     foffset, length);
		return -EINVAL;
	}

	*vma = find_vma(current->mm, (u64)*mapped);
	if (!(*vma)) {
		down_write(&current->mm->mmap_sem);
		do_munmap(current->mm, (unsigned long)*mapped, length);
		up_write(&current->mm->mmap_sem);
		ehca_gen_err("couldn't find vma queue=%p", *mapped);
		return -EINVAL;
	}
	(*vma)->vm_flags |= VM_RESERVED;
	(*vma)->vm_ops = &ehcau_vm_ops;

	return 0;
}

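/*
 * Map the single 4K firmware page at 'physical' uncached into the
 * current process: first create an anonymous mapping of EHCA_PAGESIZE
 * bytes, then replace its page table entries via remap_pfn_range().
 */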
int ehca_mmap_register(u64 physical, void **mapped,
		       struct vm_area_struct **vma)
{
	int ret;
	unsigned long vsize;

	/* ehca hw supports only 4k page */
	ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
	if (ret) {
		ehca_gen_err("couldn't mmap physical=%lx", physical);
		return ret;
	}

	(*vma)->vm_flags |= VM_RESERVED;
	vsize = (*vma)->vm_end - (*vma)->vm_start;
	if (vsize != EHCA_PAGESIZE) {
		ehca_gen_err("invalid vsize=%lx",
			     (*vma)->vm_end - (*vma)->vm_start);
		return -EINVAL;
	}

	(*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
	(*vma)->vm_flags |= VM_IO | VM_RESERVED;

	ret = remap_pfn_range((*vma), (*vma)->vm_start,
			      physical >> PAGE_SHIFT, vsize,
			      (*vma)->vm_page_prot);
	if (ret) {
		ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
		return -ENOMEM;
	}

	return 0;
}

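/*
 * Undo a mapping made by ehca_mmap_nopage()/ehca_mmap_register() on
 * behalf of the current process.
 */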
int ehca_munmap(unsigned long addr, size_t len)
{
	int ret = 0;
	struct mm_struct *mm = current->mm;

	if (mm) {
		down_write(&mm->mmap_sem);
		ret = do_munmap(mm, addr, len);
		up_write(&mm->mmap_sem);
	}
	return ret;
}