/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  userspace support verbs
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/current.h>

#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "ehca_tools.h"

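/*
 * Userspace support verbs: this file provides ucontext allocation and
 * teardown plus the mmap paths that expose per-CQ/per-QP resources
 * (firmware trigger page, send/receive/completion queues) to the owning
 * user process.
 */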
struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
					struct ib_udata *udata)
{
	struct ehca_ucontext *my_context;

	my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
	if (!my_context) {
		ehca_err(device, "Out of memory device=%p", device);
		return ERR_PTR(-ENOMEM);
	}

	return &my_context->ib_ucontext;
}

int ehca_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
	return 0;
}

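/*
 * Layout of the 64-bit mmap file offset, as decoded in ehca_nopage() and
 * ehca_mmap() below:
 *   bits 63..32  idr handle of the CQ or QP
 *   bits 31..28  queue type    (1 = CQ, 2 = QP)
 *   bits 27..24  resource type (1 = galpa fw handle / trigger page,
 *                               2 = cq/recv queue, 3 = send queue)
 */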
struct page *ehca_nopage(struct vm_area_struct *vma,
			 unsigned long address, int *type)
{
	struct page *mypage = NULL;
	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
	u32 idr_handle = fileoffset >> 32;
	u32 q_type = (fileoffset >> 28) & 0xF;	  /* CQ, QP,... */
	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
	u32 cur_pid = current->tgid;
	unsigned long flags;
	struct ehca_cq *cq;
	struct ehca_qp *qp;
	struct ehca_pd *pd;
	u64 offset;
	void *vaddr;

	switch (q_type) {
	case 1: /* CQ */
		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
		cq = idr_find(&ehca_cq_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!cq) {
			ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS");
			return NOPAGE_SIGBUS;
		}
		if (cq->ownpid != cur_pid) {
			ehca_err(cq->ib_cq.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, cq->ownpid);
			return NOPAGE_SIGBUS;
		}

		if (rsrc_type == 2) {
			ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq);
			offset = address - vma->vm_start;
			vaddr = ipz_qeit_calc(&cq->ipz_queue, offset);
			ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p",
				 offset, vaddr);
			mypage = virt_to_page(vaddr);
		}
		break;

	case 2: /* QP */
		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
		qp = idr_find(&ehca_qp_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!qp) {
			ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS");
			return NOPAGE_SIGBUS;
		}

		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
		if (pd->ownpid != cur_pid) {
			ehca_err(qp->ib_qp.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, pd->ownpid);
			return NOPAGE_SIGBUS;
		}

		if (rsrc_type == 2) { /* rqueue */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp);
			offset = address - vma->vm_start;
			vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset);
			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
				 offset, vaddr);
			mypage = virt_to_page(vaddr);
		} else if (rsrc_type == 3) { /* squeue */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp);
			offset = address - vma->vm_start;
			vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
				 offset, vaddr);
			mypage = virt_to_page(vaddr);
		}
		break;

	default:
		ehca_gen_err("bad queue type %x", q_type);
		return NOPAGE_SIGBUS;
	}

	if (!mypage) {
		ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
		return NOPAGE_SIGBUS;
	}
	get_page(mypage);

	return mypage;
}

static struct vm_operations_struct ehcau_vm_ops = {
	.nopage = ehca_nopage,
};

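/*
 * Queue areas (resource types 2 and 3) are not populated eagerly: ehca_mmap()
 * only installs the vm_ops above on the vma, and ehca_nopage() hands out the
 * backing queue pages one by one at fault time. Only the firmware trigger
 * page (resource type 1) is mapped up front via remap_pfn_range().
 */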
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
	u32 idr_handle = fileoffset >> 32;
	u32 q_type = (fileoffset >> 28) & 0xF;	  /* CQ, QP,... */
	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
	u32 cur_pid = current->tgid;
	int ret;
	u64 vsize, physical;
	unsigned long flags;
	struct ehca_cq *cq;
	struct ehca_qp *qp;
	struct ehca_pd *pd;

	switch (q_type) {
	case 1: /* CQ */
		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
		cq = idr_find(&ehca_cq_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!cq)
			return -EINVAL;
		if (cq->ownpid != cur_pid) {
			ehca_err(cq->ib_cq.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, cq->ownpid);
			return -EINVAL;
		}
		if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
			return -EINVAL;

		switch (rsrc_type) {
		case 1: /* galpa fw handle */
			ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq);
			vma->vm_flags |= VM_RESERVED;
			vsize = vma->vm_end - vma->vm_start;
			if (vsize != EHCA_PAGESIZE) {
				ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
					 vma->vm_end - vma->vm_start);
				return -EINVAL;
			}

			physical = cq->galpas.user.fw_handle;
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
			vma->vm_flags |= VM_IO | VM_RESERVED;

			ehca_dbg(cq->ib_cq.device,
				 "vsize=%lx physical=%lx", vsize, physical);
			ret = remap_pfn_range(vma, vma->vm_start,
					      physical >> PAGE_SHIFT, vsize,
					      vma->vm_page_prot);
			if (ret) {
				ehca_err(cq->ib_cq.device,
					 "remap_pfn_range() failed ret=%x",
					 ret);
				return -ENOMEM;
			}
			break;

		case 2: /* cq queue_addr */
			ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
			vma->vm_flags |= VM_RESERVED;
			vma->vm_ops = &ehcau_vm_ops;
			break;

		default:
			ehca_err(cq->ib_cq.device, "bad resource type %x",
				 rsrc_type);
			return -EINVAL;
		}
		break;

	case 2: /* QP */
		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
		qp = idr_find(&ehca_qp_idr, idr_handle);
		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

		/* make sure this mmap really belongs to the authorized user */
		if (!qp)
			return -EINVAL;
		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
		if (pd->ownpid != cur_pid) {
			ehca_err(qp->ib_qp.device,
				 "Invalid caller pid=%x ownpid=%x",
				 cur_pid, pd->ownpid);
			return -EINVAL;
		}
		if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
			return -EINVAL;

		switch (rsrc_type) {
		case 1: /* galpa fw handle */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp);
			vma->vm_flags |= VM_RESERVED;
			vsize = vma->vm_end - vma->vm_start;
			if (vsize != EHCA_PAGESIZE) {
				ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
					 vma->vm_end - vma->vm_start);
				return -EINVAL;
			}

			physical = qp->galpas.user.fw_handle;
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
			vma->vm_flags |= VM_IO | VM_RESERVED;

			ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
				 vsize, physical);
			ret = remap_pfn_range(vma, vma->vm_start,
					      physical >> PAGE_SHIFT, vsize,
					      vma->vm_page_prot);
			if (ret) {
				ehca_err(qp->ib_qp.device,
					 "remap_pfn_range() failed ret=%x",
					 ret);
				return -ENOMEM;
			}
			break;

		case 2: /* qp rqueue_addr */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
			vma->vm_flags |= VM_RESERVED;
			vma->vm_ops = &ehcau_vm_ops;
			break;

		case 3: /* qp squeue_addr */
			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
			vma->vm_flags |= VM_RESERVED;
			vma->vm_ops = &ehcau_vm_ops;
			break;

		default:
			ehca_err(qp->ib_qp.device, "bad resource type %x",
				 rsrc_type);
			return -EINVAL;
		}
		break;

	default:
		ehca_gen_err("bad queue type %x", q_type);
		return -EINVAL;
	}

	return 0;
}

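/*
 * The helpers below build mappings from kernel context rather than through a
 * userspace mmap() call: ehca_mmap_nopage() creates an anonymous, shared,
 * writable mapping in the current process and attaches ehcau_vm_ops to it;
 * ehca_mmap_register() additionally remaps one non-cached hardware page into
 * such a region. Their callers sit elsewhere in the driver; only the mapping
 * mechanics live here.
 */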
int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
		     struct vm_area_struct **vma)
{
	down_write(&current->mm->mmap_sem);
	*mapped = (void *)do_mmap(NULL, 0, length, PROT_WRITE,
				  MAP_SHARED | MAP_ANONYMOUS,
				  foffset);
	up_write(&current->mm->mmap_sem);
	if (!(*mapped)) {
		ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
			     foffset, length);
		return -EINVAL;
	}

	*vma = find_vma(current->mm, (u64)*mapped);
	if (!(*vma)) {
		/* undo the anonymous mapping we just created */
		down_write(&current->mm->mmap_sem);
		do_munmap(current->mm, (unsigned long)*mapped, length);
		up_write(&current->mm->mmap_sem);
		ehca_gen_err("couldn't find vma queue=%p", *mapped);
		return -EINVAL;
	}
	(*vma)->vm_flags |= VM_RESERVED;
	(*vma)->vm_ops = &ehcau_vm_ops;

	return 0;
}

int ehca_mmap_register(u64 physical, void **mapped,
		       struct vm_area_struct **vma)
{
	int ret;
	u64 vsize;

	/* ehca hw supports only 4k page */
	ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
	if (ret) {
		ehca_gen_err("couldn't mmap physical=%lx", physical);
		return ret;
	}

	(*vma)->vm_flags |= VM_RESERVED;
	vsize = (*vma)->vm_end - (*vma)->vm_start;
	if (vsize != EHCA_PAGESIZE) {
		ehca_gen_err("invalid vsize=%lx",
			     (*vma)->vm_end - (*vma)->vm_start);
		return -EINVAL;
	}

	(*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
	(*vma)->vm_flags |= VM_IO | VM_RESERVED;
	ret = remap_pfn_range((*vma), (*vma)->vm_start,
			      physical >> PAGE_SHIFT, vsize,
			      (*vma)->vm_page_prot);
	if (ret) {
		ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
		ret = -ENOMEM;
	}

	return ret;
}

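/*
 * Counterpart to the helpers above: tears down a region previously mapped
 * into the current process.
 */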
int ehca_munmap(unsigned long addr, size_t len) {
	int ret = 0;
	struct mm_struct *mm = current->mm;

	if (mm) {
		down_write(&mm->mmap_sem);
		ret = do_munmap(mm, addr, len);
		up_write(&mm->mmap_sem);
	}

	return ret;
}