/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and
 *  OpenIB BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rdma/ib_umem.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

#define NUM_CHUNKS(length, chunk_size) \
	(((length) + ((chunk_size) - 1)) / (chunk_size))
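/*
 * Illustrative: a 10 KB region with chunk_size 4 KB gives
 * NUM_CHUNKS(10240, 4096) = (10240 + 4095) / 4096 = 3 chunks.
 */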

/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

enum ehca_mr_pgsize {
	EHCA_MR_PGSIZE4K  = 0x1000L,
	EHCA_MR_PGSIZE64K = 0x10000L,
	EHCA_MR_PGSIZE1M  = 0x100000L,
	EHCA_MR_PGSIZE16M = 0x1000000L
};

#define EHCA_MR_PGSHIFT4K  12
#define EHCA_MR_PGSHIFT64K 16
#define EHCA_MR_PGSHIFT1M  20
#define EHCA_MR_PGSHIFT16M 24

static u32 ehca_encode_hwpage_size(u32 pgsize)
{
	int log = ilog2(pgsize);
	WARN_ON(log < 12 || log > 24 || log & 3);
	return (log - 12) / 4;
}
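
/*
 * Encoding examples for ehca_encode_hwpage_size(): 4 KB (log = 12) -> 0,
 * 64 KB -> 1, 1 MB -> 2, 16 MB -> 3; only these four sizes pass the
 * WARN_ON() above.
 */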

static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
{
	return 1UL << ilog2(shca->hca_cap_mr_pgsize);
}
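
/*
 * hca_cap_mr_pgsize is a bitmask of the page sizes the HCA supports;
 * illustrative: if 4 KB, 64 KB and 1 MB are set, ilog2() of the mask
 * selects the highest set bit and the function returns 0x100000 (1 MB).
 */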

static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mrlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mwlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ehca_mr_delete(e_maxmr);
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exists!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);

	u64 size;

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		struct ehca_mr_pginfo pginfo;
		u32 num_kpages;
		u32 num_hwpages;
		u64 hw_pgsize;

		num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
					PAGE_SIZE);
		/* for kernel space we try the largest possible pgsize */
		hw_pgsize = ehca_get_max_hwpage_size(shca);
		num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
					 hw_pgsize);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.hwpage_size = hw_pgsize;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage =
			((u64)iova_start & ~PAGE_MASK) / hw_pgsize;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "h_ret=%li pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int mr_access_flags,
			       struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	int ret, page_shift;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hwpage_size;

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}

	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if (length == 0 || virt + length < virt) {
		ehca_err(pd->device, "bad input values: length=%llx "
			 "virt_base=%llx", length, virt);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
				 mr_access_flags, 0);
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *)e_mr->umem;
		goto reg_user_mr_exit1;
	}

	if (e_mr->umem->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit2;
	}

	/* determine number of MR pages */
	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
	/* select proper hw_pgsize */
	page_shift = PAGE_SHIFT;
	if (e_mr->umem->hugetlb) {
		/* determine page_shift, clamp between 4K and 16M */
		page_shift = (fls64(length - 1) + 3) & ~3;
		page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
				 EHCA_MR_PGSHIFT16M);
	}
	hwpage_size = 1UL << page_shift;

	/* now that we have the desired page size, shift until it's
	 * supported, too. 4K is always supported, so this terminates.
	 */
	while (!(hwpage_size & shca->hca_cap_mr_pgsize))
		hwpage_size >>= 4;
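
	/*
	 * Illustrative: a 5 MB hugetlb region gives fls64(length - 1) = 23,
	 * so page_shift = (23 + 3) & ~3 = 24, i.e. a 16 MB hw page; if the
	 * HCA supports only 4K/64K/1M, the loop above shifts 16 MB down to
	 * 1 MB, the largest supported size.
	 */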

reg_user_mr_fallback:
	num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_USER;
	pginfo.hwpage_size = hwpage_size;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.u.usr.region = e_mr->umem;
	pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
	pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
						     (&e_mr->umem->chunk_list),
						     list);

	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
		ehca_warn(pd->device, "failed to register mr "
			  "with hwpage_size=%llx", hwpage_size);
		ehca_info(pd->device, "try to register mr with "
			  "kpage_size=%lx", PAGE_SIZE);
		/*
		 * this means kpages are not contiguous for a hw page
		 * try kernel page size as fallback solution
		 */
		hwpage_size = PAGE_SIZE;
		goto reg_user_mr_fallback;
	}
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit2;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
	ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */

/*----------------------------------------------------------------------*/

int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_kpages = 0;
	u32 num_hwpages = 0;
	struct ehca_mr_pginfo pginfo;

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start;
	new_size = e_mr->size;
	new_acl = e_mr->acl;
	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		u64 hw_pgsize = ehca_get_max_hwpage_size(shca);

		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%llx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
					new_size, PAGE_SIZE);
		num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +
					 new_size, hw_pgsize);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.hwpage_size = hw_pgsize;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage =
			((u64)iova_start & ~PAGE_MASK) / hw_pgsize;
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/

int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout;

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
			 "hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size = hipzout.len;
	mr_attr->lkey = hipzout.lkey;
	mr_attr->rkey = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
			 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto dereg_mr_exit0;
	}

	if (e_mr->umem)
		ib_umem_release(e_mr->umem);

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/

struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout;

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
			 "shca=%p hca_hndl=%llx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/

int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
			 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca2ib_return_code(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/

struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	u64 hw_pgsize;

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	hw_pgsize = 1 << fmr_attr->page_shift;
	if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
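
	/*
	 * Illustrative: page_shift = 16 requests 64 KB FMR pages
	 * (hw_pgsize = 0x10000); the check above rejects it unless the
	 * 64 KB bit is set in shca->hca_cap_mr_pgsize.
	 */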

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.hwpage_size = hw_pgsize;
	/*
	 * pginfo.num_hwpages==0, i.e. register_rpages() will not be called
	 * but deferred to map_phys_fmr()
	 */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->hwpage_size = hw_pgsize;
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
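
	/*
	 * Illustrative: with 64 KB FMR pages, iova = 0x12340000 passes the
	 * alignment check above, whereas iova = 0x12341000 is rejected.
	 */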

	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_kpages = list_len;
	pginfo.hwpage_size = e_fmr->hwpage_size;
	pginfo.num_hwpages =
		list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
	pginfo.u.fmr.page_list = page_list;
	pginfo.next_hwpage =
		(iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
	pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
			 "iova=%llx", ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */

/*----------------------------------------------------------------------*/

int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check that all FMRs belong to the same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
			 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
	if (ehca_use_hp_mr == 1)	/* high performance MR */
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
			 "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_kpages = pginfo->num_kpages;
	e_mr->num_hwpages = pginfo->num_hwpages;
	e_mr->hwpage_size = pginfo->hwpage_size;
	e_mr->start = iova_start;
	e_mr->size = size;
	e_mr->acl = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%llx num_hwpages=%llx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	if (!pginfo->num_hwpages) /* in case of fmr */
		return 0;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max MAX_RPAGES ehca mr pages per register call */
	for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
			if (rnum == 0)
				rnum = MAX_RPAGES;	/* last shot is full */
		} else
			rnum = MAX_RPAGES;
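
		/*
		 * Illustrative: num_hwpages = 1300 needs three hcalls with
		 * rnum = 512, 512 and 276 (1300 % 512); an exact multiple
		 * such as 1024 makes the last shot full (rnum = 512).
		 */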

		ret = ehca_set_pagebuf(pginfo, rnum, kpage);
		if (ret) {
			ehca_err(&shca->ib_device, "ehca_set_pagebuf "
				 "bad rc, ret=%i rnum=%x kpage=%p",
				 ret, rnum, kpage);
			goto ehca_reg_mr_rpages_exit1;
		}

		if (rnum > 1) {
			rpage = virt_to_abs(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else
			rpage = *kpage;

		h_ret = hipz_h_register_rpage_mr(
			shca->ipz_hca_handle, e_mr,
			ehca_encode_hwpage_size(pginfo->hwpage_size),
			0, rpage, rnum);

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lli "
					 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca2ib_return_code(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
				 "mr_hndl=%llx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca2ib_return_code(h_ret);
			break;
		} else
			ret = 0;
	}

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
			 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr_rpages() */

/*----------------------------------------------------------------------*/

inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
			 "kpage=%p", e_mr, pginfo, pginfo->type,
			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
			 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_kpages = pginfo->num_kpages;
		e_mr->num_hwpages = pginfo->num_hwpages;
		e_mr->hwpage_size = pginfo->hwpage_size;
		e_mr->start = iova_start;
		e_mr->size = size;
		e_mr->acl = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
		ret = 0;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if (ret && (ret != -EAGAIN))
		ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
			 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages);
	return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/

int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_hwpages > MAX_RPAGES) ||
	    (e_mr->num_hwpages > MAX_RPAGES) ||
	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, "
			 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
			 pginfo->num_hwpages, e_mr->num_hwpages);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}
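
	/*
	 * Illustrative: growing an MR from 400 to 600 hw pages exceeds
	 * MAX_RPAGES (512), so the 3-hcall path is chosen: free the old MR,
	 * reset the ehca_mr fields and register it from scratch.
	 */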

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->hwpage_size = save_mr.hwpage_size;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */

/*----------------------------------------------------------------------*/

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	struct ehca_mr_hipzout_parms hipzout;
	struct ehca_mr save_mr;

	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret == H_SUCCESS) {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
			return 0;
		}
		/*
		 * should not happen, because length checked above,
		 * FMRs are not shared and no MW bound to FMRs
		 */
		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
			 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
			 "mr_hndl=%llx lkey=%x lkey_out=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
		/* try free and rereg */
	}

	/* first free old FMR */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
			 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
			 "lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_unmap_one_fmr_exit0;
	}
	/* clean ehca_mr_t, without changing lock */
	save_fmr = *e_fmr;
	ehca_mr_deletenew(e_fmr);

	/* set some MR values */
	e_fmr->flags = save_fmr.flags;
	e_fmr->hwpage_size = save_fmr.hwpage_size;
	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
	e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
	e_fmr->acl = save_fmr.acl;

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
			  &tmp_rkey);
	if (ret) {
		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
		memcpy(&e_fmr->flags, &(save_mr.flags),
		       sizeof(struct ehca_mr) - offset);
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
	return ret;
} /* end ehca_unmap_one_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/

/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo;
	struct ib_phys_buf ib_pbuf;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hw_pgsize;

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = (u64)high_memory - PAGE_OFFSET;
	iova_start = (u64 *)KERNELBASE;
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
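
	/*
	 * Illustrative: on a machine with 8 GB of RAM, size_maxmr is
	 * 0x200000000 and the max-MR spans the whole kernel linear
	 * mapping starting at KERNELBASE.
	 */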

	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
				PAGE_SIZE);
	hw_pgsize = ehca_get_max_hwpage_size(shca);
	num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
				 hw_pgsize);

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_PHYS;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.hwpage_size = hw_pgsize;
	pginfo.u.phy.num_phys_buf = 1;
	pginfo.u.phy.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
			 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
			 num_kpages, num_hwpages);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
			 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca2ib_return_code(h_ret);
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%i e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/

/*
 * check physical buffer array of MR verbs for validity and
 * calculate the MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%llx pbuf->size=%llx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
			     "pbuf->size=%llx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
				     "pbuf->size=%llx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) &&	/* not 1st */
		     (i < (num_phys_buf - 1)) &&	/* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%llx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */

/*----------------------------------------------------------------------*/

/* check page list of map FMR verb for validity */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */

/*----------------------------------------------------------------------*/

/* PAGE_SIZE >= pginfo->hwpage_size */
static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr;
	u32 i = 0;
	u32 j = 0;
	int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;

	/* loop over desired chunk entries */
	chunk      = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
				<< PAGE_SHIFT;
			*kpage = phys_to_abs(pgaddr +
					     (pginfo->next_hwpage *
					      pginfo->hwpage_size));
			if (!(*kpage)) {
				ehca_gen_err("pgaddr=%llx "
					     "chunk->page_list[i]=%llx "
					     "i=%x next_hwpage=%llx",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[i]),
					     i, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			kpage++;
			if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.usr.next_nmap)++;
				pginfo->next_hwpage = 0;
				i++;
			}
			j++;
			if (j >= number) break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}

/*----------------------------------------------------------------------*/

/*
 * check given pages for contiguous layout
 * last page addr is returned in prev_pgaddr for further check
 */
static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
				     int start_idx, int end_idx,
				     u64 *prev_pgaddr)
{
	int t;
	for (t = start_idx; t <= end_idx; t++) {
		u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
		if (ehca_debug_level >= 3)
			ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
				     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
			ehca_gen_err("uncontiguous page found pgaddr=%llx "
				     "prev_pgaddr=%llx page_list_i=%x",
				     pgaddr, *prev_pgaddr, t);
			return -EINVAL;
		}
		*prev_pgaddr = pgaddr;
	}
	return 0;
}

/* PAGE_SIZE < pginfo->hwpage_size */
static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr, prev_pgaddr;
	u32 i = 0;
	u32 j = 0;
	int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
	int nr_kpages = kpages_per_hwpage;

	/* loop over desired chunk entries */
	chunk      = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			if (nr_kpages == kpages_per_hwpage) {
				pgaddr = (page_to_pfn(sg_page(&chunk->page_list[i]))
					  << PAGE_SHIFT);
				*kpage = phys_to_abs(pgaddr);
				if (!(*kpage)) {
					ehca_gen_err("pgaddr=%llx i=%x",
						     pgaddr, i);
					ret = -EFAULT;
					return ret;
				}
				/*
				 * The first page in a hwpage must be aligned;
				 * the first MR page is exempt from this rule.
				 */
				if (pgaddr & (pginfo->hwpage_size - 1)) {
					if (pginfo->hwpage_cnt) {
						ehca_gen_err(
							"invalid alignment "
							"pgaddr=%llx i=%x "
							"mr_pgsize=%llx",
							pgaddr, i,
							pginfo->hwpage_size);
						ret = -EFAULT;
						return ret;
					}
					/* first MR page */
					pginfo->kpage_cnt =
						(pgaddr &
						 (pginfo->hwpage_size - 1)) >>
						PAGE_SHIFT;
					nr_kpages -= pginfo->kpage_cnt;
					*kpage = phys_to_abs(
						pgaddr &
						~(pginfo->hwpage_size - 1));
				}
				if (ehca_debug_level >= 3) {
					u64 val = *(u64 *)abs_to_virt(
						phys_to_abs(pgaddr));
					ehca_gen_dbg("kpage=%llx chunk_page=%llx "
						     "value=%016llx",
						     *kpage, pgaddr, val);
				}
				prev_pgaddr = pgaddr;
				i++;
				pginfo->kpage_cnt++;
				pginfo->u.usr.next_nmap++;
				nr_kpages--;
				if (!nr_kpages)
					goto next_kpage;
				continue;
			}
			if (i + nr_kpages > chunk->nmap) {
				ret = ehca_check_kpages_per_ate(
					chunk->page_list, i,
					chunk->nmap - 1, &prev_pgaddr);
				if (ret) return ret;
				pginfo->kpage_cnt += chunk->nmap - i;
				pginfo->u.usr.next_nmap += chunk->nmap - i;
				nr_kpages -= chunk->nmap - i;
				break;
			}

			ret = ehca_check_kpages_per_ate(chunk->page_list, i,
							i + nr_kpages - 1,
							&prev_pgaddr);
			if (ret) return ret;
			i += nr_kpages;
			pginfo->kpage_cnt += nr_kpages;
			pginfo->u.usr.next_nmap += nr_kpages;
next_kpage:
			nr_kpages = kpages_per_hwpage;
			(pginfo->hwpage_cnt)++;
			kpage++;
			j++;
			if (j >= number) break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}

static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
				 u32 number, u64 *kpage)
{
	int ret = 0;
	struct ib_phys_buf *pbuf;
	u64 num_hw, offs_hw;
	u32 i = 0;

	/* loop over desired phys_buf_array entries */
	while (i < number) {
		pbuf   = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
		num_hw  = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
				     pbuf->size, pginfo->hwpage_size);
		offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
			pginfo->hwpage_size;
		while (pginfo->next_hwpage < offs_hw + num_hw) {
			/* sanity check */
			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
			    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
				ehca_gen_err("kpage_cnt >= num_kpages, "
					     "kpage_cnt=%llx num_kpages=%llx "
					     "hwpage_cnt=%llx "
					     "num_hwpages=%llx i=%x",
					     pginfo->kpage_cnt,
					     pginfo->num_kpages,
					     pginfo->hwpage_cnt,
					     pginfo->num_hwpages, i);
				return -EFAULT;
			}
			*kpage = phys_to_abs(
				(pbuf->addr & ~(pginfo->hwpage_size - 1)) +
				(pginfo->next_hwpage * pginfo->hwpage_size));
			if (!(*kpage) && pbuf->addr) {
				ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
					     "next_hwpage=%llx", pbuf->addr,
					     pbuf->size, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			if (PAGE_SIZE >= pginfo->hwpage_size) {
				if (pginfo->next_hwpage %
				    (PAGE_SIZE / pginfo->hwpage_size) == 0)
					(pginfo->kpage_cnt)++;
			} else
				pginfo->kpage_cnt += pginfo->hwpage_size /
					PAGE_SIZE;
			kpage++;
			i++;
			if (i >= number) break;
		}
		if (pginfo->next_hwpage >= offs_hw + num_hw) {
			(pginfo->u.phy.next_buf)++;
			pginfo->next_hwpage = 0;
		}
	}
	return ret;
}

static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
				u32 number, u64 *kpage)
{
	int ret = 0;
	u64 *fmrlist;
	u32 i;

	/* loop over desired page_list entries */
	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
	for (i = 0; i < number; i++) {
		*kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
				     pginfo->next_hwpage * pginfo->hwpage_size);
		if (!(*kpage)) {
			ehca_gen_err("*fmrlist=%llx fmrlist=%p "
				     "next_listelem=%llx next_hwpage=%llx",
				     *fmrlist, fmrlist,
				     pginfo->u.fmr.next_listelem,
				     pginfo->next_hwpage);
			return -EFAULT;
		}
		(pginfo->hwpage_cnt)++;
		if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
			if (pginfo->next_hwpage %
			    (pginfo->u.fmr.fmr_pgsize /
			     pginfo->hwpage_size) == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.fmr.next_listelem)++;
				fmrlist++;
				pginfo->next_hwpage = 0;
			} else
				(pginfo->next_hwpage)++;
		} else {
			unsigned int cnt_per_hwpage = pginfo->hwpage_size /
				pginfo->u.fmr.fmr_pgsize;
			unsigned int j;
			u64 prev = *kpage;
			/* check if adrs are contiguous */
			for (j = 1; j < cnt_per_hwpage; j++) {
				u64 p = phys_to_abs(fmrlist[j] &
						    ~(pginfo->hwpage_size - 1));
				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
					ehca_gen_err("uncontiguous fmr pages "
						     "found prev=%llx p=%llx "
						     "idx=%x", prev, p, i + j);
					return -EINVAL;
				}
				prev = p;
			}
			pginfo->kpage_cnt += cnt_per_hwpage;
			pginfo->u.fmr.next_listelem += cnt_per_hwpage;
			fmrlist += cnt_per_hwpage;
		}
		kpage++;
	}
	return ret;
}

/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret;

	switch (pginfo->type) {
	case EHCA_MR_PGI_PHYS:
		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_USER:
		ret = PAGE_SIZE >= pginfo->hwpage_size ?
			ehca_set_pagebuf_user1(pginfo, number, kpage) :
			ehca_set_pagebuf_user2(pginfo, number, kpage);
		break;
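		/*
		 * Illustrative: with 64 KB kernel pages and 4 KB hw pages,
		 * user1 splits each kernel page into 16 hw pages; with 4 KB
		 * kernel pages and a 16 MB hw page, user2 gathers 4096
		 * kernel pages per hw page.
		 */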
	case EHCA_MR_PGI_FMR:
		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
		break;
	default:
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		break;
	}
	return ret;
} /* end ehca_set_pagebuf() */

/*----------------------------------------------------------------------*/

/*
 * check whether an MR is a max-MR, i.e. whether it spans the whole memory;
 * returns 1 if it is a max-MR, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* a MR is treated as max-MR only if it fits following: */
	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
	    (iova_start == (void *)KERNELBASE)) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */

/*----------------------------------------------------------------------*/

/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/

/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
{
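	/*
	 * Illustrative: 64 KB encodes to 1 (see ehca_encode_hwpage_size()),
	 * so the resulting hipz_acl carries 0x01 in bits 24..31.
	 */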
	*hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/

/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags = 0;
	mr->num_kpages = 0;
	mr->num_hwpages = 0;
	mr->acl = 0;
	mr->start = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps = 0;
	mr->fmr_map_cnt = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
} /* end ehca_mr_deletenew() */

int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}