/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Firmware Infiniband Interface code for POWER
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Gerd Bayer <gerd.bayer@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  Redistributions of source code must retain the above copyright notice, this
 *  list of conditions and the following disclaimer.
 *
 *  Redistributions in binary form must reproduce the above copyright notice,
 *  this list of conditions and the following disclaimer in the documentation
 *  and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 *  IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "ipz_pt_fn.h"

#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)

#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)

#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)

#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)

/* direct access qp controls */
#define DAQP_CTRL_ENABLE    0x01
#define DAQP_CTRL_SEND_COMP 0x20
#define DAQP_CTRL_RECV_COMP 0x40

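/*
 * Note: EHCA_BMASK_IBM(first, last) is assumed to describe a field spanning
 * the IBM-numbered (MSB = bit 0) bits first..last of a 64-bit word, with
 * EHCA_BMASK_SET()/EHCA_BMASK_GET() packing a value into and extracting it
 * from such a field. The H_ALL_RES_QP_* masks above describe the input and
 * output registers of the H_ALLOC_RESOURCE hcall for QPs; the DAQP_CTRL_*
 * flags select direct access ("low latency") QP behavior.
 */
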
static u32 get_longbusy_msecs(int longbusy_rc)
{
        switch (longbusy_rc) {
        case H_LONG_BUSY_ORDER_1_MSEC:   return 1;
        case H_LONG_BUSY_ORDER_10_MSEC:  return 10;
        case H_LONG_BUSY_ORDER_100_MSEC: return 100;
        case H_LONG_BUSY_ORDER_1_SEC:    return 1000;
        case H_LONG_BUSY_ORDER_10_SEC:   return 10000;
        case H_LONG_BUSY_ORDER_100_SEC:  return 100000;
        default:                         return 1;
        }
}

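/*
 * ehca_plpar_hcall_norets() - wrapper around plpar_hcall_norets() that logs
 * the call, retries up to five times while the hypervisor returns a long-busy
 * code (sleeping for the interval suggested by get_longbusy_msecs() between
 * attempts), and logs the arguments of failed calls.
 */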
static long ehca_plpar_hcall_norets(unsigned long opcode,

        ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
                     "arg5=%lx arg6=%lx arg7=%lx",
                     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);

        for (i = 0; i < 5; i++) {
                ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,

                if (H_IS_LONG_BUSY(ret)) {
                        sleep_msecs = get_longbusy_msecs(ret);
                        msleep_interruptible(sleep_msecs);

                ehca_gen_err("opcode=%lx ret=%lx"
                             " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
                             " arg5=%lx arg6=%lx arg7=%lx ",
                             arg1, arg2, arg3, arg4, arg5,

                ehca_gen_dbg("opcode=%lx ret=%lx", opcode, ret);

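/*
 * ehca_plpar_hcall9() - same logging and long-busy retry scheme as above for
 * hcalls that return up to nine output values; results are passed back
 * through the caller-supplied outs[] array of PLPAR_HCALL9_BUFSIZE entries.
 */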
static long ehca_plpar_hcall9(unsigned long opcode,
                              unsigned long *outs, /* array of 9 outputs */

        ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
                     "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
                     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7,

        for (i = 0; i < 5; i++) {
                ret = plpar_hcall9(opcode, outs,
                                   arg1, arg2, arg3, arg4, arg5,
                                   arg6, arg7, arg8, arg9);

                if (H_IS_LONG_BUSY(ret)) {
                        sleep_msecs = get_longbusy_msecs(ret);
                        msleep_interruptible(sleep_msecs);

                ehca_gen_err("opcode=%lx ret=%lx"
                             " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
                             " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
                             " out1=%lx out2=%lx out3=%lx out4=%lx"
                             " out5=%lx out6=%lx out7=%lx out8=%lx"
                             arg1, arg2, arg3, arg4, arg5,
                             arg6, arg7, arg8, arg9,
                             outs[0], outs[1], outs[2], outs[3],
                             outs[4], outs[5], outs[6], outs[7],

                ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx "
                             "out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx "
                             opcode, ret, outs[0], outs[1], outs[2], outs[3],
                             outs[4], outs[5], outs[6], outs[7], outs[8]);

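/*
 * hipz_h_alloc_resource_eq() - allocate an event queue via H_ALLOC_RESOURCE.
 * allocate_controls starts from the fixed value 3 (assumed to encode the EQ
 * resource type) and sets a high-order bit depending on whether an ISN is
 * associated or a notification event queue is requested; the EQ handle, the
 * actual number of entries, the page count and the interrupt source token
 * come back in outs[].
 */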
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
                             struct ehca_pfeq *pfeq,
                             const u32 neq_control,
                             const u32 number_of_entries,
                             struct ipz_eq_handle *eq_handle,
                             u32 *act_nr_of_entries,

        u64 outs[PLPAR_HCALL9_BUFSIZE];
        u64 allocate_controls;

        allocate_controls = 3ULL;

        /* ISN is associated */
        if (neq_control != 1)
                allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
        else /* notification event queue */
                allocate_controls = (1ULL << 63) | allocate_controls;

        ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                                adapter_handle.handle, /* r4 */
                                allocate_controls,     /* r5 */
                                number_of_entries,     /* r6 */

        eq_handle->handle = outs[0];
        *act_nr_of_entries = (u32)outs[3];
        *act_pages = (u32)outs[4];
        *eq_ist = (u32)outs[5];

        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Not enough resources. ret=%lx", ret);

u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
                       struct ipz_eq_handle eq_handle,
                       const u64 event_mask)

        return ehca_plpar_hcall_norets(H_RESET_EVENTS,
                                       adapter_handle.handle, /* r4 */
                                       eq_handle.handle,      /* r5 */

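/*
 * hipz_h_alloc_resource_cq() - allocate a completion queue. On success,
 * outs[5]/outs[6] (assumed to be the kernel- and user-space register page
 * addresses) are handed to hcp_galpas_ctor() to build the access windows for
 * the new CQ.
 */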
u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
                             struct ehca_alloc_cq_parms *param)

        u64 outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                                adapter_handle.handle,   /* r4 */
                                param->eq_handle.handle, /* r6 */
                                param->nr_cqe,           /* r8 */

        cq->ipz_cq_handle.handle = outs[0];
        param->act_nr_of_entries = (u32)outs[3];
        param->act_pages = (u32)outs[4];

        if (ret == H_SUCCESS)
                hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);

        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Not enough resources. ret=%lx", ret);

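/*
 * hipz_h_alloc_resource_qp() - allocate a queue pair. The H_ALL_RES_QP_*
 * masks defined above pack the requested service type, signaling type,
 * direct-access (low latency) completion options and WQE/SGE limits into the
 * hcall input registers; the limits actually granted by firmware are unpacked
 * from outs[] into *parms.
 */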
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
                             struct ehca_alloc_qp_parms *parms)

        u64 allocate_controls;
        u64 outs[PLPAR_HCALL9_BUFSIZE];
        u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1;
        u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1;
        int daqp_ctrl = parms->daqp_ctrl;

                EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS,
                               (daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0)
                | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
                | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
                | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
                | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
                                 (daqp_ctrl & DAQP_CTRL_RECV_COMP) ? 1 : 0)
                | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
                                 (daqp_ctrl & DAQP_CTRL_SEND_COMP) ? 1 : 0)
                | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
                                 parms->ud_av_l_key_ctl)
                | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

                EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
                | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
                | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
                | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
                                 parms->max_recv_sge);

        ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                                adapter_handle.handle, /* r4 */
                                allocate_controls,     /* r5 */
                                qp->send_cq->ipz_cq_handle.handle,
                                qp->recv_cq->ipz_cq_handle.handle,
                                parms->ipz_eq_handle.handle,
                                ((u64)qp->token << 32) | parms->pd.value,
                                max_r10_reg,             /* r10 */
                                parms->ud_av_l_key_ctl,  /* r11 */

        qp->ipz_qp_handle.handle = outs[0];
        qp->real_qp_num = (u32)outs[1];
        parms->act_nr_send_wqes =
                (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
        parms->act_nr_recv_wqes =
                (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
        parms->act_nr_send_sges =
                (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
        parms->act_nr_recv_sges =
                (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
                (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
                (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

        if (ret == H_SUCCESS)
                hcp_galpas_ctor(&qp->galpas, outs[6], outs[6]);

        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Not enough resources. ret=%lx", ret);

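/*
 * hipz_h_query_port() - read the port attributes into the caller-supplied
 * response block, which firmware requires to be page aligned.
 */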
u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
                      struct hipz_query_port *query_port_response_block)

        u64 r_cb = virt_to_abs(query_port_response_block);

        if (r_cb & (EHCA_PAGESIZE-1)) {
                ehca_gen_err("response block not page aligned");

        ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
                                      adapter_handle.handle, /* r4 */

        if (ehca_debug_level)
                ehca_dmp(query_port_response_block, 64, "response_block");

u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
                     struct hipz_query_hca *query_hca_rblock)

        u64 r_cb = virt_to_abs(query_hca_rblock);

        if (r_cb & (EHCA_PAGESIZE-1)) {
                ehca_gen_err("response_block=%p not page aligned",

        return ehca_plpar_hcall_norets(H_QUERY_HCA,
                                       adapter_handle.handle, /* r4 */

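/*
 * hipz_h_register_rpage() - common helper for registering resource pages;
 * r5 combines the queue type with the page size, r6 names the resource the
 * pages belong to. The EQ/CQ/QP/MR variants below all funnel into this call.
 */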
u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
                          const u64 resource_handle,
                          const u64 logical_address_of_page,

        return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
                                       adapter_handle.handle,      /* r4 */
                                       queue_type | pagesize << 8, /* r5 */
                                       resource_handle,            /* r6 */
                                       logical_address_of_page,    /* r7 */

u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
                             const struct ipz_eq_handle eq_handle,
                             struct ehca_pfeq *pfeq,
                             const u64 logical_address_of_page,

                ehca_gen_err("Page counter=%lx", count);

        return hipz_h_register_rpage(adapter_handle,
                                     logical_address_of_page, count);

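/*
 * hipz_h_query_int_state() - query the state of an interrupt source;
 * anything other than H_SUCCESS or H_BUSY is reported as an error.
 */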
u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,

        ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
                                      adapter_handle.handle, /* r4 */

        if (ret != H_SUCCESS && ret != H_BUSY)
                ehca_gen_err("Could not query interrupt state.");

u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
                             const struct ipz_cq_handle cq_handle,
                             struct ehca_pfcq *pfcq,
                             const u64 logical_address_of_page,
                             const struct h_galpa gal)

                ehca_gen_err("Page counter=%lx", count);

        return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
                                     cq_handle.handle, logical_address_of_page,

u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
                             const struct ipz_qp_handle qp_handle,
                             struct ehca_pfqp *pfqp,
                             const u64 logical_address_of_page,
                             const struct h_galpa galpa)

                ehca_gen_err("Page counter=%lx", count);

        return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
                                     qp_handle.handle, logical_address_of_page,

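/*
 * hipz_h_disable_and_get_wqe() - disable a QP via H_DISABLE_AND_GETC and,
 * if requested, return the logical addresses of the next send and receive
 * WQEs to be processed (outs[0] and outs[1]).
 */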
u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
                               const struct ipz_qp_handle qp_handle,
                               struct ehca_pfqp *pfqp,
                               void **log_addr_next_sq_wqe2processed,
                               void **log_addr_next_rq_wqe2processed,
                               int dis_and_get_function_code)

        u64 outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
                                adapter_handle.handle,      /* r4 */
                                dis_and_get_function_code,  /* r5 */
                                qp_handle.handle,           /* r6 */

        if (log_addr_next_sq_wqe2processed)
                *log_addr_next_sq_wqe2processed = (void *)outs[0];
        if (log_addr_next_rq_wqe2processed)
                *log_addr_next_rq_wqe2processed = (void *)outs[1];

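/*
 * hipz_h_modify_qp() - apply the attributes selected by update_mask from the
 * modify-QP control block (passed by its absolute address) to the QP.
 */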
u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
                     const struct ipz_qp_handle qp_handle,
                     struct ehca_pfqp *pfqp,
                     const u64 update_mask,
                     struct hcp_modify_qp_control_block *mqpcb,

        u64 outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
                                adapter_handle.handle, /* r4 */
                                qp_handle.handle,      /* r5 */
                                update_mask,           /* r6 */
                                virt_to_abs(mqpcb),    /* r7 */

        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Insufficient resources ret=%lx", ret);

u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
                    const struct ipz_qp_handle qp_handle,
                    struct ehca_pfqp *pfqp,
                    struct hcp_modify_qp_control_block *qqpcb,

        return ehca_plpar_hcall_norets(H_QUERY_QP,
                                       adapter_handle.handle, /* r4 */
                                       qp_handle.handle,      /* r5 */
                                       virt_to_abs(qqpcb),    /* r6 */

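/*
 * hipz_h_destroy_qp() - tear down a QP: destruct its galpas, disable the QP
 * via H_DISABLE_AND_GETC, then release it with H_FREE_RESOURCE.
 */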
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,

        u64 outs[PLPAR_HCALL9_BUFSIZE];

        ret = hcp_galpas_dtor(&qp->galpas);
                ehca_gen_err("Could not destruct qp->galpas");

        ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
                                adapter_handle.handle,    /* r4 */
                                qp->ipz_qp_handle.handle, /* r6 */

        if (ret == H_HARDWARE)
                ehca_gen_err("HCA not operational. ret=%lx", ret);

        ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                      adapter_handle.handle,    /* r4 */
                                      qp->ipz_qp_handle.handle, /* r5 */

        if (ret == H_RESOURCE)
                ehca_gen_err("Resource still in use. ret=%lx", ret);

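/*
 * hipz_h_define_aqp0()/hipz_h_define_aqp1() - apparently bind a QP to the
 * special QP0/QP1 function of a port; for AQP1 the firmware additionally
 * returns the QP numbers of the associated PMA and BMA queue pairs in
 * outs[0] and outs[1].
 */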
u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
                       const struct ipz_qp_handle qp_handle,

        return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
                                       adapter_handle.handle, /* r4 */
                                       qp_handle.handle,      /* r5 */

u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
                       const struct ipz_qp_handle qp_handle,
                       u32 port, u32 *pma_qp_nr,

        u64 outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
                                adapter_handle.handle, /* r4 */
                                qp_handle.handle,      /* r5 */

        *pma_qp_nr = (u32)outs[0];
        *bma_qp_nr = (u32)outs[1];

        if (ret == H_ALIAS_EXIST)
                ehca_gen_err("AQP1 already exists. ret=%lx", ret);

u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
                       const struct ipz_qp_handle qp_handle,
                       u64 subnet_prefix, u64 interface_id)

        ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
                                      adapter_handle.handle, /* r4 */
                                      qp_handle.handle,      /* r5 */
                                      interface_id,          /* r7 */
                                      subnet_prefix,         /* r8 */

        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Not enough resources. ret=%lx", ret);

u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
                       const struct ipz_qp_handle qp_handle,
                       u64 subnet_prefix, u64 interface_id)

        return ehca_plpar_hcall_norets(H_DETACH_MCQP,
                                       adapter_handle.handle, /* r4 */
                                       qp_handle.handle,      /* r5 */
                                       interface_id,          /* r7 */
                                       subnet_prefix,         /* r8 */

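/*
 * hipz_h_destroy_cq()/hipz_h_destroy_eq() - destruct the galpas of the CQ/EQ
 * and release the firmware resource with H_FREE_RESOURCE; H_RESOURCE from
 * firmware indicates the object is still in use.
 */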
u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,

        ret = hcp_galpas_dtor(&cq->galpas);
                ehca_gen_err("Could not destruct cq->galpas");

        ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                      adapter_handle.handle,     /* r4 */
                                      cq->ipz_cq_handle.handle,  /* r5 */
                                      force_flag != 0 ? 1L : 0L, /* r6 */

        if (ret == H_RESOURCE)
                ehca_gen_err("H_FREE_RESOURCE failed ret=%lx", ret);

u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,

        ret = hcp_galpas_dtor(&eq->galpas);
                ehca_gen_err("Could not destruct eq->galpas");

        ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                      adapter_handle.handle,    /* r4 */
                                      eq->ipz_eq_handle.handle, /* r5 */

        if (ret == H_RESOURCE)
                ehca_gen_err("Resource in use. ret=%lx", ret);

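/*
 * The calls below manage memory regions and memory windows: allocation,
 * registration of physical pages, query, re-registration, shared-MR
 * registration and freeing. Handles, L_Keys and R_Keys are picked out of the
 * outs[] array returned by the corresponding hcalls.
 */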
u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
                             const struct ehca_mr *mr,
                             const u32 access_ctrl,
                             const struct ipz_pd pd,
                             struct ehca_mr_hipzout_parms *outparms)

        u64 outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                                adapter_handle.handle,         /* r4 */
                                (((u64)access_ctrl) << 32ULL), /* r8 */

        outparms->handle.handle = outs[0];
        outparms->lkey = (u32)outs[2];
        outparms->rkey = (u32)outs[3];

u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
                             const struct ehca_mr *mr,
                             const u64 logical_address_of_page,

        if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
                ehca_gen_err("logical_address_of_page not on a 4k boundary "
                             "adapter_handle=%lx mr=%p mr_handle=%lx "
                             "pagesize=%x queue_type=%x "
                             "logical_address_of_page=%lx count=%lx",
                             adapter_handle.handle, mr,
                             mr->ipz_mr_handle.handle, pagesize, queue_type,
                             logical_address_of_page, count);

        ret = hipz_h_register_rpage(adapter_handle, pagesize,
                                    mr->ipz_mr_handle.handle,
                                    logical_address_of_page, count);

u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
                    const struct ehca_mr *mr,
                    struct ehca_mr_hipzout_parms *outparms)

        u64 outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
                                adapter_handle.handle,    /* r4 */
                                mr->ipz_mr_handle.handle, /* r5 */
                                0, 0, 0, 0, 0, 0, 0);

        outparms->len = outs[0];
        outparms->vaddr = outs[1];
        outparms->acl = outs[4] >> 32;
        outparms->lkey = (u32)(outs[5] >> 32);
        outparms->rkey = (u32)(outs[5] & (0xffffffff));

u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
                            const struct ehca_mr *mr)

        return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                       adapter_handle.handle,    /* r4 */
                                       mr->ipz_mr_handle.handle, /* r5 */

u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
                          const struct ehca_mr *mr,
                          const u32 access_ctrl,
                          const struct ipz_pd pd,
                          const u64 mr_addr_cb,
                          struct ehca_mr_hipzout_parms *outparms)

        u64 outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
                                adapter_handle.handle,    /* r4 */
                                mr->ipz_mr_handle.handle, /* r5 */
                                ((((u64)access_ctrl) << 32ULL) | pd.value),

        outparms->vaddr = outs[1];
        outparms->lkey = (u32)outs[2];
        outparms->rkey = (u32)outs[3];

u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
                        const struct ehca_mr *mr,
                        const struct ehca_mr *orig_mr,
                        const u32 access_ctrl,
                        const struct ipz_pd pd,
                        struct ehca_mr_hipzout_parms *outparms)

        u64 outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
                                adapter_handle.handle,         /* r4 */
                                orig_mr->ipz_mr_handle.handle, /* r5 */
                                (((u64)access_ctrl) << 32ULL), /* r7 */

        outparms->handle.handle = outs[0];
        outparms->lkey = (u32)outs[2];
        outparms->rkey = (u32)outs[3];

u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
                             const struct ehca_mw *mw,
                             const struct ipz_pd pd,
                             struct ehca_mw_hipzout_parms *outparms)

        u64 outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                                adapter_handle.handle, /* r4 */

        outparms->handle.handle = outs[0];
        outparms->rkey = (u32)outs[3];

u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
                    const struct ehca_mw *mw,
                    struct ehca_mw_hipzout_parms *outparms)

        u64 outs[PLPAR_HCALL9_BUFSIZE];

        ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
                                adapter_handle.handle,    /* r4 */
                                mw->ipz_mw_handle.handle, /* r5 */
                                0, 0, 0, 0, 0, 0, 0);

        outparms->rkey = (u32)outs[3];

u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
                            const struct ehca_mw *mw)

        return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
                                       adapter_handle.handle,    /* r4 */
                                       mw->ipz_mw_handle.handle, /* r5 */

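/*
 * hipz_h_error_data() - retrieve error information for the given resource
 * into rblock, which must be page aligned.
 */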
u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
                      const u64 resource_handle,
                      unsigned long *byte_count)

        u64 r_cb = virt_to_abs(rblock);

        if (r_cb & (EHCA_PAGESIZE-1)) {
                ehca_gen_err("rblock not page aligned.");

        return ehca_plpar_hcall_norets(H_ERROR_DATA,
                                       adapter_handle.handle,