2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
4 * Firmware Infiniband Interface code for POWER
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 * Gerd Bayer <gerd.bayer@de.ibm.com>
10 * Waleri Fomin <fomin@de.ibm.com>
12 * Copyright (c) 2005 IBM Corporation
14 * All rights reserved.
16 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions are met:
24 * Redistributions of source code must retain the above copyright notice, this
25 * list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright notice,
28 * this list of conditions and the following disclaimer in the documentation
29 * and/or other materials
30 * provided with the distribution.
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
33 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
36 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
40 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGE.
45 #include <asm/hvcall.h>
46 #include "ehca_tools.h"
50 #include "ipz_pt_fn.h"
/*
 * Bit-field masks for packing/unpacking the H_ALLOC_RESOURCE(QP) hCall
 * registers.  EHCA_BMASK_IBM() comes from ehca_tools.h and appears to
 * use IBM bit numbering (bit 0 = most significant) — TODO confirm.
 */
/* input: QP allocate_controls word (passed as r5) */
52 #define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
53 #define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
54 #define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
55 #define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
56 #define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
57 #define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
58 #define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
59 #define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
/* input: requested maximum WR/SGE counts, packed into one register */
61 #define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
62 #define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
63 #define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
64 #define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
/* input: UD address-vector L_key and SRQ association fields */
66 #define H_ALL_RES_QP_UD_AV_LKEY EHCA_BMASK_IBM(32, 63)
67 #define H_ALL_RES_QP_SRQ_QP_TOKEN EHCA_BMASK_IBM(0, 31)
/* NOTE(review): bit 64 is out of range for a 64-bit register;
 * this looks like it should be EHCA_BMASK_IBM(0, 63) — verify. */
68 #define H_ALL_RES_QP_SRQ_QP_HANDLE EHCA_BMASK_IBM(0, 64)
69 #define H_ALL_RES_QP_SRQ_LIMIT EHCA_BMASK_IBM(48, 63)
70 #define H_ALL_RES_QP_SRQ_QPN EHCA_BMASK_IBM(40, 63)
/* output: actual WR/SGE counts granted by firmware (see
 * hipz_h_alloc_resource_qp(), which decodes outs[2]/outs[3]) */
72 #define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
73 #define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
74 #define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
75 #define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)
/* output: send/receive queue sizes in pages (decoded from outs[4]) */
77 #define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
78 #define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
/* H_MODIFY_PORT attribute bits (see hipz_h_modify_port()) */
80 #define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
81 #define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
82 #define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
/* serializes certain H_ALLOC_RESOURCE calls — see ehca_plpar_hcall9() */
84 static DEFINE_SPINLOCK(hcall_lock);
/*
 * Map a H_LONG_BUSY_ORDER_* hCall return code to the number of
 * milliseconds the caller should sleep before retrying.  Presumably
 * each case returns the delay named by its constant (1 ms .. 100 s) —
 * the return statements are not visible in this excerpt; confirm
 * against the full source.
 */
86 static u32 get_longbusy_msecs(int longbusy_rc)
88 switch (longbusy_rc) {
89 case H_LONG_BUSY_ORDER_1_MSEC:
91 case H_LONG_BUSY_ORDER_10_MSEC:
93 case H_LONG_BUSY_ORDER_100_MSEC:
95 case H_LONG_BUSY_ORDER_1_SEC:
97 case H_LONG_BUSY_ORDER_10_SEC:
99 case H_LONG_BUSY_ORDER_100_SEC:
/*
 * Retry wrapper around plpar_hcall_norets() for hCalls with no output
 * registers.  The call is attempted up to 5 times; whenever firmware
 * answers H_LONG_BUSY_* the thread sleeps for the delay suggested by
 * get_longbusy_msecs() and retries.  All arguments are traced at debug
 * level; a failing return is logged with the full argument set.
 * NOTE(review): the remaining-time result of msleep_interruptible() is
 * ignored, so a signal merely shortens the back-off before the retry.
 */
106 static long ehca_plpar_hcall_norets(unsigned long opcode,
118 ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
119 "arg5=%lx arg6=%lx arg7=%lx",
120 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
/* bounded retry loop: at most 5 attempts */
122 for (i = 0; i < 5; i++) {
123 ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
126 if (H_IS_LONG_BUSY(ret)) {
127 sleep_msecs = get_longbusy_msecs(ret);
128 msleep_interruptible(sleep_msecs);
133 ehca_gen_err("opcode=%lx ret=%lx"
134 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
135 " arg5=%lx arg6=%lx arg7=%lx ",
137 arg1, arg2, arg3, arg4, arg5,
140 ehca_gen_dbg("opcode=%lx ret=%lx", opcode, ret);
/*
 * Retry wrapper around plpar_hcall9() — the 9-input/9-output hCall
 * variant.  Same retry policy as ehca_plpar_hcall_norets(): up to 5
 * attempts with an interruptible sleep on H_LONG_BUSY_*.
 *
 * H_ALLOC_RESOURCE calls whose second argument equals 5 are serialized
 * under hcall_lock (irqsave); arg2 is the firmware resource-control
 * word, so 5 presumably selects one specific resource type — TODO
 * confirm which one against the full source.  On success and on
 * failure the nine output registers are dumped via the log helpers.
 */
148 static long ehca_plpar_hcall9(unsigned long opcode,
149 unsigned long *outs, /* array of 9 outputs */
161 int i, sleep_msecs, lock_is_set = 0;
162 unsigned long flags = 0;
164 ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
165 "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
166 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
/* bounded retry loop: at most 5 attempts */
169 for (i = 0; i < 5; i++) {
/* only this one opcode/arg2 combination needs the global lock */
170 if ((opcode == H_ALLOC_RESOURCE) && (arg2 == 5)) {
171 spin_lock_irqsave(&hcall_lock, flags);
175 ret = plpar_hcall9(opcode, outs,
176 arg1, arg2, arg3, arg4, arg5,
177 arg6, arg7, arg8, arg9);
180 spin_unlock_irqrestore(&hcall_lock, flags);
182 if (H_IS_LONG_BUSY(ret)) {
183 sleep_msecs = get_longbusy_msecs(ret);
184 msleep_interruptible(sleep_msecs);
189 ehca_gen_err("opcode=%lx ret=%lx"
190 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
191 " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
193 " out1=%lx out2=%lx out3=%lx out4=%lx"
194 " out5=%lx out6=%lx out7=%lx out8=%lx"
197 arg1, arg2, arg3, arg4, arg5,
198 arg6, arg7, arg8, arg9,
199 outs[0], outs[1], outs[2], outs[3],
200 outs[4], outs[5], outs[6], outs[7],
203 ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx "
204 "out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx "
206 opcode, ret, outs[0], outs[1], outs[2], outs[3],
207 outs[4], outs[5], outs[6], outs[7], outs[8]);
/*
 * Allocate an event queue (EQ) in firmware via H_ALLOC_RESOURCE.
 *
 * allocate_controls starts at 3 (EQ resource type, presumably) and is
 * then tagged in its top byte: bit 63 for a notification event queue
 * (neq_control == 1), otherwise bit 56 for an ISN-associated EQ.
 *
 * Outputs written back to the caller: the EQ handle (outs[0]), the
 * actual number of entries granted (outs[3]), the number of queue
 * pages (outs[4]) and the interrupt source token (outs[5]).  Returns
 * the hCall status; H_NOT_ENOUGH_RESOURCES is additionally logged.
 */
214 u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
215 struct ehca_pfeq *pfeq,
216 const u32 neq_control,
217 const u32 number_of_entries,
218 struct ipz_eq_handle *eq_handle,
219 u32 *act_nr_of_entries,
224 u64 outs[PLPAR_HCALL9_BUFSIZE];
225 u64 allocate_controls;
/* resource type: EQ */
228 allocate_controls = 3ULL;
230 /* ISN is associated */
231 if (neq_control != 1)
232 allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
233 else /* notification event queue */
234 allocate_controls = (1ULL << 63) | allocate_controls;
236 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
237 adapter_handle.handle, /* r4 */
238 allocate_controls, /* r5 */
239 number_of_entries, /* r6 */
/* unpack the firmware's output registers */
241 eq_handle->handle = outs[0];
242 *act_nr_of_entries = (u32)outs[3];
243 *act_pages = (u32)outs[4];
244 *eq_ist = (u32)outs[5];
246 if (ret == H_NOT_ENOUGH_RESOURCES)
247 ehca_gen_err("Not enough resource - ret=%lx ", ret);
/*
 * Re-arm/reset events on an event queue: thin pass-through to the
 * H_RESET_EVENTS hCall for the given adapter, EQ handle and event
 * mask.  Returns the raw hCall status.
 */
252 u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
253 struct ipz_eq_handle eq_handle,
254 const u64 event_mask)
256 return ehca_plpar_hcall_norets(H_RESET_EVENTS,
257 adapter_handle.handle, /* r4 */
258 eq_handle.handle, /* r5 */
/*
 * Allocate a completion queue (CQ) in firmware via H_ALLOC_RESOURCE.
 *
 * On return the CQ handle (outs[0]), the actual number of CQ entries
 * (outs[3]) and the page count (outs[4]) are stored; on H_SUCCESS the
 * galpa register areas are constructed from outs[5]/outs[6].  Returns
 * the hCall status; H_NOT_ENOUGH_RESOURCES is additionally logged.
 */
263 u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
265 struct ehca_alloc_cq_parms *param)
268 u64 outs[PLPAR_HCALL9_BUFSIZE];
270 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
271 adapter_handle.handle, /* r4 */
273 param->eq_handle.handle, /* r6 */
275 param->nr_cqe, /* r8 */
277 cq->ipz_cq_handle.handle = outs[0];
278 param->act_nr_of_entries = (u32)outs[3];
279 param->act_pages = (u32)outs[4];
/* map the firmware-provided register pages only on success */
281 if (ret == H_SUCCESS)
282 hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
284 if (ret == H_NOT_ENOUGH_RESOURCES)
285 ehca_gen_err("Not enough resources. ret=%lx", ret);
/*
 * Allocate a queue pair (QP) in firmware via H_ALLOC_RESOURCE.
 *
 * The request is packed with the H_ALL_RES_QP_* masks:
 *  - allocate_controls (r5): QP type/flags — extension type, service
 *    type, signaling type, low-latency CQE posting flags, UD AV L_key
 *    control, and resource type 1 (QP);
 *  - max_r10_reg: requested WR counts (+1 each, apparently to reserve
 *    headroom — TODO confirm rationale) and SGE counts;
 *  - r11: SRQ token;  r12: SRQ limit for an SRQ (EQPT_SRQ), otherwise
 *    the SRQ QP number.
 *
 * On return the granted values are unpacked from the output registers:
 * QP handle (outs[0]), real QP number (outs[1]), actual send/recv WR
 * counts (outs[2]), actual send/recv SGE counts (outs[3]) and the
 * queue sizes in pages (outs[4]).  On H_SUCCESS the galpas are
 * constructed from outs[6].  H_NOT_ENOUGH_RESOURCES is logged.
 */
290 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
291 struct ehca_alloc_qp_parms *parms)
294 u64 allocate_controls, max_r10_reg, r11, r12;
295 u64 outs[PLPAR_HCALL9_BUFSIZE];
298 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
299 | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
300 | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
301 | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
302 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
303 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
304 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
305 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
306 | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
307 parms->ud_av_l_key_ctl)
308 | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
311 EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
312 parms->max_send_wr + 1)
313 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
314 parms->max_recv_wr + 1)
315 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
317 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
318 parms->max_recv_sge);
320 r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
/* r12 carries the SRQ limit when creating an SRQ itself,
 * or the SRQ's QP number when attaching a QP to an SRQ */
322 if (parms->ext_type == EQPT_SRQ)
323 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
325 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
327 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
328 adapter_handle.handle, /* r4 */
329 allocate_controls, /* r5 */
330 parms->send_cq_handle.handle,
331 parms->recv_cq_handle.handle,
332 parms->eq_handle.handle,
333 ((u64)parms->token << 32) | parms->pd.value,
334 max_r10_reg, r11, r12);
/* unpack the actual (granted) values from the output registers */
336 parms->qp_handle.handle = outs[0];
337 parms->real_qp_num = (u32)outs[1];
338 parms->act_nr_send_wqes =
339 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
340 parms->act_nr_recv_wqes =
341 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
342 parms->act_nr_send_sges =
343 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
344 parms->act_nr_recv_sges =
345 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
347 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
349 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
351 if (ret == H_SUCCESS)
352 hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
354 if (ret == H_NOT_ENOUGH_RESOURCES)
355 ehca_gen_err("Not enough resources. ret=%lx", ret);
/*
 * Query port attributes via H_QUERY_PORT.  The response block must be
 * page aligned (firmware writes into it by real address); misaligned
 * blocks are rejected with an error log.  When debugging is enabled
 * the first 2^6 bytes of the response are hex-dumped.
 */
360 u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
362 struct hipz_query_port *query_port_response_block)
/* firmware needs the real (absolute) address of the block */
365 u64 r_cb = virt_to_abs(query_port_response_block);
367 if (r_cb & (EHCA_PAGESIZE-1)) {
368 ehca_gen_err("response block not page aligned");
372 ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
373 adapter_handle.handle, /* r4 */
378 if (ehca_debug_level)
379 ehca_dmp(query_port_response_block, 64, "response_block");
/*
 * Modify port attributes via H_MODIFY_PORT.  The port capability mask
 * is the base of the attribute word; the IB modify_mask bits select
 * additional H_MP_* flags (shutdown, init type, Q_Key counter reset)
 * to OR in.  Returns the raw hCall status.
 */
384 u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
385 const u8 port_id, const u32 port_cap,
386 const u8 init_type, const int modify_mask)
388 u64 port_attributes = port_cap;
390 if (modify_mask & IB_PORT_SHUTDOWN)
391 port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
392 if (modify_mask & IB_PORT_INIT_TYPE)
393 port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
394 if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
395 port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
397 return ehca_plpar_hcall_norets(H_MODIFY_PORT,
398 adapter_handle.handle, /* r4 */
400 port_attributes, /* r6 */
/*
 * Query HCA attributes via H_QUERY_HCA.  As with hipz_h_query_port()
 * the response block must be page aligned because firmware addresses
 * it by real address.  Returns the raw hCall status.
 */
404 u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
405 struct hipz_query_hca *query_hca_rblock)
407 u64 r_cb = virt_to_abs(query_hca_rblock);
409 if (r_cb & (EHCA_PAGESIZE-1)) {
410 ehca_gen_err("response_block=%p not page aligned",
415 return ehca_plpar_hcall_norets(H_QUERY_HCA,
416 adapter_handle.handle, /* r4 */
/*
 * Register resource pages with firmware via H_REGISTER_RPAGES.
 * queue_type and pagesize are packed into r5 (pagesize in bits 8+);
 * the generic resource handle (EQ/CQ/QP/MR) goes in r6 and the page's
 * real address in r7.  Shared backend for the hipz_h_register_rpage_*
 * wrappers below.
 */
421 u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
424 const u64 resource_handle,
425 const u64 logical_address_of_page,
428 return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
429 adapter_handle.handle, /* r4 */
430 queue_type | pagesize << 8, /* r5 */
431 resource_handle, /* r6 */
432 logical_address_of_page, /* r7 */
/*
 * Register event-queue pages: validates the page count (the rejected
 * value is logged — guard condition not visible in this excerpt) and
 * forwards to hipz_h_register_rpage() with the EQ handle.
 */
437 u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
438 const struct ipz_eq_handle eq_handle,
439 struct ehca_pfeq *pfeq,
442 const u64 logical_address_of_page,
446 ehca_gen_err("Ppage counter=%lx", count);
449 return hipz_h_register_rpage(adapter_handle,
453 logical_address_of_page, count);
/*
 * Query interrupt state via H_QUERY_INT_STATE.  Any status other than
 * H_SUCCESS or H_BUSY is logged as an error; the raw status is
 * returned to the caller either way.
 */
456 u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
460 ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
461 adapter_handle.handle, /* r4 */
465 if (ret != H_SUCCESS && ret != H_BUSY)
466 ehca_gen_err("Could not query interrupt state.");
/*
 * Register completion-queue pages: validates the page count (guard
 * condition not visible in this excerpt; the rejected value is
 * logged) and forwards to hipz_h_register_rpage() with the CQ handle.
 */
471 u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
472 const struct ipz_cq_handle cq_handle,
473 struct ehca_pfcq *pfcq,
476 const u64 logical_address_of_page,
478 const struct h_galpa gal)
481 ehca_gen_err("Page counter=%lx", count);
485 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
486 cq_handle.handle, logical_address_of_page,
/*
 * Register queue-pair pages: validates the page count (guard condition
 * not visible in this excerpt; the rejected value is logged) and
 * forwards to hipz_h_register_rpage() with the QP handle.
 */
490 u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
491 const struct ipz_qp_handle qp_handle,
492 struct ehca_pfqp *pfqp,
495 const u64 logical_address_of_page,
497 const struct h_galpa galpa)
500 ehca_gen_err("Page counter=%lx", count);
504 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
505 qp_handle.handle, logical_address_of_page,
/*
 * Disable a QP and retrieve the next work-queue entries to process,
 * via H_DISABLE_AND_GETC.  The function code in r5 selects what to
 * disable/fetch.  The logical addresses of the next SQ and RQ WQEs
 * (outs[0]/outs[1]) are stored only if the caller supplied non-NULL
 * out-pointers.  Returns the hCall status.
 */
509 u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
510 const struct ipz_qp_handle qp_handle,
511 struct ehca_pfqp *pfqp,
512 void **log_addr_next_sq_wqe2processed,
513 void **log_addr_next_rq_wqe2processed,
514 int dis_and_get_function_code)
517 u64 outs[PLPAR_HCALL9_BUFSIZE];
519 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
520 adapter_handle.handle, /* r4 */
521 dis_and_get_function_code, /* r5 */
522 qp_handle.handle, /* r6 */
524 if (log_addr_next_sq_wqe2processed)
525 *log_addr_next_sq_wqe2processed = (void *)outs[0];
526 if (log_addr_next_rq_wqe2processed)
527 *log_addr_next_rq_wqe2processed = (void *)outs[1];
/*
 * Modify QP attributes via H_MODIFY_QP.  update_mask selects which
 * fields of the modify-QP control block (passed by real address) the
 * firmware should apply.  H_NOT_ENOUGH_RESOURCES is logged; the raw
 * status is returned.
 */
532 u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
533 const struct ipz_qp_handle qp_handle,
534 struct ehca_pfqp *pfqp,
535 const u64 update_mask,
536 struct hcp_modify_qp_control_block *mqpcb,
540 u64 outs[PLPAR_HCALL9_BUFSIZE];
541 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
542 adapter_handle.handle, /* r4 */
543 qp_handle.handle, /* r5 */
544 update_mask, /* r6 */
545 virt_to_abs(mqpcb), /* r7 */
548 if (ret == H_NOT_ENOUGH_RESOURCES)
549 ehca_gen_err("Insufficient resources ret=%lx", ret);
/*
 * Query QP attributes via H_QUERY_QP.  Firmware fills the control
 * block, which is passed by real address.  Thin pass-through; the raw
 * hCall status is returned.
 */
554 u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
555 const struct ipz_qp_handle qp_handle,
556 struct ehca_pfqp *pfqp,
557 struct hcp_modify_qp_control_block *qqpcb,
560 return ehca_plpar_hcall_norets(H_QUERY_QP,
561 adapter_handle.handle, /* r4 */
562 qp_handle.handle, /* r5 */
563 virt_to_abs(qqpcb), /* r6 */
/*
 * Destroy a QP.  Three steps:
 *  1. tear down the galpa register mappings (failure is logged);
 *  2. H_DISABLE_AND_GETC to quiesce the QP (H_HARDWARE logged);
 *  3. H_FREE_RESOURCE to release it in firmware (H_RESOURCE — still
 *     in use — logged).
 * The returned status comes from the final hCall.
 */
567 u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
571 u64 outs[PLPAR_HCALL9_BUFSIZE];
573 ret = hcp_galpas_dtor(&qp->galpas);
575 ehca_gen_err("Could not destruct qp->galpas");
578 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
579 adapter_handle.handle, /* r4 */
582 qp->ipz_qp_handle.handle, /* r6 */
584 if (ret == H_HARDWARE)
585 ehca_gen_err("HCA not operational. ret=%lx", ret);
587 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
588 adapter_handle.handle, /* r4 */
589 qp->ipz_qp_handle.handle, /* r5 */
592 if (ret == H_RESOURCE)
593 ehca_gen_err("Resource still in use. ret=%lx", ret);
/*
 * Define the special QP0 alias (SMI) for a port via H_DEFINE_AQP0.
 * Thin pass-through; returns the raw hCall status.
 */
598 u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
599 const struct ipz_qp_handle qp_handle,
603 return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
604 adapter_handle.handle, /* r4 */
605 qp_handle.handle, /* r5 */
/*
 * Define the special QP1 alias (GSI) for a port via H_DEFINE_AQP1.
 * On return the firmware-assigned PMA and BMA QP numbers are stored
 * from outs[0]/outs[1].  H_ALIAS_EXIST (AQP1 already defined) is
 * logged; the raw status is returned.
 */
610 u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
611 const struct ipz_qp_handle qp_handle,
613 u32 port, u32 * pma_qp_nr,
617 u64 outs[PLPAR_HCALL9_BUFSIZE];
619 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
620 adapter_handle.handle, /* r4 */
621 qp_handle.handle, /* r5 */
624 *pma_qp_nr = (u32)outs[0];
625 *bma_qp_nr = (u32)outs[1];
627 if (ret == H_ALIAS_EXIST)
628 ehca_gen_err("AQP1 already exists. ret=%lx", ret);
/*
 * Attach a QP to a multicast group via H_ATTACH_MCQP.  The multicast
 * GID is passed as interface_id (r7) and subnet_prefix (r8).
 * H_NOT_ENOUGH_RESOURCES is logged; the raw status is returned.
 */
633 u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
634 const struct ipz_qp_handle qp_handle,
637 u64 subnet_prefix, u64 interface_id)
641 ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
642 adapter_handle.handle, /* r4 */
643 qp_handle.handle, /* r5 */
645 interface_id, /* r7 */
646 subnet_prefix, /* r8 */
649 if (ret == H_NOT_ENOUGH_RESOURCES)
650 ehca_gen_err("Not enough resources. ret=%lx", ret);
/*
 * Detach a QP from a multicast group via H_DETACH_MCQP — the inverse
 * of hipz_h_attach_mcqp(), with the same GID argument layout.  Thin
 * pass-through; returns the raw hCall status.
 */
655 u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
656 const struct ipz_qp_handle qp_handle,
659 u64 subnet_prefix, u64 interface_id)
661 return ehca_plpar_hcall_norets(H_DETACH_MCQP,
662 adapter_handle.handle, /* r4 */
663 qp_handle.handle, /* r5 */
665 interface_id, /* r7 */
666 subnet_prefix, /* r8 */
/*
 * Destroy a CQ: tear down the galpa mappings, then release the CQ in
 * firmware via H_FREE_RESOURCE.  r6 carries a force flag (normalized
 * to 0/1).  H_RESOURCE failures are logged; the raw status of the
 * free call is returned.
 * NOTE(review): the error message says "cp->galpas" — almost
 * certainly a typo for "cq->galpas" (runtime string left untouched
 * here).
 */
670 u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
676 ret = hcp_galpas_dtor(&cq->galpas);
678 ehca_gen_err("Could not destruct cp->galpas");
682 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
683 adapter_handle.handle, /* r4 */
684 cq->ipz_cq_handle.handle, /* r5 */
685 force_flag != 0 ? 1L : 0L, /* r6 */
688 if (ret == H_RESOURCE)
689 ehca_gen_err("H_FREE_RESOURCE failed ret=%lx ", ret);
/*
 * Destroy an EQ: tear down the galpa mappings, then release the EQ in
 * firmware via H_FREE_RESOURCE.  H_RESOURCE (still in use) is logged;
 * the raw status of the free call is returned.
 */
694 u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
699 ret = hcp_galpas_dtor(&eq->galpas);
701 ehca_gen_err("Could not destruct eq->galpas");
705 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
706 adapter_handle.handle, /* r4 */
707 eq->ipz_eq_handle.handle, /* r5 */
710 if (ret == H_RESOURCE)
711 ehca_gen_err("Resource in use. ret=%lx ", ret);
/*
 * Allocate a memory region (MR) via H_ALLOC_RESOURCE.  The access
 * control bits are shifted into the upper half of r8.  On return the
 * MR handle (outs[0]) and the local/remote keys (outs[2]/outs[3]) are
 * stored in the out-parameter block; the raw status is returned.
 */
716 u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
717 const struct ehca_mr *mr,
720 const u32 access_ctrl,
721 const struct ipz_pd pd,
722 struct ehca_mr_hipzout_parms *outparms)
725 u64 outs[PLPAR_HCALL9_BUFSIZE];
727 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
728 adapter_handle.handle, /* r4 */
732 (((u64)access_ctrl) << 32ULL), /* r8 */
735 outparms->handle.handle = outs[0];
736 outparms->lkey = (u32)outs[2];
737 outparms->rkey = (u32)outs[3];
/*
 * Register memory-region pages.  When registering more than one page
 * at a time, the start address must sit on a 4k (EHCA_PAGESIZE)
 * boundary; a violation is logged with the full context.  Forwards to
 * hipz_h_register_rpage() with the MR handle.
 */
742 u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
743 const struct ehca_mr *mr,
746 const u64 logical_address_of_page,
751 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
752 ehca_gen_err("logical_address_of_page not on a 4k boundary "
753 "adapter_handle=%lx mr=%p mr_handle=%lx "
754 "pagesize=%x queue_type=%x "
755 "logical_address_of_page=%lx count=%lx",
756 adapter_handle.handle, mr,
757 mr->ipz_mr_handle.handle, pagesize, queue_type,
758 logical_address_of_page, count);
761 ret = hipz_h_register_rpage(adapter_handle, pagesize,
763 mr->ipz_mr_handle.handle,
764 logical_address_of_page, count);
/*
 * Query a memory region via H_QUERY_MR.  Unpacks the firmware answer:
 * length (outs[0]), virtual address (outs[1]), access control from the
 * high half of outs[4], and L_key/R_key from the high/low halves of
 * outs[5].  Returns the raw hCall status.
 */
768 u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
769 const struct ehca_mr *mr,
770 struct ehca_mr_hipzout_parms *outparms)
773 u64 outs[PLPAR_HCALL9_BUFSIZE];
775 ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
776 adapter_handle.handle, /* r4 */
777 mr->ipz_mr_handle.handle, /* r5 */
778 0, 0, 0, 0, 0, 0, 0);
779 outparms->len = outs[0];
780 outparms->vaddr = outs[1];
781 outparms->acl = outs[4] >> 32;
782 outparms->lkey = (u32)(outs[5] >> 32);
783 outparms->rkey = (u32)(outs[5] & (0xffffffff));
/*
 * Release a memory region in firmware via H_FREE_RESOURCE.  Thin
 * pass-through; returns the raw hCall status.
 */
788 u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
789 const struct ehca_mr *mr)
791 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
792 adapter_handle.handle, /* r4 */
793 mr->ipz_mr_handle.handle, /* r5 */
/*
 * Re-register a physical memory region via H_REREGISTER_PMR.  The
 * access control bits (high half) and PD value (low half) are packed
 * into a single register.  On return the new virtual address
 * (outs[1]) and L_key/R_key (outs[2]/outs[3]) are stored; the raw
 * status is returned.
 */
797 u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
798 const struct ehca_mr *mr,
801 const u32 access_ctrl,
802 const struct ipz_pd pd,
803 const u64 mr_addr_cb,
804 struct ehca_mr_hipzout_parms *outparms)
807 u64 outs[PLPAR_HCALL9_BUFSIZE];
809 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
810 adapter_handle.handle, /* r4 */
811 mr->ipz_mr_handle.handle, /* r5 */
815 ((((u64)access_ctrl) << 32ULL) | pd.value),
818 outparms->vaddr = outs[1];
819 outparms->lkey = (u32)outs[2];
820 outparms->rkey = (u32)outs[3];
/*
 * Register a shared memory region (SMR) derived from orig_mr, via
 * H_REGISTER_SMR.  Access control bits are shifted into the upper
 * half of r7.  On return the new MR handle (outs[0]) and L_key/R_key
 * (outs[2]/outs[3]) are stored; the raw status is returned.
 */
825 u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
826 const struct ehca_mr *mr,
827 const struct ehca_mr *orig_mr,
829 const u32 access_ctrl,
830 const struct ipz_pd pd,
831 struct ehca_mr_hipzout_parms *outparms)
834 u64 outs[PLPAR_HCALL9_BUFSIZE];
836 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
837 adapter_handle.handle, /* r4 */
838 orig_mr->ipz_mr_handle.handle, /* r5 */
840 (((u64)access_ctrl) << 32ULL), /* r7 */
843 outparms->handle.handle = outs[0];
844 outparms->lkey = (u32)outs[2];
845 outparms->rkey = (u32)outs[3];
/*
 * Allocate a memory window (MW) via H_ALLOC_RESOURCE.  On return the
 * MW handle (outs[0]) and R_key (outs[3]) are stored in the
 * out-parameter block; the raw status is returned.
 */
850 u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
851 const struct ehca_mw *mw,
852 const struct ipz_pd pd,
853 struct ehca_mw_hipzout_parms *outparms)
856 u64 outs[PLPAR_HCALL9_BUFSIZE];
858 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
859 adapter_handle.handle, /* r4 */
863 outparms->handle.handle = outs[0];
864 outparms->rkey = (u32)outs[3];
/*
 * Query a memory window via H_QUERY_MW.  Only the R_key (outs[3]) is
 * extracted from the answer; the raw hCall status is returned.
 */
869 u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
870 const struct ehca_mw *mw,
871 struct ehca_mw_hipzout_parms *outparms)
874 u64 outs[PLPAR_HCALL9_BUFSIZE];
876 ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
877 adapter_handle.handle, /* r4 */
878 mw->ipz_mw_handle.handle, /* r5 */
879 0, 0, 0, 0, 0, 0, 0);
880 outparms->rkey = (u32)outs[3];
/*
 * Release a memory window in firmware via H_FREE_RESOURCE.  Thin
 * pass-through; returns the raw hCall status.
 */
885 u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
886 const struct ehca_mw *mw)
888 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
889 adapter_handle.handle, /* r4 */
890 mw->ipz_mw_handle.handle, /* r5 */
894 u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
895 const u64 ressource_handle,
897 unsigned long *byte_count)
899 u64 r_cb = virt_to_abs(rblock);
901 if (r_cb & (EHCA_PAGESIZE-1)) {
902 ehca_gen_err("rblock not page aligned.");
906 return ehca_plpar_hcall_norets(H_ERROR_DATA,
907 adapter_handle.handle,