2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
4 * Firmware Infiniband Interface code for POWER
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 * Gerd Bayer <gerd.bayer@de.ibm.com>
10 * Waleri Fomin <fomin@de.ibm.com>
12 * Copyright (c) 2005 IBM Corporation
14 * All rights reserved.
 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
 * BSD.
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions are met:
24 * Redistributions of source code must retain the above copyright notice, this
25 * list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright notice,
28 * this list of conditions and the following disclaimer in the documentation
29 * and/or other materials
30 * provided with the distribution.
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
33 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
36 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
40 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGE.
#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hcp_phyp.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"
/* Bit layouts for the H_ALLOC_RESOURCE hCall when allocating a QP.
 * EHCA_BMASK_IBM uses IBM (big-endian) bit numbering: bit 0 is the MSB.
 */

/* r5: allocation control word */
#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_STORAGE            EHCA_BMASK_IBM(16, 17)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)

/* r10: maximum work-request / scatter-gather limits requested */
#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)

/* r11/r12: SRQ-related parameters */
#define H_ALL_RES_QP_UD_AV_LKEY         EHCA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SRQ_QP_TOKEN       EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SRQ_QP_HANDLE      EHCA_BMASK_IBM(0, 64)
#define H_ALL_RES_QP_SRQ_LIMIT          EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_SRQ_QPN            EHCA_BMASK_IBM(40, 63)

/* output registers: actual limits granted by the hypervisor */
#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)

/* output: queue sizes, in pages */
#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)

/* H_MODIFY_PORT attribute bits */
#define H_MP_INIT_TYPE                  EHCA_BMASK_IBM(44, 47)
#define H_MP_SHUTDOWN                   EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR             EHCA_BMASK_IBM(49, 49)

/* printf formats for dumping hCall argument registers in debug/error paths */
#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"

/* serializes hCalls when ehca_lock_hcalls is set (firmware workaround) */
static DEFINE_SPINLOCK(hcall_lock);
93 static u32 get_longbusy_msecs(int longbusy_rc)
95 switch (longbusy_rc) {
96 case H_LONG_BUSY_ORDER_1_MSEC:
98 case H_LONG_BUSY_ORDER_10_MSEC:
100 case H_LONG_BUSY_ORDER_100_MSEC:
102 case H_LONG_BUSY_ORDER_1_SEC:
104 case H_LONG_BUSY_ORDER_10_SEC:
106 case H_LONG_BUSY_ORDER_100_SEC:
113 static long ehca_plpar_hcall_norets(unsigned long opcode,
124 unsigned long flags = 0;
126 if (unlikely(ehca_debug_level >= 2))
127 ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
128 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
130 for (i = 0; i < 5; i++) {
131 /* serialize hCalls to work around firmware issue */
132 if (ehca_lock_hcalls)
133 spin_lock_irqsave(&hcall_lock, flags);
135 ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
138 if (ehca_lock_hcalls)
139 spin_unlock_irqrestore(&hcall_lock, flags);
141 if (H_IS_LONG_BUSY(ret)) {
142 sleep_msecs = get_longbusy_msecs(ret);
143 msleep_interruptible(sleep_msecs);
148 ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
149 opcode, ret, arg1, arg2, arg3,
150 arg4, arg5, arg6, arg7);
152 if (unlikely(ehca_debug_level >= 2))
153 ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
161 static long ehca_plpar_hcall9(unsigned long opcode,
162 unsigned long *outs, /* array of 9 outputs */
175 unsigned long flags = 0;
177 if (unlikely(ehca_debug_level >= 2))
178 ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
179 arg1, arg2, arg3, arg4, arg5,
180 arg6, arg7, arg8, arg9);
182 for (i = 0; i < 5; i++) {
183 /* serialize hCalls to work around firmware issue */
184 if (ehca_lock_hcalls)
185 spin_lock_irqsave(&hcall_lock, flags);
187 ret = plpar_hcall9(opcode, outs,
188 arg1, arg2, arg3, arg4, arg5,
189 arg6, arg7, arg8, arg9);
191 if (ehca_lock_hcalls)
192 spin_unlock_irqrestore(&hcall_lock, flags);
194 if (H_IS_LONG_BUSY(ret)) {
195 sleep_msecs = get_longbusy_msecs(ret);
196 msleep_interruptible(sleep_msecs);
200 if (ret < H_SUCCESS) {
201 ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
202 opcode, arg1, arg2, arg3, arg4, arg5,
203 arg6, arg7, arg8, arg9);
204 ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
205 ret, outs[0], outs[1], outs[2], outs[3],
206 outs[4], outs[5], outs[6], outs[7],
208 } else if (unlikely(ehca_debug_level >= 2))
209 ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
210 ret, outs[0], outs[1], outs[2], outs[3],
211 outs[4], outs[5], outs[6], outs[7],
219 u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
220 struct ehca_pfeq *pfeq,
221 const u32 neq_control,
222 const u32 number_of_entries,
223 struct ipz_eq_handle *eq_handle,
224 u32 *act_nr_of_entries,
229 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
230 u64 allocate_controls;
233 allocate_controls = 3ULL;
235 /* ISN is associated */
236 if (neq_control != 1)
237 allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
238 else /* notification event queue */
239 allocate_controls = (1ULL << 63) | allocate_controls;
241 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
242 adapter_handle.handle, /* r4 */
243 allocate_controls, /* r5 */
244 number_of_entries, /* r6 */
246 eq_handle->handle = outs[0];
247 *act_nr_of_entries = (u32)outs[3];
248 *act_pages = (u32)outs[4];
249 *eq_ist = (u32)outs[5];
251 if (ret == H_NOT_ENOUGH_RESOURCES)
252 ehca_gen_err("Not enough resource - ret=%lli ", ret);
257 u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
258 struct ipz_eq_handle eq_handle,
259 const u64 event_mask)
261 return ehca_plpar_hcall_norets(H_RESET_EVENTS,
262 adapter_handle.handle, /* r4 */
263 eq_handle.handle, /* r5 */
268 u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
270 struct ehca_alloc_cq_parms *param)
273 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
275 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
276 adapter_handle.handle, /* r4 */
278 param->eq_handle.handle, /* r6 */
280 param->nr_cqe, /* r8 */
282 cq->ipz_cq_handle.handle = outs[0];
283 param->act_nr_of_entries = (u32)outs[3];
284 param->act_pages = (u32)outs[4];
286 if (ret == H_SUCCESS)
287 hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
289 if (ret == H_NOT_ENOUGH_RESOURCES)
290 ehca_gen_err("Not enough resources. ret=%lli", ret);
295 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
296 struct ehca_alloc_qp_parms *parms, int is_user)
299 u64 allocate_controls, max_r10_reg, r11, r12;
300 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
303 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
304 | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
305 | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
306 | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
307 | EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
308 | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
309 parms->squeue.page_size)
310 | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
311 parms->rqueue.page_size)
312 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
313 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
314 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
315 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
316 | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
317 parms->ud_av_l_key_ctl)
318 | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
321 EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
322 parms->squeue.max_wr + 1)
323 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
324 parms->rqueue.max_wr + 1)
325 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
326 parms->squeue.max_sge)
327 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
328 parms->rqueue.max_sge);
330 r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
332 if (parms->ext_type == EQPT_SRQ)
333 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
335 r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
337 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
338 adapter_handle.handle, /* r4 */
339 allocate_controls, /* r5 */
340 parms->send_cq_handle.handle,
341 parms->recv_cq_handle.handle,
342 parms->eq_handle.handle,
343 ((u64)parms->token << 32) | parms->pd.value,
344 max_r10_reg, r11, r12);
346 parms->qp_handle.handle = outs[0];
347 parms->real_qp_num = (u32)outs[1];
348 parms->squeue.act_nr_wqes =
349 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
350 parms->rqueue.act_nr_wqes =
351 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
352 parms->squeue.act_nr_sges =
353 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
354 parms->rqueue.act_nr_sges =
355 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
356 parms->squeue.queue_size =
357 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
358 parms->rqueue.queue_size =
359 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
361 if (ret == H_SUCCESS)
362 hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
364 if (ret == H_NOT_ENOUGH_RESOURCES)
365 ehca_gen_err("Not enough resources. ret=%lli", ret);
370 u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
372 struct hipz_query_port *query_port_response_block)
375 u64 r_cb = virt_to_abs(query_port_response_block);
377 if (r_cb & (EHCA_PAGESIZE-1)) {
378 ehca_gen_err("response block not page aligned");
382 ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
383 adapter_handle.handle, /* r4 */
388 if (ehca_debug_level >= 2)
389 ehca_dmp(query_port_response_block, 64, "response_block");
394 u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
395 const u8 port_id, const u32 port_cap,
396 const u8 init_type, const int modify_mask)
398 u64 port_attributes = port_cap;
400 if (modify_mask & IB_PORT_SHUTDOWN)
401 port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
402 if (modify_mask & IB_PORT_INIT_TYPE)
403 port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
404 if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
405 port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
407 return ehca_plpar_hcall_norets(H_MODIFY_PORT,
408 adapter_handle.handle, /* r4 */
410 port_attributes, /* r6 */
414 u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
415 struct hipz_query_hca *query_hca_rblock)
417 u64 r_cb = virt_to_abs(query_hca_rblock);
419 if (r_cb & (EHCA_PAGESIZE-1)) {
420 ehca_gen_err("response_block=%p not page aligned",
425 return ehca_plpar_hcall_norets(H_QUERY_HCA,
426 adapter_handle.handle, /* r4 */
431 u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
434 const u64 resource_handle,
435 const u64 logical_address_of_page,
438 return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
439 adapter_handle.handle, /* r4 */
440 (u64)queue_type | ((u64)pagesize) << 8,
442 resource_handle, /* r6 */
443 logical_address_of_page, /* r7 */
448 u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
449 const struct ipz_eq_handle eq_handle,
450 struct ehca_pfeq *pfeq,
453 const u64 logical_address_of_page,
457 ehca_gen_err("Ppage counter=%llx", count);
460 return hipz_h_register_rpage(adapter_handle,
464 logical_address_of_page, count);
467 u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
471 ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
472 adapter_handle.handle, /* r4 */
476 if (ret != H_SUCCESS && ret != H_BUSY)
477 ehca_gen_err("Could not query interrupt state.");
482 u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
483 const struct ipz_cq_handle cq_handle,
484 struct ehca_pfcq *pfcq,
487 const u64 logical_address_of_page,
489 const struct h_galpa gal)
492 ehca_gen_err("Page counter=%llx", count);
496 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
497 cq_handle.handle, logical_address_of_page,
501 u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
502 const struct ipz_qp_handle qp_handle,
503 struct ehca_pfqp *pfqp,
506 const u64 logical_address_of_page,
508 const struct h_galpa galpa)
511 ehca_gen_err("Page counter=%llx", count);
515 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
516 qp_handle.handle, logical_address_of_page,
520 u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
521 const struct ipz_qp_handle qp_handle,
522 struct ehca_pfqp *pfqp,
523 void **log_addr_next_sq_wqe2processed,
524 void **log_addr_next_rq_wqe2processed,
525 int dis_and_get_function_code)
528 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
530 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
531 adapter_handle.handle, /* r4 */
532 dis_and_get_function_code, /* r5 */
533 qp_handle.handle, /* r6 */
535 if (log_addr_next_sq_wqe2processed)
536 *log_addr_next_sq_wqe2processed = (void *)outs[0];
537 if (log_addr_next_rq_wqe2processed)
538 *log_addr_next_rq_wqe2processed = (void *)outs[1];
543 u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
544 const struct ipz_qp_handle qp_handle,
545 struct ehca_pfqp *pfqp,
546 const u64 update_mask,
547 struct hcp_modify_qp_control_block *mqpcb,
551 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
552 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
553 adapter_handle.handle, /* r4 */
554 qp_handle.handle, /* r5 */
555 update_mask, /* r6 */
556 virt_to_abs(mqpcb), /* r7 */
559 if (ret == H_NOT_ENOUGH_RESOURCES)
560 ehca_gen_err("Insufficient resources ret=%lli", ret);
565 u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
566 const struct ipz_qp_handle qp_handle,
567 struct ehca_pfqp *pfqp,
568 struct hcp_modify_qp_control_block *qqpcb,
571 return ehca_plpar_hcall_norets(H_QUERY_QP,
572 adapter_handle.handle, /* r4 */
573 qp_handle.handle, /* r5 */
574 virt_to_abs(qqpcb), /* r6 */
578 u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
582 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
584 ret = hcp_galpas_dtor(&qp->galpas);
586 ehca_gen_err("Could not destruct qp->galpas");
589 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
590 adapter_handle.handle, /* r4 */
593 qp->ipz_qp_handle.handle, /* r6 */
595 if (ret == H_HARDWARE)
596 ehca_gen_err("HCA not operational. ret=%lli", ret);
598 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
599 adapter_handle.handle, /* r4 */
600 qp->ipz_qp_handle.handle, /* r5 */
603 if (ret == H_RESOURCE)
604 ehca_gen_err("Resource still in use. ret=%lli", ret);
609 u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
610 const struct ipz_qp_handle qp_handle,
614 return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
615 adapter_handle.handle, /* r4 */
616 qp_handle.handle, /* r5 */
621 u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
622 const struct ipz_qp_handle qp_handle,
624 u32 port, u32 * pma_qp_nr,
628 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
630 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
631 adapter_handle.handle, /* r4 */
632 qp_handle.handle, /* r5 */
635 *pma_qp_nr = (u32)outs[0];
636 *bma_qp_nr = (u32)outs[1];
638 if (ret == H_ALIAS_EXIST)
639 ehca_gen_err("AQP1 already exists. ret=%lli", ret);
644 u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
645 const struct ipz_qp_handle qp_handle,
648 u64 subnet_prefix, u64 interface_id)
652 ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
653 adapter_handle.handle, /* r4 */
654 qp_handle.handle, /* r5 */
656 interface_id, /* r7 */
657 subnet_prefix, /* r8 */
660 if (ret == H_NOT_ENOUGH_RESOURCES)
661 ehca_gen_err("Not enough resources. ret=%lli", ret);
666 u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
667 const struct ipz_qp_handle qp_handle,
670 u64 subnet_prefix, u64 interface_id)
672 return ehca_plpar_hcall_norets(H_DETACH_MCQP,
673 adapter_handle.handle, /* r4 */
674 qp_handle.handle, /* r5 */
676 interface_id, /* r7 */
677 subnet_prefix, /* r8 */
681 u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
687 ret = hcp_galpas_dtor(&cq->galpas);
689 ehca_gen_err("Could not destruct cp->galpas");
693 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
694 adapter_handle.handle, /* r4 */
695 cq->ipz_cq_handle.handle, /* r5 */
696 force_flag != 0 ? 1L : 0L, /* r6 */
699 if (ret == H_RESOURCE)
700 ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
705 u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
710 ret = hcp_galpas_dtor(&eq->galpas);
712 ehca_gen_err("Could not destruct eq->galpas");
716 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
717 adapter_handle.handle, /* r4 */
718 eq->ipz_eq_handle.handle, /* r5 */
721 if (ret == H_RESOURCE)
722 ehca_gen_err("Resource in use. ret=%lli ", ret);
727 u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
728 const struct ehca_mr *mr,
731 const u32 access_ctrl,
732 const struct ipz_pd pd,
733 struct ehca_mr_hipzout_parms *outparms)
736 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
738 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
739 adapter_handle.handle, /* r4 */
743 (((u64)access_ctrl) << 32ULL), /* r8 */
746 outparms->handle.handle = outs[0];
747 outparms->lkey = (u32)outs[2];
748 outparms->rkey = (u32)outs[3];
753 u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
754 const struct ehca_mr *mr,
757 const u64 logical_address_of_page,
762 if (unlikely(ehca_debug_level >= 3)) {
766 kpage = (u64 *)abs_to_virt(logical_address_of_page);
767 for (i = 0; i < count; i++)
768 ehca_gen_dbg("kpage[%d]=%p",
769 i, (void *)kpage[i]);
771 ehca_gen_dbg("kpage=%p",
772 (void *)logical_address_of_page);
775 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
776 ehca_gen_err("logical_address_of_page not on a 4k boundary "
777 "adapter_handle=%llx mr=%p mr_handle=%llx "
778 "pagesize=%x queue_type=%x "
779 "logical_address_of_page=%llx count=%llx",
780 adapter_handle.handle, mr,
781 mr->ipz_mr_handle.handle, pagesize, queue_type,
782 logical_address_of_page, count);
785 ret = hipz_h_register_rpage(adapter_handle, pagesize,
787 mr->ipz_mr_handle.handle,
788 logical_address_of_page, count);
792 u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
793 const struct ehca_mr *mr,
794 struct ehca_mr_hipzout_parms *outparms)
797 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
799 ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
800 adapter_handle.handle, /* r4 */
801 mr->ipz_mr_handle.handle, /* r5 */
802 0, 0, 0, 0, 0, 0, 0);
803 outparms->len = outs[0];
804 outparms->vaddr = outs[1];
805 outparms->acl = outs[4] >> 32;
806 outparms->lkey = (u32)(outs[5] >> 32);
807 outparms->rkey = (u32)(outs[5] & (0xffffffff));
812 u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
813 const struct ehca_mr *mr)
815 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
816 adapter_handle.handle, /* r4 */
817 mr->ipz_mr_handle.handle, /* r5 */
821 u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
822 const struct ehca_mr *mr,
825 const u32 access_ctrl,
826 const struct ipz_pd pd,
827 const u64 mr_addr_cb,
828 struct ehca_mr_hipzout_parms *outparms)
831 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
833 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
834 adapter_handle.handle, /* r4 */
835 mr->ipz_mr_handle.handle, /* r5 */
839 ((((u64)access_ctrl) << 32ULL) | pd.value),
842 outparms->vaddr = outs[1];
843 outparms->lkey = (u32)outs[2];
844 outparms->rkey = (u32)outs[3];
849 u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
850 const struct ehca_mr *mr,
851 const struct ehca_mr *orig_mr,
853 const u32 access_ctrl,
854 const struct ipz_pd pd,
855 struct ehca_mr_hipzout_parms *outparms)
858 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
860 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
861 adapter_handle.handle, /* r4 */
862 orig_mr->ipz_mr_handle.handle, /* r5 */
864 (((u64)access_ctrl) << 32ULL), /* r7 */
867 outparms->handle.handle = outs[0];
868 outparms->lkey = (u32)outs[2];
869 outparms->rkey = (u32)outs[3];
874 u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
875 const struct ehca_mw *mw,
876 const struct ipz_pd pd,
877 struct ehca_mw_hipzout_parms *outparms)
880 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
882 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
883 adapter_handle.handle, /* r4 */
887 outparms->handle.handle = outs[0];
888 outparms->rkey = (u32)outs[3];
893 u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
894 const struct ehca_mw *mw,
895 struct ehca_mw_hipzout_parms *outparms)
898 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
900 ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
901 adapter_handle.handle, /* r4 */
902 mw->ipz_mw_handle.handle, /* r5 */
903 0, 0, 0, 0, 0, 0, 0);
904 outparms->rkey = (u32)outs[3];
909 u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
910 const struct ehca_mw *mw)
912 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
913 adapter_handle.handle, /* r4 */
914 mw->ipz_mw_handle.handle, /* r5 */
918 u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
919 const u64 ressource_handle,
921 unsigned long *byte_count)
923 u64 r_cb = virt_to_abs(rblock);
925 if (r_cb & (EHCA_PAGESIZE-1)) {
926 ehca_gen_err("rblock not page aligned.");
930 return ehca_plpar_hcall_norets(H_ERROR_DATA,
931 adapter_handle.handle,
937 u64 hipz_h_eoi(int irq)
942 xirr = (0xffULL << 24) | irq;
944 return plpar_hcall_norets(H_EOI, xirr);