/*
 *  linux/drivers/net/ehea/ehea_phyp.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
29 #include "ehea_phyp.h"
32 static inline u16 get_order_of_qentries(u16 queue_entries)
34 u8 ld = 1; /* logarithmus dualis */
35 while (((1U << ld) - 1) < queue_entries)
/* Defines for H_CALL H_ALLOC_RESOURCE: resource type selector (R5/R9) */
#define H_ALL_RES_TYPE_QP	 1
#define H_ALL_RES_TYPE_CQ	 2
#define H_ALL_RES_TYPE_EQ	 3
#define H_ALL_RES_TYPE_MR	 5
#define H_ALL_RES_TYPE_MW	 6
47 static long ehea_plpar_hcall_norets(unsigned long opcode,
59 for (i = 0; i < 5; i++) {
60 ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
63 if (H_IS_LONG_BUSY(ret)) {
64 sleep_msecs = get_longbusy_msecs(ret);
65 msleep_interruptible(sleep_msecs);
70 ehea_error("opcode=%lx ret=%lx"
71 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
72 " arg5=%lx arg6=%lx arg7=%lx ",
74 arg1, arg2, arg3, arg4, arg5,
83 static long ehea_plpar_hcall9(unsigned long opcode,
84 unsigned long *outs, /* array of 9 outputs */
98 for (i = 0; i < 5; i++) {
99 ret = plpar_hcall9(opcode, outs,
100 arg1, arg2, arg3, arg4, arg5,
101 arg6, arg7, arg8, arg9);
103 if (H_IS_LONG_BUSY(ret)) {
104 sleep_msecs = get_longbusy_msecs(ret);
105 msleep_interruptible(sleep_msecs);
110 ehea_error("opcode=%lx ret=%lx"
111 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
112 " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
114 " out1=%lx out2=%lx out3=%lx out4=%lx"
115 " out5=%lx out6=%lx out7=%lx out8=%lx"
118 arg1, arg2, arg3, arg4, arg5,
119 arg6, arg7, arg8, arg9,
120 outs[0], outs[1], outs[2], outs[3],
121 outs[4], outs[5], outs[6], outs[7],
130 u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
131 const u64 qp_handle, const u64 sel_mask, void *cb_addr)
133 return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
134 adapter_handle, /* R4 */
135 qp_category, /* R5 */
138 virt_to_abs(cb_addr), /* R8 */
/* input param R5 : allocate_controls */
#define H_ALL_RES_QP_EQPO		EHEA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_QPP		EHEA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_RQR		EHEA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_EQEG		EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_QP_LL_QP		EHEA_BMASK_IBM(17, 17)
#define H_ALL_RES_QP_DMA128		EHEA_BMASK_IBM(19, 19)
#define H_ALL_RES_QP_HSM		EHEA_BMASK_IBM(20, 21)
#define H_ALL_RES_QP_SIGT		EHEA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_TENURE		EHEA_BMASK_IBM(48, 55)
#define H_ALL_RES_QP_RES_TYP		EHEA_BMASK_IBM(56, 63)

/* input param R9 */
#define H_ALL_RES_QP_TOKEN		EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_PD			EHEA_BMASK_IBM(32, 63)

/* input param R10 */
#define H_ALL_RES_QP_MAX_SWQE		EHEA_BMASK_IBM(4, 7)
#define H_ALL_RES_QP_MAX_R1WQE		EHEA_BMASK_IBM(12, 15)
#define H_ALL_RES_QP_MAX_R2WQE		EHEA_BMASK_IBM(20, 23)
#define H_ALL_RES_QP_MAX_R3WQE		EHEA_BMASK_IBM(28, 31)
/* Max Send Scatter Gather Elements */
#define H_ALL_RES_QP_MAX_SSGE		EHEA_BMASK_IBM(37, 39)
/* Max Receive SG Elements RQ1 */
#define H_ALL_RES_QP_MAX_R1SGE		EHEA_BMASK_IBM(45, 47)
#define H_ALL_RES_QP_MAX_R2SGE		EHEA_BMASK_IBM(53, 55)
#define H_ALL_RES_QP_MAX_R3SGE		EHEA_BMASK_IBM(61, 63)

/* input param R11 */
/* max swqe immediate data length */
#define H_ALL_RES_QP_SWQE_IDL		EHEA_BMASK_IBM(0, 7)
#define H_ALL_RES_QP_PORT_NUM		EHEA_BMASK_IBM(48, 63)

/* input param R12 : RQ2/RQ3 fill thresholds */
#define H_ALL_RES_QP_TH_RQ2		EHEA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_TH_RQ3		EHEA_BMASK_IBM(16, 31)

/* output param R6 : actual WQE counts granted by firmware */
#define H_ALL_RES_QP_ACT_SWQE		EHEA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_ACT_R1WQE		EHEA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_R2WQE		EHEA_BMASK_IBM(32, 47)
#define H_ALL_RES_QP_ACT_R3WQE		EHEA_BMASK_IBM(48, 63)

/* output param, R7 : actual SG element counts */
#define H_ALL_RES_QP_ACT_SSGE		EHEA_BMASK_IBM(0, 7)
#define H_ALL_RES_QP_ACT_R1SGE		EHEA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_R2SGE		EHEA_BMASK_IBM(16, 23)
#define H_ALL_RES_QP_ACT_R3SGE		EHEA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_ACT_SWQE_IDL	EHEA_BMASK_IBM(32, 39)

/* output param R8,R9 : queue sizes in pages */
#define H_ALL_RES_QP_SIZE_SQ		EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ1		EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SIZE_RQ2		EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ3		EHEA_BMASK_IBM(32, 63)

/* output param R11,R12 : logical I/O bus numbers */
#define H_ALL_RES_QP_LIOBN_SQ		EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ1		EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_LIOBN_RQ2		EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ3		EHEA_BMASK_IBM(32, 63)
206 u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
207 struct ehea_qp_init_attr *init_attr, const u32 pd,
208 u64 *qp_handle, struct h_epas *h_epas)
211 u64 outs[PLPAR_HCALL9_BUFSIZE];
213 u64 allocate_controls =
214 EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
215 | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0)
216 | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6) /* rq1 & rq2 & rq3 */
217 | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0) /* EQE gen. disabled */
218 | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
219 | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0)
220 | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0)
221 | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
222 | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP);
224 u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd)
225 | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);
228 EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE,
229 get_order_of_qentries(init_attr->max_nr_send_wqes))
230 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE,
231 get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
232 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE,
233 get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
234 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE,
235 get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
236 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
237 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE,
238 init_attr->wqe_size_enc_rq1)
239 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE,
240 init_attr->wqe_size_enc_rq2)
241 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE,
242 init_attr->wqe_size_enc_rq3);
245 EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len)
246 | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr);
248 EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
249 | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);
251 hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
253 adapter_handle, /* R4 */
254 allocate_controls, /* R5 */
255 init_attr->send_cq_handle, /* R6 */
256 init_attr->recv_cq_handle, /* R7 */
257 init_attr->aff_eq_handle, /* R8 */
259 max_r10_reg, /* R10 */
261 threshold); /* R12 */
263 *qp_handle = outs[0];
264 init_attr->qp_nr = (u32)outs[1];
266 init_attr->act_nr_send_wqes =
267 (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
268 init_attr->act_nr_rwqes_rq1 =
269 (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
270 init_attr->act_nr_rwqes_rq2 =
271 (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
272 init_attr->act_nr_rwqes_rq3 =
273 (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);
275 init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
276 init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
277 init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2;
278 init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;
280 init_attr->nr_sq_pages =
281 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
282 init_attr->nr_rq1_pages =
283 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
284 init_attr->nr_rq2_pages =
285 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
286 init_attr->nr_rq3_pages =
287 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);
289 init_attr->liobn_sq =
290 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
291 init_attr->liobn_rq1 =
292 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
293 init_attr->liobn_rq2 =
294 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
295 init_attr->liobn_rq3 =
296 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);
299 hcp_epas_ctor(h_epas, outs[6], outs[6]);
304 u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
305 struct ehea_cq_attr *cq_attr,
306 u64 *cq_handle, struct h_epas *epas)
309 u64 outs[PLPAR_HCALL9_BUFSIZE];
311 hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
313 adapter_handle, /* R4 */
314 H_ALL_RES_TYPE_CQ, /* R5 */
315 cq_attr->eq_handle, /* R6 */
316 cq_attr->cq_token, /* R7 */
317 cq_attr->max_nr_of_cqes, /* R8 */
318 0, 0, 0, 0); /* R9-R12 */
320 *cq_handle = outs[0];
321 cq_attr->act_nr_of_cqes = outs[3];
322 cq_attr->nr_pages = outs[4];
325 hcp_epas_ctor(epas, outs[5], outs[6]);
/* Defines for H_CALL H_ALLOC_RESOURCE
 * (resource types repeated here, identically, for the EQ path below) */
#define H_ALL_RES_TYPE_QP	 1
#define H_ALL_RES_TYPE_CQ	 2
#define H_ALL_RES_TYPE_EQ	 3
#define H_ALL_RES_TYPE_MR	 5
#define H_ALL_RES_TYPE_MW	 6

/* input param R5 */
#define H_ALL_RES_EQ_NEQ		EHEA_BMASK_IBM(0, 0)
#define H_ALL_RES_EQ_NON_NEQ_ISN	EHEA_BMASK_IBM(6, 7)
#define H_ALL_RES_EQ_INH_EQE_GEN	EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_EQ_RES_TYPE		EHEA_BMASK_IBM(56, 63)
/* input param R6 */
#define H_ALL_RES_EQ_MAX_EQE		EHEA_BMASK_IBM(32, 63)

/* output param R6 */
#define H_ALL_RES_EQ_LIOBN		EHEA_BMASK_IBM(32, 63)

/* output param R7 */
#define H_ALL_RES_EQ_ACT_EQE		EHEA_BMASK_IBM(32, 63)

/* output param R8 */
#define H_ALL_RES_EQ_ACT_PS		EHEA_BMASK_IBM(32, 63)

/* output param R9 */
#define H_ALL_RES_EQ_ACT_EQ_IST_C	EHEA_BMASK_IBM(30, 31)
#define H_ALL_RES_EQ_ACT_EQ_IST_1	EHEA_BMASK_IBM(40, 63)

/* output param R10 */
#define H_ALL_RES_EQ_ACT_EQ_IST_2	EHEA_BMASK_IBM(40, 63)

/* output param R11 */
#define H_ALL_RES_EQ_ACT_EQ_IST_3	EHEA_BMASK_IBM(40, 63)

/* output param R12 */
#define H_ALL_RES_EQ_ACT_EQ_IST_4	EHEA_BMASK_IBM(40, 63)
367 u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
368 struct ehea_eq_attr *eq_attr, u64 *eq_handle)
370 u64 hret, allocate_controls;
371 u64 outs[PLPAR_HCALL9_BUFSIZE];
375 EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ)
376 | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0)
377 | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
378 | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);
380 hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
382 adapter_handle, /* R4 */
383 allocate_controls, /* R5 */
384 eq_attr->max_nr_of_eqes, /* R6 */
385 0, 0, 0, 0, 0, 0); /* R7-R10 */
387 *eq_handle = outs[0];
388 eq_attr->act_nr_of_eqes = outs[3];
389 eq_attr->nr_pages = outs[4];
390 eq_attr->ist1 = outs[5];
391 eq_attr->ist2 = outs[6];
392 eq_attr->ist3 = outs[7];
393 eq_attr->ist4 = outs[8];
398 u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
399 const u64 qp_handle, const u64 sel_mask,
400 void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
401 u16 *out_swr, u16 *out_rwr)
404 u64 outs[PLPAR_HCALL9_BUFSIZE];
406 hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
408 adapter_handle, /* R4 */
412 virt_to_abs(cb_addr), /* R8 */
413 0, 0, 0, 0); /* R9-R12 */
415 *inv_attr_id = outs[0];
418 *proc_mask = outs[5];
423 u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
424 const u8 queue_type, const u64 resource_handle,
425 const u64 log_pageaddr, u64 count)
429 reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
430 | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);
432 return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
433 adapter_handle, /* R4 */
434 reg_control, /* R5 */
435 resource_handle, /* R6 */
436 log_pageaddr, /* R7 */
441 u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
442 const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
446 u64 outs[PLPAR_HCALL9_BUFSIZE];
448 hret = ehea_plpar_hcall9(H_REGISTER_SMR,
450 adapter_handle , /* R4 */
451 orig_mr_handle, /* R5 */
453 (((u64)access_ctrl) << 32ULL), /* R7 */
455 0, 0, 0, 0); /* R9-R12 */
457 mr->handle = outs[0];
458 mr->lkey = (u32)outs[2];
463 u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
465 u64 outs[PLPAR_HCALL9_BUFSIZE];
467 return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
469 adapter_handle, /* R4 */
470 H_DISABLE_GET_EHEA_WQE_P, /* R5 */
472 0, 0, 0, 0, 0, 0); /* R7-R12 */
475 u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle)
477 return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
478 adapter_handle, /* R4 */
480 0, 0, 0, 0, 0); /* R6-R10 */
483 u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
484 const u64 length, const u32 access_ctrl,
485 const u32 pd, u64 *mr_handle, u32 *lkey)
488 u64 outs[PLPAR_HCALL9_BUFSIZE];
490 hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
492 adapter_handle, /* R4 */
496 (((u64) access_ctrl) << 32ULL), /* R8 */
498 0, 0, 0); /* R10-R12 */
500 *mr_handle = outs[0];
501 *lkey = (u32)outs[2];
505 u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
506 const u8 pagesize, const u8 queue_type,
507 const u64 log_pageaddr, const u64 count)
509 if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
510 ehea_error("not on pageboundary");
514 return ehea_h_register_rpage(adapter_handle, pagesize,
515 queue_type, mr_handle,
516 log_pageaddr, count);
519 u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
521 u64 hret, cb_logaddr;
523 cb_logaddr = virt_to_abs(cb_addr);
525 hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
526 adapter_handle, /* R4 */
528 0, 0, 0, 0, 0); /* R6-R10 */
530 ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
535 u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
536 const u8 cb_cat, const u64 select_mask,
540 u64 cb_logaddr = virt_to_abs(cb_addr);
543 port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
544 | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
546 return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
547 adapter_handle, /* R4 */
549 select_mask, /* R6 */
555 u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
556 const u8 cb_cat, const u64 select_mask,
559 u64 outs[PLPAR_HCALL9_BUFSIZE];
562 u64 cb_logaddr = virt_to_abs(cb_addr);
564 port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
565 | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
567 ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
569 return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
571 adapter_handle, /* R4 */
573 select_mask, /* R6 */
576 0, 0, 0, 0); /* R9-R12 */
579 u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
580 const u8 reg_type, const u64 mc_mac_addr,
581 const u16 vlan_id, const u32 hcall_id)
583 u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
584 u64 mac_addr = mc_mac_addr >> 16;
586 r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
587 r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type);
588 r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
589 r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);
591 return ehea_plpar_hcall_norets(hcall_id,
592 adapter_handle, /* R4 */
593 r5_port_num, /* R5 */
594 r6_reg_type, /* R6 */
595 r7_mc_mac_addr, /* R7 */
600 u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
601 const u64 event_mask)
603 return ehea_plpar_hcall_norets(H_RESET_EVENTS,
604 adapter_handle, /* R4 */
607 0, 0, 0, 0); /* R7-R12 */