/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) sn2-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/uncached.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"

static struct xpc_vars_sn2 *xpc_vars;	/* >>> Add _sn2 suffix? */
static struct xpc_vars_part_sn2 *xpc_vars_part; /* >>> Add _sn2 suffix? */

/* SH_IPI_ACCESS shub register value on startup */
static u64 xpc_sh1_IPI_access;
static u64 xpc_sh2_IPI_access0;
static u64 xpc_sh2_IPI_access1;
static u64 xpc_sh2_IPI_access2;
static u64 xpc_sh2_IPI_access3;
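
/*
 * The SH_IPI_ACCESS values read at startup are presumably saved here so
 * that xpc_disallow_IPI_ops_sn2() can restore the original protections
 * when XPC shuts down.
 */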

/*
 * Change protections to allow IPI operations.
 */
static void
xpc_allow_IPI_ops_sn2(void)
{
	int node;
	int nasid;

	/* >>> The following should get moved into SAL. */
	if (is_shub2()) {
		xpc_sh2_IPI_access0 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
		xpc_sh2_IPI_access1 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
		xpc_sh2_IPI_access2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
		xpc_sh2_IPI_access3 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
			      -1UL);
		}
	} else {
		xpc_sh1_IPI_access =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
			      -1UL);
		}
	}
}

/*
 * Restrict protections to disallow IPI operations.
 */
static void
xpc_disallow_IPI_ops_sn2(void)
{
	int node;
	int nasid;

	/* >>> The following should get moved into SAL. */
	if (is_shub2()) {
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
			      xpc_sh2_IPI_access0);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
			      xpc_sh2_IPI_access1);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
			      xpc_sh2_IPI_access2);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
			      xpc_sh2_IPI_access3);
		}
	} else {
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
			      xpc_sh1_IPI_access);
		}
	}
}

/*
 * The following set of macros and functions are used for the sending and
 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
 * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
 * the other that is associated with channel activity (SGI_XPC_NOTIFY).
 */

static u64
xpc_IPI_receive_sn2(AMO_t *amo)
{
	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
}
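
/*
 * Note that FETCHOP_CLEAR returns the AMO variable's previous value and
 * zeroes it in a single atomic operation, so flag bits set by a remote
 * partition between our read and the clear cannot be lost.
 */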

static enum xp_retval
xpc_IPI_send_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
{
	int ret;
	unsigned long irq_flags;

	local_irq_save(irq_flags);

	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
	sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IPIs and AMOs to it until the heartbeat times out.
	 */
	ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	return ((ret == 0) ? xpSuccess : xpPioReadError);
}

static AMO_t *
xpc_IPI_init_sn2(int index)
{
	AMO_t *amo = xpc_vars->amos_page + index;

	(void)xpc_IPI_receive_sn2(amo);	/* clear AMO variable */
	return amo;
}

/*
 * IPIs associated with SGI_XPC_ACTIVATE IRQ.
 */

/*
 * Notify the heartbeat check thread that an activate IRQ has been received.
 */
static irqreturn_t
xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
{
	atomic_inc(&xpc_activate_IRQ_rcvd);
	wake_up_interruptible(&xpc_activate_IRQ_wq);
	return IRQ_HANDLED;
}

/*
 * Flag the appropriate AMO variable and send an IPI to the specified node.
 */
static void
xpc_activate_IRQ_send_sn2(u64 amos_page_pa, int from_nasid, int to_nasid,
			  int to_phys_cpuid)
{
	int w_index = XPC_NASID_W_INDEX(from_nasid);
	int b_index = XPC_NASID_B_INDEX(from_nasid);
	AMO_t *amos = (AMO_t *)__va(amos_page_pa +
				    (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));

	(void)xpc_IPI_send_sn2(&amos[w_index], (1UL << b_index), to_nasid,
			       to_phys_cpuid, SGI_XPC_ACTIVATE);
}

static void
xpc_activate_IRQ_send_local_sn2(int from_nasid)
{
	int w_index = XPC_NASID_W_INDEX(from_nasid);
	int b_index = XPC_NASID_B_INDEX(from_nasid);
	AMO_t *amos = (AMO_t *)__va(xpc_vars->amos_page_pa +
				    (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));

	/* fake the sending and receipt of an activate IRQ from remote nasid */
	FETCHOP_STORE_OP(TO_AMO((u64)&amos[w_index].variable), FETCHOP_OR,
			 (1UL << b_index));
	atomic_inc(&xpc_activate_IRQ_rcvd);
	wake_up_interruptible(&xpc_activate_IRQ_wq);
}
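
/*
 * Each activate AMO word holds one bit per nasid, so a sender is identified
 * by (word, bit) = (XPC_NASID_W_INDEX(nasid), XPC_NASID_B_INDEX(nasid)).
 * The "local" variant above sets the bit and wakes the waitqueue directly,
 * which presumably avoids a cross-partition PIO when the target is ourselves.
 */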

/*
 * IPIs associated with SGI_XPC_NOTIFY IRQ.
 */

/*
 * Check to see if there is any channel activity to/from the specified
 * partition.
 */
static void
xpc_check_for_channel_activity_sn2(struct xpc_partition *part)
{
	u64 IPI_amo;
	unsigned long irq_flags;

	IPI_amo = xpc_IPI_receive_sn2(part->sn.sn2.local_IPI_amo_va);
	if (IPI_amo == 0)
		return;

	spin_lock_irqsave(&part->IPI_lock, irq_flags);
	part->local_IPI_amo |= IPI_amo;
	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);

	dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
		XPC_PARTID(part), IPI_amo);

	xpc_wakeup_channel_mgr(part);
}

/*
 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an AMO_t structure per partition to indicate
 * whether a partition has sent an IPI or not. If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *	irq - Interrupt ReQuest number. NOT USED.
 *
 *	dev_id - partid of IPI's potential sender.
 */
static irqreturn_t
xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
{
	short partid = (short)(u64)dev_id;
	struct xpc_partition *part = &xpc_partitions[partid];

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	if (xpc_part_ref(part)) {
		xpc_check_for_channel_activity_sn2(part);

		xpc_part_deref(part);
	}
	return IRQ_HANDLED;
}

/*
 * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IPIs on the floor
 * because the write to their associated IPI amo completed after the IRQ/IPI
 * was received.
 */
static void
xpc_dropped_notify_IRQ_check_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

	if (xpc_part_ref(part)) {
		xpc_check_for_channel_activity_sn2(part);

		part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
		    XPC_P_DROPPED_IPI_WAIT_INTERVAL;
		add_timer(&part_sn2->dropped_notify_IRQ_timer);
		xpc_part_deref(part);
	}
}

/*
 * Send an IPI to the remote partition that is associated with the
 * specified channel.
 */
static void
xpc_notify_IRQ_send_sn2(struct xpc_channel *ch, u8 ipi_flag,
			char *ipi_flag_string, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	enum xp_retval ret;

	if (likely(part->act_state != XPC_P_DEACTIVATING)) {
		ret = xpc_IPI_send_sn2(part_sn2->remote_IPI_amo_va,
				       (u64)ipi_flag << (ch->number * 8),
				       part_sn2->remote_IPI_nasid,
				       part_sn2->remote_IPI_phys_cpuid,
				       SGI_XPC_NOTIFY);
		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
			ipi_flag_string, ch->partid, ch->number, ret);
		if (unlikely(ret != xpSuccess)) {
			if (irq_flags != NULL)
				spin_unlock_irqrestore(&ch->lock, *irq_flags);
			XPC_DEACTIVATE_PARTITION(part, ret);
			if (irq_flags != NULL)
				spin_lock_irqsave(&ch->lock, *irq_flags);
		}
	}
}

#define XPC_NOTIFY_IRQ_SEND_SN2(_ch, _ipi_f, _irq_f) \
		xpc_notify_IRQ_send_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)
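
/*
 * The notify AMO packs the flags for up to eight channels into one u64,
 * one byte per channel: channel N's flag is OR'd in at bit offset N * 8.
 * The macro stringifies the flag name (#_ipi_f) purely so that the
 * dev_dbg() output above can report which IPI flag was sent, e.g.:
 *
 *	XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_OPENREQUEST, irq_flags);
 */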

/*
 * Make it look like the remote partition, which is associated with the
 * specified channel, sent us an IPI. This faked IPI will be handled
 * by xpc_dropped_notify_IRQ_check_sn2().
 */
static void
xpc_notify_IRQ_send_local_sn2(struct xpc_channel *ch, u8 ipi_flag,
			      char *ipi_flag_string)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];

	FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_IPI_amo_va->variable),
			 FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
	dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
		ipi_flag_string, ch->partid, ch->number);
}

#define XPC_NOTIFY_IRQ_SEND_LOCAL_SN2(_ch, _ipi_f) \
		xpc_notify_IRQ_send_local_sn2(_ch, _ipi_f, #_ipi_f)

static void
xpc_send_channel_closerequest_sn2(struct xpc_channel *ch,
				  unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->reason = ch->reason;
	XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
}

static void
xpc_send_channel_closereply_sn2(struct xpc_channel *ch,
				unsigned long *irq_flags)
{
	XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_CLOSEREPLY, irq_flags);
}

static void
xpc_send_channel_openrequest_sn2(struct xpc_channel *ch,
				 unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->msg_size = ch->msg_size;
	args->local_nentries = ch->local_nentries;
	XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_OPENREQUEST, irq_flags);
}

static void
xpc_send_channel_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->remote_nentries = ch->remote_nentries;
	args->local_nentries = ch->local_nentries;
	args->local_msgqueue_pa = __pa(ch->local_msgqueue);
	XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_OPENREPLY, irq_flags);
}

static void
xpc_send_channel_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_MSGREQUEST, NULL);
}

static void
xpc_send_channel_local_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_NOTIFY_IRQ_SEND_LOCAL_SN2(ch, XPC_IPI_MSGREQUEST);
}

/*
 * This next set of functions is used to keep track of when a partition is
 * potentially engaged in accessing memory belonging to another partition.
 */

static void
xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	AMO_t *amo = (AMO_t *)__va(part->sn.sn2.remote_amos_page_pa +
				   (XPC_ENGAGED_PARTITIONS_AMO *
				    sizeof(AMO_t)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's AMO */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 (1UL << sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IPIs and AMOs to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

static void
xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	AMO_t *amo = (AMO_t *)__va(part_sn2->remote_amos_page_pa +
				   (XPC_ENGAGED_PARTITIONS_AMO *
				    sizeof(AMO_t)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's AMO */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~(1UL << sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IPIs and AMOs to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've cleared our
	 * bit in their engaged partitions AMO.
	 */
	xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}

static int
xpc_partition_engaged_sn2(short partid)
{
	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;

	/* our partition's AMO variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		(1UL << partid)) != 0;
}

static int
xpc_any_partition_engaged_sn2(void)
{
	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;

	/* our partition's AMO variable */
	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
}

static void
xpc_assume_partition_disengaged_sn2(short partid)
{
	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;

	/* clear bit(s) based on partid mask in our partition's AMO */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~(1UL << partid));
}
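
/*
 * The engaged partitions AMO is thus a simple bitmap indexed by partid:
 * a remote partition sets its own bit in our copy while it may be
 * referencing our memory and clears it when done, so the tests above
 * reduce to single atomic loads of that bitmap.
 */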

/* original protection values for each node */
static u64 xpc_prot_vec_sn2[MAX_NUMNODES];

/*
 * Change protections to allow AMO operations on non-Shub 1.1 systems.
 */
static enum xp_retval
xpc_allow_AMO_ops_sn2(AMO_t *amos_page)
{
	u64 nasid_array = 0;
	int ret;

	/*
	 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
	 * collides with memory operations. On those systems we call
	 * xpc_allow_AMO_ops_shub_wars_1_1_sn2() instead.
	 */
	if (!enable_shub_wars_1_1()) {
		ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
					   SN_MEMPROT_ACCESS_CLASS_1,
					   &nasid_array);
		if (ret != 0)
			return xpSalError;
	}
	return xpSuccess;
}

/*
 * Change protections to allow AMO operations on Shub 1.1 systems.
 */
static void
xpc_allow_AMO_ops_shub_wars_1_1_sn2(void)
{
	int node;
	int nasid;

	if (!enable_shub_wars_1_1())
		return;

	for_each_online_node(node) {
		nasid = cnodeid_to_nasid(node);
		/* save current protection values */
		xpc_prot_vec_sn2[node] =
		    (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
						  SH1_MD_DQLP_MMR_DIR_PRIVEC0));
		/* open up everything */
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQLP_MMR_DIR_PRIVEC0),
		      -1UL);
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQRP_MMR_DIR_PRIVEC0),
		      -1UL);
	}
}

static enum xp_retval
xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
{
	AMO_t *amos_page;
	int i;
	enum xp_retval ret;

	xpc_vars = XPC_RP_VARS(rp);

	rp->sn.vars_pa = __pa(xpc_vars);

	/* vars_part array follows immediately after vars */
	xpc_vars_part = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
						     XPC_RP_VARS_SIZE);

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	amos_page = xpc_vars->amos_page;
	if (amos_page == NULL) {
		amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0, 1));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return xpNoMemory;
		}

		/*
		 * Open up AMO-R/W to cpu. This is done on Shub 1.1 systems
		 * when xpc_allow_AMO_ops_shub_wars_1_1_sn2() is called.
		 */
		ret = xpc_allow_AMO_ops_sn2(amos_page);
		if (ret != xpSuccess) {
			dev_err(xpc_part, "can't allow AMO operations\n");
			uncached_free_page(__IA64_UNCACHED_OFFSET |
					   TO_PHYS((u64)amos_page), 1);
			return ret;
		}
	}

	/* clear xpc_vars */
	memset(xpc_vars, 0, sizeof(struct xpc_vars_sn2));

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->activate_IRQ_nasid = cpuid_to_nasid(0);
	xpc_vars->activate_IRQ_phys_cpuid = cpu_physical_id(0);
	xpc_vars->vars_part_pa = __pa(xpc_vars_part);
	xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
	xpc_vars->amos_page = amos_page;	/* save for next load of XPC */

	/* clear xpc_vars_part */
	memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part_sn2) *
	       xp_max_npartitions);

	/* initialize the activate IRQ related AMO variables */
	for (i = 0; i < xp_nasid_mask_words; i++)
		(void)xpc_IPI_init_sn2(XPC_ACTIVATE_IRQ_AMOS + i);

	/* initialize the engaged remote partitions related AMO variables */
	(void)xpc_IPI_init_sn2(XPC_ENGAGED_PARTITIONS_AMO);
	(void)xpc_IPI_init_sn2(XPC_DEACTIVATE_REQUEST_AMO);

	return xpSuccess;
}
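
/*
 * Reserved page layout on sn2, as set up above: the struct xpc_rsvd_page
 * header is followed by the struct xpc_vars_sn2 area (XPC_RP_VARS(rp)),
 * which is in turn followed immediately by the xpc_vars_part array with
 * one struct xpc_vars_part_sn2 entry per possible partition.
 */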

static void
xpc_increment_heartbeat_sn2(void)
{
	xpc_vars->heartbeat++;
}

static void
xpc_offline_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars->heartbeat_offline = 1;
}

static void
xpc_online_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars->heartbeat_offline = 0;
}

static void
xpc_heartbeat_init_sn2(void)
{
	DBUG_ON(xpc_vars == NULL);

	bitmap_zero(xpc_vars->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
	xpc_heartbeating_to_mask = &xpc_vars->heartbeating_to_mask[0];
	xpc_online_heartbeat_sn2();
}

static void
xpc_heartbeat_exit_sn2(void)
{
	xpc_offline_heartbeat_sn2();
}
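
/*
 * Remote partitions read our heartbeat directly out of our xpc_vars.
 * Setting heartbeat_offline appears to tell them that a stalled counter
 * is deliberate (e.g. across an XPC unload) and should not be treated
 * as a failed partition; see the offline test in xpc_check_remote_hb_sn2().
 */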

/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active. If not, the partition is deactivated.
 */
static void
xpc_check_remote_hb_sn2(void)
{
	struct xpc_vars_sn2 *remote_vars;
	struct xpc_partition *part;
	short partid;
	enum xp_retval ret;

	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer;

	for (partid = 0; partid < xp_max_npartitions; partid++) {

		if (xpc_exiting)
			break;

		if (partid == sn_partition_id)
			continue;

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_INACTIVE ||
		    part->act_state == XPC_P_DEACTIVATING) {
			continue;
		}

		/* pull the remote_hb cache line */
		ret = xp_remote_memcpy(remote_vars,
				       (void *)part->sn.sn2.remote_vars_pa,
				       XPC_RP_VARS_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);
			continue;
		}

		dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
			" = %ld, heartbeat_offline = %ld, HB_mask[0] = 0x%lx\n",
			partid, remote_vars->heartbeat, part->last_heartbeat,
			remote_vars->heartbeat_offline,
			remote_vars->heartbeating_to_mask[0]);

		if (((remote_vars->heartbeat == part->last_heartbeat) &&
		     (remote_vars->heartbeat_offline == 0)) ||
		    !xpc_hb_allowed(sn_partition_id,
				    &remote_vars->heartbeating_to_mask)) {

			XPC_DEACTIVATE_PARTITION(part, xpNoHeartbeat);
			continue;
		}

		part->last_heartbeat = remote_vars->heartbeat;
	}
}

/*
 * Get a copy of the remote partition's XPC variables from the reserved page.
 *
 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_RP_VARS_SIZE.
 */
static enum xp_retval
xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars)
{
	enum xp_retval ret;

	if (remote_vars_pa == 0)
		return xpVarsNotSet;

	/* pull over the cross partition variables */
	ret = xp_remote_memcpy(remote_vars, (void *)remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	if (XPC_VERSION_MAJOR(remote_vars->version) !=
	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
		return xpBadVersion;
	}

	return xpSuccess;
}

static void
xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
				     u64 remote_rp_pa, int nasid)
{
	xpc_activate_IRQ_send_local_sn2(nasid);
}

static void
xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
{
	xpc_activate_IRQ_send_local_sn2(part->sn.sn2.activate_IRQ_nasid);
}

static void
xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	AMO_t *amo = (AMO_t *)__va(part_sn2->remote_amos_page_pa +
				   (XPC_DEACTIVATE_REQUEST_AMO * sizeof(AMO_t)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's AMO */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 (1UL << sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IPIs and AMOs to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've set our
	 * bit in their deactivate request AMO.
	 */
	xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}

static void
xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	AMO_t *amo = (AMO_t *)__va(part->sn.sn2.remote_amos_page_pa +
				   (XPC_DEACTIVATE_REQUEST_AMO * sizeof(AMO_t)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's AMO */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~(1UL << sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IPIs and AMOs to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

static int
xpc_partition_deactivation_requested_sn2(short partid)
{
	AMO_t *amo = xpc_vars->amos_page + XPC_DEACTIVATE_REQUEST_AMO;

	/* our partition's AMO variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		(1UL << partid)) != 0;
}

/*
 * Update the remote partition's info.
 */
static void
xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
			      unsigned long *remote_rp_stamp, u64 remote_rp_pa,
			      u64 remote_vars_pa,
			      struct xpc_vars_sn2 *remote_vars)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

	part->remote_rp_version = remote_rp_version;
	dev_dbg(xpc_part, "  remote_rp_version = 0x%016x\n",
		part->remote_rp_version);

	part->remote_rp_stamp = *remote_rp_stamp;
	dev_dbg(xpc_part, "  remote_rp_stamp = 0x%016lx\n",
		part->remote_rp_stamp);

	part->remote_rp_pa = remote_rp_pa;
	dev_dbg(xpc_part, "  remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);

	part_sn2->remote_vars_pa = remote_vars_pa;
	dev_dbg(xpc_part, "  remote_vars_pa = 0x%016lx\n",
		part_sn2->remote_vars_pa);

	part->last_heartbeat = remote_vars->heartbeat;
	dev_dbg(xpc_part, "  last_heartbeat = 0x%016lx\n",
		part->last_heartbeat);

	part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
	dev_dbg(xpc_part, "  remote_vars_part_pa = 0x%016lx\n",
		part_sn2->remote_vars_part_pa);

	part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
	dev_dbg(xpc_part, "  activate_IRQ_nasid = 0x%x\n",
		part_sn2->activate_IRQ_nasid);

	part_sn2->activate_IRQ_phys_cpuid =
	    remote_vars->activate_IRQ_phys_cpuid;
	dev_dbg(xpc_part, "  activate_IRQ_phys_cpuid = 0x%x\n",
		part_sn2->activate_IRQ_phys_cpuid);

	part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
	dev_dbg(xpc_part, "  remote_amos_page_pa = 0x%lx\n",
		part_sn2->remote_amos_page_pa);

	part_sn2->remote_vars_version = remote_vars->version;
	dev_dbg(xpc_part, "  remote_vars_version = 0x%x\n",
		part_sn2->remote_vars_version);
}

/*
 * Prior code has determined the nasid which generated an IPI. Inspect
 * that nasid to determine if its partition needs to be activated or
 * deactivated.
 *
 * A partition is considered "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat. A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_activate_IRQ_req_sn2(int nasid)
{
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars_sn2 *remote_vars;
	u64 remote_rp_pa;
	u64 remote_vars_pa;
	int remote_rp_version;
	int reactivate = 0;
	unsigned long remote_rp_stamp = 0;
	short partid;
	struct xpc_partition *part;
	struct xpc_partition_sn2 *part_sn2;
	enum xp_retval ret;

	/* pull over the reserved page structure */

	remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;

	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);
		return;
	}

	remote_vars_pa = remote_rp->sn.vars_pa;
	remote_rp_version = remote_rp->version;
	remote_rp_stamp = remote_rp->stamp;

	partid = remote_rp->SAL_partid;
	part = &xpc_partitions[partid];
	part_sn2 = &part->sn.sn2;

	/* pull over the cross partition variables */

	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer;

	ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);

		XPC_DEACTIVATE_PARTITION(part, ret);
		return;
	}

	part->activate_IRQ_rcvd++;

	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
		"%ld:0x%lx\n", (int)nasid, (int)partid, part->activate_IRQ_rcvd,
		remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);

	if (xpc_partition_disengaged(part) &&
	    part->act_state == XPC_P_INACTIVE) {

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_stamp, remote_rp_pa,
					      remote_vars_pa, remote_vars);

		if (xpc_partition_deactivation_requested_sn2(partid)) {
			/*
			 * Other side is waiting on us to deactivate even though
			 * we already have.
			 */
			return;
		}

		xpc_activate_partition(part);
		return;
	}

	DBUG_ON(part->remote_rp_version == 0);
	DBUG_ON(part_sn2->remote_vars_version == 0);

	if (remote_rp_stamp != part->remote_rp_stamp) {

		/* the other side rebooted */

		DBUG_ON(xpc_partition_engaged_sn2(partid));
		DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_stamp, remote_rp_pa,
					      remote_vars_pa, remote_vars);
		reactivate = 1;
	}

	if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
		/* still waiting on other side to disengage from us */
		return;
	}

	if (reactivate)
		XPC_DEACTIVATE_PARTITION(part, xpReactivating);
	else if (xpc_partition_deactivation_requested_sn2(partid))
		XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
}

/*
 * Loop through the activation AMO variables and process any bits
 * which are set. Each bit indicates a nasid sending a partition
 * activation or deactivation request.
 *
 * Return #of IRQs detected.
 */
static int
xpc_identify_activate_IRQ_sender_sn2(void)
{
	int word, bit;
	u64 nasid_mask;
	u64 nasid;		/* remote nasid */
	int n_IRQs_detected = 0;
	AMO_t *act_amos;

	act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;

	/* scan through act AMO variable looking for non-zero entries */
	for (word = 0; word < xp_nasid_mask_words; word++) {

		if (xpc_exiting)
			break;

		nasid_mask = xpc_IPI_receive_sn2(&act_amos[word]);
		if (nasid_mask == 0) {
			/* no IRQs from nasids in this variable */
			continue;
		}

		dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
			nasid_mask);

		/*
		 * If this nasid has been added to the machine since
		 * our partition was reset, this will retain the
		 * remote nasid in our reserved pages machine mask.
		 * This is used in the event of module reload.
		 */
		xpc_mach_nasids[word] |= nasid_mask;

		/* locate the nasid(s) which sent interrupts */

		for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
			if (nasid_mask & (1UL << bit)) {
				n_IRQs_detected++;
				nasid = XPC_NASID_FROM_W_B(word, bit);
				dev_dbg(xpc_part, "interrupt from nasid %ld\n",
					nasid);
				xpc_identify_activate_IRQ_req_sn2(nasid);
			}
		}
	}
	return n_IRQs_detected;
}

static void
xpc_process_activate_IRQ_rcvd_sn2(int n_IRQs_expected)
{
	int n_IRQs_detected;

	n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
	if (n_IRQs_detected < n_IRQs_expected) {
		/* retry once to help avoid missing AMO */
		(void)xpc_identify_activate_IRQ_sender_sn2();
	}
}

/*
 * Setup the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_infrastructure_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	enum xp_retval retval;
	int ret;
	int cpuid;
	int ch_number;
	struct xpc_channel *ch;
	struct timer_list *timer;
	short partid = XPC_PARTID(part);

	/*
	 * Allocate all of the channel structures as a contiguous chunk of
	 * memory.
	 */
	DBUG_ON(part->channels != NULL);
	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
				 GFP_KERNEL);
	if (part->channels == NULL) {
		dev_err(xpc_chan, "can't get memory for channels\n");
		return xpNoMemory;
	}

	/* allocate all the required GET/PUT values */

	part_sn2->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
							    GFP_KERNEL,
							    &part_sn2->
							    local_GPs_base);
	if (part_sn2->local_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for local get/put "
			"values\n");
		retval = xpNoMemory;
		goto out_1;
	}

	part_sn2->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
							     GFP_KERNEL,
							     &part_sn2->
							     remote_GPs_base);
	if (part_sn2->remote_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for remote get/put "
			"values\n");
		retval = xpNoMemory;
		goto out_2;
	}

	part_sn2->remote_GPs_pa = 0;

	/* allocate all the required open and close args */

	part->local_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
					  &part->local_openclose_args_base);
	if (part->local_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for local connect args\n");
		retval = xpNoMemory;
		goto out_3;
	}

	part->remote_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
					  &part->remote_openclose_args_base);
	if (part->remote_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for remote connect args\n");
		retval = xpNoMemory;
		goto out_4;
	}

	part_sn2->remote_openclose_args_pa = 0;

	part_sn2->local_IPI_amo_va = xpc_IPI_init_sn2(partid);
	part->local_IPI_amo = 0;
	spin_lock_init(&part->IPI_lock);

	part_sn2->remote_IPI_nasid = 0;
	part_sn2->remote_IPI_phys_cpuid = 0;
	part_sn2->remote_IPI_amo_va = NULL;

	atomic_set(&part->channel_mgr_requests, 1);
	init_waitqueue_head(&part->channel_mgr_wq);

	sprintf(part_sn2->IPI_owner, "xpc%02d", partid);
	ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
			  IRQF_SHARED, part_sn2->IPI_owner,
			  (void *)(u64)partid);
	if (ret != 0) {
		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
			"errno=%d\n", -ret);
		retval = xpLackOfResources;
		goto out_5;
	}

	/* Setup a timer to check for dropped IPIs */
	timer = &part_sn2->dropped_notify_IRQ_timer;
	init_timer(timer);
	timer->function =
	    (void (*)(unsigned long))xpc_dropped_notify_IRQ_check_sn2;
	timer->data = (unsigned long)part;
	timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT_INTERVAL;
	add_timer(timer);

	part->nchannels = XPC_MAX_NCHANNELS;

	atomic_set(&part->nchannels_active, 0);
	atomic_set(&part->nchannels_engaged, 0);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		ch->partid = partid;
		ch->number = ch_number;
		ch->flags = XPC_C_DISCONNECTED;

		ch->sn.sn2.local_GP = &part_sn2->local_GPs[ch_number];
		ch->local_openclose_args =
		    &part->local_openclose_args[ch_number];

		atomic_set(&ch->kthreads_assigned, 0);
		atomic_set(&ch->kthreads_idle, 0);
		atomic_set(&ch->kthreads_active, 0);

		atomic_set(&ch->references, 0);
		atomic_set(&ch->n_to_notify, 0);

		spin_lock_init(&ch->lock);
		mutex_init(&ch->sn.sn2.msg_to_pull_mutex);
		init_completion(&ch->wdisconnect_wait);

		atomic_set(&ch->n_on_msg_allocate_wq, 0);
		init_waitqueue_head(&ch->msg_allocate_wq);
		init_waitqueue_head(&ch->idle_wq);
	}

	/*
	 * With the setting of the partition setup_state to XPC_P_SETUP, we're
	 * declaring that this partition is ready to go.
	 */
	part->setup_state = XPC_P_SETUP;

	/*
	 * Setup the per partition specific variables required by the
	 * remote partition to establish channel connections with us.
	 *
	 * The setting of the magic # indicates that these per partition
	 * specific variables are ready to be used.
	 */
	xpc_vars_part[partid].GPs_pa = __pa(part_sn2->local_GPs);
	xpc_vars_part[partid].openclose_args_pa =
	    __pa(part->local_openclose_args);
	xpc_vars_part[partid].IPI_amo_pa = __pa(part_sn2->local_IPI_amo_va);
	cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
	xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
	xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid);
	xpc_vars_part[partid].nchannels = part->nchannels;
	xpc_vars_part[partid].magic = XPC_VP_MAGIC1;

	return xpSuccess;

	/* setup of infrastructure failed */
out_5:
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
out_4:
	kfree(part->local_openclose_args_base);
	part->local_openclose_args = NULL;
out_3:
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
out_2:
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
out_1:
	kfree(part->channels);
	part->channels = NULL;
	return retval;
}

/*
 * Teardown the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static void
xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	short partid = XPC_PARTID(part);

	/*
	 * We start off by making this partition inaccessible to local
	 * processes by marking it as no longer setup. Then we make it
	 * inaccessible to remote processes by clearing the XPC per partition
	 * specific variable's magic # (which indicates that these variables
	 * are no longer valid) and by ignoring all XPC notify IPIs sent to
	 * this partition.
	 */

	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);
	DBUG_ON(part->setup_state != XPC_P_SETUP);
	part->setup_state = XPC_P_WTEARDOWN;

	xpc_vars_part[partid].magic = 0;

	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);

	/*
	 * Before proceeding with the teardown we have to wait until all
	 * existing references cease.
	 */
	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	part->setup_state = XPC_P_TORNDOWN;

	/* in case we've still got outstanding timers registered... */
	del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->local_openclose_args_base);
	part->local_openclose_args = NULL;
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	kfree(part->channels);
	part->channels = NULL;
	part_sn2->local_IPI_amo_va = NULL;
}

/*
 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
 * (or multiple cachelines) from a remote partition.
 *
 * src must be a cacheline aligned physical address on the remote partition.
 * dst must be a cacheline aligned virtual address on this partition.
 * cnt must be cacheline sized
 */
/* >>> Replace this function by call to xp_remote_memcpy() or bte_copy()? */
static enum xp_retval
xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
			       const void *src, size_t cnt)
{
	enum xp_retval ret;

	DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
	DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));

	if (part->act_state == XPC_P_DEACTIVATING)
		return part->reason;

	ret = xp_remote_memcpy(dst, src, cnt);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
			" ret=%d\n", XPC_PARTID(part), ret);
	}
	return ret;
}

/*
 * Pull the remote per partition specific variables from the specified
 * partition.
 */
static enum xp_retval
xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	u8 buffer[L1_CACHE_BYTES * 2];
	struct xpc_vars_part_sn2 *pulled_entry_cacheline =
	    (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
	struct xpc_vars_part_sn2 *pulled_entry;
	u64 remote_entry_cacheline_pa, remote_entry_pa;
	short partid = XPC_PARTID(part);
	enum xp_retval ret;

	/* pull the cacheline that contains the variables we're interested in */

	DBUG_ON(part_sn2->remote_vars_part_pa !=
		L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
	DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);

	remote_entry_pa = part_sn2->remote_vars_part_pa +
	    sn_partition_id * sizeof(struct xpc_vars_part_sn2);

	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));

	pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
						    + (remote_entry_pa &
						    (L1_CACHE_BYTES - 1)));

	ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
					     (void *)remote_entry_cacheline_pa,
					     L1_CACHE_BYTES);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
			"partition %d, ret=%d\n", partid, ret);
		return ret;
	}

	/* see if they've been set up yet */

	if (pulled_entry->magic != XPC_VP_MAGIC1 &&
	    pulled_entry->magic != XPC_VP_MAGIC2) {

		if (pulled_entry->magic != 0) {
			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d has bad magic value (=0x%lx)\n",
				partid, sn_partition_id, pulled_entry->magic);
			return xpBadMagic;
		}

		/* they've not been initialized yet */
		return xpRetry;
	}

	if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {

		/* validate the variables */

		if (pulled_entry->GPs_pa == 0 ||
		    pulled_entry->openclose_args_pa == 0 ||
		    pulled_entry->IPI_amo_pa == 0) {

			dev_err(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d are not valid\n", partid,
				sn_partition_id);
			return xpInvalidAddress;
		}

		/* the variables we imported look to be valid */

		part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
		part_sn2->remote_openclose_args_pa =
		    pulled_entry->openclose_args_pa;
		part_sn2->remote_IPI_amo_va =
		    (AMO_t *)__va(pulled_entry->IPI_amo_pa);
		part_sn2->remote_IPI_nasid = pulled_entry->IPI_nasid;
		part_sn2->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;

		if (part->nchannels > pulled_entry->nchannels)
			part->nchannels = pulled_entry->nchannels;

		/* let the other side know that we've pulled their variables */

		xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
	}

	if (pulled_entry->magic == XPC_VP_MAGIC1)
		return xpRetry;

	return xpSuccess;
}
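
/*
 * The magic values above implement a two-phase handshake: a partition
 * sets its own entry to XPC_VP_MAGIC1 once its per partition variables
 * are initialized, and advances it to XPC_VP_MAGIC2 once it has pulled
 * the other side's variables. Seeing MAGIC1 from the remote side thus
 * means it has yet to pull our variables, which is why xpRetry is
 * returned above.
 */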

/*
 * Establish first contact with the remote partition. This involves pulling
 * the XPC per partition variables from the remote partition and waiting for
 * the remote partition to pull ours.
 */
static enum xp_retval
xpc_make_first_contact_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	enum xp_retval ret;

	/*
	 * Register the remote partition's AMOs with SAL so it can handle
	 * and cleanup errors within that address range should the remote
	 * partition go down. We don't unregister this range because it is
	 * difficult to tell when outstanding writes to the remote partition
	 * are finished and thus when it is safe to unregister. This should
	 * not result in wasted space in the SAL xp_addr_region table because
	 * we should get the same page for remote_amos_page_pa after module
	 * reloads and system reboots.
	 */
	if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa,
				       PAGE_SIZE, 1) < 0) {
		dev_warn(xpc_part, "xpc_activating(%d) failed to register "
			 "xp_addr region\n", XPC_PARTID(part));

		ret = xpPhysAddrRegFailed;
		XPC_DEACTIVATE_PARTITION(part, ret);
		return ret;
	}

	/*
	 * Send activate IRQ to get other side to activate if they've not
	 * already begun to do so.
	 */
	xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);

	while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
		if (ret != xpRetry) {
			XPC_DEACTIVATE_PARTITION(part, ret);
			return ret;
		}

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

/*
 * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
 */
static u64
xpc_get_IPI_flags_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	u64 IPI_amo;
	enum xp_retval ret;

	/*
	 * See if there are any IPI flags to be handled.
	 */

	spin_lock_irqsave(&part->IPI_lock, irq_flags);
	IPI_amo = part->local_IPI_amo;
	if (IPI_amo != 0)
		part->local_IPI_amo = 0;

	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);

	if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
		ret = xpc_pull_remote_cachelines_sn2(part, part->
						     remote_openclose_args,
						     (void *)part_sn2->
						     remote_openclose_args_pa,
						     XPC_OPENCLOSE_ARGS_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull openclose args from "
				"partition %d, ret=%d\n", XPC_PARTID(part),
				ret);

			/* don't bother processing IPIs anymore */
			IPI_amo = 0;
		}
	}

	if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
		ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
						     (void *)part_sn2->remote_GPs_pa,
						     XPC_GP_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull GPs from partition "
				"%d, ret=%d\n", XPC_PARTID(part), ret);

			/* don't bother processing IPIs anymore */
			IPI_amo = 0;
		}
	}

	return IPI_amo;
}

/*
 * Notify those who wanted to be notified upon delivery of their message.
 */
static void
xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
{
	struct xpc_notify *notify;
	u8 notify_type;
	s64 get = ch->sn.sn2.w_remote_GP.get - 1;

	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {

		notify = &ch->notify_queue[get % ch->local_nentries];

		/*
		 * See if the notify entry indicates it was associated with
		 * a message whose sender wants to be notified. It is possible
		 * that it is, but someone else is doing or has done the
		 * notification.
		 */
		notify_type = notify->type;
		if (notify_type == 0 ||
		    cmpxchg(&notify->type, notify_type, 0) != notify_type) {
			continue;
		}

		DBUG_ON(notify_type != XPC_N_CALL);

		atomic_dec(&ch->n_to_notify);

		if (notify->func != NULL) {
			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				(void *)notify, get, ch->partid, ch->number);

			notify->func(reason, ch->partid, ch->number,
				     notify->key);

			dev_dbg(xpc_chan, "notify->func() returned, "
				"notify=0x%p, msg_number=%ld, partid=%d, "
				"channel=%d\n", (void *)notify, get,
				ch->partid, ch->number);
		}
	}
}

static void
xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch)
{
	xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put);
}

/*
 * Clear some of the msg flags in the local message queue.
 */
static inline void
xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	s64 get;

	get = ch_sn2->w_remote_GP.get;
	do {
		msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
					 (get % ch->local_nentries) *
					 ch->msg_size);
		msg->flags = 0;
	} while (++get < ch_sn2->remote_GP.get);
}

/*
 * Clear some of the msg flags in the remote message queue.
 */
static inline void
xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	s64 put;

	put = ch_sn2->w_remote_GP.put;
	do {
		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
					 (put % ch->remote_nentries) *
					 ch->msg_size);
		msg->flags = 0;
	} while (++put < ch_sn2->remote_GP.put);
}
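
/*
 * A quick sketch of the sn2 message queue protocol as used below: each
 * channel has a local and a remote circular queue of fixed size entries.
 * local_GP holds the authoritative GET/PUT indices that the other side
 * pulls, while w_local_GP and w_remote_GP are cached working copies.
 * PUT advances as messages become XPC_M_READY; GET advances as messages
 * are marked XPC_M_DONE; entries are reused modulo nentries, which is
 * why the flags of consumed slots must be cleared above before reuse.
 */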

static void
xpc_process_msg_IPI_sn2(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	int nmsgs_sent;

	ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number];

	/* See what, if anything, has changed for each connected channel */

	xpc_msgqueue_ref(ch);

	if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get &&
	    ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) {
		/* nothing changed since GPs were last pulled */
		xpc_msgqueue_deref(ch);
		return;
	}

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/*
	 * First check to see if messages recently sent by us have been
	 * received by the other side. (The remote GET value will have
	 * changed since we last looked at it.)
	 */

	if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) {

		/*
		 * We need to notify any senders that want to be notified
		 * that their sent messages have been received by their
		 * intended recipients. We need to do this before updating
		 * w_remote_GP.get so that we don't allocate the same message
		 * queue entries prematurely (see xpc_allocate_msg()).
		 */
		if (atomic_read(&ch->n_to_notify) > 0) {
			/*
			 * Notify senders that messages sent have been
			 * received and delivered by the other side.
			 */
			xpc_notify_senders_sn2(ch, xpMsgDelivered,
					       ch_sn2->remote_GP.get);
		}

		/*
		 * Clear msg->flags in previously sent messages, so that
		 * they're ready for xpc_allocate_msg().
		 */
		xpc_clear_local_msgqueue_flags_sn2(ch);

		ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;

		dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
			"channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
			ch->number);

		/*
		 * If anyone was waiting for message queue entries to become
		 * available, wake them up.
		 */
		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
			wake_up(&ch->msg_allocate_wq);
	}

	/*
	 * Now check for newly sent messages by the other side. (The remote
	 * PUT value will have changed since we last looked at it.)
	 */

	if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) {
		/*
		 * Clear msg->flags in previously received messages, so that
		 * they're ready for xpc_get_deliverable_msg().
		 */
		xpc_clear_remote_msgqueue_flags_sn2(ch);

		ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;

		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
			"channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
			ch->number);

		nmsgs_sent = ch_sn2->w_remote_GP.put - ch_sn2->w_local_GP.get;
		if (nmsgs_sent > 0) {
			dev_dbg(xpc_chan, "msgs waiting to be copied and "
				"delivered=%d, partid=%d, channel=%d\n",
				nmsgs_sent, ch->partid, ch->number);

			if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
				xpc_activate_kthreads(ch, nmsgs_sent);
		}
	}

	xpc_msgqueue_deref(ch);
}

static struct xpc_msg *
xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *remote_msg, *msg;
	u32 msg_index, nmsgs;
	u64 msg_offset;
	enum xp_retval ret;

	if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) {
		/* we were interrupted by a signal */
		return NULL;
	}

	while (get >= ch_sn2->next_msg_to_pull) {

		/* pull as many messages as are ready and able to be pulled */

		msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries;

		DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put);
		nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull;
		if (msg_index + nmsgs > ch->remote_nentries) {
			/* ignore the ones that wrap the msg queue for now */
			nmsgs = ch->remote_nentries - msg_index;
		}

		msg_offset = msg_index * ch->msg_size;
		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
		remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
						msg_offset);

		ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg,
						     nmsgs * ch->msg_size);
		if (ret != xpSuccess) {

			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
				" msg %ld from partition %d, channel=%d, "
				"ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
				ch->partid, ch->number, ret);

			XPC_DEACTIVATE_PARTITION(part, ret);

			mutex_unlock(&ch_sn2->msg_to_pull_mutex);
			return NULL;
		}

		ch_sn2->next_msg_to_pull += nmsgs;
	}

	mutex_unlock(&ch_sn2->msg_to_pull_mutex);

	/* return the message we were looking for */
	msg_offset = (get % ch->remote_nentries) * ch->msg_size;
	msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);

	return msg;
}

static int
xpc_n_of_deliverable_msgs_sn2(struct xpc_channel *ch)
{
	return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get;
}

/*
 * Get a message to be delivered.
 */
static struct xpc_msg *
xpc_get_deliverable_msg_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg = NULL;
	s64 get;

	do {
		if (ch->flags & XPC_C_DISCONNECTING)
			break;

		get = ch_sn2->w_local_GP.get;
		rmb();	/* guarantee that .get loads before .put */
		if (get == ch_sn2->w_remote_GP.put)
			break;

		/* There are messages waiting to be pulled and delivered.
		 * We need to try to secure one for ourselves. We'll do this
		 * by trying to increment w_local_GP.get and hope that no one
		 * else beats us to it. If they do, we'll simply have to try
		 * again for the next one.
		 */

		if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
			/* we got the entry referenced by get */

			dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
				"partid=%d, channel=%d\n", get + 1,
				ch->partid, ch->number);

			/* pull the message from the remote partition */

			msg = xpc_pull_remote_msg_sn2(ch, get);

			DBUG_ON(msg != NULL && msg->number != get);
			DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
			DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));

			break;
		}

	} while (1);

	return msg;
}
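
/*
 * Note the lock-free claim above: competing consumers each cmpxchg()
 * w_local_GP.get, and only the winner for a given value of get pulls
 * and delivers that message; the losers simply retry on the next entry.
 */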

/*
 * Now we actually send the messages that are ready to be sent by advancing
 * the local message queue's Put value and then send an IPI to the recipient
 * partition.
 */
static void
xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	s64 put = initial_put + 1;
	int send_IPI = 0;

	while (1) {

		while (1) {
			if (put == ch_sn2->w_local_GP.put)
				break;

			msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
						 (put % ch->local_nentries) *
						 ch->msg_size);

			if (!(msg->flags & XPC_M_READY))
				break;

			put++;
		}

		if (put == initial_put) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) !=
		    initial_put) {
			/* someone else beat us to it */
			DBUG_ON(ch_sn2->local_GP->put < initial_put);
			break;
		}

		/* we just set the new value of local_GP->put */

		dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
			"channel=%d\n", put, ch->partid, ch->number);

		send_IPI = 1;

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->put is not XPC_M_READY or that local_GP->put
		 * equals w_local_GP.put, so we'll go have a look.
		 */
		initial_put = put;
	}

	if (send_IPI)
		xpc_send_channel_msgrequest_sn2(ch);
}

/*
 * Allocate an entry for a message from the message queue associated with the
 * specified channel.
 */
static enum xp_retval
xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
		     struct xpc_msg **address_of_msg)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	enum xp_retval ret;
	s64 put;

	/*
	 * Get the next available message entry from the local message queue.
	 * If none are available, we'll make sure that we grab the latest
	 * GP values.
	 */
	ret = xpTimeout;

	while (1) {

		put = ch_sn2->w_local_GP.put;
		rmb();	/* guarantee that .put loads before .get */
		if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {

			/* There are available message entries. We need to try
			 * to secure one for ourselves. We'll do this by trying
			 * to increment w_local_GP.put as long as someone else
			 * doesn't beat us to it. If they do, we'll have to
			 * try again.
			 */
			if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) ==
			    put) {
				/* we got the entry referenced by put */
				break;
			}
			continue;	/* try again */
		}

		/*
		 * There aren't any available msg entries at this time.
		 *
		 * In waiting for a message entry to become available,
		 * we set a timeout in case the other side is not
		 * sending completion IPIs. This lets us fake an IPI
		 * that will cause the IPI handler to fetch the latest
		 * GP values as if an IPI was sent by the other side.
		 */
		if (ret == xpTimeout)
			xpc_send_channel_local_msgrequest_sn2(ch);

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	/* get the message's address and initialize it */
	msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
				 (put % ch->local_nentries) * ch->msg_size);

	DBUG_ON(msg->flags != 0);
	msg->number = put;

	dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
		"msg_number=%ld, partid=%d, channel=%d\n", put + 1,
		(void *)msg, msg->number, ch->partid, ch->number);

	*address_of_msg = msg;
	return xpSuccess;
}

/*
 * Common code that does the actual sending of the message by advancing the
 * local message queue's Put value and sends an IPI to the partition the
 * message is being sent to.
 */
static enum xp_retval
xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload,
		 u16 payload_size, u8 notify_type, xpc_notify_func func,
		 void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_msg *msg = msg;
	struct xpc_notify *notify = notify;
	s64 msg_number;
	s64 put;

	DBUG_ON(notify_type == XPC_N_CALL && func == NULL);

	if (XPC_MSG_SIZE(payload_size) > ch->msg_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_sn2(ch, flags, &msg);
	if (ret != xpSuccess)
		goto out_1;

	msg_number = msg->number;

	if (notify_type != 0) {
		/*
		 * Tell the remote side to send an ACK interrupt when the
		 * message has been delivered.
		 */
		msg->flags |= XPC_M_INTERRUPT;

		atomic_inc(&ch->n_to_notify);

		notify = &ch->notify_queue[msg_number % ch->local_nentries];
		notify->func = func;
		notify->key = key;
		notify->type = notify_type;

		/* >>> is a mb() needed here? */

		if (ch->flags & XPC_C_DISCONNECTING) {
			/*
			 * An error occurred between our last error check and
			 * this one. We will try to clear the type field from
			 * the notify entry. If we succeed then
			 * xpc_disconnect_channel() didn't already process
			 * the notify entry.
			 */
			if (cmpxchg(&notify->type, notify_type, 0) ==
			    notify_type) {
				atomic_dec(&ch->n_to_notify);
				ret = ch->reason;
			}
			goto out_1;
		}
	}

	memcpy(&msg->payload, payload, payload_size);

	msg->flags |= XPC_M_READY;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of local_GP->put.
	 */
	mb();

	/* see if the message is next in line to be sent, if so send it */

	put = ch->sn.sn2.local_GP->put;
	if (put == msg_number)
		xpc_send_msgs_sn2(ch, put);

out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}
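
/*
 * The mb() above matters because another sender may be advancing
 * local_GP->put concurrently in xpc_send_msgs_sn2(): if our XPC_M_READY
 * store were reordered after the load of local_GP->put, both parties
 * could conclude the other would send this message, and it would sit
 * unsent until some later send. (This is our reading of the barrier
 * comment above; the original authors left only the ordering note.)
 */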

/*
 * Now we actually acknowledge the messages that have been delivered and ack'd
 * by advancing the cached remote message queue's Get value and if requested
 * send an IPI to the message sender's partition.
 */
static void
xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	s64 get = initial_get + 1;
	int send_IPI = 0;

	while (1) {

		while (1) {
			if (get == ch_sn2->w_local_GP.get)
				break;

			msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
						 (get % ch->remote_nentries) *
						 ch->msg_size);

			if (!(msg->flags & XPC_M_DONE))
				break;

			msg_flags |= msg->flags;
			get++;
		}

		if (get == initial_get) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) !=
		    initial_get) {
			/* someone else beat us to it */
			DBUG_ON(ch_sn2->local_GP->get <= initial_get);
			break;
		}

		/* we just set the new value of local_GP->get */

		dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
			"channel=%d\n", get, ch->partid, ch->number);

		send_IPI = (msg_flags & XPC_M_INTERRUPT);

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->get is not XPC_M_DONE or that local_GP->get
		 * equals w_local_GP.get, so we'll go have a look.
		 */
		initial_get = get;
	}

	if (send_IPI)
		xpc_send_channel_msgrequest_sn2(ch);
}

static void
xpc_received_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg)
{
	s64 get;
	s64 msg_number = msg->number;

	dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
		(void *)msg, msg_number, ch->partid, ch->number);

	DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
		msg_number % ch->remote_nentries);
	DBUG_ON(msg->flags & XPC_M_DONE);

	msg->flags |= XPC_M_DONE;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of local_GP->get.
	 */
	mb();

	/*
	 * See if this message is next in line to be acknowledged as having
	 * been delivered.
	 */
	get = ch->sn.sn2.local_GP->get;
	if (get == msg_number)
		xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
}

int
xpc_init_sn2(void)
{
	int ret;

	xpc_rsvd_page_init = xpc_rsvd_page_init_sn2;
	xpc_increment_heartbeat = xpc_increment_heartbeat_sn2;
	xpc_offline_heartbeat = xpc_offline_heartbeat_sn2;
	xpc_online_heartbeat = xpc_online_heartbeat_sn2;
	xpc_heartbeat_init = xpc_heartbeat_init_sn2;
	xpc_heartbeat_exit = xpc_heartbeat_exit_sn2;
	xpc_check_remote_hb = xpc_check_remote_hb_sn2;

	xpc_request_partition_activation = xpc_request_partition_activation_sn2;
	xpc_request_partition_reactivation =
	    xpc_request_partition_reactivation_sn2;
	xpc_request_partition_deactivation =
	    xpc_request_partition_deactivation_sn2;
	xpc_cancel_partition_deactivation_request =
	    xpc_cancel_partition_deactivation_request_sn2;

	xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2;
	xpc_setup_infrastructure = xpc_setup_infrastructure_sn2;
	xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2;
	xpc_make_first_contact = xpc_make_first_contact_sn2;
	xpc_get_IPI_flags = xpc_get_IPI_flags_sn2;
	xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2;
	xpc_process_msg_IPI = xpc_process_msg_IPI_sn2;
	xpc_n_of_deliverable_msgs = xpc_n_of_deliverable_msgs_sn2;
	xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2;

	xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2;
	xpc_partition_engaged = xpc_partition_engaged_sn2;
	xpc_any_partition_engaged = xpc_any_partition_engaged_sn2;
	xpc_indicate_partition_disengaged =
	    xpc_indicate_partition_disengaged_sn2;
	xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2;

	xpc_send_channel_closerequest = xpc_send_channel_closerequest_sn2;
	xpc_send_channel_closereply = xpc_send_channel_closereply_sn2;
	xpc_send_channel_openrequest = xpc_send_channel_openrequest_sn2;
	xpc_send_channel_openreply = xpc_send_channel_openreply_sn2;

	xpc_send_msg = xpc_send_msg_sn2;
	xpc_received_msg = xpc_received_msg_sn2;
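
	/*
	 * The assignments above populate what is effectively XPC's
	 * architecture dispatch table: the generic code calls through
	 * these function pointers, and an alternate backend (e.g. the
	 * uv code) presumably installs its own implementations instead.
	 */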

	/* open up protections for IPI and [potentially] AMO operations */
	xpc_allow_IPI_ops_sn2();
	xpc_allow_AMO_ops_shub_wars_1_1_sn2();

	/*
	 * This is safe to do before the xpc_hb_checker thread has started
	 * because the handler releases a wait queue. If an interrupt is
	 * received before the thread is waiting, it will not go to sleep,
	 * but rather immediately process the interrupt.
	 */
	ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
			  "xpc hb", NULL);
	if (ret != 0) {
		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
			"errno=%d\n", -ret);
		xpc_disallow_IPI_ops_sn2();
	}
	return ret;
}

void
xpc_exit_sn2(void)
{
	free_irq(SGI_XPC_ACTIVATE, NULL);
	xpc_disallow_IPI_ops_sn2();
}