/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) channel support.
 *
 *	This is the part of XPC that manages the channels and
 *	sends/receives messages across them to/from other partitions.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <asm/sn/bte.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"

/*
 * Set up the initial values for the XPartition Communication channels.
 */
static void
xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
{
	int ch_number;
	struct xpc_channel *ch;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		ch->partid = partid;
		ch->number = ch_number;
		ch->flags = XPC_C_DISCONNECTED;

		ch->local_GP = &part->local_GPs[ch_number];
		ch->local_openclose_args =
					&part->local_openclose_args[ch_number];

		atomic_set(&ch->kthreads_assigned, 0);
		atomic_set(&ch->kthreads_idle, 0);
		atomic_set(&ch->kthreads_active, 0);

		atomic_set(&ch->references, 0);
		atomic_set(&ch->n_to_notify, 0);

		spin_lock_init(&ch->lock);
		sema_init(&ch->msg_to_pull_sema, 1);	/* mutex */

		atomic_set(&ch->n_on_msg_allocate_wq, 0);
		init_waitqueue_head(&ch->msg_allocate_wq);
		init_waitqueue_head(&ch->idle_wq);
	}
}

/*
 * Setup the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
enum xpc_retval
xpc_setup_infrastructure(struct xpc_partition *part)
{
	int ret;
	struct timer_list *timer;
	partid_t partid = XPC_PARTID(part);

	/*
	 * Zero out MOST of the entry for this partition. Only the fields
	 * starting with `nchannels' will be zeroed. The preceding fields must
	 * remain `viable' across partition ups and downs, since they may be
	 * referenced during this memset() operation.
	 */
	memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
				offsetof(struct xpc_partition, nchannels));

	/*
	 * Allocate all of the channel structures as a contiguous chunk of
	 * memory.
	 */
	part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
								GFP_KERNEL);
	if (part->channels == NULL) {
		dev_err(xpc_chan, "can't get memory for channels\n");
		return xpcNoMemory;
	}
	memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS);

	part->nchannels = XPC_NCHANNELS;

	/* allocate all the required GET/PUT values */

	part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
					GFP_KERNEL, &part->local_GPs_base);
	if (part->local_GPs == NULL) {
		kfree(part->channels);
		part->channels = NULL;
		dev_err(xpc_chan, "can't get memory for local get/put "
			"values\n");
		return xpcNoMemory;
	}
	memset(part->local_GPs, 0, XPC_GP_SIZE);

	part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
					GFP_KERNEL, &part->remote_GPs_base);
	if (part->remote_GPs == NULL) {
		kfree(part->channels);
		part->channels = NULL;
		kfree(part->local_GPs_base);
		part->local_GPs = NULL;
		dev_err(xpc_chan, "can't get memory for remote get/put "
			"values\n");
		return xpcNoMemory;
	}
	memset(part->remote_GPs, 0, XPC_GP_SIZE);

	/* allocate all the required open and close args */

	part->local_openclose_args = xpc_kmalloc_cacheline_aligned(
					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
					&part->local_openclose_args_base);
	if (part->local_openclose_args == NULL) {
		kfree(part->channels);
		part->channels = NULL;
		kfree(part->local_GPs_base);
		part->local_GPs = NULL;
		kfree(part->remote_GPs_base);
		part->remote_GPs = NULL;
		dev_err(xpc_chan, "can't get memory for local connect args\n");
		return xpcNoMemory;
	}
	memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);

	part->remote_openclose_args = xpc_kmalloc_cacheline_aligned(
					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
					&part->remote_openclose_args_base);
	if (part->remote_openclose_args == NULL) {
		kfree(part->channels);
		part->channels = NULL;
		kfree(part->local_GPs_base);
		part->local_GPs = NULL;
		kfree(part->remote_GPs_base);
		part->remote_GPs = NULL;
		kfree(part->local_openclose_args_base);
		part->local_openclose_args = NULL;
		dev_err(xpc_chan, "can't get memory for remote connect args\n");
		return xpcNoMemory;
	}
	memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);

	xpc_initialize_channels(part, partid);

	atomic_set(&part->nchannels_active, 0);


	/* local_IPI_amo were set to 0 by an earlier memset() */

	/* Initialize this partition's AMO_t structure */
	part->local_IPI_amo_va = xpc_IPI_init(partid);

	spin_lock_init(&part->IPI_lock);

	atomic_set(&part->channel_mgr_requests, 1);
	init_waitqueue_head(&part->channel_mgr_wq);

	sprintf(part->IPI_owner, "xpc%02d", partid);
	ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ,
				part->IPI_owner, (void *) (u64) partid);
	if (ret != 0) {
		kfree(part->channels);
		part->channels = NULL;
		kfree(part->local_GPs_base);
		part->local_GPs = NULL;
		kfree(part->remote_GPs_base);
		part->remote_GPs = NULL;
		kfree(part->local_openclose_args_base);
		part->local_openclose_args = NULL;
		kfree(part->remote_openclose_args_base);
		part->remote_openclose_args = NULL;
		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
			"errno=%d\n", -ret);
		return xpcLackOfResources;
	}

	/* Setup a timer to check for dropped IPIs */
	timer = &part->dropped_IPI_timer;
	init_timer(timer);
	timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
	timer->data = (unsigned long) part;
	timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
	add_timer(timer);

	/*
	 * With the setting of the partition setup_state to XPC_P_SETUP, we're
	 * declaring that this partition is ready to go.
	 */
	part->setup_state = XPC_P_SETUP;

	/*
	 * Setup the per partition specific variables required by the
	 * remote partition to establish channel connections with us.
	 *
	 * The setting of the magic # indicates that these per partition
	 * specific variables are ready to be used.
	 */
	xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
	xpc_vars_part[partid].openclose_args_pa =
					__pa(part->local_openclose_args);
	xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
	xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(smp_processor_id());
	xpc_vars_part[partid].IPI_phys_cpuid =
					cpu_physical_id(smp_processor_id());
	xpc_vars_part[partid].nchannels = part->nchannels;
	xpc_vars_part[partid].magic = XPC_VP_MAGIC1;

	return xpcSuccess;
}

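/*
 * Note on the handshake above: XPC_VP_MAGIC1 advertises that our
 * xpc_vars_part[] entry is initialized and may be pulled by the remote
 * partition; XPC_VP_MAGIC2 (set in xpc_pull_remote_vars_part() below)
 * additionally indicates that we have successfully pulled *their* entry.
 * Channel setup with a given partition only proceeds once both sides
 * have reached MAGIC2.
 */
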
/*
 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
 * (or multiple cachelines) from a remote partition.
 *
 * src must be a cacheline aligned physical address on the remote partition.
 * dst must be a cacheline aligned virtual address on this partition.
 * cnt must be a cacheline sized multiple.
 */
static enum xpc_retval
xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
				const void *src, size_t cnt)
{
	bte_result_t bte_ret;

	DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
	DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));

	if (part->act_state == XPC_P_DEACTIVATING) {
		return part->reason;
	}

	bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
				(u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
	if (bte_ret == BTE_SUCCESS) {
		return xpcSuccess;
	}

	dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
		XPC_PARTID(part), bte_ret);

	return xpc_map_bte_errors(bte_ret);
}

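/*
 * A minimal usage sketch (illustrative; it mirrors the call made from
 * xpc_get_IPI_flags() below). The openclose args are cacheline aligned
 * and cacheline sized by construction, satisfying the requirements above:
 *
 *	ret = xpc_pull_remote_cachelines(part, part->remote_openclose_args,
 *				(void *) part->remote_openclose_args_pa,
 *				XPC_OPENCLOSE_ARGS_SIZE);
 *	if (ret != xpcSuccess) {
 *		XPC_DEACTIVATE_PARTITION(part, ret);
 *	}
 */
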
/*
 * Pull the remote per partition specific variables from the specified
 * partition.
 */
enum xpc_retval
xpc_pull_remote_vars_part(struct xpc_partition *part)
{
	u8 buffer[L1_CACHE_BYTES * 2];
	struct xpc_vars_part *pulled_entry_cacheline =
			(struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
	struct xpc_vars_part *pulled_entry;
	u64 remote_entry_cacheline_pa, remote_entry_pa;
	partid_t partid = XPC_PARTID(part);
	enum xpc_retval ret;

	/* pull the cacheline that contains the variables we're interested in */

	DBUG_ON(part->remote_vars_part_pa !=
				L1_CACHE_ALIGN(part->remote_vars_part_pa));
	DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);

	remote_entry_pa = part->remote_vars_part_pa +
			sn_partition_id * sizeof(struct xpc_vars_part);

	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));

	pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
				(remote_entry_pa & (L1_CACHE_BYTES - 1)));

	ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
					(void *) remote_entry_cacheline_pa,
					L1_CACHE_BYTES);
	if (ret != xpcSuccess) {
		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
			"partition %d, ret=%d\n", partid, ret);
		return ret;
	}

	/* see if they've been set up yet */

	if (pulled_entry->magic != XPC_VP_MAGIC1 &&
				pulled_entry->magic != XPC_VP_MAGIC2) {

		if (pulled_entry->magic != 0) {
			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d has bad magic value (=0x%lx)\n",
				partid, sn_partition_id, pulled_entry->magic);
			return xpcBadMagic;
		}

		/* they've not been initialized yet */
		return xpcRetry;
	}

	if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {

		/* validate the variables */

		if (pulled_entry->GPs_pa == 0 ||
				pulled_entry->openclose_args_pa == 0 ||
					pulled_entry->IPI_amo_pa == 0) {

			dev_err(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d are not valid\n", partid,
				sn_partition_id);
			return xpcInvalidAddress;
		}

		/* the variables we imported look to be valid */

		part->remote_GPs_pa = pulled_entry->GPs_pa;
		part->remote_openclose_args_pa =
					pulled_entry->openclose_args_pa;
		part->remote_IPI_amo_va =
				(AMO_t *) __va(pulled_entry->IPI_amo_pa);
		part->remote_IPI_nasid = pulled_entry->IPI_nasid;
		part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;

		if (part->nchannels > pulled_entry->nchannels) {
			part->nchannels = pulled_entry->nchannels;
		}

		/* let the other side know that we've pulled their variables */

		xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
	}

	if (pulled_entry->magic == XPC_VP_MAGIC1) {
		return xpcRetry;
	}

	return xpcSuccess;
}

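/*
 * The caller is expected to keep invoking the function above while it
 * returns xpcRetry: first until the remote side has set MAGIC1 (its entry
 * exists at all), and then until the remote side has set MAGIC2 (it has
 * also pulled our entry). Only a return of xpcSuccess means both
 * directions of the per partition variable exchange are complete.
 */
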
/*
 * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
 */
static u64
xpc_get_IPI_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	u64 IPI_amo;
	enum xpc_retval ret;


	/*
	 * See if there are any IPI flags to be handled.
	 */

	spin_lock_irqsave(&part->IPI_lock, irq_flags);
	if ((IPI_amo = part->local_IPI_amo) != 0) {
		part->local_IPI_amo = 0;
	}
	spin_unlock_irqrestore(&part->IPI_lock, irq_flags);

	if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
		ret = xpc_pull_remote_cachelines(part,
					part->remote_openclose_args,
					(void *) part->remote_openclose_args_pa,
					XPC_OPENCLOSE_ARGS_SIZE);
		if (ret != xpcSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull openclose args from "
				"partition %d, ret=%d\n", XPC_PARTID(part),
				ret);

			/* don't bother processing IPIs anymore */
			IPI_amo = 0;
		}
	}

	if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
		ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
						(void *) part->remote_GPs_pa,
						XPC_GP_SIZE);
		if (ret != xpcSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull GPs from partition "
				"%d, ret=%d\n", XPC_PARTID(part), ret);

			/* don't bother processing IPIs anymore */
			IPI_amo = 0;
		}
	}

	return IPI_amo;
}

/*
 * Allocate the local message queue and the notify queue.
 */
static enum xpc_retval
xpc_allocate_local_msgqueue(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	int nentries;
	size_t nbytes;


	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
	// >>> iterations of the for-loop, bail if set?

	// >>> should we impose a minimum #of entries? like 4 or 8?
	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
442 for (nentries = ch->local_nentries; nentries > 0; nentries--) {
444 nbytes = nentries * ch->msg_size;
445 ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
446 (GFP_KERNEL | GFP_DMA),
447 &ch->local_msgqueue_base);
448 if (ch->local_msgqueue == NULL) {
451 memset(ch->local_msgqueue, 0, nbytes);
453 nbytes = nentries * sizeof(struct xpc_notify);
454 ch->notify_queue = kmalloc(nbytes, (GFP_KERNEL | GFP_DMA));
455 if (ch->notify_queue == NULL) {
456 kfree(ch->local_msgqueue_base);
457 ch->local_msgqueue = NULL;
460 memset(ch->notify_queue, 0, nbytes);
		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
				"partid=%d, channel=%d\n", nentries,
				ch->local_nentries, ch->partid, ch->number);

			ch->local_nentries = nentries;
		}
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpcSuccess;
	}

	dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
		"queue, partid=%d, channel=%d\n", ch->partid, ch->number);
	return xpcNoMemory;
}

/*
 * Allocate the cached remote message queue.
 */
static enum xpc_retval
xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	int nentries;
	size_t nbytes;


	DBUG_ON(ch->remote_nentries <= 0);

	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
	// >>> iterations of the for-loop, bail if set?

	// >>> should we impose a minimum #of entries? like 4 or 8?
	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->msg_size;
		ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
						(GFP_KERNEL | GFP_DMA),
						&ch->remote_msgqueue_base);
		if (ch->remote_msgqueue == NULL) {
			continue;
		}
		memset(ch->remote_msgqueue, 0, nbytes);

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
				"partid=%d, channel=%d\n", nentries,
				ch->remote_nentries, ch->partid, ch->number);

			ch->remote_nentries = nentries;
		}
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpcSuccess;
	}

	dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
		"partid=%d, channel=%d\n", ch->partid, ch->number);
	return xpcNoMemory;
}

/*
 * Allocate message queues and other stuff associated with a channel.
 *
 * Note: Assumes all of the channel sizes are filled in.
 */
static enum xpc_retval
xpc_allocate_msgqueues(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	int i;
	enum xpc_retval ret;


	DBUG_ON(ch->flags & XPC_C_SETUP);

	if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
		return ret;
	}

	if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
		kfree(ch->local_msgqueue_base);
		ch->local_msgqueue = NULL;
		kfree(ch->notify_queue);
		ch->notify_queue = NULL;
		return ret;
	}

	for (i = 0; i < ch->local_nentries; i++) {
		/* use a semaphore as an event wait queue */
		sema_init(&ch->notify_queue[i].sema, 0);
	}

	sema_init(&ch->teardown_sema, 0);	/* event wait */

	spin_lock_irqsave(&ch->lock, irq_flags);
	ch->flags |= XPC_C_SETUP;
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpcSuccess;
}

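/*
 * Layout sketch of what was just allocated: the local message queue holds
 * local_nentries fixed-size messages used as a circular buffer, indexed by
 * (GP value % nentries); the notify queue parallels it one-to-one for
 * senders that requested delivery notification; and the cached remote
 * message queue receives copies pulled via BTE from the peer's local
 * message queue.
 */
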
/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xpc_retval ret;


	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
				!(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_allocate_msgqueues(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpcSuccess) {
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
		}
		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_SETUP));
		DBUG_ON(ch->local_msgqueue == NULL);
		DBUG_ON(ch->remote_msgqueue == NULL);
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_IPI_send_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY)) {
		return;
	}

	DBUG_ON(ch->remote_msgqueue_pa == 0);

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		ch->number, ch->partid);

	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}

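/*
 * The connect handshake, as driven by the code above: each side sends an
 * OPENREQUEST (carrying msg_size and local_nentries) and, once it has seen
 * the peer's OPENREQUEST and allocated its message queues, answers with an
 * OPENREPLY. Only when OPENREQUEST, ROPENREQUEST, OPENREPLY and ROPENREPLY
 * have all been accounted for does the channel transition to
 * XPC_C_CONNECTED and get a kthread created for it.
 */
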
/*
 * Free up message queues and other stuff that were allocated for the specified
 * channel.
 *
 * Note: ch->reason and ch->reason_line are left set for debugging purposes,
 * they're cleared when XPC_C_DISCONNECTED is cleared.
 */
static void
xpc_free_msgqueues(struct xpc_channel *ch)
{
	DBUG_ON(!spin_is_locked(&ch->lock));
	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	ch->remote_msgqueue_pa = 0;
	ch->func = NULL;
	ch->key = NULL;
	ch->msg_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	ch->local_GP->get = 0;
	ch->local_GP->put = 0;
	ch->remote_GP.get = 0;
	ch->remote_GP.put = 0;
	ch->w_local_GP.get = 0;
	ch->w_local_GP.put = 0;
	ch->w_remote_GP.get = 0;
	ch->w_remote_GP.put = 0;
	ch->next_msg_to_pull = 0;

	if (ch->flags & XPC_C_SETUP) {
		ch->flags &= ~XPC_C_SETUP;

		dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
			ch->flags, ch->partid, ch->number);

		kfree(ch->local_msgqueue_base);
		ch->local_msgqueue = NULL;
		kfree(ch->remote_msgqueue_base);
		ch->remote_msgqueue = NULL;
		kfree(ch->notify_queue);
		ch->notify_queue = NULL;

		/* in case someone is waiting for the teardown to complete */
		up(&ch->teardown_sema);
	}
}

/*
 * spin_lock_irqsave() is expected to be held on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 ch_flags = ch->flags;


	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		return;
	}

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);

	/* it's now safe to free the channel's message queues */

	xpc_free_msgqueues(ch);
	DBUG_ON(ch->flags & XPC_C_SETUP);

	if (part->act_state != XPC_P_DEACTIVATING) {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			return;
		}

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_IPI_send_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
			return;
		}
	}

	/* both sides are disconnected now */

	ch->flags = XPC_C_DISCONNECTED;	/* clear all flags, but this one */

	atomic_dec(&part->nchannels_active);

	if (ch_flags & XPC_C_WASCONNECTED) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			"reason=%d\n", ch->number, ch->partid, ch->reason);
	}
}

/*
 * Process a change in the channel's remote connection state.
 */
static void
xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
				u8 IPI_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
				&part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xpc_retval reason;


	spin_lock_irqsave(&ch->lock, irq_flags);

	if (IPI_flags & XPC_IPI_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the IPI_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY));
			IPI_flags &= ~XPC_IPI_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			// >>> explain this section

			if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
				DBUG_ON(part->act_state !=
							XPC_P_DEACTIVATING);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			if (reason <= xpcSuccess || reason > xpcUnknownReason) {
				reason = xpcUnknownReason;
			} else if (reason == xpcUnregistering) {
				reason = xpcOtherUnregistering;
			}

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
		} else {
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (IPI_flags & XPC_IPI_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
			" channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
		DBUG_ON(!(ch->flags & XPC_C_RCLOSEREQUEST));

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (IPI_flags & XPC_IPI_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->msg_size, args->local_nentries,
			ch->partid, ch->number);

		if ((ch->flags & XPC_C_DISCONNECTING) ||
					part->act_state == XPC_P_DEACTIVATING) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
							XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
					XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      msg_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */

		DBUG_ON(args->msg_size == 0);
		DBUG_ON(args->local_nentries == 0);

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->msg_size != ch->msg_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
								&irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}
		} else {
			ch->msg_size = args->msg_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (IPI_flags & XPC_IPI_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
			"local_nentries=%d, remote_nentries=%d) received from "
			"partid=%d, channel=%d\n", args->local_msgqueue_pa,
			args->local_nentries, args->remote_nentries,
			ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & XPC_C_OPENREQUEST));
		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *			    partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */

		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ch->flags |= XPC_C_ROPENREPLY;
		ch->remote_msgqueue_pa = args->local_msgqueue_pa;

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xpc_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];


	if (down_interruptible(&registration->sema) != 0) {
		return xpcInterrupted;
	}

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		up(&registration->sema);
		return xpcUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		up(&registration->sema);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->msg_size != ch->msg_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			up(&registration->sema);
			XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
								&irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpcUnequalMsgSizes;
		}
	} else {
		ch->msg_size = registration->msg_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	up(&registration->sema);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_IPI_send_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpcSuccess;
}

/*
 * Notify those who wanted to be notified upon delivery of their message.
 */
static void
xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
{
	struct xpc_notify *notify;
	u8 notify_type;
	s64 get = ch->w_remote_GP.get - 1;


	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {

		notify = &ch->notify_queue[get % ch->local_nentries];

		/*
		 * See if the notify entry indicates it was associated with
		 * a message whose sender wants to be notified. It is possible
		 * that it is, but someone else is doing or has done the
		 * notification.
		 */
		notify_type = notify->type;
		if (notify_type == 0 ||
				cmpxchg(&notify->type, notify_type, 0) !=
								notify_type) {
			continue;
		}

		DBUG_ON(notify_type != XPC_N_CALL);

		atomic_dec(&ch->n_to_notify);

		if (notify->func != NULL) {
			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				(void *) notify, get, ch->partid, ch->number);

			notify->func(reason, ch->partid, ch->number,
								notify->key);

			dev_dbg(xpc_chan, "notify->func() returned, "
				"notify=0x%p, msg_number=%ld, partid=%d, "
				"channel=%d\n", (void *) notify, get,
				ch->partid, ch->number);
		}
	}
}

/*
 * Clear some of the msg flags in the local message queue.
 */
static inline void
xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
{
	struct xpc_msg *msg;
	s64 get;


	get = ch->w_remote_GP.get;
	do {
		msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
				(get % ch->local_nentries) * ch->msg_size);
		msg->flags = 0;
	} while (++get < (volatile s64) ch->remote_GP.get);
}

/*
 * Clear some of the msg flags in the remote message queue.
 */
static inline void
xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
{
	struct xpc_msg *msg;
	s64 put;


	put = ch->w_remote_GP.put;
	do {
		msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
				(put % ch->remote_nentries) * ch->msg_size);
		msg->flags = 0;
	} while (++put < (volatile s64) ch->remote_GP.put);
}

static void
xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int nmsgs_sent;


	ch->remote_GP = part->remote_GPs[ch_number];


	/* See what, if anything, has changed for each connected channel */

	xpc_msgqueue_ref(ch);

	if (ch->w_remote_GP.get == ch->remote_GP.get &&
				ch->w_remote_GP.put == ch->remote_GP.put) {
		/* nothing changed since GPs were last pulled */
		xpc_msgqueue_deref(ch);
		return;
	}

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/*
	 * First check to see if messages recently sent by us have been
	 * received by the other side. (The remote GET value will have
	 * changed since we last looked at it.)
	 */

	if (ch->w_remote_GP.get != ch->remote_GP.get) {

		/*
		 * We need to notify any senders that want to be notified
		 * that their sent messages have been received by their
		 * intended recipients. We need to do this before updating
		 * w_remote_GP.get so that we don't allocate the same message
		 * queue entries prematurely (see xpc_allocate_msg()).
		 */
		if (atomic_read(&ch->n_to_notify) > 0) {
			/*
			 * Notify senders that messages sent have been
			 * received and delivered by the other side.
			 */
			xpc_notify_senders(ch, xpcMsgDelivered,
							ch->remote_GP.get);
		}

		/*
		 * Clear msg->flags in previously sent messages, so that
		 * they're ready for xpc_allocate_msg().
		 */
		xpc_clear_local_msgqueue_flags(ch);

		ch->w_remote_GP.get = ch->remote_GP.get;

		dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
			"channel=%d\n", ch->w_remote_GP.get, ch->partid,
			ch->number);

		/*
		 * If anyone was waiting for message queue entries to become
		 * available, wake them up.
		 */
		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
			wake_up(&ch->msg_allocate_wq);
		}
	}

	/*
	 * Now check for newly sent messages by the other side. (The remote
	 * PUT value will have changed since we last looked at it.)
	 */

	if (ch->w_remote_GP.put != ch->remote_GP.put) {
		/*
		 * Clear msg->flags in previously received messages, so that
		 * they're ready for xpc_get_deliverable_msg().
		 */
		xpc_clear_remote_msgqueue_flags(ch);

		ch->w_remote_GP.put = ch->remote_GP.put;

		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
			"channel=%d\n", ch->w_remote_GP.put, ch->partid,
			ch->number);

		nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get;
		if (nmsgs_sent > 0) {
			dev_dbg(xpc_chan, "msgs waiting to be copied and "
				"delivered=%d, partid=%d, channel=%d\n",
				nmsgs_sent, ch->partid, ch->number);

			if (ch->flags & XPC_C_CONNECTCALLOUT) {
				xpc_activate_kthreads(ch, nmsgs_sent);
			}
		}
	}

	xpc_msgqueue_deref(ch);
}

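/*
 * Note the three layers of GET/PUT values in play above: local_GP (visible
 * to and pulled by the remote side), remote_GPs[] (a cached copy of the
 * peer's local_GP pulled by xpc_get_IPI_flags()), and the w_local_GP/
 * w_remote_GP working copies that individual kthreads race to advance with
 * cmpxchg() before the authoritative values catch up.
 */
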
void
xpc_process_channel_activity(struct xpc_partition *part)
{
	unsigned long irq_flags;
	u64 IPI_amo, IPI_flags;
	struct xpc_channel *ch;
	int ch_number;


	IPI_amo = xpc_get_IPI_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related IPI flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);

		if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
			xpc_process_openclose_IPI(part, ch_number, IPI_flags);
		}


		if (ch->flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_DEACTIVATING) {
			continue;
		}

		if (!(ch->flags & XPC_C_CONNECTED)) {
			if (!(ch->flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch->flags & XPC_C_SETUP);
				(void) xpc_connect_channel(ch);
			} else {
				spin_lock_irqsave(&ch->lock, irq_flags);
				xpc_process_connect(ch, &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			continue;
		}

		/*
		 * Process any message related IPI flags, this may involve the
		 * activation of kthreads to deliver any pending messages sent
		 * from the other partition.
		 */

		if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
			xpc_process_msg_IPI(part, ch_number);
		}
	}
}

/*
 * XPC's heartbeat code calls this function to inform XPC that a partition has
 * gone down. XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time. Nor will it ever make multiple calls to either function
 * at a time.
 */
void
xpc_partition_down(struct xpc_partition *part, enum xpc_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;


	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}


	/* disconnect all channels associated with the downed partition */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];


		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}

/*
 * Teardown the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
void
xpc_teardown_infrastructure(struct xpc_partition *part)
{
	partid_t partid = XPC_PARTID(part);


	/*
	 * We start off by making this partition inaccessible to local
	 * processes by marking it as no longer setup. Then we make it
	 * inaccessible to remote processes by clearing the XPC per partition
	 * specific variable's magic # (which indicates that these variables
	 * are no longer valid) and by ignoring all XPC notify IPIs sent to
	 * this partition.
	 */

	DBUG_ON(atomic_read(&part->nchannels_active) != 0);
	DBUG_ON(part->setup_state != XPC_P_SETUP);
	part->setup_state = XPC_P_WTEARDOWN;

	xpc_vars_part[partid].magic = 0;

	free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);


	/*
	 * Before proceeding with the teardown we have to wait until all
	 * existing references cease.
	 */
	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));


	/* now we can begin tearing down the infrastructure */

	part->setup_state = XPC_P_TORNDOWN;

	/* in case we've still got outstanding timers registered... */
	del_timer_sync(&part->dropped_IPI_timer);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->local_openclose_args_base);
	part->local_openclose_args = NULL;
	kfree(part->remote_GPs_base);
	part->remote_GPs = NULL;
	kfree(part->local_GPs_base);
	part->local_GPs = NULL;
	kfree(part->channels);
	part->channels = NULL;
	part->local_IPI_amo_va = NULL;
}

/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
	partid_t partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;


	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);

	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];

			if (!(ch->flags & XPC_C_DISCONNECTING)) {
				DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
				DBUG_ON(ch->flags & XPC_C_CONNECTED);
				DBUG_ON(ch->flags & XPC_C_SETUP);

				/*
				 * Initiate the establishment of a connection
				 * on the newly registered channel to the
				 * remote partition.
				 */
				xpc_wakeup_channel_mgr(part);
			}

			xpc_part_deref(part);
		}
	}
}

void
xpc_connected_callout(struct xpc_channel *ch)
{
	unsigned long irq_flags;


	/* let the registerer know that a connection has been established */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		ch->func(xpcConnected, ch->partid, ch->number,
				(void *) (u64) ch->local_nentries, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);
	}

	spin_lock_irqsave(&ch->lock, irq_flags);
	ch->flags |= XPC_C_CONNECTCALLOUT;
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to teardown all current connections for the specified channel.
 *
 * Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down. So the caller can be
 * assured that they will not be receiving any more callouts from XPC to the
 * function they registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	partid_t partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;


	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
								&irq_flags);

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	xpc_disconnect_wait(ch_number);
}

/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * >>> An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * >>> xpc_free_msgqueues().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
			enum xpc_retval reason, unsigned long *irq_flags)
{
	u32 flags;


	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
		return;
	}
	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	flags = ch->flags;
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
			XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
			XPC_C_CONNECTING | XPC_C_CONNECTED);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	xpc_IPI_send_closerequest(ch, irq_flags);

	if (flags & XPC_C_CONNECTED) {
		ch->flags |= XPC_C_WASCONNECTED;
	}

	if (atomic_read(&ch->kthreads_idle) > 0) {
		/* wake all idle kthreads so they can exit */
		wake_up_all(&ch->idle_wq);
	}

	spin_unlock_irqrestore(&ch->lock, *irq_flags);


	/* wake those waiting to allocate an entry from the local msg queue */

	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
		wake_up(&ch->msg_allocate_wq);
	}

	/* wake those waiting for notify completion */

	if (atomic_read(&ch->n_to_notify) > 0) {
		xpc_notify_senders(ch, reason, ch->w_local_GP.put);
	}

	spin_lock_irqsave(&ch->lock, *irq_flags);
}

void
xpc_disconnected_callout(struct xpc_channel *ch)
{
	/*
	 * Let the channel's registerer know that the channel is now
	 * disconnected. We don't want to do this if the registerer was never
	 * informed of a connection being made, unless the disconnect was for
	 * abnormal reasons.
	 */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
			"channel=%d\n", ch->reason, ch->partid, ch->number);

		ch->func(ch->reason, ch->partid, ch->number, NULL, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
			"channel=%d\n", ch->reason, ch->partid, ch->number);
	}
}

/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
static enum xpc_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xpc_retval ret;


	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
	} else if (ret == 0) {
		ret = xpcTimeout;
	} else {
		ret = xpcInterrupted;
	}

	return ret;
}

/*
 * Allocate an entry for a message from the message queue associated with the
 * specified channel.
 */
static enum xpc_retval
xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
			struct xpc_msg **address_of_msg)
{
	struct xpc_msg *msg;
	enum xpc_retval ret;
	s64 put;


	/* this reference will be dropped in xpc_send_msg() */
	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		xpc_msgqueue_deref(ch);
		return ch->reason;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return xpcNotConnected;
	}

	/*
	 * Get the next available message entry from the local message queue.
	 * If none are available, we'll make sure that we grab the latest
	 * GP values.
	 */
	ret = xpcTimeout;

	while (1) {

		put = (volatile s64) ch->w_local_GP.put;
		if (put - (volatile s64) ch->w_remote_GP.get <
							ch->local_nentries) {

			/* There are available message entries. We need to try
			 * to secure one for ourselves. We'll do this by trying
			 * to increment w_local_GP.put as long as someone else
			 * doesn't beat us to it. If they do, we'll have to
			 * try again.
			 */
			if (cmpxchg(&ch->w_local_GP.put, put, put + 1) ==
									put) {
				/* we got the entry referenced by put */
				break;
			}
			continue;	/* try again */
		}


		/*
		 * There aren't any available msg entries at this time.
		 *
		 * In waiting for a message entry to become available,
		 * we set a timeout in case the other side is not
		 * sending completion IPIs. This lets us fake an IPI
		 * that will cause the IPI handler to fetch the latest
		 * GP values as if an IPI was sent by the other side.
		 */
		if (ret == xpcTimeout) {
			xpc_IPI_send_local_msgrequest(ch);
		}

		if (flags & XPC_NOWAIT) {
			xpc_msgqueue_deref(ch);
			return xpcNoWait;
		}

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpcInterrupted && ret != xpcTimeout) {
			xpc_msgqueue_deref(ch);
			return ret;
		}
	}

	/* get the message's address and initialize it */
	msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
				(put % ch->local_nentries) * ch->msg_size);


	DBUG_ON(msg->flags != 0);
	msg->number = put;

	dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
		"msg_number=%ld, partid=%d, channel=%d\n", put + 1,
		(void *) msg, msg->number, ch->partid, ch->number);

	*address_of_msg = msg;

	return xpcSuccess;
}

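/*
 * The reservation scheme above in a nutshell (illustrative numbers): with
 * local_nentries == 4, w_remote_GP.get == 10 and w_local_GP.put == 13,
 * exactly one entry is free. Two kthreads may both observe put == 13, but
 * only the one whose cmpxchg(&ch->w_local_GP.put, 13, 14) returns 13 owns
 * message queue slot 13 % 4 == 1; the loser simply re-reads and retries.
 */
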
/*
 * Allocate an entry for a message from the message queue associated with the
 * specified channel. NOTE that this routine can sleep waiting for a message
 * entry to become available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel #.
 *	flags - see xpc.h for valid flags.
 *	payload - address of the allocated payload area pointer (filled in on
 *	          return) in which the user-defined message is constructed.
 */
enum xpc_retval
xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xpc_retval ret = xpcUnknownReason;
	struct xpc_msg *msg;


	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	*payload = NULL;

	if (xpc_part_ref(part)) {
		ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
		xpc_part_deref(part);

		if (msg != NULL) {
			*payload = &msg->payload;
		}
	}

	return ret;
}

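/*
 * A sender-side usage sketch (illustrative only; XPC_WAIT, the partition
 * ID, channel number and payload layout here are hypothetical, not part of
 * this file):
 *
 *	struct my_msg *m;	// hypothetical user-defined payload
 *	enum xpc_retval ret;
 *
 *	ret = xpc_initiate_allocate(partid, ch_number, XPC_WAIT,
 *							(void **) &m);
 *	if (ret == xpcSuccess) {
 *		m->cmd = MY_CMD;	// fill in the payload
 *		ret = xpc_initiate_send(partid, ch_number, m);
 *	}
 */
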
/*
 * Now we actually send the messages that are ready to be sent by advancing
 * the local message queue's Put value and then send an IPI to the recipient
 * partition.
 */
static void
xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
{
	struct xpc_msg *msg;
	s64 put = initial_put + 1;
	int send_IPI = 0;


	while (1) {

		while (1) {
			if (put == (volatile s64) ch->w_local_GP.put) {
				break;
			}

			msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
			       (put % ch->local_nentries) * ch->msg_size);

			if (!(msg->flags & XPC_M_READY)) {
				break;
			}

			put++;
		}

		if (put == initial_put) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
								initial_put) {
			/* someone else beat us to it */
			DBUG_ON((volatile s64) ch->local_GP->put < initial_put);
			break;
		}

		/* we just set the new value of local_GP->put */

		dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
			"channel=%d\n", put, ch->partid, ch->number);

		send_IPI = 1;

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->put is not XPC_M_READY or that local_GP->put
		 * equals w_local_GP.put, so we'll go have a look.
		 */
		initial_put = put;
	}

	if (send_IPI) {
		xpc_IPI_send_msgrequest(ch);
	}
}

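/*
 * Design note (our reading of the code above): cmpxchg_rel() is used here,
 * rather than plain cmpxchg() which on ia64 has acquire semantics, so that
 * the store advancing local_GP->put is a release. That should guarantee
 * the XPC_M_READY flag stores to the messages themselves are visible to
 * the remote partition before the new Put value is.
 */
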
/*
 * Common code that does the actual sending of the message by advancing the
 * local message queue's Put value and sends an IPI to the partition the
 * message is being sent to.
 */
static enum xpc_retval
xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
		xpc_notify_func func, void *key)
{
	enum xpc_retval ret = xpcSuccess;
	struct xpc_notify *notify = NULL;	// >>> to keep the compiler happy!!
	s64 put, msg_number = msg->number;


	DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
	DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) !=
					msg_number % ch->local_nentries);
	DBUG_ON(msg->flags & XPC_M_READY);

	if (ch->flags & XPC_C_DISCONNECTING) {
		/* drop the reference grabbed in xpc_allocate_msg() */
		xpc_msgqueue_deref(ch);
		return ch->reason;
	}

	if (notify_type != 0) {
		/*
		 * Tell the remote side to send an ACK interrupt when the
		 * message has been delivered.
		 */
		msg->flags |= XPC_M_INTERRUPT;

		atomic_inc(&ch->n_to_notify);

		notify = &ch->notify_queue[msg_number % ch->local_nentries];
		notify->func = func;
		notify->key = key;
		notify->type = notify_type;

		// >>> is a mb() needed here?

		if (ch->flags & XPC_C_DISCONNECTING) {
			/*
			 * An error occurred between our last error check and
			 * this one. We will try to clear the type field from
			 * the notify entry. If we succeed then
			 * xpc_disconnect_channel() didn't already process
			 * the notify entry.
			 */
			if (cmpxchg(&notify->type, notify_type, 0) ==
								notify_type) {
				atomic_dec(&ch->n_to_notify);
				ret = ch->reason;
			}

			/* drop the reference grabbed in xpc_allocate_msg() */
			xpc_msgqueue_deref(ch);
			return ret;
		}
	}

	msg->flags |= XPC_M_READY;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of ch->local_GP->put.
	 */
	mb();

	/* see if the message is next in line to be sent, if so send it */

	put = ch->local_GP->put;
	if (put == msg_number) {
		xpc_send_msgs(ch, put);
	}

	/* drop the reference grabbed in xpc_allocate_msg() */
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Send a message previously allocated using xpc_initiate_allocate() on the
 * specified channel connected to the specified partition.
 *
 * This routine will not wait for the message to be received, nor will
 * notification be given when it does happen. Once this routine has returned
 * the message entry allocated via xpc_initiate_allocate() is no longer
 * accessible to the caller.
 *
 * This routine, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	payload - pointer to the payload area allocated via
 *			xpc_initiate_allocate().
 */
enum xpc_retval
xpc_initiate_send(partid_t partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
	enum xpc_retval ret;


	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
		partid, ch_number);

	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(msg == NULL);

	ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL);

	return ret;
}

/*
 * Send a message previously allocated using xpc_initiate_allocate on the
 * specified channel connected to the specified partition.
 *
 * This routine will not wait for the message to be sent. Once this routine
 * has returned the message entry allocated via xpc_initiate_allocate() is no
 * longer accessible to the caller.
 *
 * Once the remote end of the channel has received the message, the function
 * passed as an argument to xpc_initiate_send_notify() will be called. This
 * allows the sender to free up or re-use any buffers referenced by the
 * message, but does NOT mean the message has been processed at the remote
 * end by a receiver.
 *
 * If this routine returns an error, the caller's function will NOT be called.
 *
 * This routine, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	payload - pointer to the payload area allocated via
 *			xpc_initiate_allocate().
 *	func - function to call with asynchronous notification of message
 *		  receipt. THIS FUNCTION MUST BE NON-BLOCKING.
 *	key - user-defined key to be passed to the function when it's called.
 */
enum xpc_retval
xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
				xpc_notify_func func, void *key)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
	enum xpc_retval ret;


	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
		partid, ch_number);

	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(msg == NULL);
	DBUG_ON(func == NULL);

	ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
								func, key);
	return ret;
}

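/*
 * A notify-send usage sketch (illustrative; the callback and key names are
 * hypothetical). The callback runs when the message has been *delivered*
 * to the remote partition, not when it has been processed there:
 *
 *	static void my_notify(enum xpc_retval reason, partid_t partid,
 *				int ch_number, void *key)
 *	{
 *		// must not block; e.g. free the buffer tracked by key
 *	}
 *
 *	ret = xpc_initiate_send_notify(partid, ch_number, m,
 *						my_notify, my_key);
 */
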
static struct xpc_msg *
xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_msg *remote_msg, *msg;
	u32 msg_index, nmsgs;
	u64 msg_offset;
	enum xpc_retval ret;


	if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
		/* we were interrupted by a signal */
		return NULL;
	}

	while (get >= ch->next_msg_to_pull) {

		/* pull as many messages as are ready and able to be pulled */

		msg_index = ch->next_msg_to_pull % ch->remote_nentries;

		DBUG_ON(ch->next_msg_to_pull >=
					(volatile s64) ch->w_remote_GP.put);
		nmsgs = (volatile s64) ch->w_remote_GP.put -
						ch->next_msg_to_pull;
		if (msg_index + nmsgs > ch->remote_nentries) {
			/* ignore the ones that wrap the msg queue for now */
			nmsgs = ch->remote_nentries - msg_index;
		}

		msg_offset = msg_index * ch->msg_size;
		msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
								msg_offset);
		remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa +
								msg_offset);

		if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
				nmsgs * ch->msg_size)) != xpcSuccess) {

			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
				" msg %ld from partition %d, channel=%d, "
				"ret=%d\n", nmsgs, ch->next_msg_to_pull,
				ch->partid, ch->number, ret);

			XPC_DEACTIVATE_PARTITION(part, ret);

			up(&ch->msg_to_pull_sema);
			return NULL;
		}

		mb();	/* >>> this may not be needed, we're not sure */

		ch->next_msg_to_pull += nmsgs;
	}

	up(&ch->msg_to_pull_sema);

	/* return the message we were looking for */
	msg_offset = (get % ch->remote_nentries) * ch->msg_size;
	msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset);

	return msg;
}

/*
 * Get a message to be delivered.
 */
static struct xpc_msg *
xpc_get_deliverable_msg(struct xpc_channel *ch)
{
	struct xpc_msg *msg = NULL;
	s64 get;


	do {
		if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) {
			break;
		}

		get = (volatile s64) ch->w_local_GP.get;
		if (get == (volatile s64) ch->w_remote_GP.put) {
			break;
		}

		/* There are messages waiting to be pulled and delivered.
		 * We need to try to secure one for ourselves. We'll do this
		 * by trying to increment w_local_GP.get and hope that no one
		 * else beats us to it. If they do, we'll simply have
		 * to try again for the next one.
		 */

		if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
			/* we got the entry referenced by get */

			dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
				"partid=%d, channel=%d\n", get + 1,
				ch->partid, ch->number);

			/* pull the message from the remote partition */

			msg = xpc_pull_remote_msg(ch, get);

			DBUG_ON(msg != NULL && msg->number != get);
			DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
			DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));

			break;
		}

	} while (1);

	return msg;
}

/*
 * Deliver a message to its intended recipient.
 */
void
xpc_deliver_msg(struct xpc_channel *ch)
{
	struct xpc_msg *msg;


	if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {

		/*
		 * This ref is taken to protect the payload itself from being
		 * freed before the user is finished with it, which the user
		 * indicates by calling xpc_initiate_received().
		 */
		xpc_msgqueue_ref(ch);

		atomic_inc(&ch->kthreads_active);

		if (ch->func != NULL) {
			dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				(void *) msg, msg->number, ch->partid,
				ch->number);

			/* deliver the message to its intended recipient */
			ch->func(xpcMsgReceived, ch->partid, ch->number,
					&msg->payload, ch->key);

			dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				(void *) msg, msg->number, ch->partid,
				ch->number);
		}

		atomic_dec(&ch->kthreads_active);
	}
}

/*
 * Now we actually acknowledge the messages that have been delivered and ack'd
 * by advancing the cached remote message queue's Get value and if requested
 * send an IPI to the message sender's partition.
 */
static void
xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
{
	struct xpc_msg *msg;
	s64 get = initial_get + 1;
	int send_IPI = 0;


	while (1) {

		while (1) {
			if (get == (volatile s64) ch->w_local_GP.get) {
				break;
			}

			msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
			       (get % ch->remote_nentries) * ch->msg_size);

			if (!(msg->flags & XPC_M_DONE)) {
				break;
			}

			msg_flags |= msg->flags;
			get++;
		}

		if (get == initial_get) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
								initial_get) {
			/* someone else beat us to it */
			DBUG_ON((volatile s64) ch->local_GP->get <=
								initial_get);
			break;
		}

		/* we just set the new value of local_GP->get */

		dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
			"channel=%d\n", get, ch->partid, ch->number);

		send_IPI = (msg_flags & XPC_M_INTERRUPT);

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->get is not XPC_M_DONE or that local_GP->get
		 * equals w_local_GP.get, so we'll go have a look.
		 */
		initial_get = get;
	}

	if (send_IPI) {
		xpc_IPI_send_msgrequest(ch);
	}
}

/*
 * Acknowledge receipt of a delivered message.
 *
 * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition
 * that sent the message.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *			xpc_initiate_allocate().
 */
void
xpc_initiate_received(partid_t partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
	s64 get, msg_number = msg->number;


	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];

	dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
		(void *) msg, msg_number, ch->partid, ch->number);

	DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) !=
					msg_number % ch->remote_nentries);
	DBUG_ON(msg->flags & XPC_M_DONE);

	msg->flags |= XPC_M_DONE;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of ch->local_GP->get.
	 */
	mb();

	/*
	 * See if this message is next in line to be acknowledged as having
	 * been delivered.
	 */
	get = ch->local_GP->get;
	if (get == msg_number) {
		xpc_acknowledge_msgs(ch, get, msg->flags);
	}

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
	xpc_msgqueue_deref(ch);
}

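/*
 * Receive-path sketch (illustrative): when a message arrives, XPC's kthread
 * calls the function registered via xpc_connect() with reason
 * xpcMsgReceived and a pointer to the payload (see xpc_deliver_msg()
 * above). When the recipient is finished with the payload it must hand it
 * back so the message queue slot can be reused:
 *
 *	xpc_initiate_received(partid, ch_number, payload);
 */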