cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
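/*
 * Illustrative sketch, not part of this patch: with cpu_sibling_map now a
 * per-cpu variable, callers go through the per_cpu() accessor (from
 * <linux/percpu.h>) instead of indexing cpu_sibling_map[cpu].
 * sibling_count() is a hypothetical helper.
 */
static inline int sibling_count(int cpu)
{
	return cpus_weight(per_cpu(cpu_sibling_map, cpu));
}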
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
int smt_enabled_at_boot = 1;
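+/*
+ * Set by smp_send_stop(): if the IPI there goes unanswered, skip the
+ * debugger and keep going so panic/reboot can proceed.
+ */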
+static int ipi_fail_ok;
+
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
#ifdef CONFIG_PPC64
;
}
-void smp_send_stop(void)
-{
- smp_call_function(stop_this_cpu, NULL, 1, 0);
-}
-
/*
* Structure and data for smp_call_function(). This is designed to minimise
* static memory requirements. It also looks cleaner.
#define SMP_CALL_TIMEOUT 8
/*
- * This function sends a 'generic call function' IPI to all other CPUs
- * in the system.
+ * These functions send a 'generic call function' IPI to other online
+ * CPUs in the system.
*
- * [SUMMARY] Run a function on all other CPUs.
+ * [SUMMARY] Run a function on other CPUs.
* <func> The function to run. This must be fast and non-blocking.
* <info> An arbitrary pointer to pass to the function.
* <nonatomic> currently unused.
* <wait> If true, wait (atomically) until function has completed on other CPUs.
* [RETURNS] 0 on success, else a negative status code. Does not return until
* remote CPUs are nearly ready to execute <<func>> or have executed it.
+ * <map> is a cpumask of the CPUs to send the IPI to.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
+static int __smp_call_function_map(void (*func) (void *info), void *info,
+ int nonatomic, int wait, cpumask_t map)
+{
struct call_data_struct data;
- int ret = -1, cpus;
+ int ret = -1, num_cpus;
+ int cpu;
u64 timeout;
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
if (unlikely(smp_ops == NULL))
- return -1;
+ return ret;
data.func = func;
data.info = info;
if (wait)
atomic_set(&data.finished, 0);
- spin_lock(&call_lock);
- /* Must grab online cpu count with preempt disabled, otherwise
- * it can change. */
- cpus = num_online_cpus() - 1;
- if (!cpus) {
- ret = 0;
- goto out;
- }
+ /* remove 'self' from the map */
+ if (cpu_isset(smp_processor_id(), map))
+ cpu_clear(smp_processor_id(), map);
+
+ /* sanity check the map, remove any non-online processors. */
+ cpus_and(map, map, cpu_online_map);
+
+ num_cpus = cpus_weight(map);
+ if (!num_cpus)
+ goto done;
call_data = &data;
smp_wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);
+ /* Send a message to all CPUs in the map */
+ for_each_cpu_mask(cpu, map)
+ smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
- /* Wait for response */
- while (atomic_read(&data.started) != cpus) {
+ /* Wait for indication that they have received the message */
+ while (atomic_read(&data.started) != num_cpus) {
HMT_low();
if (get_tb() >= timeout) {
printk("smp_call_function on cpu %d: other cpus not "
- "responding (%d)\n", smp_processor_id(),
- atomic_read(&data.started));
- debugger(NULL);
+ "responding (%d)\n", smp_processor_id(),
+ atomic_read(&data.started));
+ if (!ipi_fail_ok)
+ debugger(NULL);
goto out;
}
}
+ /* optionally wait for the CPUs to complete */
if (wait) {
- while (atomic_read(&data.finished) != cpus) {
+ while (atomic_read(&data.finished) != num_cpus) {
HMT_low();
if (get_tb() >= timeout) {
printk("smp_call_function on cpu %d: other "
- "cpus not finishing (%d/%d)\n",
- smp_processor_id(),
- atomic_read(&data.finished),
- atomic_read(&data.started));
+ "cpus not finishing (%d/%d)\n",
+ smp_processor_id(),
+ atomic_read(&data.finished),
+ atomic_read(&data.started));
debugger(NULL);
goto out;
}
}
}
+ done:
ret = 0;
out:
call_data = NULL;
HMT_medium();
+ return ret;
+}
+
+static int __smp_call_function(void (*func)(void *info), void *info,
+ int nonatomic, int wait)
+{
+ int ret;
+ spin_lock(&call_lock);
+ ret = __smp_call_function_map(func, info, nonatomic, wait,
+ cpu_online_map);
spin_unlock(&call_lock);
return ret;
}
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+{
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ return __smp_call_function(func, info, nonatomic, wait);
+}
EXPORT_SYMBOL(smp_call_function);
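/*
 * Illustrative usage, not part of this patch: run a fast, non-blocking
 * callback on every other online CPU and wait for it to finish.
 * flush_local_state() and flush_everywhere() are hypothetical names.
 */
static void flush_local_state(void *unused)
{
	/* fast, non-blocking work; runs on each target CPU from the IPI */
}

static void flush_everywhere(void)
{
	/* nonatomic is currently unused; wait=1 blocks until all CPUs ran func */
	smp_call_function(flush_local_state, NULL, 0, 1);
}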
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait)
+{
+ cpumask_t map = CPU_MASK_NONE;
+ int ret = 0;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ if (!cpu_online(cpu))
+ return -EINVAL;
+
+ cpu_set(cpu, map);
+ if (cpu != get_cpu()) {
+ spin_lock(&call_lock);
+ ret = __smp_call_function_map(func, info, nonatomic, wait, map);
+ spin_unlock(&call_lock);
+ } else {
+ local_irq_disable();
+ func(info);
+ local_irq_enable();
+ }
+ put_cpu();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
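/*
 * Illustrative usage, not part of this patch: run a callback on one
 * specific CPU. remote_touch() and poke_cpu() are hypothetical names.
 */
static void remote_touch(void *info)
{
	/* fast, non-blocking work executed on the chosen CPU */
}

static int poke_cpu(int cpu)
{
	/* -EINVAL if 'cpu' is offline; runs func locally when cpu is the caller */
	return smp_call_function_single(cpu, remote_touch, NULL, 0, 1);
}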
+
+void smp_send_stop(void)
+{
+ int nolock;
+
+ /* It's OK to fail sending the IPI, since the alternative is to
+ * be stuck forever waiting on the other CPU to take the interrupt.
+ *
+ * It's better to at least continue and go through reboot, since this
+ * function is usually called at panic or reboot time in the first
+ * place.
+ */
+ ipi_fail_ok = 1;
+
+ /* Don't deadlock in case we got called through panic */
+ nolock = !spin_trylock(&call_lock);
+ __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
+ if (!nolock)
+ spin_unlock(&call_lock);
+}
+
void smp_call_function_interrupt(void)
{
void (*func) (void *info);
panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
paca[cpu].__current = p;
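+ /* Initial kernel stack for the new CPU: top of the thread_info
+  * allocation, less room for the first stack frame. */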
+ paca[cpu].kstack = (unsigned long) task_thread_info(p)
+ + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
current_set[cpu] = task_thread_info(p);
task_thread_info(p)->cpu = cpu;
if (system_state > SYSTEM_BOOTING)
snapshot_timebase();
+ secondary_cpu_time_init();
+
spin_lock(&call_lock);
cpu_set(cpu, cpu_online_map);
spin_unlock(&call_lock);