@@ ... @@ static void iucv_setmask_mp(void)
 {
 	int cpu;
 
-	preempt_disable();
+	get_online_cpus();
 	for_each_online_cpu(cpu)
 		/* Enable all cpus with a declared buffer. */
 		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
 		    !cpu_isset(cpu, iucv_irq_cpumask))
 			smp_call_function_single(cpu, iucv_allow_cpu,
 						 NULL, 1);
-	preempt_enable();
+	put_online_cpus();
 }
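
Note: preempt_disable() only pins the current cpu; it does not stop another
cpu from going offline between the cpu_isset() tests and the cross-call.
get_online_cpus() takes the cpu hotplug lock, so the online map cannot
change while it is being walked. A minimal sketch of the pattern, with
hypothetical touch_cpu()/walk_online_cpus() names:

#include <linux/cpu.h>
#include <linux/smp.h>

/* Runs on the target cpu, in interrupt context. */
static void touch_cpu(void *info)
{
}

static void walk_online_cpus(void)
{
	int cpu;

	get_online_cpus();	/* block cpu hotplug; may sleep */
	for_each_online_cpu(cpu)
		/* wait == 1: return only after touch_cpu() ran on cpu */
		smp_call_function_single(cpu, touch_cpu, NULL, 1);
	put_online_cpus();
}
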
@@ ... @@ static void iucv_setmask_up(void)
 	/* Disable all cpu but the first in cpu_irq_cpumask. */
 	cpumask = iucv_irq_cpumask;
 	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
-	for_each_cpu_mask(cpu, cpumask)
+	for_each_cpu_mask_nr(cpu, cpumask)
 		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
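
for_each_cpu_mask_nr() iterates only up to nr_cpu_ids, the number of cpus
possible on this boot, instead of the compile-time NR_CPUS bound, so
large-NR_CPUS builds do not scan bits that can never be set. A sketch under
that assumption, with a hypothetical count_set_cpus() helper:

#include <linux/cpumask.h>

/* Count the cpus set in mask; for_each_cpu_mask_nr() stops at
 * nr_cpu_ids rather than the compile-time NR_CPUS bound. */
static int count_set_cpus(cpumask_t mask)
{
	int cpu, n = 0;

	for_each_cpu_mask_nr(cpu, mask)
		n++;
	return n;
}
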
@@ ... @@ static int iucv_enable(void)
 		goto out;
 	/* Declare per cpu buffers. */
 	rc = -EIO;
-	preempt_disable();
+	get_online_cpus();
 	for_each_online_cpu(cpu)
 		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
-	preempt_enable();
 	if (cpus_empty(iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
 		goto out_path;
+	put_online_cpus();
 	return 0;
 
 out_path:
+	put_online_cpus();
 	kfree(iucv_path_table);
 out:
 	return rc;
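
Here put_online_cpus() deliberately moves past the cpus_empty() check: the
hotplug lock must stay held until the result of the per-cpu calls has been
examined, and it must also be dropped on the new out_path error exit. A
condensed sketch of that error-path discipline, with hypothetical
enable_feature()/ready_cpumask names:

#include <linux/cpu.h>
#include <linux/cpumask.h>

static cpumask_t ready_cpumask;	/* hypothetical: cpus that set up a buffer */

static int enable_feature(void)
{
	int rc = -EIO;

	get_online_cpus();
	/* ... cross-call each online cpu; successes set ready_cpumask ... */
	if (cpus_empty(ready_cpumask))
		goto out_unlock;	/* no cpu succeeded */
	put_online_cpus();
	return 0;

out_unlock:
	put_online_cpus();
	return rc;
}
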
@@ ... @@
 static void iucv_disable(void)
 {
+	get_online_cpus();
 	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
+	put_online_cpus();
 	kfree(iucv_path_table);
 }
@@ ... @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 			return NOTIFY_BAD;
 		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
 				     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-		if (!iucv_param[cpu])
+		if (!iucv_param[cpu]) {
+			kfree(iucv_irq_data[cpu]);
+			iucv_irq_data[cpu] = NULL;
 			return NOTIFY_BAD;
+		}
 		break;
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
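
This hunk fixes a leak: if the second per-cpu allocation fails, the
iucv_irq_data buffer allocated just above would be orphaned, and the stale
pointer could be freed again by a later CPU_UP_CANCELED/CPU_DEAD pass.
Freeing it and clearing the pointer before returning NOTIFY_BAD avoids
both. The same unwind pattern in isolation, with hypothetical a[]/b[]
arrays and a prepare_cpu() helper:

#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/threads.h>

static void *a[NR_CPUS], *b[NR_CPUS];	/* hypothetical per-cpu buffers */

static int prepare_cpu(unsigned int cpu)
{
	a[cpu] = kmalloc(64, GFP_KERNEL);
	if (!a[cpu])
		return NOTIFY_BAD;
	b[cpu] = kmalloc(64, GFP_KERNEL);
	if (!b[cpu]) {
		kfree(a[cpu]);	/* undo the first allocation ... */
		a[cpu] = NULL;	/* ... and leave no stale pointer behind */
		return NOTIFY_BAD;
	}
	return NOTIFY_OK;
}
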
@@ ... @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata iucv_cpu_notifier = {
+static struct notifier_block __refdata iucv_cpu_notifier = {
 	.notifier_call = iucv_cpu_notify,
 };
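
iucv_cpu_notify() is __cpuinit (per the hunk context above), so with
CONFIG_HOTPLUG_CPU=n its text is discarded after boot, and a .data object
pointing at it draws a modpost section mismatch warning. __refdata declares
the cross-section reference intentional. A standalone illustration with a
hypothetical demo notifier:

#include <linux/init.h>
#include <linux/notifier.h>

static int __cpuinit demo_cpu_notify(struct notifier_block *self,
				     unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}

/*
 * __refdata: this object legitimately references __cpuinit text,
 * so modpost should not warn about the section mismatch.
 */
static struct notifier_block __refdata demo_cpu_notifier = {
	.notifier_call = demo_cpu_notify,
};
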