/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
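
/*
 * Illustrative sketch (not part of the original source): only the sigp
 * wrappers translate a logical cpu number into the physical address kept
 * in __cpu_logical_map; every other per-cpu structure is indexed by the
 * logical number directly.
 *
 *	int logical = 1;
 *	__u16 physical = __cpu_logical_map[logical];	// e.g. 0x12 (made up)
 *	signal_processor(logical, sigp_sense);		// translates internally
 *	lowcore_ptr[logical];				// plain logical index
 */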

#include <linux/module.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>

#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;

static struct task_struct *current_set[NR_CPUS];

EXPORT_SYMBOL(cpu_online_map);

/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	atomic_inc(&call_data->started);
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other
 * CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed
 * it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (cpus <= 0)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ext_bitcall_others(ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
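
/*
 * Minimal usage sketch (illustrative; "hits" and count_me() are hypothetical
 * names, not part of this file): run a fast, non-blocking callback on every
 * other cpu and wait until all of them have finished.
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	static void count_me(void *info)
 *	{
 *		atomic_inc(&hits);	// runs in interrupt context; keep it fast
 *	}
 *
 *	smp_call_function(count_me, NULL, 0, 1);
 *	// with wait == 1, atomic_read(&hits) == num_online_cpus() - 1 here
 */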

/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU;
 * preemption is disabled while it runs.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
			 int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;
	int curr_cpu;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* disable preemption for local function call */
	curr_cpu = get_cpu();

	if (curr_cpu == cpu) {
		/* direct call to function */
		func(info);
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			cpu_relax();

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
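
/*
 * Usage sketch (illustrative, reusing the hypothetical count_me() from
 * above): the same kind of callback, but bound to one cpu. Unlike
 * smp_call_function() this also works for the current cpu (direct call)
 * and returns -EINVAL if the target cpu is not online.
 *
 *	if (smp_call_function_on(count_me, NULL, 0, 1, 2) == 0)
 *		;	// count_me() has run on cpu 2
 */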

static inline void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static inline void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */

static void do_machine_restart(void * __unused)
{
	int cpu;
	static atomic_t cpuid = ATOMIC_INIT(-1);

	/* The first cpu to get here wins; every other cpu stops itself. */
	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
		signal_processor(smp_processor_id(), sigp_stop);

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}

	/* Store status of other cpus. */
	do_store_status();

	/*
	 * Finally call reipl. Because we waited for all other
	 * cpus to enter this function we know that they do
	 * not hold any s390irq-locks (the cpus have been
	 * interrupted by an external interrupt and s390irq
	 * locks are always held disabled).
	 */
	if (MACHINE_IS_VM)
		cpcmd("IPL", NULL, 0, NULL);
	else
		reipl(0x10000 | S390_lowcore.ipl_device);
}

void machine_restart_smp(char * __unused)
{
	on_each_cpu(do_machine_restart, NULL, 0, 0);
}

static void do_wait_for_stop(void)
{
	unsigned long cr[16];

	/* Mask all interrupt subclasses, then wait forever. */
	__ctl_store(cr, 0, 15);
	cr[0] &= ~0xffff;
	cr[6] = 0;
	__ctl_load(cr, 0, 15);
	for (;;)
		enabled_wait();
}

static void do_machine_halt(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
			cpcmd(vmhalt_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_halt_smp(void)
{
	on_each_cpu(do_machine_halt, NULL, 0, 0);
}

static void do_machine_power_off(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
			cpcmd(vmpoff_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_power_off_smp(void)
{
	on_each_cpu(do_machine_power_off, NULL, 0, 0);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * Set signaling bit in lowcore of target cpu and kick it
		 */
		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
		while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
			udelay(10);
	}
}

#ifndef CONFIG_ARCH_S390X
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_ARCH_S390X */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct
{
	__u16 start_ctl;
	__u16 end_ctl;
	unsigned long orvals[16];
	unsigned long andvals[16];
} ec_creg_mask_parms;

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info) {
	ec_creg_mask_parms *pp;
	unsigned long cregs[16];
	int i;

	pp = (ec_creg_mask_parms *) info;
	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
}
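
/*
 * Worked example (illustrative): for each register i in
 * [start_ctl, end_ctl] the callback computes
 * new = (old & andvals[i]) | orvals[i]. To force bit 7 of control
 * register 0 on while leaving every other bit alone:
 *
 *	ec_creg_mask_parms parms;
 *
 *	parms.start_ctl = 0;
 *	parms.end_ctl = 0;
 *	parms.orvals[0] = 1 << 7;	// bit to switch on
 *	parms.andvals[0] = -1L;		// keep everything else
 *	smp_ctl_bit_callback(&parms);	// affects the local cpu only
 */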

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit) {
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 1 << bit;
	parms.andvals[cr] = -1L;
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_set_bit(cr, bit);
	preempt_enable();
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit) {
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 0;
	parms.andvals[cr] = ~(1L << bit);
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_clear_bit(cr, bit);
	preempt_enable();
}
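
/*
 * Usage sketch (illustrative): the two helpers above wrap the callback in
 * smp_call_function() so that a control register bit changes on all cpus
 * at once; the register/bit pair here is arbitrary:
 *
 *	smp_ctl_set_bit(0, 10);		// set bit 10 of CR0 everywhere
 *	smp_ctl_clear_bit(0, 10);	// and clear it again
 */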

/*
 * Let's check how many CPUs we have.
 */

void
__init smp_check_cpus(unsigned int max_cpus)
{
	int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[num_cpus] = (__u16) cpu;
		if (signal_processor(num_cpus, sigp_sense) ==
		    sigp_not_operational)
			continue;
		cpu_set(num_cpus, cpu_present_map);
		num_cpus++;
	}

	for (cpu = 1; cpu < max_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
}

/*
 *	Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern void pfault_fini(void);

int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	/* init per CPU timer */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	init_cpu_vtimer();
#endif
#ifdef CONFIG_PFAULT
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();
#endif
	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int
smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void
smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}
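
/*
 * Usage sketch (illustrative, reusing the hypothetical count_me() from
 * above): reserve any online cpu, run work there, then drop the
 * reservation again. A reserved cpu cannot be taken offline by
 * __cpu_disable() below.
 *
 *	int cpu = smp_get_cpu(CPU_MASK_ALL);
 *
 *	if (cpu >= 0) {
 *		smp_call_function_on(count_me, NULL, 0, 1, cpu);
 *		smp_put_cpu(cpu);
 *	}
 */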

static inline int
cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int
__cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore    *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode          ccode;
	int                 curr_cpu;

	/* Find a physical cpu address that answers and is stopped. */
	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	/* Set up the new cpu's initial kernel stack, registers and lowcore. */
	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		idle->thread_info + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	__asm__ __volatile__("stam  0,15,0(%0)"
			     : : "a" (&cpu_lowcore->access_regs_save_area)
			     : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();
	signal_processor(cpu, sigp_restart);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

int
__cpu_disable(void)
{
	unsigned long flags;
	ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

#ifdef CONFIG_PFAULT
	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();
#endif

	/* disable all external interrupts */
	cr_parms.start_ctl = 0;
	cr_parms.end_ctl = 0;
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	smp_ctl_bit_callback(&cr_parms);

	/* disable all I/O interrupts */
	cr_parms.start_ctl = 6;
	cr_parms.end_ctl = 6;
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	/* disable most machine checks */
	cr_parms.start_ctl = 14;
	cr_parms.end_ctl = 14;
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}

void
__cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void
cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

/*
 *	Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	smp_check_cpus(max_cpus);
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));

	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_ARCH_S390X
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_ARCH_S390X
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_possible_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);