/*
 *  include/asm-s390/smp.h
 *
 *  Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *             Martin Schwidefsky (schwidefsky@de.ibm.com),
 *             Heiko Carstens (heiko.carstens@de.ibm.com)
 */

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>

#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)

#include <asm/lowcore.h>
#include <asm/sigp.h>	/* sigp_sense, sigp ccodes, signal_processor_ps() */
#include <asm/ptrace.h>

/*
 * s390 specific smp.c headers
 */

extern void machine_restart_smp(char *);
extern void machine_halt_smp(void);
extern void machine_power_off_smp(void);

#define NO_PROC_ID	0xFF		/* No processor magic marker */

/*
 * This magic constant controls our willingness to transfer
 * a process across CPUs. Such a transfer incurs misses on the L1
 * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
 * gut feeling is this will vary by board in value. For a board
 * with separate L2 cache it probably depends also on the RSS, and
 * for a board with shared L2 cache it ought to decay fast as other
 * processes are run.
 */
#define PROC_CHANGE_PENALTY	20	/* Schedule penalty */

#define raw_smp_processor_id()	(S390_lowcore.cpu_data.cpu_nr)

static inline __u16 hard_smp_processor_id(void)
{
	__u16 cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

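/*
 * Note (added for clarity): raw_smp_processor_id() returns the logical
 * CPU number kept in this CPU's lowcore, whereas hard_smp_processor_id()
 * asks the hardware for the physical CPU address via the STORE CPU
 * ADDRESS (stap) instruction.  Most callers only need the logical id,
 * e.g.
 *
 *	int cpu = smp_processor_id();
 */
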
/*
 * returns 1 if cpu is in stopped/check stopped state or not operational
 * returns 0 otherwise
 */
static inline int
smp_cpu_not_running(int cpu)
{
	__u32 status;

	switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
	case sigp_order_code_accepted:
	case sigp_status_stored:
		/* Check for stopped (0x40) and check stop (0x10) state */
		if (status & 0x50)
			return 1;
		break;
	case sigp_not_operational:
		return 1;
	default:
		break;
	}
	return 0;
}

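/*
 * Typical use (a sketch based on the CPU hotplug path in
 * arch/s390/kernel/smp.c): wait until an offlined CPU has really
 * stopped before tearing down its resources:
 *
 *	while (!smp_cpu_not_running(cpu))
 *		cpu_relax();
 */
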
#define cpu_logical_map(cpu)	(cpu)

extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
extern void cpu_die (void) __attribute__ ((noreturn));
extern int __cpu_up (unsigned int cpu);

extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
				  void *info, int wait);

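/*
 * Usage sketch (illustrative only; do_flush() is a made-up name):
 * run a handler on every other online CPU and wait for completion.
 *
 *	static void do_flush(void *info) { ... }
 *
 *	cpumask_t mask = cpu_online_map;
 *	cpu_clear(smp_processor_id(), mask);
 *	smp_call_function_mask(mask, do_flush, NULL, 1);
 */
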
#endif

#ifndef CONFIG_SMP
static inline void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
}

#define hard_smp_processor_id()		0
#define smp_cpu_not_running(cpu)	1
#endif

extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
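
/*
 * Illustrative use (an assumption, not part of this header): a dump
 * reader such as drivers/s390/char/zcore.c can walk this array and
 * skip CPUs for which no register save area was collected:
 *
 *	for (i = 0; i < NR_CPUS + 1; i++) {
 *		if (!zfcpdump_save_areas[i])
 *			continue;
 *		... use the saved registers of CPU i ...
 *	}
 */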