| Commit | Line | Data |
|---|---|---|
| c27cfeff GC | 1 | #ifndef _ASM_X86_SMP_H_ |
| | 2 | #define _ASM_X86_SMP_H_ |
| | 3 | #ifndef __ASSEMBLY__ |
| 53ebef49 | 4 | #include <linux/cpumask.h> |
| 93b016f8 | 5 | #include <linux/init.h> |
| 7e1efc0c | 6 | #include <asm/percpu.h> |
| 53ebef49 | 7 | |
| b23dab08 GC | 8 | /* |
| | 9 | * We need the APIC definitions automatically as part of 'smp.h' |
| | 10 | */ |
| | 11 | #ifdef CONFIG_X86_LOCAL_APIC |
| | 12 | # include <asm/mpspec.h> |
| | 13 | # include <asm/apic.h> |
| | 14 | # ifdef CONFIG_X86_IO_APIC |
| | 15 | # include <asm/io_apic.h> |
| | 16 | # endif |
| | 17 | #endif |
| | 18 | #include <asm/pda.h> |
| | 19 | #include <asm/thread_info.h> |
| | 20 | |
| 53ebef49 | 21 | extern cpumask_t cpu_callout_map; |
| 8be9ac85 GC | 22 | extern cpumask_t cpu_initialized; |
| | 23 | extern cpumask_t cpu_callin_map; |
| | 24 | |
| | 25 | extern void (*mtrr_hook)(void); |
| | 26 | extern void zap_low_mappings(void); |
| 53ebef49 GC | 27 | |
| | 28 | extern int smp_num_siblings; |
| | 29 | extern unsigned int num_processors; |
| cb3c8b90 | 30 | extern cpumask_t cpu_initialized; |
| c27cfeff | 31 | |
| b447a468 | 32 | #ifdef CONFIG_SMP |
| 7e1efc0c GOC | 33 | extern u16 x86_cpu_to_apicid_init[]; |
| | 34 | extern u16 x86_bios_cpu_apicid_init[]; |
| | 35 | extern void *x86_cpu_to_apicid_early_ptr; |
| | 36 | extern void *x86_bios_cpu_apicid_early_ptr; |
| b447a468 MT | 37 | #else |
| | 38 | #define x86_cpu_to_apicid_early_ptr NULL |
| | 39 | #define x86_bios_cpu_apicid_early_ptr NULL |
| | 40 | #endif |
| 7e1efc0c GOC | 41 | |
| | 42 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); |
| | 43 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); |
| | 44 | DECLARE_PER_CPU(u16, cpu_llc_id); |
| | 45 | DECLARE_PER_CPU(u16, x86_cpu_to_apicid); |
| | 46 | DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); |
| | 47 | |
| 9d97d0da GOC | 48 | /* Static state in head.S used to set up a CPU */ |
| | 49 | extern struct { |
| | 50 | void *sp; |
| | 51 | unsigned short ss; |
| | 52 | } stack_start; |
| | 53 | |
| 16694024 GC | 54 | struct smp_ops { |
| | 55 | void (*smp_prepare_boot_cpu)(void); |
| | 56 | void (*smp_prepare_cpus)(unsigned max_cpus); |
| | 57 | int (*cpu_up)(unsigned cpu); |
| | 58 | void (*smp_cpus_done)(unsigned max_cpus); |
| | 59 | |
| | 60 | void (*smp_send_stop)(void); |
| | 61 | void (*smp_send_reschedule)(int cpu); |
| | 62 | int (*smp_call_function_mask)(cpumask_t mask, |
| | 63 | void (*func)(void *info), void *info, |
| | 64 | int wait); |
| | 65 | }; |
| | 66 | |
| 14522076 GC | 67 | /* Globals due to paravirt */ |
| | 68 | extern void set_cpu_sibling_map(int cpu); |
| | 69 | |
| c76cb368 | 70 | #ifdef CONFIG_SMP |
| d0173aea GOC | 71 | #ifndef CONFIG_PARAVIRT |
| | 72 | #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) |
| | 73 | #endif |
| c76cb368 | 74 | extern struct smp_ops smp_ops; |
| 8678969e | 75 | |
| 377d6984 GC | 76 | static inline void smp_send_stop(void) |
| | 77 | { |
| | 78 | smp_ops.smp_send_stop(); |
| | 79 | } |
| | 80 | |
| 1e3fac83 GC | 81 | static inline void smp_prepare_boot_cpu(void) |
| | 82 | { |
| | 83 | smp_ops.smp_prepare_boot_cpu(); |
| | 84 | } |
| | 85 | |
| 7557da67 GC | 86 | static inline void smp_prepare_cpus(unsigned int max_cpus) |
| | 87 | { |
| | 88 | smp_ops.smp_prepare_cpus(max_cpus); |
| | 89 | } |
| | 90 | |
| c5597649 GC | 91 | static inline void smp_cpus_done(unsigned int max_cpus) |
| | 92 | { |
| | 93 | smp_ops.smp_cpus_done(max_cpus); |
| | 94 | } |
| | 95 | |
| 71d19549 GC | 96 | static inline int __cpu_up(unsigned int cpu) |
| | 97 | { |
| | 98 | return smp_ops.cpu_up(cpu); |
| | 99 | } |
| | 100 | |
| 8678969e GC | 101 | static inline void smp_send_reschedule(int cpu) |
| | 102 | { |
| | 103 | smp_ops.smp_send_reschedule(cpu); |
| | 104 | } |
| 64b1a21e GC | 105 | |
| | 106 | static inline int smp_call_function_mask(cpumask_t mask, |
| | 107 | void (*func) (void *info), void *info, |
| | 108 | int wait) |
| | 109 | { |
| | 110 | return smp_ops.smp_call_function_mask(mask, func, info, wait); |
| | 111 | } |
| 71d19549 | 112 | |
| 1e3fac83 | 113 | void native_smp_prepare_boot_cpu(void); |
| 7557da67 | 114 | void native_smp_prepare_cpus(unsigned int max_cpus); |
| c5597649 | 115 | void native_smp_cpus_done(unsigned int max_cpus); |
| 71d19549 | 116 | int native_cpu_up(unsigned int cpunum); |
| 93b016f8 | 117 | |
| 69c18c15 GC | 118 | extern int __cpu_disable(void); |
| | 119 | extern void __cpu_die(unsigned int cpu); |
| | 120 | |
| 68a1c3f8 | 121 | extern void prefill_possible_map(void); |
| 91718e8d | 122 | |
| 1d89a7f0 | 123 | void smp_store_cpu_info(int id); |
| c70dcb74 | 124 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) |
| a9c057c1 GC | 125 | |
| | 126 | /* We don't mark CPUs online until __cpu_up(), so we need another measure */ |
| | 127 | static inline int num_booting_cpus(void) |
| | 128 | { |
| | 129 | return cpus_weight(cpu_callout_map); |
| | 130 | } |
| | 131 | #endif /* CONFIG_SMP */ |
| | 132 | |
| 2fe60147 AS | 133 | extern unsigned disabled_cpus __cpuinitdata; |
| | 134 | |
| a9c057c1 GC | 135 | #ifdef CONFIG_X86_32_SMP |
| | 136 | /* |
| | 137 | * This function is needed by all SMP systems. It must _always_ be valid |
| | 138 | * from the initial startup. We map APIC_BASE very early in page_setup(), |
| | 139 | * so this is correct in the x86 case. |
| | 140 | */ |
| | 141 | DECLARE_PER_CPU(int, cpu_number); |
| | 142 | #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) |
| | 143 | extern int safe_smp_processor_id(void); |
| | 144 | |
| | 145 | #elif defined(CONFIG_X86_64_SMP) |
| | 146 | #define raw_smp_processor_id() read_pda(cpunumber) |
| | 147 | |
| | 148 | #define stack_smp_processor_id() \ |
| | 149 | ({ \ |
| | 150 | struct thread_info *ti; \ |
| | 151 | __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ |
| | 152 | ti->cpu; \ |
| | 153 | }) |
| | 154 | #define safe_smp_processor_id() smp_processor_id() |
| | 155 | |
| | 156 | #else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */ |
| c70dcb74 | 157 | #define cpu_physical_id(cpu) boot_cpu_physical_apicid |
| a9c057c1 GC | 158 | #define safe_smp_processor_id() 0 |
| | 159 | #define stack_smp_processor_id() 0 |
| c76cb368 | 160 | #endif |
| 16694024 | 161 | |
| 1b000843 GC | 162 | #ifdef CONFIG_X86_LOCAL_APIC |
| | 163 | |
| | 164 | static inline int logical_smp_processor_id(void) |
| | 165 | { |
| | 166 | /* we don't want to mark this access volatile - bad code generation */ |
| | 167 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); |
| | 168 | } |
| | 169 | |
| a24eae88 | 170 | #ifndef CONFIG_X86_64 |
| 05f2d12c JS | 171 | static inline unsigned int read_apic_id(void) |
| | 172 | { |
| | 173 | return *(u32 *)(APIC_BASE + APIC_ID); |
| | 174 | } |
| ac23d4ee JS | 175 | #else |
| | 176 | extern unsigned int read_apic_id(void); |
| | 177 | #endif |
| | 178 | |
| 05f2d12c | 179 | |
| 1b000843 GC | 180 | # ifdef APIC_DEFINITION |
| | 181 | extern int hard_smp_processor_id(void); |
| | 182 | # else |
| | 183 | # include <mach_apicdef.h> |
| | 184 | static inline int hard_smp_processor_id(void) |
| | 185 | { |
| | 186 | /* we don't want to mark this access volatile - bad code generation */ |
| 05f2d12c | 187 | return GET_APIC_ID(read_apic_id()); |
| 1b000843 GC | 188 | } |
| | 189 | # endif /* APIC_DEFINITION */ |
| | 190 | |
| | 191 | #else /* CONFIG_X86_LOCAL_APIC */ |
| | 192 | |
| | 193 | # ifndef CONFIG_SMP |
| | 194 | # define hard_smp_processor_id() 0 |
| | 195 | # endif |
| | 196 | |
| | 197 | #endif /* CONFIG_X86_LOCAL_APIC */ |
| | 198 | |
| 1dbb4726 GC | 199 | #ifdef CONFIG_HOTPLUG_CPU |
| | 200 | extern void cpu_exit_clear(void); |
| | 201 | extern void cpu_uninit(void); |
| | 202 | extern void remove_siblinginfo(int cpu); |
| | 203 | #endif |
| | 204 | |
| 639acb16 GC | 205 | extern void smp_alloc_memory(void); |
| | 206 | extern void lock_ipi_call_lock(void); |
| | 207 | extern void unlock_ipi_call_lock(void); |
| c27cfeff GC | 208 | #endif /* __ASSEMBLY__ */ |
| | 209 | #endif |
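
The `struct smp_ops` declared at lines 54-65 is the indirection layer that lets paravirtualized guests replace the native SMP bring-up and IPI paths: generic code only ever calls the inline wrappers (`smp_prepare_cpus()`, `__cpu_up()`, `smp_send_reschedule()`, and so on), which dispatch through the single global `smp_ops` instance declared at line 74. A minimal sketch of how that instance can be wired to the `native_*` prototypes from this header is shown below; the field list and placement are illustrative, not the kernel's actual initializer.

```c
#include <asm/smp.h>

/*
 * Illustrative only: a native smp_ops instance built from the
 * native_* hooks declared in this header.  The real initializer
 * lives elsewhere in arch/x86 and sets additional fields
 * (.smp_send_stop, .smp_send_reschedule, .smp_call_function_mask)
 * to the corresponding native IPI routines.
 */
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
	.smp_prepare_cpus	= native_smp_prepare_cpus,
	.smp_cpus_done		= native_smp_cpus_done,
	.cpu_up			= native_cpu_up,
};
```

A paravirt guest simply overwrites `smp_ops` with its own function pointers early in boot, before any of the inline wrappers above are called, so the rest of the kernel never needs to know which implementation is active.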
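
On SMP builds, `cpu_physical_id()` (line 124) is backed by the per-CPU `x86_cpu_to_apicid` variable declared at line 45, translating a logical CPU number into its local APIC ID. A small, hypothetical debug helper built only on the interfaces declared above might look like this:

```c
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <asm/smp.h>

/*
 * Hypothetical helper, not part of the kernel: print the
 * logical-CPU -> APIC ID mapping for every online CPU.
 */
static void dump_cpu_apicids(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		printk(KERN_DEBUG "CPU%d -> APIC ID %u\n",
		       cpu, (unsigned int)cpu_physical_id(cpu));
}
```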
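
The `stack_smp_processor_id()` macro for 64-bit SMP (lines 148-153) recovers the CPU number without going through the PDA: kernel stacks are `THREAD_SIZE`-aligned with the `thread_info` at their base, so masking `%rsp` with `CURRENT_MASK` (assumed here to equal `~(THREAD_SIZE - 1)`) yields the `thread_info` pointer, whose `cpu` field holds the processor number. A hedged C rendering of the same trick, under that assumption, is:

```c
#include <linux/thread_info.h>

/*
 * Sketch of the idea behind stack_smp_processor_id(); assumes
 * CURRENT_MASK == ~(THREAD_SIZE - 1), i.e. clearing the low bits
 * of the stack pointer lands on the base of the current kernel
 * stack, where thread_info is stored.
 */
static inline int stack_cpu_sketch(void)
{
	unsigned long sp;
	struct thread_info *ti;

	asm("movq %%rsp, %0" : "=r" (sp));	/* current stack pointer */
	ti = (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	return ti->cpu;		/* CPU this thread is currently running on */
}
```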