#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>
static int no_replacement = 0;
static int smp_alt_once = 0;
static int debug_alternative = 0;
static int __init noreplacement_setup(char *s)
{
	no_replacement = 1;
	return 1;
}
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}

__setup("noreplacement", noreplacement_setup);
__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);
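/*
 * These are plain kernel command line flags.  Booting with, say,
 * "noreplacement" skips all patching, while "smp-alt-boot debug-alternative"
 * selects the patch-once-at-boot mode and logs every patch site.
 */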
#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
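/*
 * The blob emitted above is just the 1-byte nop followed by the 2-byte nop
 * and so on, so intel_nops[k] (for 1 <= k <= ASM_NOP_MAX) is the address of
 * a k-byte nop sequence.  Padding three bytes, for instance, is simply
 *
 *	memcpy(dst, intel_nops[3], 3);
 *
 * Index 0 is NULL and is never dereferenced, since the padding loop below
 * only runs while there are bytes left to fill.
 */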
#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */
static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code.  This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough.  Make sure you disable such features by hand. */
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	unsigned char **noptable = find_nop_table();
	struct alt_instr *a;
	u8 *instr;
	int diff, i, k;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
			instr = __va(instr - (u8 *)VSYSCALL_START + (u8 *)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		/* Pad the rest with nops */
		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
			k = diff;
			if (k > ASM_NOP_MAX)
				k = ASM_NOP_MAX;
			memcpy(a->instr + i, noptable[k], k);
		}
	}
}
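/*
 * The alt_instr entries consumed above come from the alternative() macro in
 * <asm/alternative.h>, which emits the original instruction inline plus a
 * record in the .altinstructions section.  A rough usage sketch (the i386
 * mb() barrier is one plausible user; the exact definition may differ
 * between kernel versions):
 *
 *	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *				 X86_FEATURE_XMM2)
 *
 * On CPUs with the XMM2 feature bit the lock-prefixed add gets patched into
 * an mfence at boot; everywhere else the original sequence stays in place.
 */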
static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++)
		memcpy(a->replacement + a->replacementlen, a->instr, a->instrlen);
}

static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	for (a = start; a < end; a++)
		memcpy(a->instr, a->replacement + a->replacementlen, a->instrlen);
}
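/*
 * These two rely on the replacement buffer of each SMP alternative being
 * large enough to hold the UP replacement plus the original SMP sequence,
 * i.e. the assumed layout per entry is
 *
 *	replacement[0 .. replacementlen-1]		UP replacement
 *	replacement[replacementlen .. +instrlen-1]	saved SMP original
 *
 * _save() stashes the live SMP instructions into that spare space before
 * the first switch to UP code, and _apply() copies them back when
 * alternatives_smp_switch() flips the kernel to SMP again.
 */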
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text || *ptr > text_end)
			continue;
		**ptr = 0xf0; /* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	unsigned char **noptable = find_nop_table();
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text || *ptr > text_end)
			continue;
		**ptr = noptable[1][0]; /* overwrite the lock prefix with a 1-byte nop */
	}
}
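/*
 * The pointers walked above are expected to come from the .smp_locks
 * section: each use of the LOCK_PREFIX macro in <asm/alternative.h> emits
 * its lock byte at a local label and records that label's address in
 * .smp_locks, roughly along the lines of
 *
 *	.section .smp_locks,"a"
 *	  .long 661f			; address of the lock byte
 *	.previous
 *	661:	lock; ...
 *
 * so switching a running kernel between SMP and UP behaviour is just a
 * matter of rewriting those single bytes.
 */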
struct smp_alt_module {
	/* the module owning these patch sites (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text,  void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (no_replacement)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}
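/*
 * The expected caller is the architecture's module_finalize() (see
 * arch/i386/kernel/module.c), which locates the module's .smp_locks and
 * .text ELF sections and registers them, roughly like:
 *
 *	void *lseg = (void *)locks->sh_addr;
 *	void *tseg = (void *)text->sh_addr;
 *	alternatives_smp_module_add(me, me->name,
 *				    lseg, lseg + locks->sh_size,
 *				    tseg, tseg + text->sh_size);
 */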
void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (no_replacement || smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

	if (no_replacement || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		alternatives_smp_apply(__smp_alt_instructions,
				       __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		apply_alternatives(__smp_alt_instructions,
				   __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
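/*
 * The callers sit in the CPU hotplug path: the i386 smpboot code is
 * expected to call alternatives_smp_switch(1) before it brings a second
 * CPU online, and alternatives_smp_switch(0) once the machine is back down
 * to a single online CPU, i.e. roughly
 *
 *	alternatives_smp_switch(1);	\* before waking the AP *\
 *	...
 *	alternatives_smp_switch(0);	\* last other CPU went away *\
 */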
void __init alternative_instructions(void)
{
	if (no_replacement) {
		printk(KERN_INFO "(SMP-)alternatives turned off\n");
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
		return;
	}
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			apply_alternatives(__smp_alt_instructions,
					   __smp_alt_instructions_end);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
	} else {
		alternatives_smp_save(__smp_alt_instructions,
				      __smp_alt_instructions_end);
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
}
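/*
 * alternative_instructions() runs exactly once, late enough that the boot
 * CPU's feature bits are known but before any secondary CPU is started; on
 * i386 it is expected to be invoked from check_bugs()
 * (include/asm-i386/bugs.h), roughly:
 *
 *	static void __init check_bugs(void)
 *	{
 *		...
 *		alternative_instructions();
 *	}
 */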