#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

static int no_replacement = 0;
static int smp_alt_once = 0;
static int debug_alternative = 0;

static int __init noreplacement_setup(char *s)
{
	no_replacement = 1;
	return 1;
}
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}

__setup("noreplacement", noreplacement_setup);
__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)

/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};

asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};

asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */

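/* Fill len bytes at insns with the most efficient nop sequence for
   this CPU, using at most ASM_NOP_MAX bytes per nop. */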
static void nop_out(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}

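/* For runtime UP<->SMP switching, save the original (SMP) instructions
   right behind each entry's replacement so alternatives_smp_apply() can
   copy them back over the live text later. */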
static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++)
		memcpy(a->replacement + a->replacementlen,
		       a->instr, a->instrlen);
}

static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	for (a = start; a < end; a++)
		memcpy(a->instr,
		       a->replacement + a->replacementlen, a->instrlen);
}

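/* Toggle the recorded lock-prefix sites: _lock() restores the 0xf0 lock
   prefix for SMP operation, _unlock() overwrites it with a one-byte nop
   for UP. Only addresses inside the given text..text_end range are patched. */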
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text || *ptr > text_end)
			continue;
		**ptr = 0xf0; /* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	unsigned char **noptable = find_nop_table();
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text || *ptr > text_end)
			continue;
		**ptr = noptable[1][0]; /* one-byte nop */
	}
}

struct smp_alt_module {
	/* the module that owns these lock prefixes (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);

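/* Register a module's lock-prefix list so it can be repatched on UP<->SMP
   transitions; the core kernel registers itself with mod == NULL. */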
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (no_replacement)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (no_replacement || smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

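/* Repatch the kernel and all registered modules for SMP (smp != 0) or
   UP (smp == 0) operation, as the number of online CPUs crosses one. */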
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not yet fixed binutils section handling bug prevents
	 * alternatives-replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (no_replacement || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		alternatives_smp_apply(__smp_alt_instructions,
				       __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		apply_alternatives(__smp_alt_instructions,
				   __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
{
	struct paravirt_patch *p;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);
#ifdef CONFIG_DEBUG_PARAVIRT
		{
			int i;
			/* Deliberately clobber regs using "not %reg" to find bugs. */
			for (i = 0; i < 3; i++) {
				if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
					memcpy(p->instr + used, "\xf7\xd0", 2);
					p->instr[used+1] |= i;
					used += 2;
				}
			}
		}
#endif
		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following instructions */
	sync_core();
}
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */

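/* Boot-time entry point: apply the CPU-specific alternatives and set up
   (or immediately finalize) the SMP lock-prefix patching. */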
void __init alternative_instructions(void)
{
	unsigned long flags;

	if (no_replacement) {
		printk(KERN_INFO "(SMP-)alternatives turned off\n");
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
		return;
	}

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			apply_alternatives(__smp_alt_instructions,
					   __smp_alt_instructions_end);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
	} else {
		alternatives_smp_save(__smp_alt_instructions,
				      __smp_alt_instructions_end);
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__start_parainstructions, __stop_parainstructions);
	local_irq_restore(flags);
}