#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

static int no_replacement    = 0;
static int smp_alt_once      = 0;
static int debug_alternative = 0;
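
/* The flags above are wired to the kernel command line: "noreplacement"
 * disables all patching, "smp-alt-boot" restricts SMP alternative
 * patching to boot time only, and "debug-alternative" enables the
 * DPRINTK diagnostics. */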
static int __init noreplacement_setup(char *s)
{
        no_replacement = 1;
        return 1;
}
static int __init bootonly(char *str)
{
        smp_alt_once = 1;
        return 1;
}
static int __init debug_alt(char *str)
{
        debug_alternative = 1;
        return 1;
}

__setup("noreplacement", noreplacement_setup);
__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);

#define DPRINTK(fmt, args...) if (debug_alternative) \
        printk(KERN_DEBUG fmt, args)

#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
        GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
        GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
        NULL,
        intelnops,
        intelnops + 1,
        intelnops + 1 + 2,
        intelnops + 1 + 2 + 3,
        intelnops + 1 + 2 + 3 + 4,
        intelnops + 1 + 2 + 3 + 4 + 5,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t.data\nk8nops: "
        K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
        K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.data\nk7nops: "
        K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
        K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
        NULL,
        k7nops,
        k7nops + 1,
        k7nops + 1 + 2,
        k7nops + 1 + 2 + 3,
        k7nops + 1 + 2 + 3 + 4,
        k7nops + 1 + 2 + 3 + 4 + 5,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
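
/* A nop table is indexed by length: noptable[k] points at a k-byte
 * nop sequence, up to ASM_NOP_MAX bytes, so the padding loop in
 * apply_alternatives() can always pick the longest nop that fits. */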
#ifdef CONFIG_X86_64
extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
        return k8_nops;
}
#else /* CONFIG_X86_64 */
static struct nop {
        int cpuid;
        unsigned char **noptable;
} noptypes[] = {
        { X86_FEATURE_K8, k8_nops },
        { X86_FEATURE_K7, k7_nops },
        { -1, NULL }
};

static unsigned char** find_nop_table(void)
{
        unsigned char **noptable = intel_nops;
        int i;

        for (i = 0; noptypes[i].cpuid >= 0; i++) {
                if (boot_cpu_has(noptypes[i].cpuid)) {
                        noptable = noptypes[i].noptable;
                        break;
                }
        }
        return noptable;
}
#endif /* CONFIG_X86_64 */

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not
   handled. Tough. Make sure you disable such features by hand. */
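
/* Illustrative example (not part of this file): entries in the
 * .altinstructions section come from the alternative() macros in
 * asm/alternative.h. The i386 memory barrier, for instance, is
 * defined roughly as
 *
 *      #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *                               X86_FEATURE_XMM2)
 *
 * so the loop below rewrites the lock-prefixed add into an mfence on
 * CPUs that advertise SSE2. */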
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
        unsigned char **noptable = find_nop_table();
        struct alt_instr *a;
        u8 *instr;
        int diff, i, k;

        DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
        for (a = start; a < end; a++) {
                BUG_ON(a->replacementlen > a->instrlen);
                if (!boot_cpu_has(a->cpuid))
                        continue;
                instr = a->instr;
#ifdef CONFIG_X86_64
                /* vsyscall code is not mapped yet. resolve it manually. */
                if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
                        instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
                        DPRINTK("%s: vsyscall fixup: %p => %p\n",
                                __FUNCTION__, a->instr, instr);
                }
#endif
                memcpy(instr, a->replacement, a->replacementlen);
                diff = a->instrlen - a->replacementlen;
                /* Pad the rest with nops */
                for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
                        k = diff;
                        if (k > ASM_NOP_MAX)
                                k = ASM_NOP_MAX;
                        memcpy(a->instr + i, noptable[k], k);
                }
        }
}

#ifdef CONFIG_SMP
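
/* The SMP alternatives machinery keeps both flavours of each patched
 * site around: alternatives_smp_save() stashes the live SMP bytes in
 * the spare room after each UP replacement so they can be put back by
 * alternatives_smp_apply() when a second CPU comes online. */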
static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
{
        struct alt_instr *a;

        DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
        for (a = start; a < end; a++) {
                memcpy(a->replacement + a->replacementlen,
                       a->instr,
                       a->instrlen);
        }
}
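
/* Copy the saved SMP instruction bytes back over the live code. */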
static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
{
        struct alt_instr *a;

        for (a = start; a < end; a++) {
                memcpy(a->instr,
                       a->replacement + a->replacementlen,
                       a->instrlen);
        }
}
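
/* Re-insert the 0xf0 lock prefix at every recorded address that falls
 * within [text, text_end]; addresses outside the range are skipped. */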
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        u8 **ptr;

        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                **ptr = 0xf0; /* lock prefix */
        }
}
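
/* Replace each recorded lock prefix with a one-byte nop for UP
 * operation, again skipping addresses outside the text range. */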
static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        unsigned char **noptable = find_nop_table();
        u8 **ptr;

        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                **ptr = noptable[1][0];
        }
}
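
/* Bookkeeping for every code region whose lock prefixes may have to be
 * patched again later: the core kernel and each loaded module register
 * themselves on smp_alt_modules, protected by the smp_alt lock. */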
struct smp_alt_module {
        /* owning module; NULL for the core kernel */
        struct module   *mod;
        char            *name;

        /* ptrs to lock prefixes */
        u8              **locks;
        u8              **locks_end;

        /* .text segment, needed to avoid patching init code ;) */
        u8              *text;
        u8              *text_end;

        struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
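
/* Register a module's lock-prefix and text ranges. In smp_alt_once
 * mode nothing is tracked: the code is patched at most once, right
 * here, and only if we booted on a UP machine. */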
void alternatives_smp_module_add(struct module *mod, char *name,
                                 void *locks, void *locks_end,
                                 void *text,  void *text_end)
{
        struct smp_alt_module *smp;
        unsigned long flags;

        if (no_replacement)
                return;

        if (smp_alt_once) {
                if (boot_cpu_has(X86_FEATURE_UP))
                        alternatives_smp_unlock(locks, locks_end,
                                                text, text_end);
                return;
        }

        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
        if (NULL == smp)
                return; /* we'll run the (safe but slow) SMP code then ... */

        smp->mod        = mod;
        smp->name       = name;
        smp->locks      = locks;
        smp->locks_end  = locks_end;
        smp->text       = text;
        smp->text_end   = text_end;
        DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
                __FUNCTION__, smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);

        spin_lock_irqsave(&smp_alt, flags);
        list_add_tail(&smp->next, &smp_alt_modules);
        if (boot_cpu_has(X86_FEATURE_UP))
                alternatives_smp_unlock(smp->locks, smp->locks_end,
                                        smp->text, smp->text_end);
        spin_unlock_irqrestore(&smp_alt, flags);
}
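
/* Drop a module from the tracking list when it is unloaded. */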
void alternatives_smp_module_del(struct module *mod)
{
        struct smp_alt_module *item;
        unsigned long flags;

        if (no_replacement || smp_alt_once)
                return;

        spin_lock_irqsave(&smp_alt, flags);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
                spin_unlock_irqrestore(&smp_alt, flags);
                DPRINTK("%s: %s\n", __FUNCTION__, item->name);
                kfree(item);
                return;
        }
        spin_unlock_irqrestore(&smp_alt, flags);
}
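
/* Toggle all registered regions between the UP and SMP flavours:
 * called when the second CPU comes online (smp=1) or when the system
 * drops back to a single online CPU (smp=0). */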
void alternatives_smp_switch(int smp)
{
        struct smp_alt_module *mod;
        unsigned long flags;

#ifdef CONFIG_LOCKDEP
        /*
         * A not-yet-fixed binutils section handling bug prevents
         * alternatives-replacement from working reliably, so turn
         * it off:
         */
        printk("lockdep: not fixing up alternatives.\n");
        return;
#endif

        if (no_replacement || smp_alt_once)
                return;
        BUG_ON(!smp && (num_online_cpus() > 1));

        spin_lock_irqsave(&smp_alt, flags);
        if (smp) {
                printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
                clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
                clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
                alternatives_smp_apply(__smp_alt_instructions,
                                       __smp_alt_instructions_end);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
        } else {
                printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
                set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
                apply_alternatives(__smp_alt_instructions,
                                   __smp_alt_instructions_end);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_unlock(mod->locks, mod->locks_end,
                                                mod->text, mod->text_end);
        }
        spin_unlock_irqrestore(&smp_alt, flags);
}

#endif /* CONFIG_SMP */
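
/* Boot-time entry point: apply the CPU-feature alternatives, then
 * either patch the SMP alternatives once and free their tables, or set
 * up the state needed to switch between UP and SMP code later. */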
void __init alternative_instructions(void)
{
        unsigned long flags;

        if (no_replacement) {
                printk(KERN_INFO "(SMP-)alternatives turned off\n");
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_alt_begin,
                                (unsigned long)__smp_alt_end);
                return;
        }

        local_irq_save(flags);
        apply_alternatives(__alt_instructions, __alt_instructions_end);

        /* switch to patch-once-at-boottime-only mode and free the
         * tables in case we know the number of CPUs will never ever
         * change */
#ifdef CONFIG_HOTPLUG_CPU
        if (num_possible_cpus() < 2)
                smp_alt_once = 1;
#else
        smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
        if (smp_alt_once) {
                if (1 == num_possible_cpus()) {
                        printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                        set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
                        set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
                        apply_alternatives(__smp_alt_instructions,
                                           __smp_alt_instructions_end);
                        alternatives_smp_unlock(__smp_locks, __smp_locks_end,
                                                _text, _etext);
                }
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_alt_begin,
                                (unsigned long)__smp_alt_end);
        } else {
                alternatives_smp_save(__smp_alt_instructions,
                                      __smp_alt_instructions_end);
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
                                            _text, _etext);
                alternatives_smp_switch(0);
        }
#endif
        local_irq_restore(flags);
}