#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

static int noreplace_smp = 0;
static int smp_alt_once = 0;
static int debug_alternative = 0;
static int __init bootonly(char *str)
{
        smp_alt_once = 1;
        return 1;
}
__setup("smp-alt-boot", bootonly);
static int __init debug_alt(char *str)
{
        debug_alternative = 1;
        return 1;
}
__setup("debug-alternative", debug_alt);
static int __init setup_noreplace_smp(char *str)
{
        noreplace_smp = 1;
        return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#ifdef CONFIG_PARAVIRT
static int noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
        noreplace_paravirt = 1;
        return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
#define DPRINTK(fmt, args...) if (debug_alternative) \
        printk(KERN_DEBUG fmt, args)
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
        GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
        GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
        NULL,
        intelnops,
        intelnops + 1,
        intelnops + 1 + 2,
        intelnops + 1 + 2 + 3,
        intelnops + 1 + 2 + 3 + 4,
        intelnops + 1 + 2 + 3 + 4 + 5,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
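/*
 * Illustrative note (not part of the original table definitions): each
 * *_nops[n] entry points at a run of exactly n NOP bytes inside the asm
 * blob above, so a caller needing a 3-byte NOP can simply do
 *
 *      memcpy(addr, intel_nops[3], 3);
 *
 * Index 0 is NULL and is never dereferenced.
 */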
#ifdef K8_NOP1
asm("\t.data\nk8nops: "
        K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
        K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K7_NOP1
asm("\t.data\nk7nops: "
        K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
        K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
        NULL,
        k7nops,
        k7nops + 1,
        k7nops + 1 + 2,
        k7nops + 1 + 2 + 3,
        k7nops + 1 + 2 + 3 + 4,
        k7nops + 1 + 2 + 3 + 4 + 5,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
        return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
        int cpuid;
        unsigned char **noptable;
} noptypes[] = {
        { X86_FEATURE_K8, k8_nops },
        { X86_FEATURE_K7, k7_nops },
        { -1, NULL }
};

static unsigned char** find_nop_table(void)
{
        unsigned char **noptable = intel_nops;
        int i;

        for (i = 0; noptypes[i].cpuid >= 0; i++) {
                if (boot_cpu_has(noptypes[i].cpuid)) {
                        noptable = noptypes[i].noptable;
                        break;
                }
        }
        return noptable;
}

#endif /* CONFIG_X86_64 */
static void nop_out(void *insns, unsigned int len)
{
        unsigned char **noptable = find_nop_table();

        /* Fill the range with the largest NOPs available, ASM_NOP_MAX bytes at a time. */
        while (len > 0) {
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
                memcpy(insns, noptable[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
}
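/*
 * Rough usage sketch (illustration only): after a 4-byte replacement has
 * been copied into a 10-byte instruction slot, the remainder is padded with
 *
 *      nop_out(instr + 4, 6);
 *
 * A gap longer than ASM_NOP_MAX (8) is filled in chunks, e.g. 12 bytes
 * become one 8-byte NOP followed by one 4-byte NOP.
 */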
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
        struct alt_instr *a;
        u8 *instr;
        int diff;

        DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
        for (a = start; a < end; a++) {
                BUG_ON(a->replacementlen > a->instrlen);
                if (!boot_cpu_has(a->cpuid))
                        continue;
                instr = a->instr;
#ifdef CONFIG_X86_64
                /* vsyscall code is not mapped yet. resolve it manually. */
                if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
                        instr = __va(instr - (u8 *)VSYSCALL_START +
                                     (u8 *)__pa_symbol(&__vsyscall_0));
                        DPRINTK("%s: vsyscall fixup: %p => %p\n",
                                __FUNCTION__, a->instr, instr);
                }
#endif
                memcpy(instr, a->replacement, a->replacementlen);
                diff = a->instrlen - a->replacementlen;
                nop_out(instr + a->replacementlen, diff);
        }
}
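/*
 * Hedged example of where the alt_instr records come from: callers use the
 * alternative() macro from <asm/alternative.h>, which emits the old
 * instruction inline and stores an entry in the .altinstructions section.
 * The memory barrier macros are built this way, roughly:
 *
 *      alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * apply_alternatives() then overwrites the inline bytes with the replacement
 * on CPUs that have the feature bit, NOP-padding any length difference.
 */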
#ifdef CONFIG_SMP

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        u8 **ptr;

        for (ptr = start; ptr < end; ptr++) {
                /* only patch addresses inside the .text segment */
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                **ptr = 0xf0; /* lock prefix */
        }
}
static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
        u8 **ptr;

        if (noreplace_smp)
                return;

        for (ptr = start; ptr < end; ptr++) {
                if (*ptr < text)
                        continue;
                if (*ptr > text_end)
                        continue;
                /* replace the lock prefix with a 1-byte NOP */
                nop_out(*ptr, 1);
        }
}
struct smp_alt_module {
        /* the module owning these lock prefixes (NULL for the core kernel) */
        struct module   *mod;
        char            *name;

        /* ptrs to lock prefixes */
        u8              **locks;
        u8              **locks_end;

        /* .text segment, needed to avoid patching init code */
        u8              *text;
        u8              *text_end;

        struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
void alternatives_smp_module_add(struct module *mod, char *name,
                                 void *locks, void *locks_end,
                                 void *text,  void *text_end)
{
        struct smp_alt_module *smp;
        unsigned long flags;

        if (noreplace_smp)
                return;

        if (smp_alt_once) {
                if (boot_cpu_has(X86_FEATURE_UP))
                        alternatives_smp_unlock(locks, locks_end,
                                                text, text_end);
                return;
        }

        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
        if (NULL == smp)
                return; /* we'll run the (safe but slow) SMP code then ... */

        smp->mod        = mod;
        smp->name       = name;
        smp->locks      = locks;
        smp->locks_end  = locks_end;
        smp->text       = text;
        smp->text_end   = text_end;
        DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
                __FUNCTION__, smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);

        spin_lock_irqsave(&smp_alt, flags);
        list_add_tail(&smp->next, &smp_alt_modules);
        if (boot_cpu_has(X86_FEATURE_UP))
                alternatives_smp_unlock(smp->locks, smp->locks_end,
                                        smp->text, smp->text_end);
        spin_unlock_irqrestore(&smp_alt, flags);
}
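/*
 * Sketch of the expected caller (the arch module loader, e.g.
 * module_finalize()), assuming "locks" and "text" are the module's
 * .smp_locks and .text section headers:
 *
 *      alternatives_smp_module_add(me, me->name,
 *                                  locks->sh_addr,
 *                                  locks->sh_addr + locks->sh_size,
 *                                  text->sh_addr,
 *                                  text->sh_addr + text->sh_size);
 *
 * On a UP boot the module's lock prefixes are NOPed out immediately; on SMP
 * the record is kept so alternatives_smp_switch() can re-patch later.
 */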
void alternatives_smp_module_del(struct module *mod)
{
        struct smp_alt_module *item;
        unsigned long flags;

        if (smp_alt_once || noreplace_smp)
                return;

        spin_lock_irqsave(&smp_alt, flags);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
                spin_unlock_irqrestore(&smp_alt, flags);
                DPRINTK("%s: %s\n", __FUNCTION__, item->name);
                kfree(item);
                return;
        }
        spin_unlock_irqrestore(&smp_alt, flags);
}
void alternatives_smp_switch(int smp)
{
        struct smp_alt_module *mod;
        unsigned long flags;

#ifdef CONFIG_LOCKDEP
        /*
         * A not-yet-fixed binutils section handling bug prevents
         * alternatives replacement from working reliably, so turn
         * it off:
         */
        printk("lockdep: not fixing up alternatives.\n");
        return;
#endif

        if (noreplace_smp || smp_alt_once)
                return;
        BUG_ON(!smp && (num_online_cpus() > 1));

        spin_lock_irqsave(&smp_alt, flags);
        if (smp) {
                printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
                clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
                clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
        } else {
                printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
                set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_unlock(mod->locks, mod->locks_end,
                                                mod->text, mod->text_end);
        }
        spin_unlock_irqrestore(&smp_alt, flags);
}

#endif /* CONFIG_SMP */
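/*
 * Illustrative call sites (assumed, from the CPU hotplug paths): bringing a
 * second CPU online would do something like
 *
 *      if (num_online_cpus() == 1)
 *              alternatives_smp_switch(1);
 *
 * and dropping back to a single online CPU would call
 * alternatives_smp_switch(0) to restore the cheaper UP sequences.
 */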
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
                    struct paravirt_patch_site *end)
{
        struct paravirt_patch_site *p;

        if (noreplace_paravirt)
                return;

        for (p = start; p < end; p++) {
                unsigned int used;

                used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
                                          p->len);
                BUG_ON(used > p->len);

                /* Pad the rest with nops */
                nop_out(p->instr + used, p->len - used);
        }

        /* Sync to be conservative, in case we patched following
         * instructions */
        sync_core();
}
extern struct paravirt_patch_site __start_parainstructions[],
        __stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */
void __init alternative_instructions(void)
{
        unsigned long flags;

        local_irq_save(flags);
        apply_alternatives(__alt_instructions, __alt_instructions_end);

        /* switch to patch-once-at-boot-time-only mode and free the
         * tables in case we know the number of CPUs will never ever
         * change */
#ifdef CONFIG_HOTPLUG_CPU
        if (num_possible_cpus() < 2)
                smp_alt_once = 1;
#else
        smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
        if (smp_alt_once) {
                if (1 == num_possible_cpus()) {
                        printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                        set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
                        set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
                        alternatives_smp_unlock(__smp_locks, __smp_locks_end,
                                                _text, _etext);
                }
                free_init_pages("SMP alternatives",
                                __pa_symbol(&__smp_locks),
                                __pa_symbol(&__smp_locks_end));
        } else {
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
                                            _text, _etext);
                alternatives_smp_switch(0);
        }
#endif
        apply_paravirt(__parainstructions, __parainstructions_end);
        local_irq_restore(flags);
}
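/*
 * Assumed entry point: alternative_instructions() is expected to run once
 * during early boot (from the arch check_bugs() path), after the boot CPU's
 * feature flags are known but before other CPUs are started, so the patching
 * above never races with another processor executing the patched code.
 */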