#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>
static int noreplace_smp     = 0;
static int smp_alt_once      = 0;
static int debug_alternative = 0;
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#define DPRINTK(fmt, args...) do {				\
	if (debug_alternative)					\
		printk(KERN_DEBUG fmt, args);			\
} while (0)
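/*
 * NOP tables, one per CPU family: entry [n] points at an n-byte NOP
 * instruction, used below to pad out patched instruction slots.
 */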
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   easily get them into C strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
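/*
 * Pick the NOP table for the boot CPU: x86-64 always uses the K8 NOPs,
 * while i386 selects by CPU feature flag and falls back to the generic
 * Intel NOPs.
 */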
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */
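/*
 * Fill len bytes at insns with NOPs, using the longest NOP the chosen
 * table provides on each iteration.
 */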
static void nop_out(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
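/*
 * Section boundaries provided by the linker script: the alt_instr
 * records in .altinstructions and the pointers to SMP lock prefixes
 * in .smp_locks.
 */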
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not
   handled. Tough. Make sure you disable such features by hand. */
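/*
 * Illustrative sketch (not part of this file): the alt_instr records
 * come from the alternative() macros in <asm/alternative.h>.  A typical
 * user looks roughly like
 *
 *	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *				  X86_FEATURE_XMM2)
 *
 * which keeps the old instruction in .text and emits the replacement
 * plus an alt_instr record describing it into .altinstructions.
 */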
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
			instr = __va(instr - (u8 *)VSYSCALL_START +
				     (u8 *)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}
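/*
 * SMP lock prefix patching: the .smp_locks section holds a pointer to
 * every lock prefix in the kernel.  On a uniprocessor machine the
 * prefixes are replaced with one-byte NOPs; when a second CPU comes up
 * they are patched back to 0xf0.
 */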
#ifdef CONFIG_SMP

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = 0xf0; /* lock prefix */
	}
}
static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	if (noreplace_smp)
		return;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		nop_out(*ptr, 1);
	}
}
struct smp_alt_module {
	/* owning module (NULL for the core kernel) and a name for debugging */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
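/*
 * Register a .text region and its lock-prefix pointers so the prefixes
 * can be toggled later when the system switches between UP and SMP.
 */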
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text,  void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}
void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once || noreplace_smp)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
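/*
 * Called when the machine switches between one and more than one online
 * CPU (CPU bring-up and hotplug): smp != 0 means patch lock prefixes in,
 * smp == 0 means patch them out again.
 */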
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * An as-yet-unfixed binutils section handling bug prevents
	 * alternatives replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif /* CONFIG_SMP */
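/*
 * Paravirt patching: paravirt_ops.patch() rewrites each recorded call
 * site with code suited to the hypervisor (or to bare hardware) and
 * returns how many bytes it emitted; the rest of the site is padded
 * with NOPs.
 */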
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
{
	struct paravirt_patch *p;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);

		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following instructions */
	sync_core();
}
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
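/*
 * Boot-time entry point: patch the core kernel's alternatives, decide
 * whether SMP alternatives can be applied once and freed or must stay
 * switchable for CPU hotplug, and patch paravirt call sites.  Interrupts
 * stay off while live code is being rewritten.
 */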
void __init alternative_instructions(void)
{
	unsigned long flags;

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				__pa_symbol(&__smp_locks),
				__pa_symbol(&__smp_locks_end));
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__start_parainstructions, __stop_parainstructions);
	local_irq_restore(flags);
}