#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>
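
/* Boot options: "smp-alt-boot" restricts SMP-alternatives patching to a
 * single pass at boot (the tables are freed afterwards);
 * "debug-alternative" logs every patch site via DPRINTK. */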
static int smp_alt_once = 0;
static int debug_alternative = 0;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}

__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)
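
/* NOP tables, one per CPU flavour: entry N points at an N-byte NOP
 * sequence used to pad patched instruction sites. */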
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
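
/* Select the NOP table for the boot CPU: x86-64 always uses the K8
 * NOPs, while i386 probes the feature bits and falls back to the
 * generic Intel table. */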
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */
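
/* Fill a code region with NOPs, using the longest sequence available
 * (at most ASM_NOP_MAX bytes) per iteration. */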
static void nop_out(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
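
/* Table boundaries and the SMP-alternatives section limits below are
 * symbols provided by the linker script. */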
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}
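
/* On SMP kernels both the SMP and the UP flavour of the affected code
 * are kept around, so the kernel can be re-patched when switching
 * between one and several online CPUs. */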
#ifdef CONFIG_SMP

static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++)
		memcpy(a->replacement + a->replacementlen,
		       a->instr, a->instrlen);
}

static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	for (a = start; a < end; a++)
		memcpy(a->instr,
		       a->replacement + a->replacementlen,
		       a->instrlen);
}
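
/* Enable (write 0xf0) or disable (overwrite with a one-byte NOP) each
 * recorded lock prefix, skipping pointers outside the given .text
 * range (e.g. into already-freed init code). */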
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text || *ptr > text_end)
			continue;
		**ptr = 0xf0; /* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	unsigned char **noptable = find_nop_table();
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text || *ptr > text_end)
			continue;
		**ptr = noptable[1][0];
	}
}
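
/* Book-keeping for every .text range whose lock prefixes may need
 * re-patching at CPU hotplug time. */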
struct smp_alt_module {
	/* the module owning this text range (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);

void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
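
/* Re-patch the core kernel and every registered module for SMP or UP
 * operation; called when the number of online CPUs changes. */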
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not-yet-fixed binutils section handling bug prevents the
	 * alternatives replacement from working reliably, so turn it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		alternatives_smp_apply(__smp_alt_instructions,
				       __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		apply_alternatives(__smp_alt_instructions,
				   __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif /* CONFIG_SMP */
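
/* Paravirt patching: let the active paravirt_ops backend rewrite its
 * call sites with backend-specific code at boot. */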
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
{
	struct paravirt_patch *p;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);
#ifdef CONFIG_DEBUG_PARAVIRT
		{
			int i;
			/* Deliberately clobber regs using "not %reg" to find bugs. */
			for (i = 0; i < 3; i++) {
				if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
					memcpy(p->instr + used, "\xf7\xd0", 2);
					p->instr[used+1] |= i;
					used += 2;
				}
			}
		}
#endif
		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following instructions */
	sync_core();
}
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */
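
/* Main entry point, run once during early boot; patching happens with
 * interrupts disabled since the kernel modifies its own code here. */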
void __init alternative_instructions(void)
{
	unsigned long flags;

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boot-time-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			apply_alternatives(__smp_alt_instructions,
					   __smp_alt_instructions_end);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
	} else {
		alternatives_smp_save(__smp_alt_instructions,
				      __smp_alt_instructions_end);
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__start_parainstructions, __stop_parainstructions);
	local_irq_restore(flags);
}