[PATCH] vmi: sched clock paravirt op fix
[linux-2.6] / arch / i386 / kernel / paravirt.c
1 /*  Paravirtualization interfaces
2     Copyright (C) 2006 Rusty Russell IBM Corporation
3
4     This program is free software; you can redistribute it and/or modify
5     it under the terms of the GNU General Public License as published by
6     the Free Software Foundation; either version 2 of the License, or
7     (at your option) any later version.
8
9     This program is distributed in the hope that it will be useful,
10     but WITHOUT ANY WARRANTY; without even the implied warranty of
11     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12     GNU General Public License for more details.
13
14     You should have received a copy of the GNU General Public License
15     along with this program; if not, write to the Free Software
16     Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17 */
18 #include <linux/errno.h>
19 #include <linux/module.h>
20 #include <linux/efi.h>
21 #include <linux/bcd.h>
22 #include <linux/start_kernel.h>
23
24 #include <asm/bug.h>
25 #include <asm/paravirt.h>
26 #include <asm/desc.h>
27 #include <asm/setup.h>
28 #include <asm/arch_hooks.h>
29 #include <asm/time.h>
30 #include <asm/irq.h>
31 #include <asm/delay.h>
32 #include <asm/fixmap.h>
33 #include <asm/apic.h>
34 #include <asm/tlbflush.h>
35 #include <asm/timer.h>
36
/* nop stub: shared do-nothing implementation for paravirt hooks that
 * require no action on bare hardware (it is cast to various op types
 * in the paravirt_ops initializer below). */
static void native_nop(void)
{
}
41
/* Default .banner op: announce which paravirt backend the kernel booted on. */
static void __init default_banner(void)
{
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               paravirt_ops.name);
}
47
/* Dispatch early memory-map setup through the active paravirt backend. */
char *memory_setup(void)
{
        return paravirt_ops.memory_setup();
}
52
/* Simple instruction patching code. */
/*
 * DEF_NATIVE emits the raw native instruction sequence for one paravirt
 * operation into the image and brackets it with start_<name>/end_<name>
 * symbols, so native_patch() can copy it over an indirect call site.
 */
#define DEF_NATIVE(name, code)                                  \
        extern const char start_##name[], end_##name[];         \
        asm("start_" #name ": " code "; end_" #name ":")
DEF_NATIVE(cli, "cli");
DEF_NATIVE(sti, "sti");
DEF_NATIVE(popf, "push %eax; popf");
DEF_NATIVE(pushf, "pushf; pop %eax");
DEF_NATIVE(pushf_cli, "pushf; pop %eax; cli");
DEF_NATIVE(iret, "iret");
DEF_NATIVE(sti_sysexit, "sti; sysexit");
64
/*
 * Native replacement sequences, indexed by PARAVIRT_* op type.
 * Op types without an entry stay NULL and are never patched.
 */
static const struct native_insns
{
        const char *start, *end;        /* delimits code emitted by DEF_NATIVE */
} native_insns[] = {
        [PARAVIRT_IRQ_DISABLE] = { start_cli, end_cli },
        [PARAVIRT_IRQ_ENABLE] = { start_sti, end_sti },
        [PARAVIRT_RESTORE_FLAGS] = { start_popf, end_popf },
        [PARAVIRT_SAVE_FLAGS] = { start_pushf, end_pushf },
        [PARAVIRT_SAVE_FLAGS_IRQ_DISABLE] = { start_pushf_cli, end_pushf_cli },
        [PARAVIRT_INTERRUPT_RETURN] = { start_iret, end_iret },
        [PARAVIRT_STI_SYSEXIT] = { start_sti_sysexit, end_sti_sysexit },
};
77
78 static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
79 {
80         unsigned int insn_len;
81
82         /* Don't touch it if we don't have a replacement */
83         if (type >= ARRAY_SIZE(native_insns) || !native_insns[type].start)
84                 return len;
85
86         insn_len = native_insns[type].end - native_insns[type].start;
87
88         /* Similarly if we can't fit replacement. */
89         if (len < insn_len)
90                 return len;
91
92         memcpy(insns, native_insns[type].start, insn_len);
93         return insn_len;
94 }
95
/*
 * Read debug register @regno.  Only DR0-DR3, DR6 and DR7 exist on x86;
 * any other index is a kernel bug.
 */
static unsigned long native_get_debugreg(int regno)
{
        unsigned long val = 0;  /* Damn you, gcc! */

        switch (regno) {
        case 0:
                asm("movl %%db0, %0" :"=r" (val)); break;
        case 1:
                asm("movl %%db1, %0" :"=r" (val)); break;
        case 2:
                asm("movl %%db2, %0" :"=r" (val)); break;
        case 3:
                asm("movl %%db3, %0" :"=r" (val)); break;
        case 6:
                asm("movl %%db6, %0" :"=r" (val)); break;
        case 7:
                asm("movl %%db7, %0" :"=r" (val)); break;
        default:
                BUG();
        }
        return val;
}
118
/*
 * Write @value to debug register @regno.  Mirrors native_get_debugreg():
 * only DR0-DR3, DR6 and DR7 are valid, anything else BUG()s.
 */
static void native_set_debugreg(int regno, unsigned long value)
{
        switch (regno) {
        case 0:
                asm("movl %0,%%db0"     : /* no output */ :"r" (value));
                break;
        case 1:
                asm("movl %0,%%db1"     : /* no output */ :"r" (value));
                break;
        case 2:
                asm("movl %0,%%db2"     : /* no output */ :"r" (value));
                break;
        case 3:
                asm("movl %0,%%db3"     : /* no output */ :"r" (value));
                break;
        case 6:
                asm("movl %0,%%db6"     : /* no output */ :"r" (value));
                break;
        case 7:
                asm("movl %0,%%db7"     : /* no output */ :"r" (value));
                break;
        default:
                BUG();
        }
}
144
/* Interrupt-controller init: dispatched through the active backend. */
void init_IRQ(void)
{
        paravirt_ops.init_IRQ();
}
149
/* Clear CR0.TS with the dedicated "clts" instruction. */
static void native_clts(void)
{
        asm volatile ("clts");
}

/* CR0/CR2/CR3 accessors: plain mov to/from the control register. */
static unsigned long native_read_cr0(void)
{
        unsigned long val;
        asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
        return val;
}

static void native_write_cr0(unsigned long val)
{
        asm volatile("movl %0,%%cr0": :"r" (val));
}

/* CR2 holds the faulting address after a page fault. */
static unsigned long native_read_cr2(void)
{
        unsigned long val;
        asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
        return val;
}

static void native_write_cr2(unsigned long val)
{
        asm volatile("movl %0,%%cr2": :"r" (val));
}

/* CR3 is the page-directory base register. */
static unsigned long native_read_cr3(void)
{
        unsigned long val;
        asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
        return val;
}

static void native_write_cr3(unsigned long val)
{
        asm volatile("movl %0,%%cr3": :"r" (val));
}
190
static unsigned long native_read_cr4(void)
{
        unsigned long val;
        asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
        return val;
}

/*
 * Like native_read_cr4(), but survives CPUs without CR4 (486 era):
 * a fault in the mov is caught by the exception table and skipped,
 * so the "0" (0) input leaves 0 as the returned value.
 */
static unsigned long native_read_cr4_safe(void)
{
        unsigned long val;
        /* This could fault if %cr4 does not exist */
        asm("1: movl %%cr4, %0          \n"
                "2:                             \n"
                ".section __ex_table,\"a\"      \n"
                ".long 1b,2b                    \n"
                ".previous                      \n"
                : "=r" (val): "0" (0));
        return val;
}

static void native_write_cr4(unsigned long val)
{
        asm volatile("movl %0,%%cr4": :"r" (val));
}
215
/* Return the current EFLAGS (pushf into a temporary and pop it out). */
static unsigned long native_save_fl(void)
{
        unsigned long f;
        asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
        return f;
}

/* Restore EFLAGS previously captured by native_save_fl(). */
static void native_restore_fl(unsigned long f)
{
        asm volatile("pushl %0 ; popfl": /* no output */
                             :"g" (f)
                             :"memory", "cc");
}

static void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

/* "sti; hlt" enables interrupts and halts in one go, so an interrupt
 * arriving in between cannot be lost before the hlt. */
static void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}

/* Halt without touching the interrupt flag. */
static void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}

/* Write back and invalidate all cache lines. */
static void native_wbinvd(void)
{
        asm volatile("wbinvd": : :"memory");
}
254
/*
 * Read MSR @msr with rdmsr.  On success the xorl after the rdmsr clears
 * *err; if the rdmsr faults (e.g. non-existent MSR), the .fixup code at
 * label 3 stores -EFAULT in *err and resumes at label 1, leaving the
 * returned value unspecified.
 */
static unsigned long long native_read_msr(unsigned int msr, int *err)
{
        unsigned long long val;

        asm volatile("2: rdmsr ; xorl %0,%0\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3:  movl %3,%0 ; jmp 1b\n\t"
                     ".previous\n\t"
                     ".section __ex_table,\"a\"\n"
                     "   .align 4\n\t"
                     "   .long  2b,3b\n\t"
                     ".previous"
                     : "=r" (*err), "=A" (val)
                     : "c" (msr), "i" (-EFAULT));

        return val;
}
273
/*
 * Write the 64-bit @val to MSR @msr with wrmsr (low half in eax, high
 * half in edx).  Returns 0 on success; a faulting wrmsr is fixed up via
 * the exception table to return -EFAULT instead of oopsing.
 */
static int native_write_msr(unsigned int msr, unsigned long long val)
{
        int err;
        asm volatile("2: wrmsr ; xorl %0,%0\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3:  movl %4,%0 ; jmp 1b\n\t"
                     ".previous\n\t"
                     ".section __ex_table,\"a\"\n"
                     "   .align 4\n\t"
                     "   .long  2b,3b\n\t"
                     ".previous"
                     : "=a" (err)
                     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
                       "i" (-EFAULT));
        return err;
}
291
/* Read the 64-bit time-stamp counter ("=A" binds the edx:eax pair). */
static unsigned long long native_read_tsc(void)
{
        unsigned long long val;
        asm volatile("rdtsc" : "=A" (val));
        return val;
}

/*
 * Read a performance-monitoring counter.  rdpmc selects the counter via
 * %ecx, which is not loaded here -- NOTE(review): presumably callers set
 * it up (or only counter semantics from the current %ecx are wanted);
 * verify against the call sites.
 */
static unsigned long long native_read_pmc(void)
{
        unsigned long long val;
        asm volatile("rdpmc" : "=A" (val));
        return val;
}
305
/* Load the task register with the TSS selector (GDT entry GDT_ENTRY_TSS). */
static void native_load_tr_desc(void)
{
        asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}

/* lgdt/lidt load the descriptor-table base+limit from *dtr. */
static void native_load_gdt(const struct Xgt_desc_struct *dtr)
{
        asm volatile("lgdt %0"::"m" (*dtr));
}

static void native_load_idt(const struct Xgt_desc_struct *dtr)
{
        asm volatile("lidt %0"::"m" (*dtr));
}

/* sgdt/sidt store the current descriptor-table base+limit into *dtr. */
static void native_store_gdt(struct Xgt_desc_struct *dtr)
{
        asm ("sgdt %0":"=m" (*dtr));
}

static void native_store_idt(struct Xgt_desc_struct *dtr)
{
        asm ("sidt %0":"=m" (*dtr));
}

/* Return the selector currently held in the task register. */
static unsigned long native_store_tr(void)
{
        unsigned long tr;
        asm ("str %0":"=r" (tr));
        return tr;
}
337
/*
 * Copy the thread's three TLS descriptors into this CPU's GDT, at slots
 * GDT_ENTRY_TLS_MIN..GDT_ENTRY_TLS_MIN+2.  The C() helper macro copies
 * one entry and is undefined again immediately after use.
 */
static void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
        C(0); C(1); C(2);
#undef C
}
344
345 static inline void native_write_dt_entry(void *dt, int entry, u32 entry_low, u32 entry_high)
346 {
347         u32 *lp = (u32 *)((char *)dt + entry*8);
348         lp[0] = entry_low;
349         lp[1] = entry_high;
350 }
351
/*
 * On bare hardware LDT, GDT and IDT entries are all written the same
 * way; they are kept as three distinct paravirt ops so a backend can
 * override each table type individually.
 */
static void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
{
        native_write_dt_entry(dt, entrynum, low, high);
}

static void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
{
        native_write_dt_entry(dt, entrynum, low, high);
}

static void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
{
        native_write_dt_entry(dt, entrynum, low, high);
}
366
/*
 * Install the thread's kernel stack pointer in the TSS and keep the
 * SYSENTER CS MSR in sync with thread->sysenter_cs, writing the MSR
 * only when the cached tss->ss1 shows it actually changed.
 */
static void native_load_esp0(struct tss_struct *tss,
                                      struct thread_struct *thread)
{
        tss->esp0 = thread->esp0;

        /* This can only happen when SEP is enabled, no need to test "SEP"arately */
        if (unlikely(tss->ss1 != thread->sysenter_cs)) {
                tss->ss1 = thread->sysenter_cs;
                wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
        }
}
378
/* Short I/O delay: a dummy write to port 0x80 (the POST code port). */
static void native_io_delay(void)
{
        asm volatile("outb %al,$0x80");
}
383
/* Flush non-global TLB entries (CR3 reload via the __native_ helper). */
static void native_flush_tlb(void)
{
        __native_flush_tlb();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static void native_flush_tlb_global(void)
{
        __native_flush_tlb_global();
}

/* Flush the TLB entry for one virtual address. */
static void native_flush_tlb_single(u32 addr)
{
        __native_flush_tlb_single(addr);
}
402
403 #ifndef CONFIG_X86_PAE
/*
 * !PAE: a pte/pmd is a single word, so plain assignment covers every
 * setter variant.
 */
static void native_set_pte(pte_t *ptep, pte_t pteval)
{
        *ptep = pteval;
}

/* @mm and @addr are unused natively; same single-word store. */
static void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
{
        *ptep = pteval;
}

static void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        *pmdp = pmdval;
}
418
419 #else /* CONFIG_X86_PAE */
420
/*
 * PAE: a pte is two 32-bit words, so the setters must either order the
 * two stores with smp_wmb() or use a 64-bit atomic store (set_64bit) so
 * another CPU never observes a mix of old and new halves.
 */
static void native_set_pte(pte_t *ptep, pte_t pte)
{
        /* High word first, then low; smp_wmb() orders the two stores. */
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;
}

/* @mm/@addr unused natively; same ordered two-word store. */
static void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;
}

/*
 * Replace an existing pte: knock the low word out first, then write
 * high and low in order.  NOTE(review): this assumes the validity bits
 * live in pte_low, so the entry is never seen live with a stale high
 * half -- confirm against the pte layout.
 */
static void native_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
        ptep->pte_low = 0;
        smp_wmb();
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;
}

/* Single atomic 64-bit store instead of the ordered pair above. */
static void native_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
        set_64bit((unsigned long long *)ptep,pte_val(pteval));
}

static void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        set_64bit((unsigned long long *)pmdp,pmd_val(pmdval));
}

static void native_set_pud(pud_t *pudp, pud_t pudval)
{
        *pudp = pudval;
}

/* Clear low word first, mirroring the ordering of the setters. */
static void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        ptep->pte_low = 0;
        smp_wmb();
        ptep->pte_high = 0;
}

static void native_pmd_clear(pmd_t *pmd)
{
        u32 *tmp = (u32 *)pmd;
        *tmp = 0;
        smp_wmb();
        *(tmp + 1) = 0;
}
473 #endif /* CONFIG_X86_PAE */
474
475 /* These are in entry.S */
476 extern void native_iret(void);
477 extern void native_irq_enable_sysexit(void);
478
479 static int __init print_banner(void)
480 {
481         paravirt_ops.banner();
482         return 0;
483 }
484 core_initcall(print_banner);
485
/*
 * The default paravirt_ops: run directly on bare hardware.  Hypervisor
 * backends overwrite these entries with their own during early boot.
 * Hooks that need no native work point at native_nop, cast to the
 * respective op type.
 */
struct paravirt_ops paravirt_ops = {
        .name = "bare hardware",
        .paravirt_enabled = 0,
        .kernel_rpl = 0,

        .patch = native_patch,
        .banner = default_banner,
        .arch_setup = native_nop,
        .memory_setup = machine_specific_memory_setup,
        .get_wallclock = native_get_wallclock,
        .set_wallclock = native_set_wallclock,
        .time_init = time_init_hook,
        .init_IRQ = native_init_IRQ,

        .cpuid = native_cpuid,
        .get_debugreg = native_get_debugreg,
        .set_debugreg = native_set_debugreg,
        .clts = native_clts,
        .read_cr0 = native_read_cr0,
        .write_cr0 = native_write_cr0,
        .read_cr2 = native_read_cr2,
        .write_cr2 = native_write_cr2,
        .read_cr3 = native_read_cr3,
        .write_cr3 = native_write_cr3,
        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = native_write_cr4,
        .save_fl = native_save_fl,
        .restore_fl = native_restore_fl,
        .irq_disable = native_irq_disable,
        .irq_enable = native_irq_enable,
        .safe_halt = native_safe_halt,
        .halt = native_halt,
        .wbinvd = native_wbinvd,
        .read_msr = native_read_msr,
        .write_msr = native_write_msr,
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
        /* sched clock source: the raw TSC on bare hardware */
        .get_scheduled_cycles = native_read_tsc,
        .load_tr_desc = native_load_tr_desc,
        .set_ldt = native_set_ldt,
        .load_gdt = native_load_gdt,
        .load_idt = native_load_idt,
        .store_gdt = native_store_gdt,
        .store_idt = native_store_idt,
        .store_tr = native_store_tr,
        .load_tls = native_load_tls,
        .write_ldt_entry = native_write_ldt_entry,
        .write_gdt_entry = native_write_gdt_entry,
        .write_idt_entry = native_write_idt_entry,
        .load_esp0 = native_load_esp0,

        .set_iopl_mask = native_set_iopl_mask,
        .io_delay = native_io_delay,
        .const_udelay = __const_udelay,

#ifdef CONFIG_X86_LOCAL_APIC
        .apic_write = native_apic_write,
        .apic_write_atomic = native_apic_write_atomic,
        .apic_read = native_apic_read,
        .setup_boot_clock = setup_boot_APIC_clock,
        .setup_secondary_clock = setup_secondary_APIC_clock,
#endif
        .set_lazy_mode = (void *)native_nop,

        .flush_tlb_user = native_flush_tlb,
        .flush_tlb_kernel = native_flush_tlb_global,
        .flush_tlb_single = native_flush_tlb_single,

        /* pagetable page allocation hooks: nothing to do natively */
        .alloc_pt = (void *)native_nop,
        .alloc_pd = (void *)native_nop,
        .alloc_pd_clone = (void *)native_nop,
        .release_pt = (void *)native_nop,
        .release_pd = (void *)native_nop,

        .set_pte = native_set_pte,
        .set_pte_at = native_set_pte_at,
        .set_pmd = native_set_pmd,
        .pte_update = (void *)native_nop,
        .pte_update_defer = (void *)native_nop,
#ifdef CONFIG_X86_PAE
        .set_pte_atomic = native_set_pte_atomic,
        .set_pte_present = native_set_pte_present,
        .set_pud = native_set_pud,
        .pte_clear = native_pte_clear,
        .pmd_clear = native_pmd_clear,
#endif

        /* implemented in entry.S (see the extern declarations above) */
        .irq_enable_sysexit = native_irq_enable_sysexit,
        .iret = native_iret,

        .startup_ipi_hook = (void *)native_nop,
};
579
/*
 * NOTE: CONFIG_PARAVIRT is experimental and the paravirt_ops
 * semantics are subject to change. Hence we only do this
 * internal-only export of this, until it gets sorted out and
 * all lowlevel CPU ops used by modules are separately exported.
 * (GPL-only export: non-GPL modules cannot bind to it.)
 */
EXPORT_SYMBOL_GPL(paravirt_ops);