/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 */
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>
#include <asm/setup.h>
/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
#define call_vrom_func(rom,func) \
   (((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
   (((VROMLONGFUNC *)(rom->func)) (arg))
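
/*
 * The vrom_header fields used with these macros (get_reloc, vmi_init, ...)
 * are entry points exported by the VMI ROM; the casts apply the regparm
 * convention the ROM expects.  A typical lookup (illustrative only) is
 *
 *	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CPUID);
 *
 * which passes the call number in a register and returns a packed
 * struct vmi_relocation_info in the u64 result.
 */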
static struct vrom_header *vmi_rom;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
static int disable_noidle;
static int disable_vmi_timer;
/* Cached VMI operations */
static struct {
	void (*cpuid)(void /* non-c */);
	void (*_set_ldt)(u32 selector);
	void (*set_tr)(u32 selector);
	void (*write_idt_entry)(struct desc_struct *, int, u32, u32);
	void (*write_gdt_entry)(struct desc_struct *, int, u32, u32);
	void (*write_ldt_entry)(struct desc_struct *, int, u32, u32);
	void (*set_kernel_stack)(u32 selector, u32 sp0);
	void (*allocate_page)(u32, u32, u32, u32, u32);
	void (*release_page)(u32, u32);
	void (*set_pte)(pte_t, pte_t *, unsigned);
	void (*update_pte)(pte_t *, unsigned);
	void (*set_linear_mapping)(int, void *, u32, u32);
	void (*_flush_tlb)(int);
	void (*set_initial_ap_state)(int, int);
	void (*halt)(void);
	void (*set_lazy_mode)(int mode);
} vmi_ops;

/* Cached VMI timer operations */
struct vmi_timer_ops vmi_timer_ops;
/*
 * VMI patching routines.
 */
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9
#define MNEM_RET  0xc3

#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5
static inline void patch_offset(void *insnbuf,
				unsigned long ip, unsigned long dest)
{
	*(unsigned long *)(insnbuf+1) = dest-ip-5;
}
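
/*
 * A patched call site is the 5-byte x86 rel32 form: a one-byte opcode
 * (MNEM_CALL 0xe8 or MNEM_JMP 0xe9) followed by a 32-bit displacement
 * relative to the end of the instruction, hence the "dest-ip-5" stored
 * at insnbuf+1 above.
 */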
static unsigned patch_internal(int call, unsigned len, void *insnbuf,
			       unsigned long ip)
{
	u64 reloc;
	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;

	reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
	switch (rel->type) {
	case VMI_RELOCATION_CALL_REL:
		BUG_ON(len < 5);
		*(char *)insnbuf = MNEM_CALL;
		patch_offset(insnbuf, ip, (unsigned long)rel->eip);
		return 5;
	case VMI_RELOCATION_JUMP_REL:
		BUG_ON(len < 5);
		*(char *)insnbuf = MNEM_JMP;
		patch_offset(insnbuf, ip, (unsigned long)rel->eip);
		return 5;
	case VMI_RELOCATION_NOP:
		/* obliterate the whole thing */
		return 0;
	case VMI_RELOCATION_NONE:
		/* leave native code in place */
		break;
	default:
		BUG();
	}
	return len;
}
/*
 * Apply patch if appropriate, return length of new instruction
 * sequence.  The callee does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
			  unsigned long ip, unsigned len)
{
	switch (type) {
	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
		return patch_internal(VMI_CALL_DisableInterrupts, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
		return patch_internal(VMI_CALL_EnableInterrupts, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
		return patch_internal(VMI_CALL_SetInterruptMask, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_irq_ops.save_fl):
		return patch_internal(VMI_CALL_GetInterruptMask, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_cpu_ops.iret):
		return patch_internal(VMI_CALL_IRET, len, insns, ip);
	case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
		return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
	default:
		break;
	}
	return len;
}
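
/*
 * vmi_patch() is installed as pv_init_ops.patch in activate_vmi().
 * apply_paravirt() later walks the .parainstructions section and invokes
 * it for every annotated call site, turning the indirect paravirt calls
 * above into direct calls or jumps into the VMI ROM.
 */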
/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	int override = 0;

	if (*ax == 1)
		override = 1;
	asm volatile ("call *%6"
		      : "=a" (*ax), "=b" (*bx), "=c" (*cx), "=d" (*dx)
		      : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
	if (override) {
		if (disable_pse)
			*dx &= ~X86_FEATURE_PSE;
		if (disable_pge)
			*dx &= ~X86_FEATURE_PGE;
		if (disable_sep)
			*dx &= ~X86_FEATURE_SEP;
		if (disable_tsc)
			*dx &= ~X86_FEATURE_TSC;
		if (disable_mtrr)
			*dx &= ~X86_FEATURE_MTRR;
	}
}
static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
		write_gdt_entry(gdt, nr, new, 0);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);

	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}
static void vmi_set_ldt(const void *addr, unsigned entries)
{
	unsigned cpu = smp_processor_id();
	struct desc_struct desc;

	pack_descriptor(&desc, (unsigned long)addr,
			entries * sizeof(struct desc_struct) - 1,
			DESC_LDT, 0);
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}
static void vmi_set_tr(void)
{
	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}

static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	u32 *idt_entry = (u32 *)g;
	vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[1]);
}

static void vmi_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	u32 *gdt_entry = (u32 *)desc;
	vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[1]);
}

static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
				const void *desc)
{
	u32 *ldt_entry = (u32 *)desc;
	vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
}
static void vmi_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
}
static void vmi_flush_tlb_user(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}

/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}
#ifdef CONFIG_HIGHPTE
static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
{
	void *va = kmap_atomic(page, type);

	/*
	 * Internally, the VMI ROM must map virtual addresses to physical
	 * addresses for processing MMU updates.  By the time MMU updates
	 * are issued, this information is typically already lost.
	 * Fortunately, the VMI provides a cache of mapping slots for active
	 * page tables.
	 *
	 * We use slot zero for the linear mapping of physical memory, and
	 * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
	 *
	 *  args:                 SLOT                 VA    COUNT PFN
	 */
	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));

	return va;
}
#endif
static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
{
	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}

static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
{
	/*
	 * This call comes in very early, before mem_map is setup.
	 * It is called only for swapper_pg_dir, which already has
	 * data on it.
	 */
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}

static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
{
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}

static void vmi_release_pte(unsigned long pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L1);
}

static void vmi_release_pmd(unsigned long pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L2);
}
/*
 * We use the pgd_free hook for releasing the pgd page:
 */
static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long pfn = __pa(pgd) >> PAGE_SHIFT;

	vmi_ops.release_page(pfn, VMI_PAGE_L2);
}
/*
 * Helper macros for MMU update flags.  We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush).  We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs.  We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||    \
				       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)                           \
	((level) | (is_current_as(mm, user) ?                           \
		(VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)                     \
	((level) | (is_current_as(mm, user) ?                           \
		(VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
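
/*
 * Purely illustrative expansion: for a PTE update in the current address
 * space,
 *
 *	vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)
 *		== VMI_PAGE_PT | VMI_PAGE_CURRENT_AS | (addr & VMI_PAGE_VA_MASK)
 *
 * while for a foreign mm only the level is passed; vmi_flags_addr_defer()
 * likewise drops VMI_PAGE_DEFER in that case, per the rule described above.
 */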
static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
	/* XXX because of set_pmd_pte, this can be called on PT or PD layers */
	vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}
static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
	const pte_t pte = { .pte = pmdval.pmd };
#else
	const pte_t pte = { pmdval.pud.pgd.pgd };
#endif
	vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}
#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	/*
	 * XXX This is called from set_pmd_pte, but at both PT
	 * and PD layers so the VMI_PAGE_PT flag is wrong.  But
	 * it is only called for large page mapping changes,
	 * the Xen backend doesn't support large pages, and the
	 * ESX backend doesn't depend on the flag.
	 */
	set_64bit((unsigned long long *)ptep, pte_val(pteval));
	vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}
static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
}

static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
	const pte_t pte = { .pte = pudval.pgd.pgd };
	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}

static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	const pte_t pte = { .pte = 0 };
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_pmd_clear(pmd_t *pmd)
{
	const pte_t pte = { .pte = 0 };
	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif	/* CONFIG_X86_PAE */
#ifdef CONFIG_SMP
static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
		     unsigned long start_esp)
{
	struct vmi_ap_state ap;

	/* Default everything to zero.  This is fine for most GPRs. */
	memset(&ap, 0, sizeof(struct vmi_ap_state));

	ap.gdtr_limit = GDT_SIZE - 1;
	ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

	ap.idtr_limit = IDT_ENTRIES * 8 - 1;
	ap.idtr_base = (unsigned long) idt_table;

	ap.ldtr = 0;

	ap.cs = __KERNEL_CS;
	ap.eip = (unsigned long) start_eip;
	ap.ss = __KERNEL_DS;
	ap.esp = (unsigned long) start_esp;

	ap.ds = __USER_DS;
	ap.es = __USER_DS;
	ap.fs = __KERNEL_PERCPU;
	ap.gs = 0;

	ap.eflags = 0;

#ifdef CONFIG_X86_PAE
	/* efer should match BSP efer. */
	if (cpu_has_nx) {
		unsigned l, h;
		rdmsr(MSR_EFER, l, h);
		ap.efer = (unsigned long long) h << 32 | l;
	}
#endif

	ap.cr3 = __pa(swapper_pg_dir);
	/* Protected mode, paging, AM, WP, NE, MP. */
	ap.cr0 = 0x80050023;
	ap.cr4 = mmu_cr4_features;
	vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
#endif
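
/*
 * The vmi_ap_state built above is the AP's complete initial register file:
 * descriptor tables, segment selectors, stack and entry point, and control
 * registers.  SetInitialAPState hands it to the hypervisor for the CPU
 * identified by phys_apicid (the precise wakeup semantics are left to the
 * VMI ROM backend).
 */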
static void vmi_enter_lazy_cpu(void)
{
	paravirt_enter_lazy_cpu();
	vmi_ops.set_lazy_mode(2);
}

static void vmi_leave_lazy_cpu(void)
{
	vmi_ops.set_lazy_mode(0);
	paravirt_leave_lazy_cpu();
}

static void vmi_enter_lazy_mmu(void)
{
	paravirt_enter_lazy_mmu();
	vmi_ops.set_lazy_mode(1);
}

static void vmi_leave_lazy_mmu(void)
{
	vmi_ops.set_lazy_mode(0);
	paravirt_leave_lazy_mmu();
}
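
/*
 * The constants passed to set_lazy_mode mirror the wrappers above:
 * 0 leaves lazy mode, 1 enters lazy MMU-update batching and 2 enters lazy
 * CPU-state batching.  The paravirt_{enter,leave}_lazy_* calls keep the
 * generic paravirt bookkeeping in step with the hypervisor's mode.
 */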
static inline int __init check_vmi_rom(struct vrom_header *rom)
{
	struct pci_header *pci;
	struct pnp_header *pnp;
	const char *manufacturer = "UNKNOWN";
	const char *product = "UNKNOWN";
	const char *license = "unspecified";

	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	if (rom->api_version_maj != VMI_API_REV_MAJOR ||
	    rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
		printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
		       rom->api_version_maj,
		       rom->api_version_min);
		return 0;
	}

	/*
	 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
	 * the PCI header and device type to make sure this is really a
	 * VMI device.
	 */
	if (!rom->pci_header_offs) {
		printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
		return 0;
	}

	pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
	if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
	    pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
		/* Allow it to run... anyways, but warn */
		printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
	}

	if (rom->pnp_header_offs) {
		pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
		if (pnp->manufacturer_offset)
			manufacturer = (const char *)rom+pnp->manufacturer_offset;
		if (pnp->product_offset)
			product = (const char *)rom+pnp->product_offset;
	}

	if (rom->license_offs)
		license = (char *)rom+rom->license_offs;

	printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
	       manufacturer, product,
	       rom->api_version_maj, rom->api_version_min,
	       pci->rom_version_maj, pci->rom_version_min);

	/*
	 * Don't allow BSD/MIT here for now because we don't want to end up
	 * with any binary only shim layers.
	 */
	if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
		printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
		       license);
		return 0;
	}

	return 1;
}
/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
	unsigned long base;

	/* VMI ROM is in option ROM area, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		struct vrom_header *romstart;

		romstart = (struct vrom_header *)isa_bus_to_virt(base);
		if (check_vmi_rom(romstart)) {
			vmi_rom = romstart;
			return 1;
		}
	}
	return 0;
}
/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
	/* We must establish the lowmem mapping for MMU ops to work */
	if (vmi_ops.set_linear_mapping)
		vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
}
/*
 * Return a pointer to a VMI function or NULL if unimplemented
 */
static void *vmi_get_function(int vmicall)
{
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
	if (rel->type == VMI_RELOCATION_CALL_REL)
		return (void *)rel->eip;
	else
		return NULL;
}
/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default, unless nop
 * is returned by the ROM.
 */
#define para_fill(opname, vmicall)				\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	if (rel->type == VMI_RELOCATION_CALL_REL)		\
		opname = (void *)rel->eip;			\
	else if (rel->type == VMI_RELOCATION_NOP)		\
		opname = (void *)vmi_nop;			\
	else if (rel->type != VMI_RELOCATION_NONE)		\
		printk(KERN_WARNING "VMI: Unknown relocation "	\
				    "type %d for " #vmicall"\n",\
					rel->type);		\
} while (0)
/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For cached operations which do not match the VMI ROM ABI and must
 * go through a translation stub.  Ignore NOPs, since it is not clear
 * a NOP VMI function corresponds to a NOP paravirt-op when the
 * functions are not in 1-1 correspondence.
 */
#define para_wrap(opname, wrapper, cache, vmicall)		\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);		\
	if (rel->type == VMI_RELOCATION_CALL_REL) {		\
		opname = wrapper;				\
		vmi_ops.cache = (void *)rel->eip;		\
	}							\
} while (0)
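
/*
 * For instance, para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR)
 * installs the vmi_set_tr() wrapper as the paravirt op and caches the ROM's
 * SetTR entry point in vmi_ops.set_tr for the wrapper to call.
 * (Illustrative restatement of the macro above.)
 */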
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
	short kernel_cs;
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
		printk(KERN_ERR "VMI ROM failed to initialize!");
		return 0;
	}
	savesegment(cs, kernel_cs);

	pv_info.paravirt_enabled = 1;
	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
	pv_info.name = "vmi";

	pv_init_ops.patch = vmi_patch;
	/*
	 * Many of these operations are ABI compatible with VMI.
	 * This means we can fill in the paravirt-ops with direct
	 * pointers into the VMI ROM.  If the calling convention for
	 * these operations changes, this code needs to be updated.
	 *
	 * Exceptions:
	 *  - the CPUID paravirt-op uses pointers, not the native ISA
	 *  - halt has no VMI equivalent; all VMI halts are "safe"
	 *  - no MSR support yet - just trap and emulate.  VMI uses the
	 *    same ABI as the native ISA, but Linux wants exceptions
	 *    from bogus MSR read / write handled
	 *  - rdpmc is not yet used in Linux
	 */

	/* CPUID is special, so very special it gets wrapped like a present */
	para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);

	para_fill(pv_cpu_ops.clts, CLTS);
	para_fill(pv_cpu_ops.get_debugreg, GetDR);
	para_fill(pv_cpu_ops.set_debugreg, SetDR);
	para_fill(pv_cpu_ops.read_cr0, GetCR0);
	para_fill(pv_mmu_ops.read_cr2, GetCR2);
	para_fill(pv_mmu_ops.read_cr3, GetCR3);
	para_fill(pv_cpu_ops.read_cr4, GetCR4);
	para_fill(pv_cpu_ops.write_cr0, SetCR0);
	para_fill(pv_mmu_ops.write_cr2, SetCR2);
	para_fill(pv_mmu_ops.write_cr3, SetCR3);
	para_fill(pv_cpu_ops.write_cr4, SetCR4);

	para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
	para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
	para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
	para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);

	para_fill(pv_cpu_ops.wbinvd, WBINVD);
	para_fill(pv_cpu_ops.read_tsc, RDTSC);
	/* The following we emulate with trap and emulate for now */
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	/* paravirt_ops.rdpmc = vmi_rdpmc */

	/* TR interface doesn't pass TR value, wrap */
	para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);

	/* LDT is special, too */
	para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);

	para_fill(pv_cpu_ops.load_gdt, SetGDT);
	para_fill(pv_cpu_ops.load_idt, SetIDT);
	para_fill(pv_cpu_ops.store_gdt, GetGDT);
	para_fill(pv_cpu_ops.store_idt, GetIDT);
	para_fill(pv_cpu_ops.store_tr, GetTR);
	pv_cpu_ops.load_tls = vmi_load_tls;
	para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry,
		  write_ldt_entry, WriteLDTEntry);
	para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
		  write_gdt_entry, WriteGDTEntry);
	para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry,
		  write_idt_entry, WriteIDTEntry);
	para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
	para_fill(pv_cpu_ops.io_delay, IODelay);
	para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy_cpu,
		  set_lazy_mode, SetLazyMode);

	para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu,
		  set_lazy_mode, SetLazyMode);

	/* user and kernel flush are just handled with different flags to FlushTLB */
	para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
	para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
	para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);
	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif
	if (vmi_ops.set_pte) {
		pv_mmu_ops.set_pte = vmi_set_pte;
		pv_mmu_ops.set_pte_at = vmi_set_pte_at;
		pv_mmu_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
		pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
		pv_mmu_ops.set_pte_present = vmi_set_pte_present;
		pv_mmu_ops.set_pud = vmi_set_pud;
		pv_mmu_ops.pte_clear = vmi_pte_clear;
		pv_mmu_ops.pmd_clear = vmi_pmd_clear;
#endif
	}

	if (vmi_ops.update_pte) {
		pv_mmu_ops.pte_update = vmi_update_pte;
		pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
	}
	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	if (vmi_ops.allocate_page) {
		pv_mmu_ops.alloc_pte = vmi_allocate_pte;
		pv_mmu_ops.alloc_pmd = vmi_allocate_pmd;
		pv_mmu_ops.alloc_pmd_clone = vmi_allocate_pmd_clone;
	}

	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
	if (vmi_ops.release_page) {
		pv_mmu_ops.release_pte = vmi_release_pte;
		pv_mmu_ops.release_pmd = vmi_release_pmd;
		pv_mmu_ops.pgd_free = vmi_pgd_free;
	}

	/* Set linear is needed in all cases */
	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
#ifdef CONFIG_HIGHPTE
	if (vmi_ops.set_linear_mapping)
		pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
#endif
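
	/*
	 * In each of the blocks above, vmi_get_function() returns NULL when
	 * the ROM does not implement a call (and, under PAE, the 64-bit
	 * "Long" PTE variants are used), so the corresponding pv_mmu_ops are
	 * only rerouted when a backend implementation actually exists.
	 */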
	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
	pv_cpu_ops.iret = (void *)0xbadbab0;
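
	/*
	 * 0xfeedbab0 and 0xbadbab0 are deliberate poison values, never meant
	 * to be called: apply_paravirt() must rewrite every sysexit and iret
	 * site through vmi_patch() before either op could be reached.
	 */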
#ifdef CONFIG_SMP
	para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	para_fill(apic->read, APICRead);
	para_fill(apic->write, APICWrite);
#endif
	/*
	 * Check for VMI timer functionality by probing for a cycle frequency method
	 */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
	if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
		vmi_timer_ops.get_cycle_counter =
			vmi_get_function(VMI_CALL_GetCycleCounter);
		vmi_timer_ops.get_wallclock =
			vmi_get_function(VMI_CALL_GetWallclockTime);
		vmi_timer_ops.wallclock_updated =
			vmi_get_function(VMI_CALL_WallclockUpdated);
		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
		vmi_timer_ops.cancel_alarm =
			vmi_get_function(VMI_CALL_CancelAlarm);
		pv_time_ops.time_init = vmi_time_init;
		pv_time_ops.get_wallclock = vmi_get_wallclock;
		pv_time_ops.set_wallclock = vmi_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
		pv_apic_ops.setup_boot_clock = vmi_time_bsp_init;
		pv_apic_ops.setup_secondary_clock = vmi_time_ap_init;
#endif
		pv_time_ops.sched_clock = vmi_sched_clock;
		pv_time_ops.get_tsc_khz = vmi_tsc_khz;

		/* We have true wallclock functions; disable CMOS clock sync */
		no_sync_cmos_clock = 1;
	} else {
		disable_noidle = 1;
		disable_vmi_timer = 1;
	}

	para_fill(pv_irq_ops.safe_halt, Halt);
	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
	apply_paravirt(__parainstructions, __parainstructions_end);

	vmi_bringup();

	return 1;
}
void __init vmi_init(void)
{
	if (!vmi_rom)
		probe_vmi_rom();
	else
		check_vmi_rom(vmi_rom);

	/* In case probing for or validating the ROM failed, bail */
	if (!vmi_rom)
		return;

	reserve_top_address(-vmi_rom->virtual_top);

#ifdef CONFIG_X86_IO_APIC
	/* This is virtual hardware; timer routing is wired correctly */
	no_timer_check = 1;
#endif
}
void __init vmi_activate(void)
{
	unsigned long flags;

	if (!vmi_rom)
		return;

	local_irq_save(flags);
	activate_vmi();
	local_irq_restore(flags & X86_EFLAGS_IF);
}
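
/*
 * Boot-time tuning below: the handler is wired to the "vmi" early parameter,
 * so, for example, booting with "vmi=disable_tsc" clears the TSC feature via
 * clear_cpu_cap() and makes vmi_cpuid() mask it from the reported feature
 * bits as well.
 */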
static int __init parse_vmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "disable_pge")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
		disable_pge = 1;
	} else if (!strcmp(arg, "disable_pse")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
		disable_pse = 1;
	} else if (!strcmp(arg, "disable_sep")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
		disable_sep = 1;
	} else if (!strcmp(arg, "disable_tsc")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
		disable_tsc = 1;
	} else if (!strcmp(arg, "disable_mtrr")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
		disable_mtrr = 1;
	} else if (!strcmp(arg, "disable_timer")) {
		disable_vmi_timer = 1;
		disable_noidle = 1;
	} else if (!strcmp(arg, "disable_noidle"))
		disable_noidle = 1;
	return 0;
}

early_param("vmi", parse_vmi);