/*
 * kvm_ia64.c: Basic KVM support on Itanium series processors
 *
 * Copyright (C) 2007, Intel Corporation.
 *   Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;
unsigned long kvm_vmm_gp;

static long vp_env_info;

static struct kvm_vmm_info *kvm_vmm_info;

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { NULL }
};
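/*
 * Flush the local instruction cache over [start, start + len),
 * one 32-byte line at a time.
 */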
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
        int l;

        for (l = 0; l < (len + 32); l += 32)
                ia64_fc(start + l);

        ia64_sync_i();
        ia64_srlz_i();
}
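/*
 * Purge the entire local TLB: walk the ptc.e purge grid (base, counts
 * and strides come from the per-cpu ptce info that PAL reports), with
 * interrupts disabled, then serialize.
 */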
static void kvm_flush_tlb_all(void)
{
        unsigned long i, j, count0, count1, stride0, stride1, addr;
        unsigned long flags;

        addr    = local_cpu_data->ptce_base;
        count0  = local_cpu_data->ptce_count[0];
        count1  = local_cpu_data->ptce_count[1];
        stride0 = local_cpu_data->ptce_stride[0];
        stride1 = local_cpu_data->ptce_stride[1];

        local_irq_save(flags);
        for (i = 0; i < count0; ++i) {
                for (j = 0; j < count1; ++j) {
                        ia64_ptce(addr);
                        addr += stride1;
                }
                addr += stride0;
        }
        local_irq_restore(flags);
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}
long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
{
        struct ia64_pal_retval iprv;

        PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
                        (u64)opt_handler);

        return iprv.status;
}

static DEFINE_SPINLOCK(vp_lock);
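/*
 * Per-cpu hardware enable: pin the VMM area with a translation
 * register, then set up the PAL VP environment.  The first cpu
 * through here establishes kvm_vsa_base for all the others
 * (serialized by vp_lock).
 */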
void kvm_arch_hardware_enable(void *garbage)
{
        long status;
        long tmp_base;
        unsigned long pte;
        unsigned long saved_psr;
        int slot;

        pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
                                PAGE_KERNEL));
        local_irq_save(saved_psr);
        slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
        local_irq_restore(saved_psr);

        spin_lock(&vp_lock);
        status = ia64_pal_vp_init_env(kvm_vsa_base ?
                                VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
                        __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
        if (status != 0) {
                printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
                return;
        }

        if (!kvm_vsa_base) {
                kvm_vsa_base = tmp_base;
                printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
        }
        spin_unlock(&vp_lock);
        ia64_ptr_entry(0x3, slot);
}

void kvm_arch_hardware_disable(void *garbage)
{
        long status;
        int slot;
        unsigned long pte;
        unsigned long saved_psr;
        unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

        pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
                                PAGE_KERNEL));

        local_irq_save(saved_psr);
        slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
        local_irq_restore(saved_psr);

        status = ia64_pal_vp_exit_env(host_iva);
        if (status)
                printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
                                status);
        ia64_ptr_entry(0x3, slot);
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_IRQCHIP:
        case KVM_CAP_USER_MEMORY:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
                                        gpa_t addr)
{
        struct kvm_io_device *dev;

        dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);

        return dev;
}

static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
        kvm_run->hw.hardware_exit_reason = 1;
        return 0;
}
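/*
 * Complete an MMIO access trapped by the VMM: accesses that hit the
 * ioapic page are dispatched to the matching in-kernel io device and
 * marked STATE_IORESP_READY; everything else is forwarded to
 * userspace via a KVM_EXIT_MMIO exit.
 */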
static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        struct kvm_mmio_req *p;
        struct kvm_io_device *mmio_dev;

        p = kvm_get_vcpu_ioreq(vcpu);

        if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
                goto mmio;
        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
        vcpu->mmio_size = kvm_run->mmio.len = p->size;
        vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

        if (vcpu->mmio_is_write)
                memcpy(vcpu->mmio_data, &p->data, p->size);
        memcpy(kvm_run->mmio.data, &p->data, p->size);
        kvm_run->exit_reason = KVM_EXIT_MMIO;
        return 0;
mmio:
        mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr);
        if (mmio_dev) {
                if (!p->dir)
                        kvm_iodevice_write(mmio_dev, p->addr, p->size,
                                        &p->data);
                else
                        kvm_iodevice_read(mmio_dev, p->addr, p->size,
                                        &p->data);
        } else
                printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
        p->state = STATE_IORESP_READY;

        return 0;
}

static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        struct exit_ctl_data *p;

        p = kvm_get_exit_data(vcpu);

        if (p->exit_reason == EXIT_REASON_PAL_CALL)
                return kvm_pal_emul(vcpu, kvm_run);
        else {
                kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
                kvm_run->hw.hardware_exit_reason = 2;
                return 0;
        }
}

static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        struct exit_ctl_data *p;

        p = kvm_get_exit_data(vcpu);

        if (p->exit_reason == EXIT_REASON_SAL_CALL) {
                kvm_sal_emul(vcpu);
                return 1;
        } else {
                kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
                kvm_run->hw.hardware_exit_reason = 3;
                return 0;
        }
}
/*
 *  offset: address offset to IPI space.
 *  value:  deliver value.
 */
static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
                                uint64_t vector)
{
        switch (dm) {
        case SAPIC_FIXED:
                kvm_apic_set_irq(vcpu, vector, 0);
                break;
        case SAPIC_NMI:
                kvm_apic_set_irq(vcpu, 2, 0);
                break;
        case SAPIC_EXTINT:
                kvm_apic_set_irq(vcpu, 0, 0);
                break;
        default:
                printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
                break;
        }
}
static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
                                unsigned long eid)
{
        union ia64_lid lid;
        int i;

        for (i = 0; i < KVM_MAX_VCPUS; i++) {
                if (kvm->vcpus[i]) {
                        lid.val = VCPU_LID(kvm->vcpus[i]);
                        if (lid.id == id && lid.eid == eid)
                                return kvm->vcpus[i];
                }
        }

        return NULL;
}
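/*
 * Handle a guest IPI: look up the target vcpu by (id, eid).  The
 * first IPI to a not-yet-launched vcpu acts as the SAL boot
 * rendezvous (set its boot ip/gp and make it runnable); otherwise
 * the interrupt is delivered according to the delivery mode.
 */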
static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
        struct kvm_vcpu *target_vcpu;
        struct kvm_pt_regs *regs;
        union ia64_ipi_a addr = p->u.ipi_data.addr;
        union ia64_ipi_d data = p->u.ipi_data.data;

        target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
        if (!target_vcpu)
                return handle_vm_error(vcpu, kvm_run);

        if (!target_vcpu->arch.launched) {
                regs = vcpu_regs(target_vcpu);

                regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
                regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

                target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                if (waitqueue_active(&target_vcpu->wq))
                        wake_up_interruptible(&target_vcpu->wq);
        } else {
                vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
                if (target_vcpu != vcpu)
                        kvm_vcpu_kick(target_vcpu);
        }

        return 1;
}

struct call_data {
        struct kvm_ptc_g ptc_g_data;
        struct kvm_vcpu *vcpu;
};
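/*
 * Runs on a remote cpu via smp_call_function_single(): queue the
 * ptc.g request on the target vcpu, or degrade to a full TLB flush
 * once the per-vcpu queue (MAX_PTC_G_NUM entries) overflows.
 */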
static void vcpu_global_purge(void *info)
{
        struct call_data *p = (struct call_data *)info;
        struct kvm_vcpu *vcpu = p->vcpu;

        if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                return;

        set_bit(KVM_REQ_PTC_G, &vcpu->requests);
        if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
                vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
                                                        p->ptc_g_data;
        } else {
                clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
                vcpu->arch.ptc_g_count = 0;
                set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
        }
}

static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
        struct kvm *kvm = vcpu->kvm;
        struct call_data call_data;
        int i;

        call_data.ptc_g_data = p->u.ptc_g_data;

        for (i = 0; i < KVM_MAX_VCPUS; i++) {
                if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
                                KVM_MP_STATE_UNINITIALIZED ||
                                vcpu == kvm->vcpus[i])
                        continue;

                if (waitqueue_active(&kvm->vcpus[i]->wq))
                        wake_up_interruptible(&kvm->vcpus[i]->wq);

                if (kvm->vcpus[i]->cpu != -1) {
                        call_data.vcpu = kvm->vcpus[i];
                        smp_call_function_single(kvm->vcpus[i]->cpu,
                                        vcpu_global_purge, &call_data, 0, 1);
                } else
                        printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
        }

        return 1;
}

static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        return 1;
}
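/*
 * Emulate hlt: if the guest timer (itm) is already due, just flag a
 * timer check; otherwise arm an hrtimer for the remaining interval
 * (converted from itc cycles to nanoseconds) and block the vcpu
 * until it becomes runnable again.
 */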
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
        ktime_t kt;
        long itc_diff;
        unsigned long vcpu_now_itc;
        unsigned long expires;
        struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
        unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

        vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;

        if (time_after(vcpu_now_itc, vpd->itm)) {
                vcpu->arch.timer_check = 1;
                return 1;
        }
        itc_diff = vpd->itm - vcpu_now_itc;
        if (itc_diff < 0)
                itc_diff = -itc_diff;

        expires = div64_u64(itc_diff, cyc_per_usec);
        kt = ktime_set(0, 1000 * expires);
        vcpu->arch.ht_active = 1;
        hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

        if (irqchip_in_kernel(vcpu->kvm)) {
                vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
                kvm_vcpu_block(vcpu);
                hrtimer_cancel(p_ht);
                vcpu->arch.ht_active = 0;

                if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
                        return -EINTR;
                return 1;
        } else {
                printk(KERN_ERR"kvm: Unsupported userspace halt!");
                return 0;
        }
}
static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
                struct kvm_run *kvm_run)
{
        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
                struct kvm_run *kvm_run)
{
        return 1;
}

static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
                struct kvm_run *kvm_run) = {
        [EXIT_REASON_VM_PANIC]              = handle_vm_error,
        [EXIT_REASON_MMIO_INSTRUCTION]      = handle_mmio,
        [EXIT_REASON_PAL_CALL]              = handle_pal_call,
        [EXIT_REASON_SAL_CALL]              = handle_sal_call,
        [EXIT_REASON_SWITCH_RR6]            = handle_switch_rr6,
        [EXIT_REASON_VM_DESTROY]            = handle_vm_shutdown,
        [EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
        [EXIT_REASON_IPI]                   = handle_ipi,
        [EXIT_REASON_PTC_G]                 = handle_global_purge,
};

static const int kvm_vti_max_exit_handlers =
                sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
        struct exit_ctl_data *p_exit_data;

        p_exit_data = kvm_get_exit_data(vcpu);
        return p_exit_data->exit_reason;
}

/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        u32 exit_reason = kvm_get_exit_reason(vcpu);
        vcpu->arch.last_exit = exit_reason;

        if (exit_reason < kvm_vti_max_exit_handlers
                        && kvm_vti_exit_handlers[exit_reason])
                return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
        else {
                kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
                kvm_run->hw.hardware_exit_reason = exit_reason;
        }
        return 0;
}
static inline void vti_set_rr6(unsigned long rr6)
{
        ia64_set_rr(RR6, rr6);
        ia64_srlz_i();
}

static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
        unsigned long pte;
        struct kvm *kvm = vcpu->kvm;
        int r;

        /* Insert a pair of tr entries to map the vmm. */
        pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
        r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
        if (r < 0)
                goto out;
        vcpu->arch.vmm_tr_slot = r;

        /* Insert a pair of tr entries to map the vm's data. */
        pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
        r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
                        pte, KVM_VM_DATA_SHIFT);
        if (r < 0)
                goto out;
        vcpu->arch.vm_tr_slot = r;
        r = 0;
out:
        return r;
}

static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{
        ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
        ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
}
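/*
 * Prepare the world switch into the VMM: remember which vcpu ran
 * last on this cpu, save the host rr6 and install the VMM region id,
 * then pin the VMM text and the per-VM data area with translation
 * registers.
 */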
static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
        int cpu = smp_processor_id();

        if (vcpu->arch.last_run_cpu != cpu ||
                        per_cpu(last_vcpu, cpu) != vcpu) {
                per_cpu(last_vcpu, cpu) = vcpu;
                vcpu->arch.last_run_cpu = cpu;
                kvm_flush_tlb_all();
        }

        vcpu->arch.host_rr6 = ia64_get_rr(RR6);
        vti_set_rr6(vcpu->arch.vmm_rr);
        return kvm_insert_vmm_mapping(vcpu);
}

static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
        kvm_purge_vmm_mapping(vcpu);
        vti_set_rr6(vcpu->arch.host_rr6);
}

static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        union context *host_ctx, *guest_ctx;
        int r;

        /* Get host and guest context with guest address space. */
        host_ctx = kvm_get_host_context(vcpu);
        guest_ctx = kvm_get_guest_context(vcpu);

        r = kvm_vcpu_pre_transition(vcpu);
        if (r < 0)
                goto out;
        kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
        kvm_vcpu_post_transition(vcpu);
        r = 0;
out:
        return r;
}
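/*
 * Main run loop: enter the guest through the VMM trampoline; on the
 * way out, either bounce a pending signal back to userspace as
 * KVM_EXIT_INTR or dispatch the hardware exit reason through
 * kvm_handle_exit().
 */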
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int r;

        kvm_prepare_guest_switch(vcpu);

        if (signal_pending(current)) {
                r = -EINTR;
                kvm_run->exit_reason = KVM_EXIT_INTR;
                goto out;
        }

        vcpu->guest_mode = 1;

        r = vti_vcpu_run(vcpu, kvm_run);
        if (r < 0) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                goto out;
        }

        vcpu->arch.launched = 1;
        vcpu->guest_mode = 0;

        /*
         * We must have an instruction between local_irq_enable() and
         * kvm_guest_exit(), so the timer interrupt isn't delayed by
         * the interrupt shadow.  The stat.exits increment will do nicely.
         * But we need to prevent reordering, hence this barrier():
         */
        barrier();

        r = kvm_handle_exit(kvm_run, vcpu);
static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
{
        struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);

        if (!vcpu->mmio_is_write)
                memcpy(&p->data, vcpu->mmio_data, 8);
        p->state = STATE_IORESP_READY;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int r;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                kvm_vcpu_block(vcpu);
                vcpu_put(vcpu);
                return -EAGAIN;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
                kvm_set_mmio_data(vcpu);
                vcpu->mmio_read_completed = 1;
                vcpu->mmio_needed = 0;
        }

        r = __vcpu_run(vcpu, kvm_run);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);
        return r;
}
/*
 * Allocate 16MB of memory for every vm to hold its specific data.
 * Its memory map is defined in kvm_host.h.
 */
static struct kvm *kvm_alloc_kvm(void)
{
        struct kvm *kvm;
        uint64_t vm_base;

        vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
        if (!vm_base)
                return ERR_PTR(-ENOMEM);
        printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);

        /* Zero all pages before use! */
        memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);

        kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
        kvm->arch.vm_base = vm_base;

        return kvm;
}

struct kvm_io_range {
        unsigned long start;
        unsigned long size;
        unsigned long type;
};

static const struct kvm_io_range io_ranges[] = {
        {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
        {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
        {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
        {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
        {PIB_START, PIB_SIZE, GPFN_PIB},
};
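/*
 * Pre-populate the page-map table entries for the well-known I/O
 * windows (VGA, low MMIO, legacy I/O, IOSAPIC, PIB) so that guest
 * accesses to them are classified by GPFN type rather than treated
 * as ordinary RAM.
 */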
static void kvm_build_io_pmt(struct kvm *kvm)
{
        unsigned long i, j;

        /* Mark I/O ranges */
        for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
                                                                i++) {
                for (j = io_ranges[i].start;
                                j < io_ranges[i].start + io_ranges[i].size;
                                j += PAGE_SIZE)
                        kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
                                        io_ranges[i].type, 0);
        }
}

/* Use unused region ids to virtualize the guest rid. */
#define GUEST_PHYSICAL_RR0      0x1739
#define GUEST_PHYSICAL_RR4      0x2739
#define VMM_INIT_RR             0x1660

static void kvm_init_vm(struct kvm *kvm)
{
        unsigned long vm_base;

        kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
        kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
        kvm->arch.vmm_init_rr = VMM_INIT_RR;

        vm_base = kvm->arch.vm_base;

        kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
        kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
        kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;

        /*
         * Fill P2M entries for MMIO/IO ranges.
         */
        kvm_build_io_pmt(kvm);
}
struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm = kvm_alloc_kvm();

        if (IS_ERR(kvm))
                return ERR_PTR(-ENOMEM);
        kvm_init_vm(kvm);

        return kvm;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
                struct kvm_irqchip *chip)
{
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_IOAPIC:
                memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
                                sizeof(struct kvm_ioapic_state));
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_IOAPIC:
                memcpy(ioapic_irqchip(kvm),
                                &chip->chip.ioapic,
                                sizeof(struct kvm_ioapic_state));
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

#define RESTORE_REGS(_x)        vcpu->arch._x = regs->_x
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
        int r;

        vcpu_load(vcpu);

        for (i = 0; i < 16; i++) {
                vpd->vgr[i] = regs->vpd.vgr[i];
                vpd->vbgr[i] = regs->vpd.vbgr[i];
        }
        for (i = 0; i < 128; i++)
                vpd->vcr[i] = regs->vpd.vcr[i];
        vpd->vhpi = regs->vpd.vhpi;
        vpd->vnat = regs->vpd.vnat;
        vpd->vbnat = regs->vpd.vbnat;
        vpd->vpsr = regs->vpd.vpsr;
        vpd->vpr = regs->vpd.vpr;

        r = copy_from_user(&vcpu->arch.guest, regs->saved_guest,
                        sizeof(union context));
        if (r)
                goto out;
        r = copy_from_user(vcpu + 1, regs->saved_stack +
                        sizeof(struct kvm_vcpu),
                        IA64_STK_OFFSET - sizeof(struct kvm_vcpu));
        if (r)
                goto out;
        vcpu->arch.exit_data =
                ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;

        RESTORE_REGS(mp_state);
        RESTORE_REGS(vmm_rr);
        memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
        memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
        RESTORE_REGS(itr_regions);
        RESTORE_REGS(dtr_regions);
        RESTORE_REGS(tc_regions);
        RESTORE_REGS(irq_check);
        RESTORE_REGS(itc_check);
        RESTORE_REGS(timer_check);
        RESTORE_REGS(timer_pending);
        RESTORE_REGS(last_itc);
        for (i = 0; i < 8; i++) {
                vcpu->arch.vrr[i] = regs->vrr[i];
                vcpu->arch.ibr[i] = regs->ibr[i];
                vcpu->arch.dbr[i] = regs->dbr[i];
        }
        for (i = 0; i < 4; i++)
                vcpu->arch.insvc[i] = regs->insvc[i];

        RESTORE_REGS(metaphysical_rr0);
        RESTORE_REGS(metaphysical_rr4);
        RESTORE_REGS(metaphysical_saved_rr0);
        RESTORE_REGS(metaphysical_saved_rr4);
        RESTORE_REGS(fp_psr);
        RESTORE_REGS(saved_gp);

        vcpu->arch.irq_new_pending = 1;
        vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
        set_bit(KVM_REQ_RESUME, &vcpu->requests);

        vcpu_put(vcpu);
        r = 0;
out:
        return r;
}
long kvm_arch_vm_ioctl(struct file *filp,
                unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r = -EINVAL;

        switch (ioctl) {
        case KVM_SET_MEMORY_REGION: {
                struct kvm_memory_region kvm_mem;
                struct kvm_userspace_memory_region kvm_userspace_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
                        goto out;
                kvm_userspace_mem.slot = kvm_mem.slot;
                kvm_userspace_mem.flags = kvm_mem.flags;
                kvm_userspace_mem.guest_phys_addr =
                                        kvm_mem.guest_phys_addr;
                kvm_userspace_mem.memory_size = kvm_mem.memory_size;
                r = kvm_vm_ioctl_set_memory_region(kvm,
                                        &kvm_userspace_mem, 0);
                if (r)
                        goto out;
                break;
        }
        case KVM_CREATE_IRQCHIP:
                r = kvm_ioapic_init(kvm);
                if (r)
                        goto out;
                break;
        case KVM_IRQ_LINE: {
                struct kvm_irq_level irq_event;

                r = -EFAULT;
                if (copy_from_user(&irq_event, argp, sizeof irq_event))
                        goto out;
                if (irqchip_in_kernel(kvm)) {
                        mutex_lock(&kvm->lock);
                        kvm_ioapic_set_irq(kvm->arch.vioapic,
                                        irq_event.irq,
                                        irq_event.level);
                        mutex_unlock(&kvm->lock);
                        r = 0;
                }
                break;
        }
        case KVM_GET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
                struct kvm_irqchip chip;

                r = -EFAULT;
                if (copy_from_user(&chip, argp, sizeof chip))
                        goto out;
                r = -ENXIO;
                if (!irqchip_in_kernel(kvm))
                        goto out;
                r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &chip, sizeof chip))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
                struct kvm_irqchip chip;

                r = -EFAULT;
                if (copy_from_user(&chip, argp, sizeof chip))
                        goto out;
                r = -ENXIO;
                if (!irqchip_in_kernel(kvm))
                        goto out;
                r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        default:
                ;
        }
out:
        return r;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                struct kvm_translation *tr)
{
        return -EINVAL;
}
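/*
 * Reserve the physically contiguous VMM area at module-init time:
 * the relocated VMM image lives at kvm_vmm_base, with the PAL VP
 * environment buffer right behind it at kvm_vm_buffer.
 */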
static int kvm_alloc_vmm_area(void)
{
        if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
                kvm_vmm_base = __get_free_pages(GFP_KERNEL,
                                get_order(KVM_VMM_SIZE));
                if (!kvm_vmm_base)
                        return -ENOMEM;

                memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
                kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;

                printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
                                kvm_vmm_base, kvm_vm_buffer);
        }

        return 0;
}

static void kvm_free_vmm_area(void)
{
        if (kvm_vmm_base) {
                /* Zero the area before freeing it, to avoid leaking stale data. */
                memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
                free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
                kvm_vmm_base = 0;
        }
}
/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.  Leave it blank for IA64.
 */
void decache_vcpus_on_cpu(int cpu)
{
}

static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
}

static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
        int i;
        union cpuid3_t cpuid3;
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

        if (IS_ERR(vpd))
                return PTR_ERR(vpd);

        for (i = 0; i < 5; i++)
                vpd->vcpuid[i] = ia64_get_cpuid(i);

        /* Limit the CPUID number to 5 */
        cpuid3.value = vpd->vcpuid[3];
        cpuid3.number = 4;      /* 5 - 1 */
        vpd->vcpuid[3] = cpuid3.value;

        /* Set vac and vdc fields. */
        vpd->vac.a_from_int_cr = 1;
        vpd->vac.a_to_int_cr = 1;
        vpd->vac.a_from_psr = 1;
        vpd->vac.a_from_cpuid = 1;
        vpd->vac.a_cover = 1;

        vpd->vdc.d_vmsw = 1;

        /* Set virtual buffer. */
        vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;

        return 0;
}
static int vti_create_vp(struct kvm_vcpu *vcpu)
{
        long ret;
        struct vpd *vpd = vcpu->arch.vpd;
        unsigned long vmm_ivt;

        vmm_ivt = kvm_vmm_info->vmm_ivt;

        printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);

        ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);

        if (ret) {
                printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
                return -EINVAL;
        }
        return 0;
}

static void init_ptce_info(struct kvm_vcpu *vcpu)
{
        ia64_ptce_info_t ptce = {0};

        ia64_get_ptce(&ptce);
        vcpu->arch.ptce_base = ptce.base;
        vcpu->arch.ptce_count[0] = ptce.count[0];
        vcpu->arch.ptce_count[1] = ptce.count[1];
        vcpu->arch.ptce_stride[0] = ptce.stride[0];
        vcpu->arch.ptce_stride[1] = ptce.stride[1];
}

static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
{
        struct hrtimer *p_ht = &vcpu->arch.hlt_timer;

        if (hrtimer_cancel(p_ht))
                hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
{
        struct kvm_vcpu *vcpu;
        wait_queue_head_t *q;

        vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
        if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
                goto out;

        q = &vcpu->wq;
        if (waitqueue_active(q)) {
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                wake_up_interruptible(q);
        }
out:
        vcpu->arch.timer_check = 1;
        return HRTIMER_NORESTART;
}

#define PALE_RESET_ENTRY        0x80000000ffffffb0UL
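/*
 * First-time vcpu setup: vcpu 0 starts runnable at the PAL reset
 * entry and establishes the itc offset shared by all vcpus, while
 * the others stay uninitialized until they receive a boot IPI.  The
 * initial VMM context (stack, psr, region registers, entry branch
 * register) is also filled in here.
 */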
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        unsigned long itc_offset;
        int i;
        struct kvm_vcpu *v;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);
        union context *p_ctx = &vcpu->arch.guest;
        struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

        /* Init vcpu context for the first run. */
        if (IS_ERR(vmm_vcpu))
                return PTR_ERR(vmm_vcpu);

        if (vcpu->vcpu_id == 0) {
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

                /* Set entry address for the first run. */
                regs->cr_iip = PALE_RESET_ENTRY;

                /* Initialize the itc offset for all vcpus. */
                itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
                for (i = 0; i < MAX_VCPU_NUM; i++) {
                        v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
                        v->arch.itc_offset = itc_offset;
                        v->arch.last_itc = 0;
                }
        } else
                vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

        vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
        if (!vcpu->arch.apic)
                return -ENOMEM;
        vcpu->arch.apic->vcpu = vcpu;

        p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
        p_ctx->gr[13] = (unsigned long)vmm_vcpu;
        p_ctx->psr = 0x1008522000UL;
        p_ctx->ar[40] = FPSR_DEFAULT;   /* fpsr */
        p_ctx->caller_unat = 0;
        p_ctx->ar[36] = 0x0;    /* unat */
        p_ctx->ar[19] = 0x0;    /* rnat */
        p_ctx->ar[18] = (unsigned long)vmm_vcpu +
                        ((sizeof(struct kvm_vcpu)+15) & ~15);
        p_ctx->ar[64] = 0x0;    /* pfs */
        p_ctx->cr[0] = 0x7e04UL;
        p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
        p_ctx->cr[8] = 0x3c;

        /* Initialize region registers. */
        p_ctx->rr[0] = 0x30;
        p_ctx->rr[1] = 0x30;
        p_ctx->rr[2] = 0x30;
        p_ctx->rr[3] = 0x30;
        p_ctx->rr[4] = 0x30;
        p_ctx->rr[5] = 0x30;
        p_ctx->rr[7] = 0x30;

        /* Initialize branch register 0. */
        p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

        vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
        vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
        vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

        hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        vcpu->arch.hlt_timer.function = hlt_timer_fn;

        vcpu->arch.last_run_cpu = -1;
        vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
        vcpu->arch.vsa_base = kvm_vsa_base;
        vcpu->arch.__gp = kvm_vmm_gp;
        vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
        vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
        vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
        init_ptce_info(vcpu);

        return 0;
}
static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
{
        unsigned long psr;
        int r;

        local_irq_save(psr);
        r = kvm_insert_vmm_mapping(vcpu);
        if (r)
                goto fail;

        r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
        if (r)
                goto fail;

        r = vti_init_vpd(vcpu);
        if (r) {
                printk(KERN_DEBUG"kvm: vpd init error!!\n");
                goto uninit;
        }

        r = vti_create_vp(vcpu);
        if (r)
                goto uninit;

        kvm_purge_vmm_mapping(vcpu);
        local_irq_restore(psr);

        return 0;

uninit:
        kvm_vcpu_uninit(vcpu);
fail:
        return r;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                unsigned int id)
{
        struct kvm_vcpu *vcpu;
        unsigned long vm_base = kvm->arch.vm_base;
        int r;
        int cpu;

        r = -ENOMEM;
        if (!vm_base) {
                printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
                goto fail;
        }
        vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);

        cpu = get_cpu();
        vti_vcpu_load(vcpu, cpu);
        r = vti_vcpu_setup(vcpu, id);
        put_cpu();

        if (r) {
                printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
                goto fail;
        }

        return vcpu;
fail:
        return ERR_PTR(r);
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                struct kvm_debug_guest *dbg)
{
        return -EINVAL;
}

static void free_kvm(struct kvm *kvm)
{
        unsigned long vm_base = kvm->arch.vm_base;

        if (vm_base) {
                memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
                free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
        }
}

static void kvm_release_vm_pages(struct kvm *kvm)
{
        struct kvm_memory_slot *memslot;
        int i, j;
        unsigned long base_gfn;

        for (i = 0; i < kvm->nmemslots; i++) {
                memslot = &kvm->memslots[i];
                base_gfn = memslot->base_gfn;

                for (j = 0; j < memslot->npages; j++) {
                        if (memslot->rmap[j])
                                put_page((struct page *)memslot->rmap[j]);
                }
        }
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kfree(kvm->arch.vioapic);
        kvm_release_vm_pages(kvm);
        kvm_free_physmem(kvm);
        free_kvm(kvm);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        if (cpu != vcpu->cpu) {
                vcpu->cpu = cpu;
                if (vcpu->arch.ht_active)
                        kvm_migrate_hlt_timer(vcpu);
        }
}

#define SAVE_REGS(_x)   regs->_x = vcpu->arch._x

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;
        int r;
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

        vcpu_load(vcpu);

        for (i = 0; i < 16; i++) {
                regs->vpd.vgr[i] = vpd->vgr[i];
                regs->vpd.vbgr[i] = vpd->vbgr[i];
        }
        for (i = 0; i < 128; i++)
                regs->vpd.vcr[i] = vpd->vcr[i];
        regs->vpd.vhpi = vpd->vhpi;
        regs->vpd.vnat = vpd->vnat;
        regs->vpd.vbnat = vpd->vbnat;
        regs->vpd.vpsr = vpd->vpsr;
        regs->vpd.vpr = vpd->vpr;

        r = copy_to_user(regs->saved_guest, &vcpu->arch.guest,
                        sizeof(union context));
        if (r)
                goto out;
        r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET);
        if (r)
                goto out;
        SAVE_REGS(mp_state);
        SAVE_REGS(vmm_rr);
        memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
        memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
        SAVE_REGS(itr_regions);
        SAVE_REGS(dtr_regions);
        SAVE_REGS(tc_regions);
        SAVE_REGS(irq_check);
        SAVE_REGS(itc_check);
        SAVE_REGS(timer_check);
        SAVE_REGS(timer_pending);
        SAVE_REGS(last_itc);
        for (i = 0; i < 8; i++) {
                regs->vrr[i] = vcpu->arch.vrr[i];
                regs->ibr[i] = vcpu->arch.ibr[i];
                regs->dbr[i] = vcpu->arch.dbr[i];
        }
        for (i = 0; i < 4; i++)
                regs->insvc[i] = vcpu->arch.insvc[i];
        regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);

        SAVE_REGS(metaphysical_rr0);
        SAVE_REGS(metaphysical_rr4);
        SAVE_REGS(metaphysical_saved_rr0);
        SAVE_REGS(metaphysical_saved_rr4);
        SAVE_REGS(fp_psr);
        SAVE_REGS(saved_gp);

        vcpu_put(vcpu);
        r = 0;
out:
        return r;
}
1430 hrtimer_cancel(&vcpu->arch.hlt_timer);
1431 kfree(vcpu->arch.apic);
1435 long kvm_arch_vcpu_ioctl(struct file *filp,
1436 unsigned int ioctl, unsigned long arg)
1441 int kvm_arch_set_memory_region(struct kvm *kvm,
1442 struct kvm_userspace_memory_region *mem,
1443 struct kvm_memory_slot old,
1448 int npages = mem->memory_size >> PAGE_SHIFT;
1449 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
1450 unsigned long base_gfn = memslot->base_gfn;
1452 for (i = 0; i < npages; i++) {
1453 page = gfn_to_page(kvm, base_gfn + i);
1454 kvm_set_pmt_entry(kvm, base_gfn + i,
1455 page_to_pfn(page) << PAGE_SHIFT,
1456 _PAGE_AR_RWX|_PAGE_MA_WB);
1457 memslot->rmap[i] = (unsigned long)page;
1464 long kvm_arch_dev_ioctl(struct file *filp,
1465 unsigned int ioctl, unsigned long arg)
1470 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1472 kvm_vcpu_uninit(vcpu);
1475 static int vti_cpu_has_kvm_support(void)
1477 long avail = 1, status = 1, control = 1;
1480 ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
1484 if (!(avail & PAL_PROC_VM_BIT))
1487 printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
1489 ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
1492 printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
1494 if (!(vp_env_info & VP_OPCODE)) {
1495 printk(KERN_WARNING"kvm: No opcode ability on hardware, "
1496 "vm_env_info:0x%lx\n", vp_env_info);
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
                struct module *module)
{
        unsigned long module_base;
        unsigned long vmm_size;
        unsigned long vmm_offset, func_offset, fdesc_offset;
        struct fdesc *p_fdesc;

        if (!kvm_vmm_base) {
                printk("kvm: kvm area hasn't been initialized yet!!\n");
                return -EFAULT;
        }

        /* Calculate the new position of the relocated vmm module. */
        module_base = (unsigned long)module->module_core;
        vmm_size = module->core_size;
        if (unlikely(vmm_size > KVM_VMM_SIZE))
                return -EFAULT;

        memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
        kvm_flush_icache(kvm_vmm_base, vmm_size);

        /* Recalculate kvm_vmm_info based on the new VMM. */
        vmm_offset = vmm_info->vmm_ivt - module_base;
        kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
        printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
                        kvm_vmm_info->vmm_ivt);

        fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
        kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
                        fdesc_offset);
        func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
        p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
        p_fdesc->ip = KVM_VMM_BASE + func_offset;
        p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);

        printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
                        KVM_VMM_BASE+func_offset);

        fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
        kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
                        fdesc_offset);
        func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
        p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
        p_fdesc->ip = KVM_VMM_BASE + func_offset;
        p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

        kvm_vmm_gp = p_fdesc->gp;

        printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
                        kvm_vmm_info->vmm_entry);
        printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
                        KVM_VMM_BASE + func_offset);

        return 0;
}
int kvm_arch_init(void *opaque)
{
        int r;
        struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;

        if (!vti_cpu_has_kvm_support()) {
                printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
                r = -EOPNOTSUPP;
                goto out;
        }

        if (kvm_vmm_info) {
                printk(KERN_ERR "kvm: Already loaded VMM module!\n");
                r = -EEXIST;
                goto out;
        }

        r = -ENOMEM;
        kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
        if (!kvm_vmm_info)
                goto out;

        if (kvm_alloc_vmm_area())
                goto out_free0;

        r = kvm_relocate_vmm(vmm_info, vmm_info->module);
        if (r)
                goto out_free1;

        return 0;

out_free1:
        kvm_free_vmm_area();
out_free0:
        kfree(kvm_vmm_info);
        kvm_vmm_info = NULL;
out:
        return r;
}

void kvm_arch_exit(void)
{
        kvm_free_vmm_area();
        kfree(kvm_vmm_info);
        kvm_vmm_info = NULL;
}
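/*
 * Copy, then clear, the dirty bitmap that the VMM maintains in the
 * per-VM data area into the memslot's dirty_bitmap, from which the
 * generic kvm_get_dirty_log() reports to userspace.
 */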
static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
                struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        long n, base;
        unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
                                        + KVM_MEM_DIRTY_LOG_OFS);

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
        base = memslot->base_gfn / BITS_PER_LONG;

        for (i = 0; i < n/sizeof(long); ++i) {
                memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
                dirty_bitmap[base + i] = 0;
        }
        r = 0;
out:
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                struct kvm_dirty_log *log)
{
        int r;
        int n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        spin_lock(&kvm->arch.dirty_log_lock);

        r = kvm_ia64_sync_dirty_log(kvm, log);
        if (r)
                goto out;

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                kvm_flush_remote_tlbs(kvm);
                memslot = &kvm->memslots[log->slot];
                n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        spin_unlock(&kvm->arch.dirty_log_lock);
        return r;
}
int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

static void vcpu_kick_intr(void *info)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
        printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
        int ipi_pcpu = vcpu->cpu;

        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);

        if (vcpu->guest_mode)
                smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
}

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
{
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

        if (!test_and_set_bit(vec, &vpd->irr[0])) {
                vcpu->arch.irq_new_pending = 1;
                if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
                        kvm_vcpu_kick(vcpu);
                else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                        if (waitqueue_active(&vcpu->wq))
                                wake_up_interruptible(&vcpu->wq);
                }
        }

        return 1;
}

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
        return apic->vcpu->vcpu_id == dest;
}

int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
        return 0;
}

struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
                unsigned long bitmap)
{
        struct kvm_vcpu *lvcpu = kvm->vcpus[0];
        int i;

        for (i = 1; i < KVM_MAX_VCPUS; i++) {
                if (!kvm->vcpus[i])
                        continue;
                if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
                        lvcpu = kvm->vcpus[i];
        }

        return lvcpu;
}
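/*
 * Scan a 256-bit irr bitmap as eight 32-bit words, from the top
 * down, and return the highest vector that is set, or -1 if none
 * are pending.
 */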
static int find_highest_bits(int *dat)
{
        u32 bits, bitnum;
        int i;

        /* loop for all 256 bits */
        for (i = 7; i >= 0 ; i--) {
                bits = dat[i];
                if (bits) {
                        bitnum = fls(bits);
                        return i * 32 + bitnum - 1;
                }
        }

        return -1;
}

int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
{
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

        if (vpd->irr[0] & (1UL << NMI_VECTOR))
                return NMI_VECTOR;
        if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
                return ExtINT_VECTOR;

        return find_highest_bits((int *)&vpd->irr[0]);
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
        if (kvm_highest_pending_irq(vcpu) != -1)
                return 1;
        return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}