#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
+#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
-
+#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
vcpu->arch.sie_block->ecb = 2;
vcpu->arch.sie_block->eca = 0xC1002001U;
- setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
- (unsigned long) vcpu);
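+	/*
+	 * Wake up from the guest's clock comparator via an hrtimer. The
+	 * hrtimer callback runs in hard interrupt context, so the actual
+	 * wakeup work is deferred to a tasklet.
+	 */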
+ hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
+ (unsigned long) vcpu);
+ vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
get_cpu_id(&vcpu->arch.cpu_id);
- vcpu->arch.cpu_id.version = 0xfe;
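+	/* get_cpu_id() stored the host's version code; override it for
+	 * the guest */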
+ vcpu->arch.cpu_id.version = 0xff;
return 0;
}
spin_lock_init(&vcpu->arch.local_int.lock);
INIT_LIST_HEAD(&vcpu->arch.local_int.list);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
- spin_lock_bh(&kvm->arch.float_int.lock);
+ spin_lock(&kvm->arch.float_int.lock);
kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
init_waitqueue_head(&vcpu->arch.local_int.wq);
vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
- spin_unlock_bh(&kvm->arch.float_int.lock);
+ spin_unlock(&kvm->arch.float_int.lock);
rc = kvm_vcpu_init(vcpu, kvm, id);
if (rc)
return -EINVAL; /* not implemented yet */
}
-int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
- struct kvm_debug_guest *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
{
return -EINVAL; /* not implemented yet */
}
return -EINVAL; /* not implemented yet */
}
-extern void s390_handle_mcck(void);
-
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
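+	/* gg14/gg15 in the SIE block shadow guest registers 14 and 15 */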
memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
struct kvm_memory_slot old,
int user_alloc)
{
+ int i;
+
/* A few sanity checks. We can have exactly one memory slot, which has
   to start at guest virtual zero, has to be located at a page boundary
   in userland, and has to end at a page boundary. The memory in
   userland is allowed to be fragmented into various different vmas.
   It is ok to mmap() and munmap() memory in this slot at any time
   after this call */
- if (mem->slot)
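+	/* there is only one memory slot, and it can only be set up once */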
+ if (mem->slot || kvm->arch.guest_memsize)
return -EINVAL;
if (mem->guest_phys_addr)
	return -EINVAL;
if (mem->memory_size & (PAGE_SIZE - 1))
return -EINVAL;
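+	/* only user-allocated memory is supported */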
+ if (!user_alloc)
+ return -EINVAL;
+
+ /* lock all vcpus */
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ if (!kvm->vcpus[i])
+ continue;
+ if (!mutex_trylock(&kvm->vcpus[i]->mutex))
+ goto fail_out;
+ }
+
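+	/* all vcpu mutexes are held, so no vcpu can enter SIE while the
+	 * memory layout changes */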
kvm->arch.guest_origin = mem->userspace_addr;
kvm->arch.guest_memsize = mem->memory_size;
- /* FIXME: we do want to interrupt running CPUs and update their memory
- configuration now to avoid race conditions. But hey, changing the
- memory layout while virtual CPUs are running is usually bad
- programming practice. */
+ /* update sie control blocks, and unlock all vcpus */
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ if (kvm->vcpus[i]) {
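+			/* point SIE at the new guest origin and limit */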
+ kvm->vcpus[i]->arch.sie_block->gmsor =
+ kvm->arch.guest_origin;
+ kvm->vcpus[i]->arch.sie_block->gmslm =
+ kvm->arch.guest_memsize +
+ kvm->arch.guest_origin +
+ VIRTIODESCSPACE - 1ul;
+ mutex_unlock(&kvm->vcpus[i]->mutex);
+ }
+ }
return 0;
+
+fail_out:
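+	/* drop only the mutexes that were actually taken */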
+	for (i--; i >= 0; i--)
+		if (kvm->vcpus[i])
+			mutex_unlock(&kvm->vcpus[i]->mutex);
+ return -EINVAL;
}
void kvm_arch_flush_shadow(struct kvm *kvm)