KVM: s390: KVM_GET/SET_ONEREG for s390
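Besides the KVM_GET_ONE_REG/KVM_SET_ONE_REG plumbing named in the subject, the diff as it appears in this tree also carries neighbouring changes from the same series: the synced-register block in struct kvm_run, new capability reporting (ucontrol, sync regs, one reg, vcpu limits, COW), ucontrol guards around the SCA, a debugfs counter for diagnose 0x9c, and the generic memslot hooks. Short notes between the hunks below point out what each piece does.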
[cascardo/linux.git] / arch / s390 / kvm / kvm-s390.c
index 8489edf..894b3e4 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -27,7 +27,8 @@
 #include <asm/lowcore.h>
 #include <asm/pgtable.h>
 #include <asm/nmi.h>
-#include <asm/system.h>
+#include <asm/switch_to.h>
+#include <asm/sclp.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 
@@ -74,6 +75,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
+       { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
 };
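The new diagnose_9c entry counts the diag 0x9c (directed yield) hypercall; the handler itself arrives with a companion patch in this series, only the debugfs statistics counter is added here.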
 
@@ -129,8 +131,20 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
+#ifdef CONFIG_KVM_S390_UCONTROL
+       case KVM_CAP_S390_UCONTROL:
+#endif
+       case KVM_CAP_SYNC_REGS:
+       case KVM_CAP_ONE_REG:
                r = 1;
                break;
+       case KVM_CAP_NR_VCPUS:
+       case KVM_CAP_MAX_VCPUS:
+               r = KVM_MAX_VCPUS;
+               break;
+       case KVM_CAP_S390_COW:
+               r = sclp_get_fac85() & 0x2;
+               break;
        default:
                r = 0;
        }
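For the boolean capabilities the extension check now answers 1, the vcpu-count capabilities report KVM_MAX_VCPUS, and KVM_CAP_S390_COW reports the 0x2 bit of SCLP facility byte 85. A minimal userspace probe (a sketch, not part of the patch) would look like:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		if (kvm < 0)
			return 1;
		/* each call returns what the switch above computes for r */
		printf("ONE_REG:   %d\n", ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ONE_REG));
		printf("SYNC_REGS: %d\n", ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_REGS));
		printf("MAX_VCPUS: %d\n", ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS));
		printf("S390_COW:  %d\n", ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_S390_COW));
		return 0;
	}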
@@ -228,10 +242,13 @@ out_err:
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
-       clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
-       if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
-               (__u64) vcpu->arch.sie_block)
-               vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
+       if (!kvm_is_ucontrol(vcpu->kvm)) {
+               clear_bit(63 - vcpu->vcpu_id,
+                         (unsigned long *) &vcpu->kvm->arch.sca->mcn);
+               if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
+                   (__u64) vcpu->arch.sie_block)
+                       vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
+       }
        smp_mb();
 
        if (kvm_is_ucontrol(vcpu->kvm))
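ucontrol guests do not use the host-managed system control area (the SCA is not set up for them at creation time, see the vcpu-create hunk below), so vcpu teardown now skips the SCA bookkeeping for that case.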
@@ -282,6 +299,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        }
 
        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
+       vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
+                                   KVM_SYNC_GPRS |
+                                   KVM_SYNC_ACRS |
+                                   KVM_SYNC_CRS;
        return 0;
 }
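kvm_valid_regs announces which parts of the new kvm_run synced-register block the kernel keeps up to date on every exit; on s390 the prefix, general purpose, access and control registers are always valid.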
 
@@ -296,7 +317,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
-       restore_access_regs(vcpu->arch.guest_acrs);
+       restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
@@ -306,7 +327,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
-       save_access_regs(vcpu->arch.guest_acrs);
+       save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
 }
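With the access registers now living in the shared kvm_run area, vcpu load/put save and restore vcpu->run->s.regs.acrs directly; the old private vcpu->arch.guest_acrs copy goes away.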
@@ -316,8 +337,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
-       vcpu->arch.sie_block->prefix    = 0UL;
-       vcpu->arch.sie_block->ihcpu     = 0xffff;
+       kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
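The removed pair of assignments is folded into the kvm_s390_set_prefix() helper. Based on the two lines it replaces, the helper presumably looks roughly like the sketch below; the exact prefix mask is an assumption, and the real definition lives in kvm-s390.h:

	static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
	{
		vcpu->arch.sie_block->prefix = prefix & 0x7fffe000u; /* assumed mask */
		/* force SIE to revalidate its cached prefix translation */
		vcpu->arch.sie_block->ihcpu = 0xffff;
	}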
@@ -368,12 +388,19 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                goto out_free_cpu;
 
        vcpu->arch.sie_block->icpua = id;
-       BUG_ON(!kvm->arch.sca);
-       if (!kvm->arch.sca->cpu[id].sda)
-               kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
-       vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
-       vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
-       set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
+       if (!kvm_is_ucontrol(kvm)) {
+               if (!kvm->arch.sca) {
+                       WARN_ON_ONCE(1);
+                       goto out_free_cpu;
+               }
+               if (!kvm->arch.sca->cpu[id].sda)
+                       kvm->arch.sca->cpu[id].sda =
+                               (__u64) vcpu->arch.sie_block;
+               vcpu->arch.sie_block->scaoh =
+                       (__u32)(((__u64)kvm->arch.sca) >> 32);
+               vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
+               set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
+       }
 
        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
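Two changes here: the hard BUG_ON(!kvm->arch.sca) becomes a WARN_ON_ONCE followed by a clean error exit, and the whole SCA wiring is bypassed for ucontrol guests, which have no SCA to register with.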
@@ -406,6 +433,39 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       /* kvm common code refers to this, but never calls it */
+       BUG();
+       return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
+                                          struct kvm_one_reg *reg)
+{
+       int r = -EINVAL;
+
+       switch (reg->id) {
+       default:
+               break;
+       }
+
+       return r;
+}
+
+static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
+                                          struct kvm_one_reg *reg)
+{
+       int r = -EINVAL;
+
+       switch (reg->id) {
+       default:
+               break;
+       }
+
+       return r;
+}
+
 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 {
        kvm_s390_vcpu_initial_reset(vcpu);
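The ONE_REG handlers are scaffolding at this point: with no register IDs wired up, every request falls through to the default case and returns -EINVAL. From userspace the interface is driven as sketched below (no KVM_REG_S390_* IDs exist yet in this patch, so the id is a placeholder and vcpu_fd is an assumed vcpu file descriptor):

	__u64 val;
	struct kvm_one_reg reg = {
		.id   = 0, /* placeholder: a KVM_REG_S390_* id from a later patch */
		.addr = (__u64)(unsigned long)&val,
	};
	/* fails with EINVAL until register IDs are implemented */
	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);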
@@ -414,29 +474,29 @@ static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-       memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
+       memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-       memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
+       memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
-       memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
+       memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
-       restore_access_regs(vcpu->arch.guest_acrs);
+       restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
-       memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
+       memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
 }
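Guest general purpose and access registers move from private vcpu->arch storage into the kvm_run synced-register block, so the classic KVM_GET/SET_(S)REGS ioctls and the run-structure view now operate on the same backing store and cannot diverge.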
@@ -444,7 +504,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-       vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+       vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
 }
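KVM_SET_FPU now sanitizes the floating point control word with FPC_VALID_MASK before restore_fp_regs() loads it, matching what vcpu_load already does; loading reserved FPC bits on the host would otherwise (presumably) raise a specification exception.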
@@ -497,7 +557,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
        int rc;
 
-       memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
+       memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
 
        if (need_resched())
                schedule();
@@ -514,7 +574,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
-       rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
+       rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
        if (rc) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
@@ -530,7 +590,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        kvm_guest_exit();
        local_irq_enable();
 
-       memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
+       memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
 }
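The 16-byte copies mirror guest r14 and r15 into the gg14/gg15 slots of the SIE control block around each SIE entry; only the source changes from the private register array to vcpu->run->s.regs.gprs.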
 
@@ -560,6 +620,15 @@ rerun_vcpu:
 
        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
+       if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
+               kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
+               kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
+       }
+       if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
+               kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
+               memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
+               kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
+       }
 
        might_fault();
 
@@ -608,6 +677,8 @@ rerun_vcpu:
 
        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
+       kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
+       memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
 
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
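Before entering the guest, KVM_RUN consumes whatever userspace flagged in kvm_dirty_regs and clears the handled bits; note that the KVM_SYNC_CRS path re-applies the prefix as well, which (presumably) forces the SIE block to revalidate its cached prefix translation after a control register change. On the way out, prefix and control registers are copied back unconditionally, keeping the promise made by kvm_valid_regs. The userspace side of the protocol then reads roughly like this sketch (run is assumed to be the mmap'ed kvm_run of a vcpu, vcpu_fd its file descriptor):

	run->s.regs.prefix = 0x2000;            /* 8k-aligned new prefix */
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX; /* picked up before SIE entry */
	ioctl(vcpu_fd, KVM_RUN, 0);
	/* after the exit, run->s.regs holds the current prefix and CRs */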
@@ -654,7 +725,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
                return -EFAULT;
 
        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
-                       vcpu->arch.guest_gprs, 128, prefix))
+                       vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;
 
        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
@@ -683,7 +754,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
                return -EFAULT;
 
        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
-                       &vcpu->arch.guest_acrs, 64, prefix))
+                       &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;
 
        if (__guestcopy(vcpu,
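The store-status sizes are unchanged and still add up: 128 bytes cover the sixteen 8-byte general purpose registers, 64 bytes the sixteen 4-byte access registers; only the source pointers now refer to the kvm_run area.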
@@ -725,6 +796,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
+       case KVM_SET_ONE_REG:
+       case KVM_GET_ONE_REG: {
+               struct kvm_one_reg reg;
+               r = -EFAULT;
+               if (copy_from_user(&reg, argp, sizeof(reg)))
+                       break;
+               if (ioctl == KVM_SET_ONE_REG)
+                       r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
+               else
+                       r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
+               break;
+       }
 #ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;
@@ -768,7 +851,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                break;
        }
        default:
-               r = -EINVAL;
+               r = -ENOTTY;
        }
        return r;
 }
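-ENOTTY ("inappropriate ioctl for device") is the conventional errno for an unrecognized ioctl; -EINVAL remains in use for recognized commands with bad arguments.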
@@ -786,6 +869,16 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;
 }
 
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+       return 0;
+}
+
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
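The two memslot hooks are new arch callbacks required by the generic memory slot code; s390 keeps no per-slot architecture data yet, so freeing is a no-op and creation trivially succeeds.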