KVM: s390: forward hrtimer if guest ckc not pending yet
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index f00f31e..7fbbcbc 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -804,14 +804,20 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                return -EOPNOTSUPP; /* disabled wait */
        }
 
-       __set_cpu_idle(vcpu);
        if (!ckc_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
+               __set_cpu_idle(vcpu);
                goto no_timer;
        }
 
        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+
+       /* underflow */
+       if (vcpu->arch.sie_block->ckc < now)
+               return 0;
+
+       __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
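
The new "underflow" check matters because ckc and now are unsigned 64-bit TOD values: if the clock comparator already lies in the past, the subtraction wraps around and tod_to_ns() would yield a sleep time of over a century, so the vcpu must not be put to sleep at all. A small userspace sketch of that failure mode, using made-up TOD values and a stand-in for the kernel's tod_to_ns() helper (one TOD-clock unit is 1000/4096 ns):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Stand-in for the kernel's tod_to_ns(): one TOD-clock unit is 1000/4096 ns,
 * computed in two parts to avoid 64-bit overflow on large values. */
static uint64_t tod_to_ns(uint64_t todval)
{
	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

int main(void)
{
	/* Hypothetical guest TOD values: the clock comparator already passed. */
	uint64_t now = 0x20000000ULL;
	uint64_t ckc = 0x10000000ULL;

	/* Without the "ckc < now" guard the u64 difference wraps around... */
	uint64_t sltime = tod_to_ns(ckc - now);

	/* ...and the vcpu would be left sleeping for over a hundred years. */
	printf("sleep time without the guard: %" PRIu64 " ns\n", sltime);

	if (ckc < now)
		printf("ckc already pending: skip the hrtimer and return 0\n");
	return 0;
}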
@@ -820,7 +826,7 @@ no_timer:
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-       hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
+       hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
 }
 
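
The switch from hrtimer_try_to_cancel() to hrtimer_cancel() matters once the wakeup callback can re-arm the timer (next hunk): hrtimer_try_to_cancel() gives up and returns -1 if the callback is currently executing, whereas hrtimer_cancel() waits for a running callback to finish and only returns once the timer is guaranteed inactive. A hedged sketch of that teardown pattern (a generic, hypothetical helper, not KVM code):

#include <linux/hrtimer.h>

/* Hypothetical helper: stop a timer whose callback may forward and
 * restart itself, as kvm_s390_idle_wakeup() does after this patch. */
static void stop_self_rearming_timer(struct hrtimer *timer)
{
	/*
	 * hrtimer_try_to_cancel() would bail out with -1 while the callback
	 * runs, so the callback could still re-arm the timer behind our back.
	 * hrtimer_cancel() blocks until the callback has finished.
	 */
	hrtimer_cancel(timer);
}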
@@ -840,10 +846,20 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 {
        struct kvm_vcpu *vcpu;
+       u64 now, sltime;
 
        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-       kvm_s390_vcpu_wakeup(vcpu);
+       now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+       sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
+       /*
+        * If the monotonic clock runs faster than the tod clock we might be
+        * woken up too early and have to go back to sleep to avoid deadlocks.
+        */
+       if (vcpu->arch.sie_block->ckc > now &&
+           hrtimer_forward_now(timer, ns_to_ktime(sltime)))
+               return HRTIMER_RESTART;
+       kvm_s390_vcpu_wakeup(vcpu);
        return HRTIMER_NORESTART;
 }
 
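
The reworked callback follows the generic hrtimer forward pattern: recompute how far away the real deadline is, push the expiry past "now" with hrtimer_forward_now(), and return HRTIMER_RESTART so the timer stays armed instead of waking the vcpu too early. A minimal self-contained sketch of that pattern as a hypothetical demo module (all demo_* names are made up; the deadline stands in for the guest's clock comparator):

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/printk.h>

static ktime_t demo_deadline;	/* stands in for the guest's ckc */
static struct hrtimer demo_timer;

static enum hrtimer_restart demo_wakeup(struct hrtimer *timer)
{
	ktime_t now = ktime_get();

	/*
	 * Fired before the real deadline (the analogue of "ckc > now"):
	 * forward the expiry past now and keep the timer running rather
	 * than delivering the wakeup too early.
	 */
	if (ktime_before(now, demo_deadline) &&
	    hrtimer_forward_now(timer, ktime_sub(demo_deadline, now)))
		return HRTIMER_RESTART;

	pr_info("demo: deadline reached, waking up\n");
	return HRTIMER_NORESTART;
}

static int __init demo_init(void)
{
	demo_deadline = ktime_add_ms(ktime_get(), 100);
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_wakeup;
	/* Deliberately too short so the callback has to forward itself. */
	hrtimer_start(&demo_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Synchronous cancel, matching the hrtimer_cancel() change above. */
	hrtimer_cancel(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");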
@@ -984,7 +1000,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
        return 0;
 }
 
-int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info *extcall = &li->irq.extcall;