KVM: SVM: Make lazy FPU switching work with nested svm
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 52f78dd..a8ec53f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
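
The lazy FPU switching code can clear CR0 and #NM intercepts while a nested guest is running. That is a bug: the L1 hypervisor may have requested exactly those intercepts, and they must not be dropped on its behalf. The hunks below therefore merge KVM's own intercept updates with the intercepts cached for the nested guest instead of overwriting them. The range shown also carries adjacent nested-SVM work: the descriptor_table -> desc_ptr conversion, a cleaner vcpu allocation error path, a sleeping nested_svm_map(), the decide/act split of nested_svm_exit_handled(), and V_INTR_MASKING-aware TPR handling.
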
@@ -26,6 +26,7 @@
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/ftrace_event.h>
+#include <linux/slab.h>
 
 #include <asm/desc.h>
 
@@ -128,6 +129,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
 
 static int nested_svm_exit_handled(struct vcpu_svm *svm);
+static int nested_svm_intercept(struct vcpu_svm *svm);
 static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);
@@ -318,7 +320,7 @@ static int svm_hardware_enable(void *garbage)
 
        struct svm_cpu_data *sd;
        uint64_t efer;
-       struct descriptor_table gdt_descr;
+       struct desc_ptr gdt_descr;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();
 
@@ -344,7 +346,7 @@ static int svm_hardware_enable(void *garbage)
        sd->next_asid = sd->max_asid + 1;
 
        kvm_get_gdt(&gdt_descr);
-       gdt = (struct desc_struct *)gdt_descr.base;
+       gdt = (struct desc_struct *)gdt_descr.address;
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
        wrmsrl(MSR_EFER, efer | EFER_SVME);
@@ -705,29 +707,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_svm;
 
+       err = -ENOMEM;
        page = alloc_page(GFP_KERNEL);
-       if (!page) {
-               err = -ENOMEM;
+       if (!page)
                goto uninit;
-       }
 
-       err = -ENOMEM;
        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
-               goto uninit;
+               goto free_page1;
 
        nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
-               goto uninit;
-
-       svm->msrpm = page_address(msrpm_pages);
-       svm_vcpu_init_msrpm(svm->msrpm);
+               goto free_page2;
 
        hsave_page = alloc_page(GFP_KERNEL);
        if (!hsave_page)
-               goto uninit;
+               goto free_page3;
+
        svm->nested.hsave = page_address(hsave_page);
 
+       svm->msrpm = page_address(msrpm_pages);
+       svm_vcpu_init_msrpm(svm->msrpm);
+
        svm->nested.msrpm = page_address(nested_msrpm_pages);
 
        svm->vmcb = page_address(page);
@@ -743,6 +744,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
        return &svm->vcpu;
 
+free_page3:
+       __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+       __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+       __free_page(page);
 uninit:
        kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
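
The reworked allocation path above follows the usual kernel unwind idiom: set err = -ENOMEM once up front, then free in reverse order through a ladder of labels so each failure point jumps to exactly the cleanups it needs. A minimal userspace sketch of the same shape (names hypothetical):

        #include <stdlib.h>

        /* Sketch of the reverse-order unwind; the three buffers stand in
         * for the vmcb, msrpm and hsave allocations above. */
        static int create_ctx(void **a, void **b, void **c)
        {
                int err = -1;                   /* stands in for -ENOMEM */

                *a = malloc(64);
                if (!*a)
                        goto out;
                *b = malloc(64);
                if (!*b)
                        goto free_a;
                *c = malloc(64);
                if (!*c)
                        goto free_b;
                return 0;

        free_b:
                free(*b);
        free_a:
                free(*a);
        out:
                return err;
        }
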
@@ -930,36 +937,36 @@ static int svm_get_cpl(struct kvm_vcpu *vcpu)
        return save->cpl;
 }
 
-static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       dt->limit = svm->vmcb->save.idtr.limit;
-       dt->base = svm->vmcb->save.idtr.base;
+       dt->size = svm->vmcb->save.idtr.limit;
+       dt->address = svm->vmcb->save.idtr.base;
 }
 
-static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       svm->vmcb->save.idtr.limit = dt->limit;
-       svm->vmcb->save.idtr.base = dt->base ;
+       svm->vmcb->save.idtr.limit = dt->size;
+       svm->vmcb->save.idtr.base = dt->address;
 }
 
-static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       dt->limit = svm->vmcb->save.gdtr.limit;
-       dt->base = svm->vmcb->save.gdtr.base;
+       dt->size = svm->vmcb->save.gdtr.limit;
+       dt->address = svm->vmcb->save.gdtr.base;
 }
 
-static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       svm->vmcb->save.gdtr.limit = dt->limit;
-       svm->vmcb->save.gdtr.base = dt->base ;
+       svm->vmcb->save.gdtr.limit = dt->size;
+       svm->vmcb->save.gdtr.base = dt->address;
 }
 
 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
@@ -972,6 +979,7 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void update_cr0_intercept(struct vcpu_svm *svm)
 {
+       struct vmcb *vmcb = svm->vmcb;
        ulong gcr0 = svm->vcpu.arch.cr0;
        u64 *hcr0 = &svm->vmcb->save.cr0;
 
@@ -983,11 +991,25 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
 
 
        if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
-               svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
-               svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+               vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
+               vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+               if (is_nested(svm)) {
+                       struct vmcb *hsave = svm->nested.hsave;
+
+                       hsave->control.intercept_cr_read  &= ~INTERCEPT_CR0_MASK;
+                       hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+                       vmcb->control.intercept_cr_read  |= svm->nested.intercept_cr_read;
+                       vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
+               }
        } else {
                svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
                svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+               if (is_nested(svm)) {
+                       struct vmcb *hsave = svm->nested.hsave;
+
+                       hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
+                       hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+               }
        }
 }
 
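
The point of the update_cr0_intercept() change: when the guest FPU is active and the shadow CR0 matches the guest's, KVM may drop its own CR0 intercepts, but while nested it must OR back the bits the L1 hypervisor requested (cached in svm->nested.intercept_cr_read/write) and keep the host-side view in hsave consistent for the next #vmexit. A compressed sketch of the bit logic (mask value illustrative):

        #include <stdint.h>

        #define INTERCEPT_CR0_MASK 0x01u        /* illustrative bit value */

        /* Sketch: recompute CR0 read intercepts with the FPU active and
         * the shadow CR0 in sync, without losing what L1 requested. */
        static uint32_t cr0_read_intercepts(uint32_t host_bits, uint32_t l1_bits)
        {
                host_bits &= ~INTERCEPT_CR0_MASK;       /* KVM no longer needs it */
                return host_bits | l1_bits;             /* but L1 still might */
        }
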
@@ -1262,7 +1284,22 @@ static int ud_interception(struct vcpu_svm *svm)
 static void svm_fpu_activate(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+       u32 excp;
+
+       if (is_nested(svm)) {
+               u32 h_excp, n_excp;
+
+               h_excp  = svm->nested.hsave->control.intercept_exceptions;
+               n_excp  = svm->nested.intercept_exceptions;
+               h_excp &= ~(1 << NM_VECTOR);
+               excp    = h_excp | n_excp;
+       } else {
+               excp  = svm->vmcb->control.intercept_exceptions;
+               excp &= ~(1 << NM_VECTOR);
+       }
+
+       svm->vmcb->control.intercept_exceptions = excp;
+
        svm->vcpu.fpu_active = 1;
        update_cr0_intercept(svm);
 }
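
svm_fpu_activate() applies the same rule to the exception bitmap: the #NM intercept is cleared only in the host-state copy and then merged with the nested guest's intercepts, so an #NM that L1 wants to see still causes a #vmexit. A self-contained sketch of the merge:

        #include <stdint.h>

        #define NM_VECTOR 7     /* x86 #NM, device-not-available */

        /* Sketch: merge exception intercepts when the guest FPU becomes active. */
        static uint32_t fpu_activate_excp(uint32_t host_excp, uint32_t nested_excp,
                                          int is_nested)
        {
                host_excp &= ~(1u << NM_VECTOR);        /* host is done with #NM */
                return is_nested ? host_excp | nested_excp  /* L1 may still want it */
                                 : host_excp;
        }
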
@@ -1378,6 +1415,8 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code)
 {
+       int vmexit;
+
        if (!is_nested(svm))
                return 0;
 
@@ -1386,7 +1425,11 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
        svm->vmcb->control.exit_info_1 = error_code;
        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
 
-       return nested_svm_exit_handled(svm);
+       vmexit = nested_svm_intercept(svm);
+       if (vmexit == NESTED_EXIT_DONE)
+               svm->nested.exit_required = true;
+
+       return vmexit;
 }
 
 static inline int nested_svm_intr(struct vcpu_svm *svm)
@@ -1417,15 +1460,19 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
        return 0;
 }
 
-static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
+static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
 {
        struct page *page;
 
+       might_sleep();
+
        page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                goto error;
 
-       return kmap_atomic(page, idx);
+       *_page = page;
+
+       return kmap(page);
 
 error:
        kvm_release_page_clean(page);
@@ -1434,16 +1481,9 @@ error:
        return NULL;
 }
 
-static void nested_svm_unmap(void *addr, enum km_type idx)
+static void nested_svm_unmap(struct page *page)
 {
-       struct page *page;
-
-       if (!addr)
-               return;
-
-       page = kmap_atomic_to_page(addr);
-
-       kunmap_atomic(addr, idx);
+       kunmap(page);
        kvm_release_page_dirty(page);
 }
 
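
nested_svm_map() switches from kmap_atomic() to kmap() because gfn_to_page() can sleep, which is not allowed while an atomic mapping is held; might_sleep() documents the constraint. Callers now carry the struct page so nested_svm_unmap() can kunmap() it and release the reference. A sketch of the resulting pair (kernel API, simplified):

        #include <linux/kernel.h>
        #include <linux/highmem.h>

        /* Sketch: the sleeping map/unmap pair the patch adopts. Unlike
         * kmap_atomic(), kmap() may sleep, which matches gfn_to_page(). */
        static void *map_guest_page(struct page *page)
        {
                might_sleep();
                return kmap(page);      /* persistent mapping, may sleep */
        }

        static void unmap_guest_page(struct page *page)
        {
                kunmap(page);           /* pairs with kmap() */
        }
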
@@ -1453,16 +1493,11 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
        u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        bool ret = false;
        u32 t0, t1;
-       u8 *msrpm;
+       u8 val;
 
        if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
                return false;
 
-       msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
-
-       if (!msrpm)
-               goto out;
-
        switch (msr) {
        case 0 ... 0x1fff:
                t0 = (msr * 2) % 8;
@@ -1483,11 +1518,10 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
                goto out;
        }
 
-       ret = msrpm[t1] & ((1 << param) << t0);
+       if (!kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + t1, &val, 1))
+               ret = val & ((1 << param) << t0);
 
 out:
-       nested_svm_unmap(msrpm, KM_USER0);
-
        return ret;
 }
 
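
Instead of mapping the whole nested MSR permission bitmap, the check now reads only the byte that covers the MSR via kvm_read_guest(). Each MSR occupies two bits, so for the low MSR range the byte index is msr * 2 / 8 (the t1 assignment sits just outside the visible hunk) and the bit offset within that byte is (msr * 2) % 8. A worked example of the arithmetic:

        #include <stdio.h>

        int main(void)
        {
                unsigned int msr = 0x176;               /* example MSR in 0..0x1fff */
                unsigned int t0  = (msr * 2) % 8;       /* bit offset within the byte */
                unsigned int t1  = (msr * 2) / 8;       /* byte offset in the bitmap */

                /* prints: MSR 0x176 -> byte 93, bits 4-5 */
                printf("MSR %#x -> byte %u, bits %u-%u\n", msr, t1, t0, t0 + 1);
                return 0;
        }
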
@@ -1509,6 +1543,9 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
                if (!npt_enabled)
                        return NESTED_EXIT_HOST;
                break;
+       case SVM_EXIT_EXCP_BASE + NM_VECTOR:
+               nm_interception(svm);
+               break;
        default:
                break;
        }
@@ -1519,7 +1556,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
 /*
  * If this function returns true, this #vmexit was already handled
  */
-static int nested_svm_exit_handled(struct vcpu_svm *svm)
+static int nested_svm_intercept(struct vcpu_svm *svm)
 {
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;
@@ -1565,9 +1602,17 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
        }
        }
 
-       if (vmexit == NESTED_EXIT_DONE) {
+       return vmexit;
+}
+
+static int nested_svm_exit_handled(struct vcpu_svm *svm)
+{
+       int vmexit;
+
+       vmexit = nested_svm_intercept(svm);
+
+       if (vmexit == NESTED_EXIT_DONE)
                nested_svm_vmexit(svm);
-       }
 
        return vmexit;
 }
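
The split of nested_svm_exit_handled() separates deciding from acting: nested_svm_intercept() only answers whether L1 intercepts the current exit, and the #vmexit itself is performed by whichever caller can do it safely. nested_svm_check_exception() above relies on this, flagging nested.exit_required instead of switching worlds in the middle of exception delivery. A control-flow sketch (types simplified):

        /* Sketch of the decide/act split. */
        enum { NESTED_EXIT_HOST, NESTED_EXIT_DONE };

        /* Decision only: does L1 intercept this exit code? */
        static int nested_intercept(unsigned int exit_code,
                                    unsigned long long intercepts)
        {
                return (intercepts >> exit_code) & 1 ? NESTED_EXIT_DONE
                                                     : NESTED_EXIT_HOST;
        }

        /* Exception path: record that a #vmexit is due rather than doing it now. */
        static int check_exception(unsigned int exit_code,
                                   unsigned long long intercepts,
                                   int *exit_required)
        {
                int vmexit = nested_intercept(exit_code, intercepts);

                if (vmexit == NESTED_EXIT_DONE)
                        *exit_required = 1;     /* world switch happens later */
                return vmexit;
        }
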
@@ -1609,6 +1654,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
+       struct page *page;
 
        trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
                                       vmcb->control.exit_info_1,
@@ -1616,10 +1662,13 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
                                       vmcb->control.exit_int_info,
                                       vmcb->control.exit_int_info_err);
 
-       nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
+       nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
        if (!nested_vmcb)
                return 1;
 
+       /* Exit nested SVM mode */
+       svm->nested.vmcb = 0;
+
        /* Give the current vmcb to the guest */
        disable_gif(svm);
 
@@ -1629,9 +1678,13 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        nested_vmcb->save.ds     = vmcb->save.ds;
        nested_vmcb->save.gdtr   = vmcb->save.gdtr;
        nested_vmcb->save.idtr   = vmcb->save.idtr;
+       nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
        if (npt_enabled)
                nested_vmcb->save.cr3    = vmcb->save.cr3;
+       else
+               nested_vmcb->save.cr3    = svm->vcpu.arch.cr3;
        nested_vmcb->save.cr2    = vmcb->save.cr2;
+       nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
        nested_vmcb->save.rflags = vmcb->save.rflags;
        nested_vmcb->save.rip    = vmcb->save.rip;
        nested_vmcb->save.rsp    = vmcb->save.rsp;
@@ -1703,10 +1756,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        svm->vmcb->save.cpl = 0;
        svm->vmcb->control.exit_int_info = 0;
 
-       /* Exit nested SVM mode */
-       svm->nested.vmcb = 0;
-
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
 
        kvm_mmu_reset_context(&svm->vcpu);
        kvm_mmu_load(&svm->vcpu);
@@ -1717,9 +1767,10 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 {
        u32 *nested_msrpm;
+       struct page *page;
        int i;
 
-       nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
+       nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
        if (!nested_msrpm)
                return false;
 
@@ -1728,7 +1779,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 
        svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
 
-       nested_svm_unmap(nested_msrpm, KM_USER0);
+       nested_svm_unmap(page);
 
        return true;
 }
@@ -1738,14 +1789,15 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
+       struct page *page;
+       u64 vmcb_gpa;
 
-       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+       vmcb_gpa = svm->vmcb->save.rax;
+
+       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
        if (!nested_vmcb)
                return false;
 
-       /* nested_vmcb is our indicator if nested SVM is activated */
-       svm->nested.vmcb = svm->vmcb->save.rax;
-
        trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, svm->nested.vmcb,
                               nested_vmcb->save.rip,
                               nested_vmcb->control.int_ctl,
@@ -1813,21 +1865,6 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
        svm->vmcb->save.cpl = nested_vmcb->save.cpl;
 
-       /* We don't want a nested guest to be more powerful than the guest,
-          so all intercepts are ORed */
-       svm->vmcb->control.intercept_cr_read |=
-               nested_vmcb->control.intercept_cr_read;
-       svm->vmcb->control.intercept_cr_write |=
-               nested_vmcb->control.intercept_cr_write;
-       svm->vmcb->control.intercept_dr_read |=
-               nested_vmcb->control.intercept_dr_read;
-       svm->vmcb->control.intercept_dr_write |=
-               nested_vmcb->control.intercept_dr_write;
-       svm->vmcb->control.intercept_exceptions |=
-               nested_vmcb->control.intercept_exceptions;
-
-       svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
-
        svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
 
        /* cache intercepts */
@@ -1845,13 +1882,38 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        else
                svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
 
+       if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
+               /* We only want the cr8 intercept bits of the guest */
+               svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
+               svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+       }
+
+       /* We don't want a nested guest to be more powerful than the guest,
+          so all intercepts are ORed */
+       svm->vmcb->control.intercept_cr_read |=
+               nested_vmcb->control.intercept_cr_read;
+       svm->vmcb->control.intercept_cr_write |=
+               nested_vmcb->control.intercept_cr_write;
+       svm->vmcb->control.intercept_dr_read |=
+               nested_vmcb->control.intercept_dr_read;
+       svm->vmcb->control.intercept_dr_write |=
+               nested_vmcb->control.intercept_dr_write;
+       svm->vmcb->control.intercept_exceptions |=
+               nested_vmcb->control.intercept_exceptions;
+
+       svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
+
+       svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
        svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
        svm->vmcb->control.int_state = nested_vmcb->control.int_state;
        svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
        svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
        svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
 
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
+
+       /* nested_vmcb is our indicator if nested SVM is activated */
+       svm->nested.vmcb = vmcb_gpa;
 
        enable_gif(svm);
 
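
On VMRUN every intercept mask of the nested guest is OR-ed into the active VMCB, so L2 can only exit more often than L1 asked for, never less, and can never bypass KVM's own intercepts. The one carve-out is CR8 under V_INTR_MASKING, where KVM first strips its own CR8 bits so that only L1's wishes survive the merge. A sketch of the rule (bit position illustrative):

        #include <stdint.h>

        #define INTERCEPT_CR8_MASK (1u << 8)    /* illustrative bit position */

        /* Sketch: build the CR intercept mask for the L2 VMCB on VMRUN. */
        static uint32_t merge_cr_intercepts(uint32_t kvm_bits, uint32_t l1_bits,
                                            int vintr_masking)
        {
                if (vintr_masking)
                        kvm_bits &= ~INTERCEPT_CR8_MASK; /* keep only L1's CR8 bits */
                return kvm_bits | l1_bits;               /* union of both */
        }
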
@@ -1877,6 +1939,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
 static int vmload_interception(struct vcpu_svm *svm)
 {
        struct vmcb *nested_vmcb;
+       struct page *page;
 
        if (nested_svm_check_permissions(svm))
                return 1;
@@ -1884,12 +1947,12 @@ static int vmload_interception(struct vcpu_svm *svm)
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
 
-       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
        if (!nested_vmcb)
                return 1;
 
        nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
 
        return 1;
 }
@@ -1897,6 +1960,7 @@ static int vmload_interception(struct vcpu_svm *svm)
 static int vmsave_interception(struct vcpu_svm *svm)
 {
        struct vmcb *nested_vmcb;
+       struct page *page;
 
        if (nested_svm_check_permissions(svm))
                return 1;
@@ -1904,12 +1968,12 @@ static int vmsave_interception(struct vcpu_svm *svm)
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
 
-       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
        if (!nested_vmcb)
                return 1;
 
        nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
 
        return 1;
 }
@@ -2505,6 +2569,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+               return;
+
        if (irr == -1)
                return;
 
@@ -2608,6 +2675,9 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+               return;
+
        if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
                int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
                kvm_set_cr8(vcpu, cr8);
@@ -2619,6 +2689,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr8;
 
+       if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+               return;
+
        cr8 = kvm_get_cr8(vcpu);
        svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
        svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
@@ -2940,8 +3013,10 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       update_cr0_intercept(svm);
        svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
+       if (is_nested(svm))
+               svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
+       update_cr0_intercept(svm);
 }
 
 static struct kvm_x86_ops svm_x86_ops = {
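
svm_fpu_deactivate() is the mirror image: the #NM intercept is armed first, in the hsave copy too while nested, and only then is update_cr0_intercept() run, so the CR0 intercept recomputation sees the final FPU state.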