#include "kvm.h"
#include "vmx.h"
+#include "segment_descriptor.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>
+
#include <asm/io.h>
#include <asm/desc.h>
-#include "segment_descriptor.h"
-
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+static int init_rmode_tss(struct kvm *kvm);
+
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
#else
#define HOST_IS_64 0
#endif
+#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
static struct vmcs_descriptor {
int size;
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
-#ifdef CONFIG_X86_64
-static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
-#endif
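+/*
+ * Extract the EFER bits (currently only EFER_SCE) that may differ between
+ * host and guest and therefore must be switched around guest execution.
+ */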
+static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
+{
+ return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
+}
+
+static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
+{
+ int efer_offset = vcpu->msr_offset_efer;
+ return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
+ msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
+}
static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}
-static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
{
int i;
for (i = 0; i < vcpu->nmsrs; ++i)
if (vcpu->guest_msrs[i].index == msr)
- return &vcpu->guest_msrs[i];
+ return i;
+ return -1;
+}
+
+static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+{
+ int i;
+
+ i = __find_msr_index(vcpu, msr);
+ if (i >= 0)
+ return &vcpu->guest_msrs[i];
return NULL;
}
vmcs_clear(vcpu->vmcs);
if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
+ rdtscll(vcpu->host_tsc);
}
static void vcpu_clear(struct kvm_vcpu *vcpu)
vmcs_writel(field, vmcs_readl(field) | mask);
}
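+/*
+ * Recompute the set of intercepted exceptions: always trap #PF; trap #NM
+ * while the guest FPU is not loaded; trap #DB when guest debugging is
+ * enabled; trap everything while in real mode.
+ */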
+static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+{
+ u32 eb;
+
+ eb = 1u << PF_VECTOR;
+ if (!vcpu->fpu_active)
+ eb |= 1u << NM_VECTOR;
+ if (vcpu->guest_debug.enabled)
+ eb |= 1u << 1;
+ if (vcpu->rmode.active)
+ eb = ~0;
+ vmcs_write32(EXCEPTION_BITMAP, eb);
+}
+
static void reload_tss(void)
{
#ifndef CONFIG_X86_64
#endif
}
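+/*
+ * Run the guest with the host's EFER, except for the save/restore bits
+ * (SCE), which take the guest's values.
+ */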
+static void load_transition_efer(struct kvm_vcpu *vcpu)
+{
+ u64 trans_efer;
+ int efer_offset = vcpu->msr_offset_efer;
+
+ trans_efer = vcpu->host_msrs[efer_offset].data;
+ trans_efer &= ~EFER_SAVE_RESTORE_BITS;
+ trans_efer |= msr_efer_save_restore_bits(
+ vcpu->guest_msrs[efer_offset]);
+ wrmsrl(MSR_EFER, trans_efer);
+ vcpu->stat.efer_reload++;
+}
+
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
struct vmx_host_state *hs = &vcpu->vmx_host_state;
vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel));
vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel));
#endif
+
+#ifdef CONFIG_X86_64
+ if (is_long_mode(vcpu)) {
+ save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
+ }
+#endif
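+	/* Load the guest's MSRs; vmx_load_host_state() restores the host's. */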
+ load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+ if (msr_efer_need_save_restore(vcpu))
+ load_transition_efer(vcpu);
}
static void vmx_load_host_state(struct kvm_vcpu *vcpu)
reload_tss();
}
-#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
- save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
- load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
- }
-#endif
+ save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+ load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
+ if (msr_efer_need_save_restore(vcpu))
+ load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
}
/*
{
u64 phys_addr = __pa(vcpu->vmcs);
int cpu;
+ u64 tsc_this, delta;
cpu = get_cpu();
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
+	/*
+	 * Make sure the time stamp counter is monotonic: adjust TSC_OFFSET
+	 * by the difference between this cpu's TSC and the value saved when
+	 * the vcpu last stopped running, so the guest never observes the
+	 * TSC going backwards.
+	 */
+ rdtscll(tsc_this);
+ delta = vcpu->host_tsc - tsc_this;
+ vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
}
}
put_cpu();
}
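+/*
+ * Give the guest direct use of the FPU: drop the forced CR0.TS (unless the
+ * guest itself has TS set) and stop intercepting #NM.
+ */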
+static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->fpu_active)
+ return;
+ vcpu->fpu_active = 1;
+ vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+ if (vcpu->cr0 & CR0_TS_MASK)
+ vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+ update_exception_bitmap(vcpu);
+}
+
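+/*
+ * Force CR0.TS in the guest and intercept #NM so the guest's FPU state can
+ * be loaded lazily on first use.
+ */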
+static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->fpu_active)
+ return;
+ vcpu->fpu_active = 0;
+ vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+ update_exception_bitmap(vcpu);
+}
+
static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
vcpu_clear(vcpu);
INTR_INFO_VALID_MASK);
}
+/*
+ * Swap the MSR entries at 'from' and 'to' in both the guest and host
+ * MSR arrays.
+ */
+static void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
+{
+	struct vmx_msr_entry tmp;
+
+ tmp = vcpu->guest_msrs[to];
+ vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
+ vcpu->guest_msrs[from] = tmp;
+ tmp = vcpu->host_msrs[to];
+ vcpu->host_msrs[to] = vcpu->host_msrs[from];
+ vcpu->host_msrs[from] = tmp;
+}
+
/*
* Set up the vmcs to automatically save and restore system
 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
 * mode.
 */
static void setup_msrs(struct kvm_vcpu *vcpu)
{
- int nr_skip, nr_good_msrs;
-
- if (is_long_mode(vcpu))
- nr_skip = NR_BAD_MSRS;
- else
- nr_skip = NR_64BIT_MSRS;
- nr_good_msrs = vcpu->nmsrs - nr_skip;
+ int save_nmsrs;
- /*
- * MSR_K6_STAR is only needed on long mode guests, and only
- * if efer.sce is enabled.
- */
- if (find_msr_entry(vcpu, MSR_K6_STAR)) {
- --nr_good_msrs;
+ save_nmsrs = 0;
#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
- ++nr_good_msrs;
-#endif
+ if (is_long_mode(vcpu)) {
+ int index;
+
+ index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ index = __find_msr_index(vcpu, MSR_LSTAR);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ index = __find_msr_index(vcpu, MSR_CSTAR);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ /*
+ * MSR_K6_STAR is only needed on long mode guests, and only
+ * if efer.sce is enabled.
+ */
+ index = __find_msr_index(vcpu, MSR_K6_STAR);
+ if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
+ move_msr_up(vcpu, index, save_nmsrs++);
}
+#endif
+ vcpu->save_nmsrs = save_nmsrs;
- vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
- virt_to_phys(vcpu->guest_msrs + nr_skip));
- vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
- virt_to_phys(vcpu->guest_msrs + nr_skip));
- vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
- virt_to_phys(vcpu->host_msrs + nr_skip));
- vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
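+	/* Remember where the individually-accessed MSRs live in the arrays. */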
+#ifdef CONFIG_X86_64
+ vcpu->msr_offset_kernel_gs_base =
+ __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+#endif
+ vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
}
/*
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
struct vmx_msr_entry *msr;
+ int ret = 0;
+
switch (msr_index) {
#ifdef CONFIG_X86_64
case MSR_EFER:
- return kvm_set_msr_common(vcpu, msr_index, data);
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
+ if (vcpu->vmx_host_state.loaded)
+ load_transition_efer(vcpu);
+ break;
case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data);
break;
case MSR_GS_BASE:
vmcs_writel(GUEST_GS_BASE, data);
break;
- case MSR_LSTAR:
- case MSR_SYSCALL_MASK:
- msr = find_msr_entry(vcpu, msr_index);
- if (msr)
- msr->data = data;
- load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
- break;
#endif
case MSR_IA32_SYSENTER_CS:
vmcs_write32(GUEST_SYSENTER_CS, data);
msr = find_msr_entry(vcpu, msr_index);
if (msr) {
msr->data = data;
+ if (vcpu->vmx_host_state.loaded)
+ load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
break;
}
- return kvm_set_msr_common(vcpu, msr_index, data);
- msr->data = data;
- break;
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
}
- return 0;
+ return ret;
}
/*
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
unsigned long dr7 = 0x400;
- u32 exception_bitmap;
int old_singlestep;
- exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
old_singlestep = vcpu->guest_debug.singlestep;
vcpu->guest_debug.enabled = dbg->enabled;
dr7 |= 0 << (i*4+16); /* execution breakpoint */
}
- exception_bitmap |= (1u << 1); /* Trap debug exceptions */
-
vcpu->guest_debug.singlestep = dbg->singlestep;
- } else {
- exception_bitmap &= ~(1u << 1); /* Ignore debug exceptions */
+ } else
vcpu->guest_debug.singlestep = 0;
- }
if (old_singlestep && !vcpu->guest_debug.singlestep) {
unsigned long flags;
vmcs_writel(GUEST_RFLAGS, flags);
}
- vmcs_write32(EXCEPTION_BITMAP, exception_bitmap);
+ update_exception_bitmap(vcpu);
vmcs_writel(GUEST_DR7, dr7);
return 0;
free_kvm_area();
}
-static void update_exception_bitmap(struct kvm_vcpu *vcpu)
-{
- if (vcpu->rmode.active)
- vmcs_write32(EXCEPTION_BITMAP, ~0);
- else
- vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
-}
-
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+
+ init_rmode_tss(vcpu->kvm);
}
#ifdef CONFIG_X86_64
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
+ vmx_fpu_deactivate(vcpu);
+
if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
enter_pmode(vcpu);
}
#endif
- if (!(cr0 & CR0_TS_MASK)) {
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, CR0_TS_MASK);
- }
-
vmcs_writel(CR0_READ_SHADOW, cr0);
vmcs_writel(GUEST_CR0,
(cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
vcpu->cr0 = cr0;
+
+ if (!(cr0 & CR0_TS_MASK) || !(cr0 & CR0_PE_MASK))
+ vmx_fpu_activate(vcpu);
}
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
vmcs_writel(GUEST_CR3, cr3);
-
- if (!(vcpu->cr0 & CR0_TS_MASK)) {
- vcpu->fpu_active = 0;
- vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
- vmcs_set_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- }
+ if (vcpu->cr0 & CR0_PE_MASK)
+ vmx_fpu_deactivate(vcpu);
}
static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
var->unusable = (ar >> 16) & 1;
}
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
+static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
- struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
u32 ar;
- vmcs_writel(sf->base, var->base);
- vmcs_write32(sf->limit, var->limit);
- vmcs_write16(sf->selector, var->selector);
- if (vcpu->rmode.active && var->s) {
- /*
- * Hack real-mode segments into vm86 compatibility.
- */
- if (var->base == 0xffff0000 && var->selector == 0xf000)
- vmcs_writel(sf->base, 0xf0000);
- ar = 0xf3;
- } else if (var->unusable)
+ if (var->unusable)
ar = 1 << 16;
else {
ar = var->type & 15;
}
if (ar == 0) /* a 0 value means unusable */
ar = AR_UNUSABLE_MASK;
+
+ return ar;
+}
+
+static void vmx_set_segment(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg)
+{
+ struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+ u32 ar;
+
+ if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
+ vcpu->rmode.tr.selector = var->selector;
+ vcpu->rmode.tr.base = var->base;
+ vcpu->rmode.tr.limit = var->limit;
+ vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
+ return;
+ }
+ vmcs_writel(sf->base, var->base);
+ vmcs_write32(sf->limit, var->limit);
+ vmcs_write16(sf->selector, var->selector);
+ if (vcpu->rmode.active && var->s) {
+ /*
+ * Hack real-mode segments into vm86 compatibility.
+ */
+ if (var->base == 0xffff0000 && var->selector == 0xf000)
+ vmcs_writel(sf->base, 0xf0000);
+ ar = 0xf3;
+ } else
+ ar = vmx_segment_access_rights(var);
vmcs_write32(sf->ar_bytes, ar);
}
}
page = kmap_atomic(p1, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
*(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
kunmap_atomic(page, KM_USER0);
page = kmap_atomic(p2, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
kunmap_atomic(page, KM_USER0);
page = kmap_atomic(p3, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
*(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
kunmap_atomic(page, KM_USER0);
struct descriptor_table dt;
int i;
int ret = 0;
- extern asmlinkage void kvm_vmx_return(void);
+ unsigned long kvm_vmx_return;
if (!init_rmode_tss(vcpu->kvm)) {
ret = -ENOMEM;
memset(vcpu->regs, 0, sizeof(vcpu->regs));
vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
vcpu->cr8 = 0;
- vcpu->apic_base = 0xfee00000 |
- /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
- MSR_IA32_APICBASE_ENABLE;
+ vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+ if (vcpu == &vcpu->kvm->vcpus[0])
+ vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
fx_init(vcpu);
| CPU_BASED_USE_TSC_OFFSETING /* 21.3 */
);
- vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
get_idt(&dt);
vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
-
- vmcs_writel(HOST_RIP, (unsigned long)kvm_vmx_return); /* 22.2.5 */
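+	/* .Lkvm_vmx_return is defined in the inline asm in vmx_vcpu_run() */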
+ asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+ vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
vcpu->host_msrs[j].reserved = 0;
vcpu->host_msrs[j].data = data;
vcpu->guest_msrs[j] = vcpu->host_msrs[j];
-#ifdef CONFIG_X86_64
- if (index == MSR_KERNEL_GS_BASE)
- msr_offset_kernel_gs_base = j;
-#endif
++vcpu->nmsrs;
}
#ifdef CONFIG_X86_64
vmx_set_efer(vcpu, 0);
#endif
+ vmx_fpu_activate(vcpu);
+ update_exception_bitmap(vcpu);
return 0;
if (!vcpu->rmode.active)
return 0;
- if (vec == GP_VECTOR && err_code == 0)
+ /*
+	 * An instruction with an address size override prefix (opcode 0x67)
+	 * causes an #SS fault with error code 0 in VM86 mode.
+ */
+ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
return 1;
return 0;
}
if (is_no_device(intr_info)) {
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- if (!(vcpu->cr0 & CR0_TS_MASK))
- vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+ vmx_fpu_activate(vcpu);
return 1;
}
if (vcpu->rmode.active &&
handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
- error_code))
+ error_code)) {
+ if (vcpu->halt_request) {
+ vcpu->halt_request = 0;
+ return kvm_emulate_halt(vcpu);
+ }
return 1;
+ }
if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
break;
case 2: /* clts */
vcpu_load_rsp_rip(vcpu);
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+ vmx_fpu_deactivate(vcpu);
vcpu->cr0 &= ~CR0_TS_MASK;
vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
+ vmx_fpu_activate(vcpu);
skip_emulated_instruction(vcpu);
return 1;
case 1: /*mov from cr*/
static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
skip_emulated_instruction(vcpu);
- if (vcpu->irq_summary)
- return 1;
-
- kvm_run->exit_reason = KVM_EXIT_HLT;
- ++vcpu->stat.halt_exits;
- return 0;
+ return kvm_emulate_halt(vcpu);
}
static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
};
static const int kvm_vmx_max_exit_handlers =
- sizeof(kvm_vmx_exit_handlers) / sizeof(*kvm_vmx_exit_handlers);
+ ARRAY_SIZE(kvm_vmx_exit_handlers);
/*
* The guest has exited. See if we can fix it or if we need userspace
(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
}
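+/* Rewriting GUEST_CR3 forces a TLB flush on the next VM entry. */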
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
+	vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
+}
+
static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
u8 fail;
int r;
preempted:
- if (!vcpu->mmio_read_completed)
- do_interrupt_requests(vcpu, kvm_run);
-
if (vcpu->guest_debug.enabled)
kvm_guest_debug_pre(vcpu);
-#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
- save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
- load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
- }
-#endif
-
again:
+ if (!vcpu->mmio_read_completed)
+ do_interrupt_requests(vcpu, kvm_run);
+
vmx_save_host_state(vcpu);
kvm_load_guest_fpu(vcpu);
+ r = kvm_mmu_reload(vcpu);
+ if (unlikely(r))
+ goto out;
+
/*
* Loading guest fpu may have cleared host cr0.ts
*/
vmcs_writel(HOST_CR0, read_cr0());
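+
+	/*
+	 * Set guest_mode with interrupts disabled: requests (such as a TLB
+	 * flush) raised earlier are picked up by the check below, later ones
+	 * find guest_mode set and kick the vcpu out of the guest with an IPI.
+	 */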
+ local_irq_disable();
+
+ vcpu->guest_mode = 1;
+ if (vcpu->requests)
+ if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+ vmx_flush_tlb(vcpu);
+
asm (
/* Store host registers */
- "pushf \n\t"
#ifdef CONFIG_X86_64
"push %%rax; push %%rbx; push %%rdx;"
"push %%rsi; push %%rdi; push %%rbp;"
"mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
#endif
/* Enter guest mode */
- "jne launched \n\t"
+ "jne .Llaunched \n\t"
ASM_VMX_VMLAUNCH "\n\t"
- "jmp kvm_vmx_return \n\t"
- "launched: " ASM_VMX_VMRESUME "\n\t"
- ".globl kvm_vmx_return \n\t"
- "kvm_vmx_return: "
+ "jmp .Lkvm_vmx_return \n\t"
+ ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
+ ".Lkvm_vmx_return: "
/* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64
"xchg %3, (%%rsp) \n\t"
"pop %%ecx; popa \n\t"
#endif
"setbe %0 \n\t"
- "popf \n\t"
: "=q" (fail)
: "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
"c"(vcpu),
[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
: "cc", "memory" );
+ vcpu->guest_mode = 0;
+ local_irq_enable();
+
++vcpu->stat.exits;
vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
return r;
}
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
-{
- vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
-}
-
static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
unsigned long addr,
u32 err_code)
vmcs_clear(vmcs);
vcpu->vmcs = vmcs;
vcpu->launched = 0;
- vcpu->fpu_active = 1;
return 0;
iova = kmap(vmx_io_bitmap_a);
memset(iova, 0xff, PAGE_SIZE);
clear_bit(0x80, iova);
- kunmap(iova);
+ kunmap(vmx_io_bitmap_a);
iova = kmap(vmx_io_bitmap_b);
memset(iova, 0xff, PAGE_SIZE);
- kunmap(iova);
+ kunmap(vmx_io_bitmap_b);
r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
if (r)