#include "kvm.h"
#include "vmx.h"
+#include "segment_descriptor.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>
+
#include <asm/io.h>
#include <asm/desc.h>
-#include "segment_descriptor.h"
-
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+static int init_rmode_tss(struct kvm *kvm);
+
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
#else
#define HOST_IS_64 0
#endif
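+/*
+ * SCE is the only EFER bit switched by software here; the long-mode
+ * bits are handled by the VM-entry/VM-exit controls.
+ */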
+#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
static struct vmcs_descriptor {
int size;
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
+static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
+{
+ return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
+}
+
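+/*
+ * Nonzero when host and guest EFER differ in the save/restore bits,
+ * i.e. when EFER really has to be switched around guest entry.
+ */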
+static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
+{
+ int efer_offset = vcpu->msr_offset_efer;
+ return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
+ msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
+}
+
static inline int is_page_fault(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
vmcs_clear(vcpu->vmcs);
if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
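+ /*
+ * Record this CPU's TSC so vmx_vcpu_load() can keep the guest TSC
+ * monotonic when the vcpu is migrated to another CPU.
+ */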
+ rdtscll(vcpu->host_tsc);
}
static void vcpu_clear(struct kvm_vcpu *vcpu)
#endif
}
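+/*
+ * Write an EFER for guest execution: the host value with the guest's
+ * save/restore bits (currently just SCE) merged in.
+ */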
+static void load_transition_efer(struct kvm_vcpu *vcpu)
+{
+ u64 trans_efer;
+ int efer_offset = vcpu->msr_offset_efer;
+
+ trans_efer = vcpu->host_msrs[efer_offset].data;
+ trans_efer &= ~EFER_SAVE_RESTORE_BITS;
+ trans_efer |= msr_efer_save_restore_bits(
+ vcpu->guest_msrs[efer_offset]);
+ wrmsrl(MSR_EFER, trans_efer);
+ vcpu->stat.efer_reload++;
+}
+
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
struct vmx_host_state *hs = &vcpu->vmx_host_state;
}
#endif
load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
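+ /* Only touch EFER if host and guest actually disagree on SCE. */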
+ if (msr_efer_need_save_restore(vcpu))
+ load_transition_efer(vcpu);
}
static void vmx_load_host_state(struct kvm_vcpu *vcpu)
}
save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
+ if (msr_efer_need_save_restore(vcpu))
+ load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
}
/*
{
u64 phys_addr = __pa(vcpu->vmcs);
int cpu;
+ u64 tsc_this, delta;
cpu = get_cpu();
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
+ /*
+ * Make sure the guest's time stamp counter stays monotonic: fold
+ * any difference between the old and the new host TSC into
+ * TSC_OFFSET.
+ */
+ rdtscll(tsc_this);
+ delta = vcpu->host_tsc - tsc_this;
+ vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
}
}
*/
static void setup_msrs(struct kvm_vcpu *vcpu)
{
- int index, save_nmsrs;
+ int save_nmsrs;
save_nmsrs = 0;
#ifdef CONFIG_X86_64
if (is_long_mode(vcpu)) {
+ int index;
+
index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
if (index >= 0)
move_msr_up(vcpu, index, save_nmsrs++);
vcpu->msr_offset_kernel_gs_base =
__find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
#endif
- index = __find_msr_index(vcpu, MSR_EFER);
- if (index >= 0)
- save_nmsrs = 1;
- else {
- save_nmsrs = 0;
- index = 0;
- }
- vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
- virt_to_phys(vcpu->guest_msrs + index));
- vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
- virt_to_phys(vcpu->guest_msrs + index));
- vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
- virt_to_phys(vcpu->host_msrs + index));
- vmcs_write32(VM_EXIT_MSR_STORE_COUNT, save_nmsrs);
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, save_nmsrs);
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, save_nmsrs);
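+ /* Cache the EFER slot for the EFER save/restore checks. */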
+ vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
}
/*
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
struct vmx_msr_entry *msr;
+ int ret = 0;
+
switch (msr_index) {
#ifdef CONFIG_X86_64
case MSR_EFER:
- return kvm_set_msr_common(vcpu, msr_index, data);
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
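+ /*
+ * Guest MSRs are live while host state is saved; apply the
+ * new EFER immediately.
+ */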
+ if (vcpu->vmx_host_state.loaded)
+ load_transition_efer(vcpu);
+ break;
case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data);
break;
if (msr) {
msr->data = data;
if (vcpu->vmx_host_state.loaded)
- load_msrs(vcpu->guest_msrs,vcpu->save_nmsrs);
+ load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
break;
}
- return kvm_set_msr_common(vcpu, msr_index, data);
- msr->data = data;
- break;
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
}
- return 0;
+ return ret;
}
/*
fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+
+ init_rmode_tss(vcpu->kvm);
}
#ifdef CONFIG_X86_64
}
page = kmap_atomic(p1, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
*(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
kunmap_atomic(page, KM_USER0);
page = kmap_atomic(p2, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
kunmap_atomic(page, KM_USER0);
page = kmap_atomic(p3, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
*(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
kunmap_atomic(page, KM_USER0);
memset(vcpu->regs, 0, sizeof(vcpu->regs));
vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
vcpu->cr8 = 0;
- vcpu->apic_base = 0xfee00000 |
- /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
- MSR_IA32_APICBASE_ENABLE;
+ vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+ if (vcpu == &vcpu->kvm->vcpus[0])
+ vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
fx_init(vcpu);
asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
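+ /*
+ * MSR switching is done by hand in vmx_save_host_state() and
+ * vmx_load_host_state(), so the automatic VMCS MSR areas stay empty.
+ */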
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
if (vcpu->rmode.active &&
handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
- error_code))
+ error_code)) {
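+ /* Real-mode emulation may have executed a hlt instruction. */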
+ if (vcpu->halt_request) {
+ vcpu->halt_request = 0;
+ return kvm_emulate_halt(vcpu);
+ }
return 1;
+ }
if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
skip_emulated_instruction(vcpu);
- if (vcpu->irq_summary)
- return 1;
-
- kvm_run->exit_reason = KVM_EXIT_HLT;
- ++vcpu->stat.halt_exits;
- return 0;
+ return kvm_emulate_halt(vcpu);
}
static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
};
static const int kvm_vmx_max_exit_handlers =
- sizeof(kvm_vmx_exit_handlers) / sizeof(*kvm_vmx_exit_handlers);
+ ARRAY_SIZE(kvm_vmx_exit_handlers);
/*
* The guest has exited. See if we can fix it or if we need userspace
(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
}
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
+}
+
static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
u8 fail;
int r;
preempted:
- if (!vcpu->mmio_read_completed)
- do_interrupt_requests(vcpu, kvm_run);
-
if (vcpu->guest_debug.enabled)
kvm_guest_debug_pre(vcpu);
again:
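+ /* Recheck pending interrupt injection on every entry attempt. */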
+ if (!vcpu->mmio_read_completed)
+ do_interrupt_requests(vcpu, kvm_run);
+
vmx_save_host_state(vcpu);
kvm_load_guest_fpu(vcpu);
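+ /*
+ * The shadow page table root may have been invalidated; rebuild
+ * it before entering the guest.
+ */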
+ r = kvm_mmu_reload(vcpu);
+ if (unlikely(r))
+ goto out;
+
/*
* Loading guest fpu may have cleared host cr0.ts
*/
vmcs_writel(HOST_CR0, read_cr0());
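+ /*
+ * Interrupts stay off until after the guest exits; guest_mode
+ * lets remote CPUs know an IPI is needed to kick this vcpu out
+ * of guest mode for queued requests such as TLB flushes.
+ */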
+ local_irq_disable();
+
+ vcpu->guest_mode = 1;
+ if (vcpu->requests)
+ if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+ vmx_flush_tlb(vcpu);
+
asm (
/* Store host registers */
- "pushf \n\t"
#ifdef CONFIG_X86_64
"push %%rax; push %%rbx; push %%rdx;"
"push %%rsi; push %%rdi; push %%rbp;"
"pop %%ecx; popa \n\t"
#endif
"setbe %0 \n\t"
- "popf \n\t"
: "=q" (fail)
: "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
"c"(vcpu),
[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
: "cc", "memory" );
+ vcpu->guest_mode = 0;
+ local_irq_enable();
+
++vcpu->stat.exits;
vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
return r;
}
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
-{
- vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
-}
-
static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
unsigned long addr,
u32 err_code)