KVM: x86: always use "acknowledge interrupt on exit"
arch/x86/kvm/vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include "kvm_cache_regs.h"
37 #include "x86.h"
38
39 #include <asm/cpu.h>
40 #include <asm/io.h>
41 #include <asm/desc.h>
42 #include <asm/vmx.h>
43 #include <asm/virtext.h>
44 #include <asm/mce.h>
45 #include <asm/fpu/internal.h>
46 #include <asm/perf_event.h>
47 #include <asm/debugreg.h>
48 #include <asm/kexec.h>
49 #include <asm/apic.h>
50 #include <asm/irq_remapping.h>
51
52 #include "trace.h"
53 #include "pmu.h"
54
55 #define __ex(x) __kvm_handle_fault_on_reboot(x)
56 #define __ex_clear(x, reg) \
57         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
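/*
 * Rough summary (assuming the standard kvm fault-on-reboot helpers): __ex()
 * wraps a VMX instruction with an exception fixup so that a fault taken
 * because VMX has already been turned off during an emergency reboot is
 * ignored instead of crashing; outside of a reboot the fixup calls
 * kvm_spurious_fault().  __ex_clear() additionally zeroes the named register
 * in the fixup path, so e.g. a faulting VMREAD reports a benign zero value.
 */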
58
59 MODULE_AUTHOR("Qumranet");
60 MODULE_LICENSE("GPL");
61
62 static const struct x86_cpu_id vmx_cpu_id[] = {
63         X86_FEATURE_MATCH(X86_FEATURE_VMX),
64         {}
65 };
66 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
67
68 static bool __read_mostly enable_vpid = 1;
69 module_param_named(vpid, enable_vpid, bool, 0444);
70
71 static bool __read_mostly flexpriority_enabled = 1;
72 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
73
74 static bool __read_mostly enable_ept = 1;
75 module_param_named(ept, enable_ept, bool, S_IRUGO);
76
77 static bool __read_mostly enable_unrestricted_guest = 1;
78 module_param_named(unrestricted_guest,
79                         enable_unrestricted_guest, bool, S_IRUGO);
80
81 static bool __read_mostly enable_ept_ad_bits = 1;
82 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
83
84 static bool __read_mostly emulate_invalid_guest_state = true;
85 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
86
87 static bool __read_mostly vmm_exclusive = 1;
88 module_param(vmm_exclusive, bool, S_IRUGO);
89
90 static bool __read_mostly fasteoi = 1;
91 module_param(fasteoi, bool, S_IRUGO);
92
93 static bool __read_mostly enable_apicv = 1;
94 module_param(enable_apicv, bool, S_IRUGO);
95
96 static bool __read_mostly enable_shadow_vmcs = 1;
97 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
98 /*
99  * If nested=1, nested virtualization is supported, i.e., guests may use
100  * VMX and act as hypervisors for their own guests. If nested=0, guests may
101  * not use VMX instructions.
102  */
103 static bool __read_mostly nested = 0;
104 module_param(nested, bool, S_IRUGO);
105
106 static u64 __read_mostly host_xss;
107
108 static bool __read_mostly enable_pml = 1;
109 module_param_named(pml, enable_pml, bool, S_IRUGO);
110
111 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
112
113 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
114 static int __read_mostly cpu_preemption_timer_multi;
115 static bool __read_mostly enable_preemption_timer = 1;
116 #ifdef CONFIG_X86_64
117 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
118 #endif
119
120 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
121 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
122 #define KVM_VM_CR0_ALWAYS_ON                                            \
123         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
124 #define KVM_CR4_GUEST_OWNED_BITS                                      \
125         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
126          | X86_CR4_OSXMMEXCPT | X86_CR4_TSD)
127
128 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
129 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
130
131 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
132
133 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
134
135 /*
136  * These two parameters configure the controls for Pause-Loop Exiting:
137  * ple_gap:    upper bound on the amount of time between two successive
138  *             executions of PAUSE in a loop. Also indicates whether PLE is enabled.
139  *             According to tests, this time is usually smaller than 128 cycles.
140  * ple_window: upper bound on the amount of time a guest is allowed to execute
141  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
142  *             less than 2^12 cycles.
143  * Time is measured on a counter that runs at the same rate as the TSC;
144  * refer to SDM volume 3B, sections 21.6.13 & 22.1.3.
145  */
146 #define KVM_VMX_DEFAULT_PLE_GAP           128
147 #define KVM_VMX_DEFAULT_PLE_WINDOW        4096
148 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW   2
149 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
150 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX    \
151                 INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
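/*
 * Rough sketch of the window arithmetic implied by the defaults above
 * (the helpers that implement it live later in this file):
 *
 *   grow:   new_window = old_window * ple_window_grow
 *           e.g. 4096 -> 8192 -> 16384 -> ...
 *   clamp:  new_window <= ple_window_max, which defaults to
 *           INT_MAX / ple_window_grow so the multiplication cannot overflow
 *   shrink: with ple_window_shrink == 0, the window is simply reset to
 *           ple_window instead of being divided down
 */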
152
153 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
154 module_param(ple_gap, int, S_IRUGO);
155
156 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
157 module_param(ple_window, int, S_IRUGO);
158
159 /* By default, double the per-vcpu window on every exit. */
160 static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
161 module_param(ple_window_grow, int, S_IRUGO);
162
163 /* By default, reset the per-vcpu window to ple_window on every exit. */
164 static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
165 module_param(ple_window_shrink, int, S_IRUGO);
166
167 /* Default is to compute the maximum so we can never overflow. */
168 static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
169 static int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
170 module_param(ple_window_max, int, S_IRUGO);
171
172 extern const ulong vmx_return;
173
174 #define NR_AUTOLOAD_MSRS 8
175 #define VMCS02_POOL_SIZE 1
176
177 struct vmcs {
178         u32 revision_id;
179         u32 abort;
180         char data[0];
181 };
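/*
 * This mirrors the architectural layout of a VMCS region: a 32-bit revision
 * identifier, a 32-bit VMX-abort indicator, and implementation-specific data
 * that software only touches through VMREAD/VMWRITE.  data[] is a flexible
 * array member covering the rest of the (typically 4K) region.
 */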
182
183 /*
184  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
185  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
186  * loaded on this CPU (so we can clear them if the CPU goes down).
187  */
188 struct loaded_vmcs {
189         struct vmcs *vmcs;
190         int cpu;
191         int launched;
192         struct list_head loaded_vmcss_on_cpu_link;
193 };
194
195 struct shared_msr_entry {
196         unsigned index;
197         u64 data;
198         u64 mask;
199 };
200
201 /*
202  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
203  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
204  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
205  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
206  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
207  * More than one of these structures may exist, if L1 runs multiple L2 guests.
208  * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
209  * underlying hardware which will be used to run L2.
210  * This structure is packed to ensure that its layout is identical across
211  * machines (necessary for live migration).
212  * If there are changes in this struct, VMCS12_REVISION must be changed.
213  */
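/*
 * A rough picture of how the pieces described above fit together:
 *
 *   L1 (guest hypervisor) ---VMWRITE---> vmcs12  (lives in L1 guest memory)
 *   L0 (KVM)              ---reads-----> vmcs12, merges it with L0's own
 *                                        requirements
 *                         ---builds----> vmcs02  (hardware VMCS used to run L2)
 *
 * vmcs01 is the hardware VMCS used to run L1 itself; vmcs02 is rebuilt from
 * vmcs12 whenever L1 enters L2 via nested_vmx_run().
 */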
214 typedef u64 natural_width;
215 struct __packed vmcs12 {
216         /* According to the Intel spec, a VMCS region must start with the
217          * following two fields. Then follow implementation-specific data.
218          */
219         u32 revision_id;
220         u32 abort;
221
222         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
223         u32 padding[7]; /* room for future expansion */
224
225         u64 io_bitmap_a;
226         u64 io_bitmap_b;
227         u64 msr_bitmap;
228         u64 vm_exit_msr_store_addr;
229         u64 vm_exit_msr_load_addr;
230         u64 vm_entry_msr_load_addr;
231         u64 tsc_offset;
232         u64 virtual_apic_page_addr;
233         u64 apic_access_addr;
234         u64 posted_intr_desc_addr;
235         u64 ept_pointer;
236         u64 eoi_exit_bitmap0;
237         u64 eoi_exit_bitmap1;
238         u64 eoi_exit_bitmap2;
239         u64 eoi_exit_bitmap3;
240         u64 xss_exit_bitmap;
241         u64 guest_physical_address;
242         u64 vmcs_link_pointer;
243         u64 guest_ia32_debugctl;
244         u64 guest_ia32_pat;
245         u64 guest_ia32_efer;
246         u64 guest_ia32_perf_global_ctrl;
247         u64 guest_pdptr0;
248         u64 guest_pdptr1;
249         u64 guest_pdptr2;
250         u64 guest_pdptr3;
251         u64 guest_bndcfgs;
252         u64 host_ia32_pat;
253         u64 host_ia32_efer;
254         u64 host_ia32_perf_global_ctrl;
255         u64 padding64[8]; /* room for future expansion */
256         /*
257          * To allow migration of L1 (complete with its L2 guests) between
258          * machines of different natural widths (32 or 64 bit), we cannot have
259  * unsigned long fields with no explicit size. We use u64 (aliased
260          * natural_width) instead. Luckily, x86 is little-endian.
261          */
262         natural_width cr0_guest_host_mask;
263         natural_width cr4_guest_host_mask;
264         natural_width cr0_read_shadow;
265         natural_width cr4_read_shadow;
266         natural_width cr3_target_value0;
267         natural_width cr3_target_value1;
268         natural_width cr3_target_value2;
269         natural_width cr3_target_value3;
270         natural_width exit_qualification;
271         natural_width guest_linear_address;
272         natural_width guest_cr0;
273         natural_width guest_cr3;
274         natural_width guest_cr4;
275         natural_width guest_es_base;
276         natural_width guest_cs_base;
277         natural_width guest_ss_base;
278         natural_width guest_ds_base;
279         natural_width guest_fs_base;
280         natural_width guest_gs_base;
281         natural_width guest_ldtr_base;
282         natural_width guest_tr_base;
283         natural_width guest_gdtr_base;
284         natural_width guest_idtr_base;
285         natural_width guest_dr7;
286         natural_width guest_rsp;
287         natural_width guest_rip;
288         natural_width guest_rflags;
289         natural_width guest_pending_dbg_exceptions;
290         natural_width guest_sysenter_esp;
291         natural_width guest_sysenter_eip;
292         natural_width host_cr0;
293         natural_width host_cr3;
294         natural_width host_cr4;
295         natural_width host_fs_base;
296         natural_width host_gs_base;
297         natural_width host_tr_base;
298         natural_width host_gdtr_base;
299         natural_width host_idtr_base;
300         natural_width host_ia32_sysenter_esp;
301         natural_width host_ia32_sysenter_eip;
302         natural_width host_rsp;
303         natural_width host_rip;
304         natural_width paddingl[8]; /* room for future expansion */
305         u32 pin_based_vm_exec_control;
306         u32 cpu_based_vm_exec_control;
307         u32 exception_bitmap;
308         u32 page_fault_error_code_mask;
309         u32 page_fault_error_code_match;
310         u32 cr3_target_count;
311         u32 vm_exit_controls;
312         u32 vm_exit_msr_store_count;
313         u32 vm_exit_msr_load_count;
314         u32 vm_entry_controls;
315         u32 vm_entry_msr_load_count;
316         u32 vm_entry_intr_info_field;
317         u32 vm_entry_exception_error_code;
318         u32 vm_entry_instruction_len;
319         u32 tpr_threshold;
320         u32 secondary_vm_exec_control;
321         u32 vm_instruction_error;
322         u32 vm_exit_reason;
323         u32 vm_exit_intr_info;
324         u32 vm_exit_intr_error_code;
325         u32 idt_vectoring_info_field;
326         u32 idt_vectoring_error_code;
327         u32 vm_exit_instruction_len;
328         u32 vmx_instruction_info;
329         u32 guest_es_limit;
330         u32 guest_cs_limit;
331         u32 guest_ss_limit;
332         u32 guest_ds_limit;
333         u32 guest_fs_limit;
334         u32 guest_gs_limit;
335         u32 guest_ldtr_limit;
336         u32 guest_tr_limit;
337         u32 guest_gdtr_limit;
338         u32 guest_idtr_limit;
339         u32 guest_es_ar_bytes;
340         u32 guest_cs_ar_bytes;
341         u32 guest_ss_ar_bytes;
342         u32 guest_ds_ar_bytes;
343         u32 guest_fs_ar_bytes;
344         u32 guest_gs_ar_bytes;
345         u32 guest_ldtr_ar_bytes;
346         u32 guest_tr_ar_bytes;
347         u32 guest_interruptibility_info;
348         u32 guest_activity_state;
349         u32 guest_sysenter_cs;
350         u32 host_ia32_sysenter_cs;
351         u32 vmx_preemption_timer_value;
352         u32 padding32[7]; /* room for future expansion */
353         u16 virtual_processor_id;
354         u16 posted_intr_nv;
355         u16 guest_es_selector;
356         u16 guest_cs_selector;
357         u16 guest_ss_selector;
358         u16 guest_ds_selector;
359         u16 guest_fs_selector;
360         u16 guest_gs_selector;
361         u16 guest_ldtr_selector;
362         u16 guest_tr_selector;
363         u16 guest_intr_status;
364         u16 host_es_selector;
365         u16 host_cs_selector;
366         u16 host_ss_selector;
367         u16 host_ds_selector;
368         u16 host_fs_selector;
369         u16 host_gs_selector;
370         u16 host_tr_selector;
371 };
372
373 /*
374  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
375  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
376  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
377  */
378 #define VMCS12_REVISION 0x11e57ed0
379
380 /*
381  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
382  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
383  * the current implementation, 4K is reserved to avoid future complications.
384  */
385 #define VMCS12_SIZE 0x1000
386
387 /* Used to remember the last vmcs02 used for some recently used vmcs12s */
388 struct vmcs02_list {
389         struct list_head list;
390         gpa_t vmptr;
391         struct loaded_vmcs vmcs02;
392 };
393
394 /*
395  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
396  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
397  */
398 struct nested_vmx {
399         /* Has the level-1 guest executed VMXON? */
400         bool vmxon;
401         gpa_t vmxon_ptr;
402
403         /* The guest-physical address of the current VMCS L1 keeps for L2 */
404         gpa_t current_vmptr;
405         /* The host-usable pointer to the above */
406         struct page *current_vmcs12_page;
407         struct vmcs12 *current_vmcs12;
408         struct vmcs *current_shadow_vmcs;
409         /*
410          * Indicates if the shadow vmcs must be updated with the
411          * data held by vmcs12
412          */
413         bool sync_shadow_vmcs;
414
415         /* vmcs02_list cache of VMCSs recently used to run L2 guests */
416         struct list_head vmcs02_pool;
417         int vmcs02_num;
418         u64 vmcs01_tsc_offset;
419         /* L2 must run next, and mustn't decide to exit to L1. */
420         bool nested_run_pending;
421         /*
422          * Guest pages referred to in vmcs02 with host-physical pointers, so
423          * we must keep them pinned while L2 runs.
424          */
425         struct page *apic_access_page;
426         struct page *virtual_apic_page;
427         struct page *pi_desc_page;
428         struct pi_desc *pi_desc;
429         bool pi_pending;
430         u16 posted_intr_nv;
431
432         struct hrtimer preemption_timer;
433         bool preemption_timer_expired;
434
435         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
436         u64 vmcs01_debugctl;
437
438         u16 vpid02;
439         u16 last_vpid;
440
441         u32 nested_vmx_procbased_ctls_low;
442         u32 nested_vmx_procbased_ctls_high;
443         u32 nested_vmx_true_procbased_ctls_low;
444         u32 nested_vmx_secondary_ctls_low;
445         u32 nested_vmx_secondary_ctls_high;
446         u32 nested_vmx_pinbased_ctls_low;
447         u32 nested_vmx_pinbased_ctls_high;
448         u32 nested_vmx_exit_ctls_low;
449         u32 nested_vmx_exit_ctls_high;
450         u32 nested_vmx_true_exit_ctls_low;
451         u32 nested_vmx_entry_ctls_low;
452         u32 nested_vmx_entry_ctls_high;
453         u32 nested_vmx_true_entry_ctls_low;
454         u32 nested_vmx_misc_low;
455         u32 nested_vmx_misc_high;
456         u32 nested_vmx_ept_caps;
457         u32 nested_vmx_vpid_caps;
458 };
459
460 #define POSTED_INTR_ON  0
461 #define POSTED_INTR_SN  1
462
463 /* Posted-Interrupt Descriptor */
464 struct pi_desc {
465         u32 pir[8];     /* Posted interrupt requested */
466         union {
467                 struct {
468                                 /* bit 256 - Outstanding Notification */
469                         u16     on      : 1,
470                                 /* bit 257 - Suppress Notification */
471                                 sn      : 1,
472                                 /* bit 271:258 - Reserved */
473                                 rsvd_1  : 14;
474                                 /* bit 279:272 - Notification Vector */
475                         u8      nv;
476                                 /* bit 287:280 - Reserved */
477                         u8      rsvd_2;
478                                 /* bit 319:288 - Notification Destination */
479                         u32     ndst;
480                 };
481                 u64 control;
482         };
483         u32 rsvd[6];
484 } __aligned(64);
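/*
 * Rough posting sequence for the descriptor above, matching the helpers
 * that follow (a sketch, not the complete protocol):
 *
 *   1. set bit 'vector' in pir[]                  (pi_test_and_set_pir)
 *   2. set the ON (outstanding notification) bit  (pi_test_and_set_on)
 *   3. if ON was previously clear and SN is clear, send the notification
 *      vector 'nv' as a physical IPI to the APIC destination in 'ndst'
 *
 * SN (suppress notification) is set while notifications are unwanted, e.g.
 * while the vCPU is not running, so that steps 1-2 still record the
 * interrupt without triggering an IPI.
 */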
485
486 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
487 {
488         return test_and_set_bit(POSTED_INTR_ON,
489                         (unsigned long *)&pi_desc->control);
490 }
491
492 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
493 {
494         return test_and_clear_bit(POSTED_INTR_ON,
495                         (unsigned long *)&pi_desc->control);
496 }
497
498 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
499 {
500         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
501 }
502
503 static inline void pi_clear_sn(struct pi_desc *pi_desc)
504 {
505         return clear_bit(POSTED_INTR_SN,
506                         (unsigned long *)&pi_desc->control);
507 }
508
509 static inline void pi_set_sn(struct pi_desc *pi_desc)
510 {
511         return set_bit(POSTED_INTR_SN,
512                         (unsigned long *)&pi_desc->control);
513 }
514
515 static inline int pi_test_on(struct pi_desc *pi_desc)
516 {
517         return test_bit(POSTED_INTR_ON,
518                         (unsigned long *)&pi_desc->control);
519 }
520
521 static inline int pi_test_sn(struct pi_desc *pi_desc)
522 {
523         return test_bit(POSTED_INTR_SN,
524                         (unsigned long *)&pi_desc->control);
525 }
526
527 struct vcpu_vmx {
528         struct kvm_vcpu       vcpu;
529         unsigned long         host_rsp;
530         u8                    fail;
531         bool                  nmi_known_unmasked;
532         u32                   exit_intr_info;
533         u32                   idt_vectoring_info;
534         ulong                 rflags;
535         struct shared_msr_entry *guest_msrs;
536         int                   nmsrs;
537         int                   save_nmsrs;
538         unsigned long         host_idt_base;
539 #ifdef CONFIG_X86_64
540         u64                   msr_host_kernel_gs_base;
541         u64                   msr_guest_kernel_gs_base;
542 #endif
543         u32 vm_entry_controls_shadow;
544         u32 vm_exit_controls_shadow;
545         /*
546          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
547          * non-nested (L1) guest, it always points to vmcs01. For a nested
548          * guest (L2), it points to a different VMCS.
549          */
550         struct loaded_vmcs    vmcs01;
551         struct loaded_vmcs   *loaded_vmcs;
552         bool                  __launched; /* temporary, used in vmx_vcpu_run */
553         struct msr_autoload {
554                 unsigned nr;
555                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
556                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
557         } msr_autoload;
558         struct {
559                 int           loaded;
560                 u16           fs_sel, gs_sel, ldt_sel;
561 #ifdef CONFIG_X86_64
562                 u16           ds_sel, es_sel;
563 #endif
564                 int           gs_ldt_reload_needed;
565                 int           fs_reload_needed;
566                 u64           msr_host_bndcfgs;
567                 unsigned long vmcs_host_cr4;    /* May not match real cr4 */
568         } host_state;
569         struct {
570                 int vm86_active;
571                 ulong save_rflags;
572                 struct kvm_segment segs[8];
573         } rmode;
574         struct {
575                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
576                 struct kvm_save_segment {
577                         u16 selector;
578                         unsigned long base;
579                         u32 limit;
580                         u32 ar;
581                 } seg[8];
582         } segment_cache;
583         int vpid;
584         bool emulation_required;
585
586         /* Support for vnmi-less CPUs */
587         int soft_vnmi_blocked;
588         ktime_t entry_time;
589         s64 vnmi_blocked_time;
590         u32 exit_reason;
591
592         /* Posted interrupt descriptor */
593         struct pi_desc pi_desc;
594
595         /* Support for a guest hypervisor (nested VMX) */
596         struct nested_vmx nested;
597
598         /* Dynamic PLE window. */
599         int ple_window;
600         bool ple_window_dirty;
601
602         /* Support for PML */
603 #define PML_ENTITY_NUM          512
604         struct page *pml_pg;
605
606         /* apic deadline value in host tsc */
607         u64 hv_deadline_tsc;
608
609         u64 current_tsc_ratio;
610
611         bool guest_pkru_valid;
612         u32 guest_pkru;
613         u32 host_pkru;
614
615         /*
616          * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
617          * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
618          * in msr_ia32_feature_control_valid_bits.
619          */
620         u64 msr_ia32_feature_control;
621         u64 msr_ia32_feature_control_valid_bits;
622 };
623
624 enum segment_cache_field {
625         SEG_FIELD_SEL = 0,
626         SEG_FIELD_BASE = 1,
627         SEG_FIELD_LIMIT = 2,
628         SEG_FIELD_AR = 3,
629
630         SEG_FIELD_NR = 4
631 };
632
633 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
634 {
635         return container_of(vcpu, struct vcpu_vmx, vcpu);
636 }
637
638 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
639 {
640         return &(to_vmx(vcpu)->pi_desc);
641 }
642
643 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
644 #define FIELD(number, name)     [number] = VMCS12_OFFSET(name)
645 #define FIELD64(number, name)   [number] = VMCS12_OFFSET(name), \
646                                 [number##_HIGH] = VMCS12_OFFSET(name)+4
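/*
 * Example expansion: FIELD64(TSC_OFFSET, tsc_offset) yields two entries,
 * one for the full 64-bit field and one for its "high" 32-bit half:
 *
 *   [TSC_OFFSET]      = offsetof(struct vmcs12, tsc_offset),
 *   [TSC_OFFSET_HIGH] = offsetof(struct vmcs12, tsc_offset) + 4,
 */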
647
648
649 static unsigned long shadow_read_only_fields[] = {
650         /*
651          * We do NOT shadow fields that are modified when L0
652          * traps and emulates any vmx instruction (e.g. VMPTRLD,
653          * VMXON...) executed by L1.
654          * For example, VM_INSTRUCTION_ERROR is read
655          * by L1 if a vmx instruction fails (part of the error path).
656          * Note that the code relies on this behaviour. If for some reason
657          * we start shadowing these fields then we need to
658          * force a shadow sync when L0 emulates vmx instructions
659          * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
660          * by nested_vmx_failValid)
661          */
662         VM_EXIT_REASON,
663         VM_EXIT_INTR_INFO,
664         VM_EXIT_INSTRUCTION_LEN,
665         IDT_VECTORING_INFO_FIELD,
666         IDT_VECTORING_ERROR_CODE,
667         VM_EXIT_INTR_ERROR_CODE,
668         EXIT_QUALIFICATION,
669         GUEST_LINEAR_ADDRESS,
670         GUEST_PHYSICAL_ADDRESS
671 };
672 static int max_shadow_read_only_fields =
673         ARRAY_SIZE(shadow_read_only_fields);
674
675 static unsigned long shadow_read_write_fields[] = {
676         TPR_THRESHOLD,
677         GUEST_RIP,
678         GUEST_RSP,
679         GUEST_CR0,
680         GUEST_CR3,
681         GUEST_CR4,
682         GUEST_INTERRUPTIBILITY_INFO,
683         GUEST_RFLAGS,
684         GUEST_CS_SELECTOR,
685         GUEST_CS_AR_BYTES,
686         GUEST_CS_LIMIT,
687         GUEST_CS_BASE,
688         GUEST_ES_BASE,
689         GUEST_BNDCFGS,
690         CR0_GUEST_HOST_MASK,
691         CR0_READ_SHADOW,
692         CR4_READ_SHADOW,
693         TSC_OFFSET,
694         EXCEPTION_BITMAP,
695         CPU_BASED_VM_EXEC_CONTROL,
696         VM_ENTRY_EXCEPTION_ERROR_CODE,
697         VM_ENTRY_INTR_INFO_FIELD,
698         VM_ENTRY_INSTRUCTION_LEN,
700         HOST_FS_BASE,
701         HOST_GS_BASE,
702         HOST_FS_SELECTOR,
703         HOST_GS_SELECTOR
704 };
705 static int max_shadow_read_write_fields =
706         ARRAY_SIZE(shadow_read_write_fields);
707
708 static const unsigned short vmcs_field_to_offset_table[] = {
709         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
710         FIELD(POSTED_INTR_NV, posted_intr_nv),
711         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
712         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
713         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
714         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
715         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
716         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
717         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
718         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
719         FIELD(GUEST_INTR_STATUS, guest_intr_status),
720         FIELD(HOST_ES_SELECTOR, host_es_selector),
721         FIELD(HOST_CS_SELECTOR, host_cs_selector),
722         FIELD(HOST_SS_SELECTOR, host_ss_selector),
723         FIELD(HOST_DS_SELECTOR, host_ds_selector),
724         FIELD(HOST_FS_SELECTOR, host_fs_selector),
725         FIELD(HOST_GS_SELECTOR, host_gs_selector),
726         FIELD(HOST_TR_SELECTOR, host_tr_selector),
727         FIELD64(IO_BITMAP_A, io_bitmap_a),
728         FIELD64(IO_BITMAP_B, io_bitmap_b),
729         FIELD64(MSR_BITMAP, msr_bitmap),
730         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
731         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
732         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
733         FIELD64(TSC_OFFSET, tsc_offset),
734         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
735         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
736         FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
737         FIELD64(EPT_POINTER, ept_pointer),
738         FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
739         FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
740         FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
741         FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
742         FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
743         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
744         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
745         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
746         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
747         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
748         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
749         FIELD64(GUEST_PDPTR0, guest_pdptr0),
750         FIELD64(GUEST_PDPTR1, guest_pdptr1),
751         FIELD64(GUEST_PDPTR2, guest_pdptr2),
752         FIELD64(GUEST_PDPTR3, guest_pdptr3),
753         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
754         FIELD64(HOST_IA32_PAT, host_ia32_pat),
755         FIELD64(HOST_IA32_EFER, host_ia32_efer),
756         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
757         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
758         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
759         FIELD(EXCEPTION_BITMAP, exception_bitmap),
760         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
761         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
762         FIELD(CR3_TARGET_COUNT, cr3_target_count),
763         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
764         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
765         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
766         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
767         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
768         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
769         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
770         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
771         FIELD(TPR_THRESHOLD, tpr_threshold),
772         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
773         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
774         FIELD(VM_EXIT_REASON, vm_exit_reason),
775         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
776         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
777         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
778         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
779         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
780         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
781         FIELD(GUEST_ES_LIMIT, guest_es_limit),
782         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
783         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
784         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
785         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
786         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
787         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
788         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
789         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
790         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
791         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
792         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
793         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
794         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
795         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
796         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
797         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
798         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
799         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
800         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
801         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
802         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
803         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
804         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
805         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
806         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
807         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
808         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
809         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
810         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
811         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
812         FIELD(EXIT_QUALIFICATION, exit_qualification),
813         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
814         FIELD(GUEST_CR0, guest_cr0),
815         FIELD(GUEST_CR3, guest_cr3),
816         FIELD(GUEST_CR4, guest_cr4),
817         FIELD(GUEST_ES_BASE, guest_es_base),
818         FIELD(GUEST_CS_BASE, guest_cs_base),
819         FIELD(GUEST_SS_BASE, guest_ss_base),
820         FIELD(GUEST_DS_BASE, guest_ds_base),
821         FIELD(GUEST_FS_BASE, guest_fs_base),
822         FIELD(GUEST_GS_BASE, guest_gs_base),
823         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
824         FIELD(GUEST_TR_BASE, guest_tr_base),
825         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
826         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
827         FIELD(GUEST_DR7, guest_dr7),
828         FIELD(GUEST_RSP, guest_rsp),
829         FIELD(GUEST_RIP, guest_rip),
830         FIELD(GUEST_RFLAGS, guest_rflags),
831         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
832         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
833         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
834         FIELD(HOST_CR0, host_cr0),
835         FIELD(HOST_CR3, host_cr3),
836         FIELD(HOST_CR4, host_cr4),
837         FIELD(HOST_FS_BASE, host_fs_base),
838         FIELD(HOST_GS_BASE, host_gs_base),
839         FIELD(HOST_TR_BASE, host_tr_base),
840         FIELD(HOST_GDTR_BASE, host_gdtr_base),
841         FIELD(HOST_IDTR_BASE, host_idtr_base),
842         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
843         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
844         FIELD(HOST_RSP, host_rsp),
845         FIELD(HOST_RIP, host_rip),
846 };
847
848 static inline short vmcs_field_to_offset(unsigned long field)
849 {
850         BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
851
852         if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
853             vmcs_field_to_offset_table[field] == 0)
854                 return -ENOENT;
855
856         return vmcs_field_to_offset_table[field];
857 }
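/*
 * Typical use of the table above (a sketch; callers must handle the
 * -ENOENT case): translate a VMCS field encoding into a pointer inside
 * the current vmcs12:
 *
 *   short offset = vmcs_field_to_offset(field);
 *   if (offset < 0)
 *           return offset;
 *   p = (char *)get_vmcs12(vcpu) + offset;   then read/write per field width
 */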
858
859 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
860 {
861         return to_vmx(vcpu)->nested.current_vmcs12;
862 }
863
864 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
865 {
866         struct page *page = kvm_vcpu_gfn_to_page(vcpu, addr >> PAGE_SHIFT);
867         if (is_error_page(page))
868                 return NULL;
869
870         return page;
871 }
872
873 static void nested_release_page(struct page *page)
874 {
875         kvm_release_page_dirty(page);
876 }
877
878 static void nested_release_page_clean(struct page *page)
879 {
880         kvm_release_page_clean(page);
881 }
882
883 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
884 static u64 construct_eptp(unsigned long root_hpa);
885 static void kvm_cpu_vmxon(u64 addr);
886 static void kvm_cpu_vmxoff(void);
887 static bool vmx_xsaves_supported(void);
888 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
889 static void vmx_set_segment(struct kvm_vcpu *vcpu,
890                             struct kvm_segment *var, int seg);
891 static void vmx_get_segment(struct kvm_vcpu *vcpu,
892                             struct kvm_segment *var, int seg);
893 static bool guest_state_valid(struct kvm_vcpu *vcpu);
894 static u32 vmx_segment_access_rights(struct kvm_segment *var);
895 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
896 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
897 static int alloc_identity_pagetable(struct kvm *kvm);
898
899 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
900 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
901 /*
902  * We maintain a per-CPU linked list of the VMCSs loaded on that CPU. This is needed
903  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
904  */
905 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
906 static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
907
908 /*
909  * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
910  * can find which vCPU should be woken up.
911  */
912 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
913 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
914
915 static unsigned long *vmx_io_bitmap_a;
916 static unsigned long *vmx_io_bitmap_b;
917 static unsigned long *vmx_msr_bitmap_legacy;
918 static unsigned long *vmx_msr_bitmap_longmode;
919 static unsigned long *vmx_msr_bitmap_legacy_x2apic;
920 static unsigned long *vmx_msr_bitmap_longmode_x2apic;
921 static unsigned long *vmx_msr_bitmap_nested;
922 static unsigned long *vmx_vmread_bitmap;
923 static unsigned long *vmx_vmwrite_bitmap;
924
925 static bool cpu_has_load_ia32_efer;
926 static bool cpu_has_load_perf_global_ctrl;
927
928 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
929 static DEFINE_SPINLOCK(vmx_vpid_lock);
930
931 static struct vmcs_config {
932         int size;
933         int order;
934         u32 revision_id;
935         u32 pin_based_exec_ctrl;
936         u32 cpu_based_exec_ctrl;
937         u32 cpu_based_2nd_exec_ctrl;
938         u32 vmexit_ctrl;
939         u32 vmentry_ctrl;
940 } vmcs_config;
941
942 static struct vmx_capability {
943         u32 ept;
944         u32 vpid;
945 } vmx_capability;
946
947 #define VMX_SEGMENT_FIELD(seg)                                  \
948         [VCPU_SREG_##seg] = {                                   \
949                 .selector = GUEST_##seg##_SELECTOR,             \
950                 .base = GUEST_##seg##_BASE,                     \
951                 .limit = GUEST_##seg##_LIMIT,                   \
952                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
953         }
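/*
 * Example expansion: VMX_SEGMENT_FIELD(CS) becomes
 *
 *   [VCPU_SREG_CS] = {
 *           .selector = GUEST_CS_SELECTOR,
 *           .base     = GUEST_CS_BASE,
 *           .limit    = GUEST_CS_LIMIT,
 *           .ar_bytes = GUEST_CS_AR_BYTES,
 *   },
 */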
954
955 static const struct kvm_vmx_segment_field {
956         unsigned selector;
957         unsigned base;
958         unsigned limit;
959         unsigned ar_bytes;
960 } kvm_vmx_segment_fields[] = {
961         VMX_SEGMENT_FIELD(CS),
962         VMX_SEGMENT_FIELD(DS),
963         VMX_SEGMENT_FIELD(ES),
964         VMX_SEGMENT_FIELD(FS),
965         VMX_SEGMENT_FIELD(GS),
966         VMX_SEGMENT_FIELD(SS),
967         VMX_SEGMENT_FIELD(TR),
968         VMX_SEGMENT_FIELD(LDTR),
969 };
970
971 static u64 host_efer;
972
973 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
974
975 /*
976  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
977  * away by decrementing the array size.
978  */
979 static const u32 vmx_msr_index[] = {
980 #ifdef CONFIG_X86_64
981         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
982 #endif
983         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
984 };
985
986 static inline bool is_exception_n(u32 intr_info, u8 vector)
987 {
988         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
989                              INTR_INFO_VALID_MASK)) ==
990                 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
991 }
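/*
 * Layout of the interruption-information word tested above: bits 7:0 hold
 * the vector, bits 10:8 the interruption type (external interrupt, NMI,
 * hardware exception, ...), bit 11 the error-code-valid flag and bit 31 the
 * overall valid bit.  is_exception_n() thus matches "valid hardware
 * exception with vector == n".
 */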
992
993 static inline bool is_debug(u32 intr_info)
994 {
995         return is_exception_n(intr_info, DB_VECTOR);
996 }
997
998 static inline bool is_breakpoint(u32 intr_info)
999 {
1000         return is_exception_n(intr_info, BP_VECTOR);
1001 }
1002
1003 static inline bool is_page_fault(u32 intr_info)
1004 {
1005         return is_exception_n(intr_info, PF_VECTOR);
1006 }
1007
1008 static inline bool is_no_device(u32 intr_info)
1009 {
1010         return is_exception_n(intr_info, NM_VECTOR);
1011 }
1012
1013 static inline bool is_invalid_opcode(u32 intr_info)
1014 {
1015         return is_exception_n(intr_info, UD_VECTOR);
1016 }
1017
1018 static inline bool is_external_interrupt(u32 intr_info)
1019 {
1020         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1021                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1022 }
1023
1024 static inline bool is_machine_check(u32 intr_info)
1025 {
1026         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1027                              INTR_INFO_VALID_MASK)) ==
1028                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1029 }
1030
1031 static inline bool cpu_has_vmx_msr_bitmap(void)
1032 {
1033         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1034 }
1035
1036 static inline bool cpu_has_vmx_tpr_shadow(void)
1037 {
1038         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1039 }
1040
1041 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1042 {
1043         return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1044 }
1045
1046 static inline bool cpu_has_secondary_exec_ctrls(void)
1047 {
1048         return vmcs_config.cpu_based_exec_ctrl &
1049                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1050 }
1051
1052 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1053 {
1054         return vmcs_config.cpu_based_2nd_exec_ctrl &
1055                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1056 }
1057
1058 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1059 {
1060         return vmcs_config.cpu_based_2nd_exec_ctrl &
1061                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1062 }
1063
1064 static inline bool cpu_has_vmx_apic_register_virt(void)
1065 {
1066         return vmcs_config.cpu_based_2nd_exec_ctrl &
1067                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1068 }
1069
1070 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1071 {
1072         return vmcs_config.cpu_based_2nd_exec_ctrl &
1073                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1074 }
1075
1076 /*
1077  * Comment format: document - errata name - stepping - processor name.
1078  * Taken from
1079  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1080  */
1081 static u32 vmx_preemption_cpu_tfms[] = {
1082 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
1083 0x000206E6,
1084 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
1085 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1086 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1087 0x00020652,
1088 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1089 0x00020655,
1090 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
1091 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
1092 /*
1093  * 320767.pdf - AAP86  - B1 -
1094  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1095  */
1096 0x000106E5,
1097 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1098 0x000106A0,
1099 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1100 0x000106A1,
1101 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1102 0x000106A4,
1103  /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1104  /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1105  /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1106 0x000106A5,
1107 };
1108
1109 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1110 {
1111         u32 eax = cpuid_eax(0x00000001), i;
1112
1113         /* Clear the reserved bits */
1114         eax &= ~(0x3U << 14 | 0xfU << 28);
1115         for (i = 0; i < sizeof(vmx_preemption_cpu_tfms)/sizeof(u32); i++)
1116                 if (eax == vmx_preemption_cpu_tfms[i])
1117                         return true;
1118
1119         return false;
1120 }
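/*
 * cpuid_eax(1) returns the processor signature; masking out the reserved
 * bits 15:14 and 31:28 leaves the extended-family/model/stepping value that
 * is compared against the erratum list above (e.g. 0x000106A5 for the D0
 * stepping Xeon 3500/5500 and i7-900 parts).
 */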
1121
1122 static inline bool cpu_has_vmx_preemption_timer(void)
1123 {
1124         if (cpu_has_broken_vmx_preemption_timer())
1125                 return false;
1126
1127         return vmcs_config.pin_based_exec_ctrl &
1128                 PIN_BASED_VMX_PREEMPTION_TIMER;
1129 }
1130
1131 static inline bool cpu_has_vmx_posted_intr(void)
1132 {
1133         return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1134                 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1135 }
1136
1137 static inline bool cpu_has_vmx_apicv(void)
1138 {
1139         return cpu_has_vmx_apic_register_virt() &&
1140                 cpu_has_vmx_virtual_intr_delivery() &&
1141                 cpu_has_vmx_posted_intr();
1142 }
1143
1144 static inline bool cpu_has_vmx_flexpriority(void)
1145 {
1146         return cpu_has_vmx_tpr_shadow() &&
1147                 cpu_has_vmx_virtualize_apic_accesses();
1148 }
1149
1150 static inline bool cpu_has_vmx_ept_execute_only(void)
1151 {
1152         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1153 }
1154
1155 static inline bool cpu_has_vmx_ept_2m_page(void)
1156 {
1157         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1158 }
1159
1160 static inline bool cpu_has_vmx_ept_1g_page(void)
1161 {
1162         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1163 }
1164
1165 static inline bool cpu_has_vmx_ept_4levels(void)
1166 {
1167         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1168 }
1169
1170 static inline bool cpu_has_vmx_ept_ad_bits(void)
1171 {
1172         return vmx_capability.ept & VMX_EPT_AD_BIT;
1173 }
1174
1175 static inline bool cpu_has_vmx_invept_context(void)
1176 {
1177         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1178 }
1179
1180 static inline bool cpu_has_vmx_invept_global(void)
1181 {
1182         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1183 }
1184
1185 static inline bool cpu_has_vmx_invvpid_single(void)
1186 {
1187         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1188 }
1189
1190 static inline bool cpu_has_vmx_invvpid_global(void)
1191 {
1192         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1193 }
1194
1195 static inline bool cpu_has_vmx_ept(void)
1196 {
1197         return vmcs_config.cpu_based_2nd_exec_ctrl &
1198                 SECONDARY_EXEC_ENABLE_EPT;
1199 }
1200
1201 static inline bool cpu_has_vmx_unrestricted_guest(void)
1202 {
1203         return vmcs_config.cpu_based_2nd_exec_ctrl &
1204                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1205 }
1206
1207 static inline bool cpu_has_vmx_ple(void)
1208 {
1209         return vmcs_config.cpu_based_2nd_exec_ctrl &
1210                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1211 }
1212
1213 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1214 {
1215         return flexpriority_enabled && lapic_in_kernel(vcpu);
1216 }
1217
1218 static inline bool cpu_has_vmx_vpid(void)
1219 {
1220         return vmcs_config.cpu_based_2nd_exec_ctrl &
1221                 SECONDARY_EXEC_ENABLE_VPID;
1222 }
1223
1224 static inline bool cpu_has_vmx_rdtscp(void)
1225 {
1226         return vmcs_config.cpu_based_2nd_exec_ctrl &
1227                 SECONDARY_EXEC_RDTSCP;
1228 }
1229
1230 static inline bool cpu_has_vmx_invpcid(void)
1231 {
1232         return vmcs_config.cpu_based_2nd_exec_ctrl &
1233                 SECONDARY_EXEC_ENABLE_INVPCID;
1234 }
1235
1236 static inline bool cpu_has_virtual_nmis(void)
1237 {
1238         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1239 }
1240
1241 static inline bool cpu_has_vmx_wbinvd_exit(void)
1242 {
1243         return vmcs_config.cpu_based_2nd_exec_ctrl &
1244                 SECONDARY_EXEC_WBINVD_EXITING;
1245 }
1246
1247 static inline bool cpu_has_vmx_shadow_vmcs(void)
1248 {
1249         u64 vmx_msr;
1250         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1251         /* check if the cpu supports writing r/o exit information fields */
1252         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1253                 return false;
1254
1255         return vmcs_config.cpu_based_2nd_exec_ctrl &
1256                 SECONDARY_EXEC_SHADOW_VMCS;
1257 }
1258
1259 static inline bool cpu_has_vmx_pml(void)
1260 {
1261         return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1262 }
1263
1264 static inline bool cpu_has_vmx_tsc_scaling(void)
1265 {
1266         return vmcs_config.cpu_based_2nd_exec_ctrl &
1267                 SECONDARY_EXEC_TSC_SCALING;
1268 }
1269
1270 static inline bool report_flexpriority(void)
1271 {
1272         return flexpriority_enabled;
1273 }
1274
1275 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1276 {
1277         return vmcs12->cpu_based_vm_exec_control & bit;
1278 }
1279
1280 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1281 {
1282         return (vmcs12->cpu_based_vm_exec_control &
1283                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1284                 (vmcs12->secondary_vm_exec_control & bit);
1285 }
1286
1287 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1288 {
1289         return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1290 }
1291
1292 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1293 {
1294         return vmcs12->pin_based_vm_exec_control &
1295                 PIN_BASED_VMX_PREEMPTION_TIMER;
1296 }
1297
1298 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1299 {
1300         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1301 }
1302
1303 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1304 {
1305         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES) &&
1306                 vmx_xsaves_supported();
1307 }
1308
1309 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1310 {
1311         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
1312 }
1313
1314 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
1315 {
1316         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
1317 }
1318
1319 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
1320 {
1321         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
1322 }
1323
1324 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
1325 {
1326         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
1327 }
1328
1329 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
1330 {
1331         return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
1332 }
1333
1334 static inline bool is_exception(u32 intr_info)
1335 {
1336         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1337                 == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
1338 }
1339
1340 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1341                               u32 exit_intr_info,
1342                               unsigned long exit_qualification);
1343 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1344                         struct vmcs12 *vmcs12,
1345                         u32 reason, unsigned long qualification);
1346
1347 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1348 {
1349         int i;
1350
1351         for (i = 0; i < vmx->nmsrs; ++i)
1352                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1353                         return i;
1354         return -1;
1355 }
1356
1357 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1358 {
1359         struct {
1360                 u64 vpid : 16;
1361                 u64 rsvd : 48;
1362                 u64 gva;
1363         } operand = { vpid, 0, gva };
1364
1365         asm volatile (__ex(ASM_VMX_INVVPID)
1366                       /* CF==1 or ZF==1 --> rc = -1 */
1367                       "; ja 1f ; ud2 ; 1:"
1368                       : : "a"(&operand), "c"(ext) : "cc", "memory");
1369 }
1370
1371 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1372 {
1373         struct {
1374                 u64 eptp, gpa;
1375         } operand = {eptp, gpa};
1376
1377         asm volatile (__ex(ASM_VMX_INVEPT)
1378                         /* CF==1 or ZF==1 --> rc = -1 */
1379                         "; ja 1f ; ud2 ; 1:\n"
1380                         : : "a" (&operand), "c" (ext) : "cc", "memory");
1381 }
1382
1383 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1384 {
1385         int i;
1386
1387         i = __find_msr_index(vmx, msr);
1388         if (i >= 0)
1389                 return &vmx->guest_msrs[i];
1390         return NULL;
1391 }
1392
1393 static void vmcs_clear(struct vmcs *vmcs)
1394 {
1395         u64 phys_addr = __pa(vmcs);
1396         u8 error;
1397
1398         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1399                       : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1400                       : "cc", "memory");
1401         if (error)
1402                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1403                        vmcs, phys_addr);
1404 }
1405
1406 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1407 {
1408         vmcs_clear(loaded_vmcs->vmcs);
1409         loaded_vmcs->cpu = -1;
1410         loaded_vmcs->launched = 0;
1411 }
1412
1413 static void vmcs_load(struct vmcs *vmcs)
1414 {
1415         u64 phys_addr = __pa(vmcs);
1416         u8 error;
1417
1418         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1419                         : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1420                         : "cc", "memory");
1421         if (error)
1422                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1423                        vmcs, phys_addr);
1424 }
1425
1426 #ifdef CONFIG_KEXEC_CORE
1427 /*
1428  * This bitmap indicates whether the vmclear
1429  * operation is enabled on each cpu. All are
1430  * disabled by default.
1431  */
1432 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1433
1434 static inline void crash_enable_local_vmclear(int cpu)
1435 {
1436         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1437 }
1438
1439 static inline void crash_disable_local_vmclear(int cpu)
1440 {
1441         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1442 }
1443
1444 static inline int crash_local_vmclear_enabled(int cpu)
1445 {
1446         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1447 }
1448
1449 static void crash_vmclear_local_loaded_vmcss(void)
1450 {
1451         int cpu = raw_smp_processor_id();
1452         struct loaded_vmcs *v;
1453
1454         if (!crash_local_vmclear_enabled(cpu))
1455                 return;
1456
1457         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1458                             loaded_vmcss_on_cpu_link)
1459                 vmcs_clear(v->vmcs);
1460 }
1461 #else
1462 static inline void crash_enable_local_vmclear(int cpu) { }
1463 static inline void crash_disable_local_vmclear(int cpu) { }
1464 #endif /* CONFIG_KEXEC_CORE */
1465
1466 static void __loaded_vmcs_clear(void *arg)
1467 {
1468         struct loaded_vmcs *loaded_vmcs = arg;
1469         int cpu = raw_smp_processor_id();
1470
1471         if (loaded_vmcs->cpu != cpu)
1472                 return; /* vcpu migration can race with cpu offline */
1473         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1474                 per_cpu(current_vmcs, cpu) = NULL;
1475         crash_disable_local_vmclear(cpu);
1476         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1477
1478         /*
1479          * We should ensure that updating loaded_vmcs->loaded_vmcss_on_cpu_link
1480          * happens before setting loaded_vmcs->cpu to -1, which is done in
1481          * loaded_vmcs_init. Otherwise, another cpu could see cpu == -1 first
1482          * and then add the vmcs to the per-cpu list before it is deleted here.
1483          */
1484         smp_wmb();
1485
1486         loaded_vmcs_init(loaded_vmcs);
1487         crash_enable_local_vmclear(cpu);
1488 }
1489
1490 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1491 {
1492         int cpu = loaded_vmcs->cpu;
1493
1494         if (cpu != -1)
1495                 smp_call_function_single(cpu,
1496                          __loaded_vmcs_clear, loaded_vmcs, 1);
1497 }
1498
1499 static inline void vpid_sync_vcpu_single(int vpid)
1500 {
1501         if (vpid == 0)
1502                 return;
1503
1504         if (cpu_has_vmx_invvpid_single())
1505                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
1506 }
1507
1508 static inline void vpid_sync_vcpu_global(void)
1509 {
1510         if (cpu_has_vmx_invvpid_global())
1511                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1512 }
1513
1514 static inline void vpid_sync_context(int vpid)
1515 {
1516         if (cpu_has_vmx_invvpid_single())
1517                 vpid_sync_vcpu_single(vpid);
1518         else
1519                 vpid_sync_vcpu_global();
1520 }
1521
1522 static inline void ept_sync_global(void)
1523 {
1524         if (cpu_has_vmx_invept_global())
1525                 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1526 }
1527
1528 static inline void ept_sync_context(u64 eptp)
1529 {
1530         if (enable_ept) {
1531                 if (cpu_has_vmx_invept_context())
1532                         __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
1533                 else
1534                         ept_sync_global();
1535         }
1536 }
1537
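/*
 * The vmcs_check* helpers catch mismatched accessor widths at compile time.
 * In the VMCS field encoding, bits 14:13 give the field width (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width) and bit 0 selects the high half
 * of a 64-bit field, which is what the 0x6000/0x6001 masks below test.
 */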
1538 static __always_inline void vmcs_check16(unsigned long field)
1539 {
1540         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1541                          "16-bit accessor invalid for 64-bit field");
1542         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1543                          "16-bit accessor invalid for 64-bit high field");
1544         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1545                          "16-bit accessor invalid for 32-bit high field");
1546         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1547                          "16-bit accessor invalid for natural width field");
1548 }
1549
1550 static __always_inline void vmcs_check32(unsigned long field)
1551 {
1552         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1553                          "32-bit accessor invalid for 16-bit field");
1554         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1555                          "32-bit accessor invalid for natural width field");
1556 }
1557
1558 static __always_inline void vmcs_check64(unsigned long field)
1559 {
1560         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1561                          "64-bit accessor invalid for 16-bit field");
1562         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1563                          "64-bit accessor invalid for 64-bit high field");
1564         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1565                          "64-bit accessor invalid for 32-bit field");
1566         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1567                          "64-bit accessor invalid for natural width field");
1568 }
1569
1570 static __always_inline void vmcs_checkl(unsigned long field)
1571 {
1572         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1573                          "Natural width accessor invalid for 16-bit field");
1574         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1575                          "Natural width accessor invalid for 64-bit field");
1576         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1577                          "Natural width accessor invalid for 64-bit high field");
1578         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1579                          "Natural width accessor invalid for 32-bit field");
1580 }
1581
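/*
 * Raw VMREAD/VMWRITE wrappers: the field encoding is passed in RDX and the
 * value travels in RAX, matching the ASM_VMX_VMREAD_RDX_RAX and
 * ASM_VMX_VMWRITE_RAX_RDX templates used below.
 */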
1582 static __always_inline unsigned long __vmcs_readl(unsigned long field)
1583 {
1584         unsigned long value;
1585
1586         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1587                       : "=a"(value) : "d"(field) : "cc");
1588         return value;
1589 }
1590
1591 static __always_inline u16 vmcs_read16(unsigned long field)
1592 {
1593         vmcs_check16(field);
1594         return __vmcs_readl(field);
1595 }
1596
1597 static __always_inline u32 vmcs_read32(unsigned long field)
1598 {
1599         vmcs_check32(field);
1600         return __vmcs_readl(field);
1601 }
1602
1603 static __always_inline u64 vmcs_read64(unsigned long field)
1604 {
1605         vmcs_check64(field);
1606 #ifdef CONFIG_X86_64
1607         return __vmcs_readl(field);
1608 #else
1609         return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
1610 #endif
1611 }
1612
1613 static __always_inline unsigned long vmcs_readl(unsigned long field)
1614 {
1615         vmcs_checkl(field);
1616         return __vmcs_readl(field);
1617 }
1618
1619 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1620 {
1621         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1622                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1623         dump_stack();
1624 }
1625
1626 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
1627 {
1628         u8 error;
1629
1630         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1631                        : "=q"(error) : "a"(value), "d"(field) : "cc");
1632         if (unlikely(error))
1633                 vmwrite_error(field, value);
1634 }
1635
1636 static __always_inline void vmcs_write16(unsigned long field, u16 value)
1637 {
1638         vmcs_check16(field);
1639         __vmcs_writel(field, value);
1640 }
1641
1642 static __always_inline void vmcs_write32(unsigned long field, u32 value)
1643 {
1644         vmcs_check32(field);
1645         __vmcs_writel(field, value);
1646 }
1647
1648 static __always_inline void vmcs_write64(unsigned long field, u64 value)
1649 {
1650         vmcs_check64(field);
1651         __vmcs_writel(field, value);
1652 #ifndef CONFIG_X86_64
1653         asm volatile ("");
1654         __vmcs_writel(field+1, value >> 32);
1655 #endif
1656 }
1657
1658 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
1659 {
1660         vmcs_checkl(field);
1661         __vmcs_writel(field, value);
1662 }
1663
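/*
 * Read-modify-write helpers for VMCS fields.  64-bit fields are rejected at
 * compile time; they would need the split access that vmcs_write64() does on
 * 32-bit hosts.
 */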
1664 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
1665 {
1666         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1667                          "vmcs_clear_bits does not support 64-bit fields");
1668         __vmcs_writel(field, __vmcs_readl(field) & ~mask);
1669 }
1670
1671 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
1672 {
1673         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1674                          "vmcs_set_bits does not support 64-bit fields");
1675         __vmcs_writel(field, __vmcs_readl(field) | mask);
1676 }
1677
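/*
 * VM_ENTRY_CONTROLS and VM_EXIT_CONTROLS are shadowed in struct vcpu_vmx so
 * that the *_set/_setbit/_clearbit helpers below only issue a VMWRITE when
 * the value actually changes.
 */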
1678 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
1679 {
1680         vmcs_write32(VM_ENTRY_CONTROLS, val);
1681         vmx->vm_entry_controls_shadow = val;
1682 }
1683
1684 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
1685 {
1686         if (vmx->vm_entry_controls_shadow != val)
1687                 vm_entry_controls_init(vmx, val);
1688 }
1689
1690 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
1691 {
1692         return vmx->vm_entry_controls_shadow;
1693 }
1694
1695
1696 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1697 {
1698         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
1699 }
1700
1701 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1702 {
1703         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
1704 }
1705
1706 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
1707 {
1708         vmcs_write32(VM_EXIT_CONTROLS, val);
1709         vmx->vm_exit_controls_shadow = val;
1710 }
1711
1712 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
1713 {
1714         if (vmx->vm_exit_controls_shadow != val)
1715                 vm_exit_controls_init(vmx, val);
1716 }
1717
1718 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
1719 {
1720         return vmx->vm_exit_controls_shadow;
1721 }
1722
1723
1724 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1725 {
1726         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
1727 }
1728
1729 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1730 {
1731         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
1732 }
1733
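/*
 * Guest segment fields read from the VMCS are cached in vmx->segment_cache,
 * one validity bit per (segment, field) pair.  The cache as a whole is
 * declared stale by clearing VCPU_EXREG_SEGMENTS in regs_avail, which
 * vmx_segment_cache_test_set() checks before trusting a cached value.
 */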
1734 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1735 {
1736         vmx->segment_cache.bitmask = 0;
1737 }
1738
1739 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1740                                        unsigned field)
1741 {
1742         bool ret;
1743         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1744
1745         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1746                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1747                 vmx->segment_cache.bitmask = 0;
1748         }
1749         ret = vmx->segment_cache.bitmask & mask;
1750         vmx->segment_cache.bitmask |= mask;
1751         return ret;
1752 }
1753
1754 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1755 {
1756         u16 *p = &vmx->segment_cache.seg[seg].selector;
1757
1758         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1759                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1760         return *p;
1761 }
1762
1763 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1764 {
1765         ulong *p = &vmx->segment_cache.seg[seg].base;
1766
1767         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1768                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1769         return *p;
1770 }
1771
1772 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1773 {
1774         u32 *p = &vmx->segment_cache.seg[seg].limit;
1775
1776         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1777                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1778         return *p;
1779 }
1780
1781 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1782 {
1783         u32 *p = &vmx->segment_cache.seg[seg].ar;
1784
1785         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1786                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1787         return *p;
1788 }
1789
1790 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1791 {
1792         u32 eb;
1793
1794         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1795              (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR);
1796         if ((vcpu->guest_debug &
1797              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
1798             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
1799                 eb |= 1u << BP_VECTOR;
1800         if (to_vmx(vcpu)->rmode.vm86_active)
1801                 eb = ~0;
1802         if (enable_ept)
1803                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
1804         if (vcpu->fpu_active)
1805                 eb &= ~(1u << NM_VECTOR);
1806
1807         /* When we are running a nested L2 guest and L1 specified for it a
1808          * certain exception bitmap, we must trap the same exceptions and pass
1809          * them to L1. When running L2, we will only handle the exceptions
1810          * specified above if L1 did not want them.
1811          */
1812         if (is_guest_mode(vcpu))
1813                 eb |= get_vmcs12(vcpu)->exception_bitmap;
1814
1815         vmcs_write32(EXCEPTION_BITMAP, eb);
1816 }
1817
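/*
 * MSRs that must differ between host and guest are switched either through
 * dedicated VMCS fields (the *_special variants, used for EFER and
 * PERF_GLOBAL_CTRL when the corresponding VM-entry/VM-exit controls exist)
 * or via the msr_autoload lists processed on every VM entry and VM exit.
 */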
1818 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1819                 unsigned long entry, unsigned long exit)
1820 {
1821         vm_entry_controls_clearbit(vmx, entry);
1822         vm_exit_controls_clearbit(vmx, exit);
1823 }
1824
1825 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1826 {
1827         unsigned i;
1828         struct msr_autoload *m = &vmx->msr_autoload;
1829
1830         switch (msr) {
1831         case MSR_EFER:
1832                 if (cpu_has_load_ia32_efer) {
1833                         clear_atomic_switch_msr_special(vmx,
1834                                         VM_ENTRY_LOAD_IA32_EFER,
1835                                         VM_EXIT_LOAD_IA32_EFER);
1836                         return;
1837                 }
1838                 break;
1839         case MSR_CORE_PERF_GLOBAL_CTRL:
1840                 if (cpu_has_load_perf_global_ctrl) {
1841                         clear_atomic_switch_msr_special(vmx,
1842                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1843                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1844                         return;
1845                 }
1846                 break;
1847         }
1848
1849         for (i = 0; i < m->nr; ++i)
1850                 if (m->guest[i].index == msr)
1851                         break;
1852
1853         if (i == m->nr)
1854                 return;
1855         --m->nr;
1856         m->guest[i] = m->guest[m->nr];
1857         m->host[i] = m->host[m->nr];
1858         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1859         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1860 }
1861
1862 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1863                 unsigned long entry, unsigned long exit,
1864                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1865                 u64 guest_val, u64 host_val)
1866 {
1867         vmcs_write64(guest_val_vmcs, guest_val);
1868         vmcs_write64(host_val_vmcs, host_val);
1869         vm_entry_controls_setbit(vmx, entry);
1870         vm_exit_controls_setbit(vmx, exit);
1871 }
1872
1873 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1874                                   u64 guest_val, u64 host_val)
1875 {
1876         unsigned i;
1877         struct msr_autoload *m = &vmx->msr_autoload;
1878
1879         switch (msr) {
1880         case MSR_EFER:
1881                 if (cpu_has_load_ia32_efer) {
1882                         add_atomic_switch_msr_special(vmx,
1883                                         VM_ENTRY_LOAD_IA32_EFER,
1884                                         VM_EXIT_LOAD_IA32_EFER,
1885                                         GUEST_IA32_EFER,
1886                                         HOST_IA32_EFER,
1887                                         guest_val, host_val);
1888                         return;
1889                 }
1890                 break;
1891         case MSR_CORE_PERF_GLOBAL_CTRL:
1892                 if (cpu_has_load_perf_global_ctrl) {
1893                         add_atomic_switch_msr_special(vmx,
1894                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1895                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1896                                         GUEST_IA32_PERF_GLOBAL_CTRL,
1897                                         HOST_IA32_PERF_GLOBAL_CTRL,
1898                                         guest_val, host_val);
1899                         return;
1900                 }
1901                 break;
1902         case MSR_IA32_PEBS_ENABLE:
1903                 /* PEBS needs a quiescent period after being disabled (to write
1904                  * a record).  Disabling PEBS through VMX MSR swapping doesn't
1905                  * provide that period, so a CPU could write host's record into
1906                  * guest's memory.
1907                  */
1908                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1909         }
1910
1911         for (i = 0; i < m->nr; ++i)
1912                 if (m->guest[i].index == msr)
1913                         break;
1914
1915         if (i == NR_AUTOLOAD_MSRS) {
1916                 printk_once(KERN_WARNING "Not enough msr switch entries. "
1917                                 "Can't add msr %x\n", msr);
1918                 return;
1919         } else if (i == m->nr) {
1920                 ++m->nr;
1921                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1922                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1923         }
1924
1925         m->guest[i].index = msr;
1926         m->guest[i].value = guest_val;
1927         m->host[i].index = msr;
1928         m->host[i].value = host_val;
1929 }
1930
1931 static void reload_tss(void)
1932 {
1933         /*
1934          * VT restores TR but not its size.  Useless.
1935          */
1936         struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
1937         struct desc_struct *descs;
1938
1939         descs = (void *)gdt->address;
1940         descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
1941         load_TR_desc();
1942 }
1943
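/*
 * Decide how guest EFER is handled: returns false when EFER is switched
 * atomically (or needs no switching at all), true when the caller should
 * keep it in the shared-MSR array, with bits in 'ignore_bits' masked out so
 * that harmless host/guest differences do not force a wrmsr.
 */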
1944 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
1945 {
1946         u64 guest_efer = vmx->vcpu.arch.efer;
1947         u64 ignore_bits = 0;
1948
1949         if (!enable_ept) {
1950                 /*
1951                  * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
1952                  * host CPUID is more efficient than testing guest CPUID
1953                  * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
1954                  */
1955                 if (boot_cpu_has(X86_FEATURE_SMEP))
1956                         guest_efer |= EFER_NX;
1957                 else if (!(guest_efer & EFER_NX))
1958                         ignore_bits |= EFER_NX;
1959         }
1960
1961         /*
1962          * LMA and LME handled by hardware; SCE meaningless outside long mode.
1963          */
1964         ignore_bits |= EFER_SCE;
1965 #ifdef CONFIG_X86_64
1966         ignore_bits |= EFER_LMA | EFER_LME;
1967         /* SCE is meaningful only in long mode on Intel */
1968         if (guest_efer & EFER_LMA)
1969                 ignore_bits &= ~(u64)EFER_SCE;
1970 #endif
1971
1972         clear_atomic_switch_msr(vmx, MSR_EFER);
1973
1974         /*
1975          * On EPT, we can't emulate NX, so we must switch EFER atomically.
1976          * On CPUs that support "load IA32_EFER", always switch EFER
1977          * atomically, since it's faster than switching it manually.
1978          */
1979         if (cpu_has_load_ia32_efer ||
1980             (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
1981                 if (!(guest_efer & EFER_LMA))
1982                         guest_efer &= ~EFER_LME;
1983                 if (guest_efer != host_efer)
1984                         add_atomic_switch_msr(vmx, MSR_EFER,
1985                                               guest_efer, host_efer);
1986                 return false;
1987         } else {
1988                 guest_efer &= ~ignore_bits;
1989                 guest_efer |= host_efer & ignore_bits;
1990
1991                 vmx->guest_msrs[efer_offset].data = guest_efer;
1992                 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
1993
1994                 return true;
1995         }
1996 }
1997
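/*
 * Resolve a selector to its descriptor base address, following an LDT
 * selector indirection and, on 64-bit, picking up the upper base bits of
 * system descriptors (LDT and TSS types).
 */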
1998 static unsigned long segment_base(u16 selector)
1999 {
2000         struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
2001         struct desc_struct *d;
2002         unsigned long table_base;
2003         unsigned long v;
2004
2005         if (!(selector & ~3))
2006                 return 0;
2007
2008         table_base = gdt->address;
2009
2010         if (selector & 4) {           /* from ldt */
2011                 u16 ldt_selector = kvm_read_ldt();
2012
2013                 if (!(ldt_selector & ~3))
2014                         return 0;
2015
2016                 table_base = segment_base(ldt_selector);
2017         }
2018         d = (struct desc_struct *)(table_base + (selector & ~7));
2019         v = get_desc_base(d);
2020 #ifdef CONFIG_X86_64
2021         if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
2022                 v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
2023 #endif
2024         return v;
2025 }
2026
2027 static inline unsigned long kvm_read_tr_base(void)
2028 {
2029         u16 tr;
2030         asm("str %0" : "=g"(tr));
2031         return segment_base(tr);
2032 }
2033
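/*
 * Snapshot the host segment selectors and MSRs that running the guest may
 * clobber, recording in vmx->host_state what __vmx_load_host_state() must
 * restore afterwards.
 */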
2034 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
2035 {
2036         struct vcpu_vmx *vmx = to_vmx(vcpu);
2037         int i;
2038
2039         if (vmx->host_state.loaded)
2040                 return;
2041
2042         vmx->host_state.loaded = 1;
2043         /*
2044          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
2045          * allow segment selectors with cpl > 0 or ti == 1.
2046          */
2047         vmx->host_state.ldt_sel = kvm_read_ldt();
2048         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
2049         savesegment(fs, vmx->host_state.fs_sel);
2050         if (!(vmx->host_state.fs_sel & 7)) {
2051                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
2052                 vmx->host_state.fs_reload_needed = 0;
2053         } else {
2054                 vmcs_write16(HOST_FS_SELECTOR, 0);
2055                 vmx->host_state.fs_reload_needed = 1;
2056         }
2057         savesegment(gs, vmx->host_state.gs_sel);
2058         if (!(vmx->host_state.gs_sel & 7))
2059                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
2060         else {
2061                 vmcs_write16(HOST_GS_SELECTOR, 0);
2062                 vmx->host_state.gs_ldt_reload_needed = 1;
2063         }
2064
2065 #ifdef CONFIG_X86_64
2066         savesegment(ds, vmx->host_state.ds_sel);
2067         savesegment(es, vmx->host_state.es_sel);
2068 #endif
2069
2070 #ifdef CONFIG_X86_64
2071         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
2072         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
2073 #else
2074         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
2075         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
2076 #endif
2077
2078 #ifdef CONFIG_X86_64
2079         rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2080         if (is_long_mode(&vmx->vcpu))
2081                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2082 #endif
2083         if (boot_cpu_has(X86_FEATURE_MPX))
2084                 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2085         for (i = 0; i < vmx->save_nmsrs; ++i)
2086                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2087                                    vmx->guest_msrs[i].data,
2088                                    vmx->guest_msrs[i].mask);
2089 }
2090
2091 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
2092 {
2093         if (!vmx->host_state.loaded)
2094                 return;
2095
2096         ++vmx->vcpu.stat.host_state_reload;
2097         vmx->host_state.loaded = 0;
2098 #ifdef CONFIG_X86_64
2099         if (is_long_mode(&vmx->vcpu))
2100                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2101 #endif
2102         if (vmx->host_state.gs_ldt_reload_needed) {
2103                 kvm_load_ldt(vmx->host_state.ldt_sel);
2104 #ifdef CONFIG_X86_64
2105                 load_gs_index(vmx->host_state.gs_sel);
2106 #else
2107                 loadsegment(gs, vmx->host_state.gs_sel);
2108 #endif
2109         }
2110         if (vmx->host_state.fs_reload_needed)
2111                 loadsegment(fs, vmx->host_state.fs_sel);
2112 #ifdef CONFIG_X86_64
2113         if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
2114                 loadsegment(ds, vmx->host_state.ds_sel);
2115                 loadsegment(es, vmx->host_state.es_sel);
2116         }
2117 #endif
2118         reload_tss();
2119 #ifdef CONFIG_X86_64
2120         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2121 #endif
2122         if (vmx->host_state.msr_host_bndcfgs)
2123                 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2124         /*
2125          * If the FPU is not active (through the host task or
2126          * the guest vcpu), then restore the cr0.TS bit.
2127          */
2128         if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded)
2129                 stts();
2130         load_gdt(this_cpu_ptr(&host_gdt));
2131 }
2132
2133 static void vmx_load_host_state(struct vcpu_vmx *vmx)
2134 {
2135         preempt_disable();
2136         __vmx_load_host_state(vmx);
2137         preempt_enable();
2138 }
2139
2140 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2141 {
2142         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2143         struct pi_desc old, new;
2144         unsigned int dest;
2145
2146         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2147                 !irq_remapping_cap(IRQ_POSTING_CAP))
2148                 return;
2149
2150         do {
2151                 old.control = new.control = pi_desc->control;
2152
2153                 /*
2154                  * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
2155                  * are two possible cases:
2156                  * 1. After running 'pre_block', context switch
2157                  *    happened. For this case, 'sn' was set in
2158                  *    vmx_vcpu_put(), so we need to clear it here.
2159                  * 2. After running 'pre_block', we were blocked,
2160                  *    and then woken up by someone else. In this case
2161                  *    nothing needs to be done here; 'pi_post_block'
2162                  *    takes care of everything. We cannot distinguish
2163                  *    case #1 from case #2 here (and do not need to),
2164                  *    so clear 'sn' unconditionally; doing so in case
2165                  *    #2 is harmless.
2166                  */
2167                 if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
2168                         if (vcpu->cpu != cpu) {
2169                                 dest = cpu_physical_id(cpu);
2170
2171                                 if (x2apic_enabled())
2172                                         new.ndst = dest;
2173                                 else
2174                                         new.ndst = (dest << 8) & 0xFF00;
2175                         }
2176
2177                         /* set 'NV' to 'notification vector' */
2178                         new.nv = POSTED_INTR_VECTOR;
2179                 }
2180
2181                 /* Allow posting non-urgent interrupts */
2182                 new.sn = 0;
2183         } while (cmpxchg(&pi_desc->control, old.control,
2184                         new.control) != old.control);
2185 }
2186
2187 /*
2188  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
2189  * vcpu mutex is already taken.
2190  */
2191 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2192 {
2193         struct vcpu_vmx *vmx = to_vmx(vcpu);
2194         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2195
2196         if (!vmm_exclusive)
2197                 kvm_cpu_vmxon(phys_addr);
2198         else if (vmx->loaded_vmcs->cpu != cpu)
2199                 loaded_vmcs_clear(vmx->loaded_vmcs);
2200
2201         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
2202                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
2203                 vmcs_load(vmx->loaded_vmcs->vmcs);
2204         }
2205
2206         if (vmx->loaded_vmcs->cpu != cpu) {
2207                 struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
2208                 unsigned long sysenter_esp;
2209
2210                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2211                 local_irq_disable();
2212                 crash_disable_local_vmclear(cpu);
2213
2214                 /*
2215                  * Read loaded_vmcs->cpu should be before fetching
2216                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
2217                  * See the comments in __loaded_vmcs_clear().
2218                  */
2219                 smp_rmb();
2220
2221                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
2222                          &per_cpu(loaded_vmcss_on_cpu, cpu));
2223                 crash_enable_local_vmclear(cpu);
2224                 local_irq_enable();
2225
2226                 /*
2227                  * Linux uses per-cpu TSS and GDT, so set these when switching
2228                  * processors.
2229                  */
2230                 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
2231                 vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
2232
2233                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
2234                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
2235
2236                 vmx->loaded_vmcs->cpu = cpu;
2237         }
2238
2239         /* Setup TSC multiplier */
2240         if (kvm_has_tsc_control &&
2241             vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
2242                 vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
2243                 vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2244         }
2245
2246         vmx_vcpu_pi_load(vcpu, cpu);
2247         vmx->host_pkru = read_pkru();
2248 }
2249
2250 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
2251 {
2252         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2253
2254         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2255                 !irq_remapping_cap(IRQ_POSTING_CAP))
2256                 return;
2257
2258         /* Set SN when the vCPU is preempted */
2259         if (vcpu->preempted)
2260                 pi_set_sn(pi_desc);
2261 }
2262
2263 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
2264 {
2265         vmx_vcpu_pi_put(vcpu);
2266
2267         __vmx_load_host_state(to_vmx(vcpu));
2268         if (!vmm_exclusive) {
2269                 __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
2270                 vcpu->cpu = -1;
2271                 kvm_cpu_vmxoff();
2272         }
2273 }
2274
2275 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
2276 {
2277         ulong cr0;
2278
2279         if (vcpu->fpu_active)
2280                 return;
2281         vcpu->fpu_active = 1;
2282         cr0 = vmcs_readl(GUEST_CR0);
2283         cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
2284         cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
2285         vmcs_writel(GUEST_CR0, cr0);
2286         update_exception_bitmap(vcpu);
2287         vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
2288         if (is_guest_mode(vcpu))
2289                 vcpu->arch.cr0_guest_owned_bits &=
2290                         ~get_vmcs12(vcpu)->cr0_guest_host_mask;
2291         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2292 }
2293
2294 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2295
2296 /*
2297  * Return the cr0 value that a nested guest would read. This is a combination
2298  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
2299  * its hypervisor (cr0_read_shadow).
2300  */
2301 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
2302 {
2303         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
2304                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
2305 }
2306 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
2307 {
2308         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
2309                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
2310 }
2311
2312 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
2313 {
2314         /* Note that there is no vcpu->fpu_active = 0 here. The caller must
2315          * set this *before* calling this function.
2316          */
2317         vmx_decache_cr0_guest_bits(vcpu);
2318         vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
2319         update_exception_bitmap(vcpu);
2320         vcpu->arch.cr0_guest_owned_bits = 0;
2321         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2322         if (is_guest_mode(vcpu)) {
2323                 /*
2324                  * L1's specified read shadow might not contain the TS bit,
2325                  * so now that we turned on shadowing of this bit, we need to
2326                  * set this bit of the shadow. Like in nested_vmx_run we need
2327                  * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
2328                  * up-to-date here because we just decached cr0.TS (and we'll
2329                  * only update vmcs12->guest_cr0 on nested exit).
2330                  */
2331                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2332                 vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
2333                         (vcpu->arch.cr0 & X86_CR0_TS);
2334                 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2335         } else
2336                 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
2337 }
2338
2339 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
2340 {
2341         unsigned long rflags, save_rflags;
2342
2343         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
2344                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2345                 rflags = vmcs_readl(GUEST_RFLAGS);
2346                 if (to_vmx(vcpu)->rmode.vm86_active) {
2347                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2348                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
2349                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2350                 }
2351                 to_vmx(vcpu)->rflags = rflags;
2352         }
2353         return to_vmx(vcpu)->rflags;
2354 }
2355
2356 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2357 {
2358         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2359         to_vmx(vcpu)->rflags = rflags;
2360         if (to_vmx(vcpu)->rmode.vm86_active) {
2361                 to_vmx(vcpu)->rmode.save_rflags = rflags;
2362                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2363         }
2364         vmcs_writel(GUEST_RFLAGS, rflags);
2365 }
2366
2367 static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
2368 {
2369         return to_vmx(vcpu)->guest_pkru;
2370 }
2371
2372 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
2373 {
2374         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2375         int ret = 0;
2376
2377         if (interruptibility & GUEST_INTR_STATE_STI)
2378                 ret |= KVM_X86_SHADOW_INT_STI;
2379         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
2380                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
2381
2382         return ret;
2383 }
2384
2385 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
2386 {
2387         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2388         u32 interruptibility = interruptibility_old;
2389
2390         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
2391
2392         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
2393                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
2394         else if (mask & KVM_X86_SHADOW_INT_STI)
2395                 interruptibility |= GUEST_INTR_STATE_STI;
2396
2397         if (interruptibility != interruptibility_old)
2398                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
2399 }
2400
2401 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
2402 {
2403         unsigned long rip;
2404
2405         rip = kvm_rip_read(vcpu);
2406         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
2407         kvm_rip_write(vcpu, rip);
2408
2409         /* skipping an emulated instruction also counts */
2410         vmx_set_interrupt_shadow(vcpu, 0);
2411 }
2412
2413 /*
2414  * KVM wants to inject page-faults which it got to the guest. This function
2415  * checks whether in a nested guest, we need to inject them to L1 or L2.
2416  */
2417 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
2418 {
2419         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2420
2421         if (!(vmcs12->exception_bitmap & (1u << nr)))
2422                 return 0;
2423
2424         nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
2425                           vmcs_read32(VM_EXIT_INTR_INFO),
2426                           vmcs_readl(EXIT_QUALIFICATION));
2427         return 1;
2428 }
2429
2430 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
2431                                 bool has_error_code, u32 error_code,
2432                                 bool reinject)
2433 {
2434         struct vcpu_vmx *vmx = to_vmx(vcpu);
2435         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2436
2437         if (!reinject && is_guest_mode(vcpu) &&
2438             nested_vmx_check_exception(vcpu, nr))
2439                 return;
2440
2441         if (has_error_code) {
2442                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
2443                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2444         }
2445
2446         if (vmx->rmode.vm86_active) {
2447                 int inc_eip = 0;
2448                 if (kvm_exception_is_soft(nr))
2449                         inc_eip = vcpu->arch.event_exit_inst_len;
2450                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
2451                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2452                 return;
2453         }
2454
2455         if (kvm_exception_is_soft(nr)) {
2456                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2457                              vmx->vcpu.arch.event_exit_inst_len);
2458                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2459         } else
2460                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2461
2462         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
2463 }
2464
2465 static bool vmx_rdtscp_supported(void)
2466 {
2467         return cpu_has_vmx_rdtscp();
2468 }
2469
2470 static bool vmx_invpcid_supported(void)
2471 {
2472         return cpu_has_vmx_invpcid() && enable_ept;
2473 }
2474
2475 /*
2476  * Swap MSR entry in host/guest MSR entry array.
2477  */
2478 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
2479 {
2480         struct shared_msr_entry tmp;
2481
2482         tmp = vmx->guest_msrs[to];
2483         vmx->guest_msrs[to] = vmx->guest_msrs[from];
2484         vmx->guest_msrs[from] = tmp;
2485 }
2486
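/*
 * Pick the MSR permission bitmap for the current mode: the nested bitmap
 * when running L2, otherwise an x2APIC or legacy xAPIC bitmap, each of which
 * comes in a long-mode and a legacy-mode flavour.
 */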
2487 static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
2488 {
2489         unsigned long *msr_bitmap;
2490
2491         if (is_guest_mode(vcpu))
2492                 msr_bitmap = vmx_msr_bitmap_nested;
2493         else if (cpu_has_secondary_exec_ctrls() &&
2494                  (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
2495                   SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
2496                 if (is_long_mode(vcpu))
2497                         msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
2498                 else
2499                         msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
2500         } else {
2501                 if (is_long_mode(vcpu))
2502                         msr_bitmap = vmx_msr_bitmap_longmode;
2503                 else
2504                         msr_bitmap = vmx_msr_bitmap_legacy;
2505         }
2506
2507         vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
2508 }
2509
2510 /*
2511  * Set up the vmcs to automatically save and restore system
2512  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
2513  * mode, as fiddling with msrs is very expensive.
2514  */
2515 static void setup_msrs(struct vcpu_vmx *vmx)
2516 {
2517         int save_nmsrs, index;
2518
2519         save_nmsrs = 0;
2520 #ifdef CONFIG_X86_64
2521         if (is_long_mode(&vmx->vcpu)) {
2522                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
2523                 if (index >= 0)
2524                         move_msr_up(vmx, index, save_nmsrs++);
2525                 index = __find_msr_index(vmx, MSR_LSTAR);
2526                 if (index >= 0)
2527                         move_msr_up(vmx, index, save_nmsrs++);
2528                 index = __find_msr_index(vmx, MSR_CSTAR);
2529                 if (index >= 0)
2530                         move_msr_up(vmx, index, save_nmsrs++);
2531                 index = __find_msr_index(vmx, MSR_TSC_AUX);
2532                 if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
2533                         move_msr_up(vmx, index, save_nmsrs++);
2534                 /*
2535                  * MSR_STAR is only needed on long mode guests, and only
2536                  * if efer.sce is enabled.
2537                  */
2538                 index = __find_msr_index(vmx, MSR_STAR);
2539                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
2540                         move_msr_up(vmx, index, save_nmsrs++);
2541         }
2542 #endif
2543         index = __find_msr_index(vmx, MSR_EFER);
2544         if (index >= 0 && update_transition_efer(vmx, index))
2545                 move_msr_up(vmx, index, save_nmsrs++);
2546
2547         vmx->save_nmsrs = save_nmsrs;
2548
2549         if (cpu_has_vmx_msr_bitmap())
2550                 vmx_set_msr_bitmap(&vmx->vcpu);
2551 }
2552
2553 /*
2554  * reads and returns guest's timestamp counter "register"
2555  * guest_tsc = ((host_tsc * tsc multiplier) >> 48) + tsc_offset
2556  * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
2557  */
2558 static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2559 {
2560         u64 host_tsc, tsc_offset;
2561
2562         host_tsc = rdtsc();
2563         tsc_offset = vmcs_read64(TSC_OFFSET);
2564         return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
2565 }
2566
2567 /*
2568  * Like guest_read_tsc, but always returns L1's notion of the timestamp
2569  * counter, even if a nested guest (L2) is currently running.
2570  */
2571 static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2572 {
2573         u64 tsc_offset;
2574
2575         tsc_offset = is_guest_mode(vcpu) ?
2576                 to_vmx(vcpu)->nested.vmcs01_tsc_offset :
2577                 vmcs_read64(TSC_OFFSET);
2578         return host_tsc + tsc_offset;
2579 }
2580
2581 static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
2582 {
2583         return vmcs_read64(TSC_OFFSET);
2584 }
2585
2586 /*
2587  * writes 'offset' into guest's timestamp counter offset register
2588  */
2589 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2590 {
2591         if (is_guest_mode(vcpu)) {
2592                 /*
2593                  * We're here if L1 chose not to trap WRMSR to TSC. According
2594                  * to the spec, this should set L1's TSC; The offset that L1
2595                  * set for L2 remains unchanged, and still needs to be added
2596                  * to the newly set TSC to get L2's TSC.
2597                  */
2598                 struct vmcs12 *vmcs12;
2599                 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
2600                 /* recalculate vmcs02.TSC_OFFSET: */
2601                 vmcs12 = get_vmcs12(vcpu);
2602                 vmcs_write64(TSC_OFFSET, offset +
2603                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
2604                          vmcs12->tsc_offset : 0));
2605         } else {
2606                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2607                                            vmcs_read64(TSC_OFFSET), offset);
2608                 vmcs_write64(TSC_OFFSET, offset);
2609         }
2610 }
2611
2612 static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
2613 {
2614         u64 offset = vmcs_read64(TSC_OFFSET);
2615
2616         vmcs_write64(TSC_OFFSET, offset + adjustment);
2617         if (is_guest_mode(vcpu)) {
2618                 /* Even when running L2, the adjustment needs to apply to L1 */
2619                 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
2620         } else
2621                 trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
2622                                            offset + adjustment);
2623 }
2624
2625 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
2626 {
2627         struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
2628         return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
2629 }
2630
2631 /*
2632  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
2633  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2634  * all guests if the "nested" module option is off, and can also be disabled
2635  * for a single guest by disabling its VMX cpuid bit.
2636  */
2637 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2638 {
2639         return nested && guest_cpuid_has_vmx(vcpu);
2640 }
2641
2642 /*
2643  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2644  * returned for the various VMX controls MSRs when nested VMX is enabled.
2645  * The same values should also be used to verify that vmcs12 control fields are
2646  * valid during nested entry from L1 to L2.
2647  * Each of these control msrs has a low and high 32-bit half: A low bit is on
2648  * if the corresponding bit in the (32-bit) control field *must* be on, and a
2649  * bit in the high half is on if the corresponding bit in the control field
2650  * may be on. See also vmx_control_verify().
2651  */
2652 static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2653 {
2654         /*
2655          * Note that as a general rule, the high half of the MSRs (bits in
2656          * the control fields which may be 1) should be initialized by the
2657          * intersection of the underlying hardware's MSR (i.e., features which
2658          * can be supported) and the list of features we want to expose -
2659          * because they are known to be properly supported in our code.
2660          * Also, usually, the low half of the MSRs (bits which must be 1) can
2661          * be set to 0, meaning that L1 may turn off any of these bits. The
2662          * reason is that if one of these bits is needed by L0, it will be set
2663          * in vmcs01; prepare_vmcs02, which bitwise-or's the control fields of
2664          * vmcs01 and vmcs12, then keeps it set in vmcs02 even if L1 turned it
2665          * off - and nested_vmx_exit_handled() will not pass related exits to L1.
2666          * These rules have exceptions below.
2667          */
2668
2669         /* pin-based controls */
2670         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
2671                 vmx->nested.nested_vmx_pinbased_ctls_low,
2672                 vmx->nested.nested_vmx_pinbased_ctls_high);
2673         vmx->nested.nested_vmx_pinbased_ctls_low |=
2674                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2675         vmx->nested.nested_vmx_pinbased_ctls_high &=
2676                 PIN_BASED_EXT_INTR_MASK |
2677                 PIN_BASED_NMI_EXITING |
2678                 PIN_BASED_VIRTUAL_NMIS;
2679         vmx->nested.nested_vmx_pinbased_ctls_high |=
2680                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2681                 PIN_BASED_VMX_PREEMPTION_TIMER;
2682         if (kvm_vcpu_apicv_active(&vmx->vcpu))
2683                 vmx->nested.nested_vmx_pinbased_ctls_high |=
2684                         PIN_BASED_POSTED_INTR;
2685
2686         /* exit controls */
2687         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2688                 vmx->nested.nested_vmx_exit_ctls_low,
2689                 vmx->nested.nested_vmx_exit_ctls_high);
2690         vmx->nested.nested_vmx_exit_ctls_low =
2691                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
2692
2693         vmx->nested.nested_vmx_exit_ctls_high &=
2694 #ifdef CONFIG_X86_64
2695                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2696 #endif
2697                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
2698         vmx->nested.nested_vmx_exit_ctls_high |=
2699                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2700                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
2701                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
2702
2703         if (kvm_mpx_supported())
2704                 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2705
2706         /* We support free control of debug control saving. */
2707         vmx->nested.nested_vmx_true_exit_ctls_low =
2708                 vmx->nested.nested_vmx_exit_ctls_low &
2709                 ~VM_EXIT_SAVE_DEBUG_CONTROLS;
2710
2711         /* entry controls */
2712         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2713                 vmx->nested.nested_vmx_entry_ctls_low,
2714                 vmx->nested.nested_vmx_entry_ctls_high);
2715         vmx->nested.nested_vmx_entry_ctls_low =
2716                 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
2717         vmx->nested.nested_vmx_entry_ctls_high &=
2718 #ifdef CONFIG_X86_64
2719                 VM_ENTRY_IA32E_MODE |
2720 #endif
2721                 VM_ENTRY_LOAD_IA32_PAT;
2722         vmx->nested.nested_vmx_entry_ctls_high |=
2723                 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
2724         if (kvm_mpx_supported())
2725                 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
2726
2727         /* We support free control of debug control loading. */
2728         vmx->nested.nested_vmx_true_entry_ctls_low =
2729                 vmx->nested.nested_vmx_entry_ctls_low &
2730                 ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
2731
2732         /* cpu-based controls */
2733         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2734                 vmx->nested.nested_vmx_procbased_ctls_low,
2735                 vmx->nested.nested_vmx_procbased_ctls_high);
2736         vmx->nested.nested_vmx_procbased_ctls_low =
2737                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2738         vmx->nested.nested_vmx_procbased_ctls_high &=
2739                 CPU_BASED_VIRTUAL_INTR_PENDING |
2740                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2741                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2742                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2743                 CPU_BASED_CR3_STORE_EXITING |
2744 #ifdef CONFIG_X86_64
2745                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2746 #endif
2747                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2748                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
2749                 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
2750                 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
2751                 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2752         /*
2753          * We can allow some features even when not supported by the
2754          * hardware. For example, L1 can specify an MSR bitmap - and we
2755          * can use it to avoid exits to L1 - even when L0 runs L2
2756          * without MSR bitmaps.
2757          */
2758         vmx->nested.nested_vmx_procbased_ctls_high |=
2759                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2760                 CPU_BASED_USE_MSR_BITMAPS;
2761
2762         /* We support free control of CR3 access interception. */
2763         vmx->nested.nested_vmx_true_procbased_ctls_low =
2764                 vmx->nested.nested_vmx_procbased_ctls_low &
2765                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
2766
2767         /* secondary cpu-based controls */
2768         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2769                 vmx->nested.nested_vmx_secondary_ctls_low,
2770                 vmx->nested.nested_vmx_secondary_ctls_high);
2771         vmx->nested.nested_vmx_secondary_ctls_low = 0;
2772         vmx->nested.nested_vmx_secondary_ctls_high &=
2773                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2774                 SECONDARY_EXEC_RDTSCP |
2775                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2776                 SECONDARY_EXEC_ENABLE_VPID |
2777                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2778                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2779                 SECONDARY_EXEC_WBINVD_EXITING |
2780                 SECONDARY_EXEC_XSAVES |
2781                 SECONDARY_EXEC_PCOMMIT;
2782
2783         if (enable_ept) {
2784                 /* nested EPT: emulate EPT also to L1 */
2785                 vmx->nested.nested_vmx_secondary_ctls_high |=
2786                         SECONDARY_EXEC_ENABLE_EPT;
2787                 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2788                          VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
2789                          VMX_EPT_INVEPT_BIT;
2790                 vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
2791                 /*
2792                  * For nested guests, we don't do anything specific
2793                  * for single context invalidation. Hence, only advertise
2794                  * support for global context invalidation.
2795                  */
2796                 vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
2797         } else
2798                 vmx->nested.nested_vmx_ept_caps = 0;
2799
2800         /*
2801          * Old versions of KVM use the single-context version without
2802          * checking for support, so declare that it is supported even
2803          * though it is treated as global context.  The alternative is
2804          * not failing the single-context invvpid, and it is worse.
2805          */
2806         if (enable_vpid)
2807                 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
2808                                 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
2809                                 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
2810         else
2811                 vmx->nested.nested_vmx_vpid_caps = 0;
2812
2813         if (enable_unrestricted_guest)
2814                 vmx->nested.nested_vmx_secondary_ctls_high |=
2815                         SECONDARY_EXEC_UNRESTRICTED_GUEST;
2816
2817         /* miscellaneous data */
2818         rdmsr(MSR_IA32_VMX_MISC,
2819                 vmx->nested.nested_vmx_misc_low,
2820                 vmx->nested.nested_vmx_misc_high);
2821         vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
2822         vmx->nested.nested_vmx_misc_low |=
2823                 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
2824                 VMX_MISC_ACTIVITY_HLT;
2825         vmx->nested.nested_vmx_misc_high = 0;
2826 }
2827
2828 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2829 {
2830         /*
2831          * Bits that are 0 in 'high' must be 0 in 'control'; bits that are 1 in 'low' must be 1.
2832          */
2833         return ((control & high) | low) == control;
2834 }
2835
2836 static inline u64 vmx_control_msr(u32 low, u32 high)
2837 {
2838         return low | ((u64)high << 32);
2839 }
2840
2841 /* Returns 0 on success, non-0 otherwise. */
2842 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2843 {
2844         struct vcpu_vmx *vmx = to_vmx(vcpu);
2845
2846         switch (msr_index) {
2847         case MSR_IA32_VMX_BASIC:
2848                 /*
2849                  * This MSR reports some information about VMX support. We
2850                  * should return information about the VMX we emulate for the
2851                  * guest, and the VMCS structure we give it - not about the
2852                  * VMX support of the underlying hardware.
2853                  */
2854                 *pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS |
2855                            ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2856                            (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2857                 break;
2858         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2859         case MSR_IA32_VMX_PINBASED_CTLS:
2860                 *pdata = vmx_control_msr(
2861                         vmx->nested.nested_vmx_pinbased_ctls_low,
2862                         vmx->nested.nested_vmx_pinbased_ctls_high);
2863                 break;
2864         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2865                 *pdata = vmx_control_msr(
2866                         vmx->nested.nested_vmx_true_procbased_ctls_low,
2867                         vmx->nested.nested_vmx_procbased_ctls_high);
2868                 break;
2869         case MSR_IA32_VMX_PROCBASED_CTLS:
2870                 *pdata = vmx_control_msr(
2871                         vmx->nested.nested_vmx_procbased_ctls_low,
2872                         vmx->nested.nested_vmx_procbased_ctls_high);
2873                 break;
2874         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2875                 *pdata = vmx_control_msr(
2876                         vmx->nested.nested_vmx_true_exit_ctls_low,
2877                         vmx->nested.nested_vmx_exit_ctls_high);
2878                 break;
2879         case MSR_IA32_VMX_EXIT_CTLS:
2880                 *pdata = vmx_control_msr(
2881                         vmx->nested.nested_vmx_exit_ctls_low,
2882                         vmx->nested.nested_vmx_exit_ctls_high);
2883                 break;
2884         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2885                 *pdata = vmx_control_msr(
2886                         vmx->nested.nested_vmx_true_entry_ctls_low,
2887                         vmx->nested.nested_vmx_entry_ctls_high);
2888                 break;
2889         case MSR_IA32_VMX_ENTRY_CTLS:
2890                 *pdata = vmx_control_msr(
2891                         vmx->nested.nested_vmx_entry_ctls_low,
2892                         vmx->nested.nested_vmx_entry_ctls_high);
2893                 break;
2894         case MSR_IA32_VMX_MISC:
2895                 *pdata = vmx_control_msr(
2896                         vmx->nested.nested_vmx_misc_low,
2897                         vmx->nested.nested_vmx_misc_high);
2898                 break;
2899         /*
2900          * These MSRs specify bits which the guest must keep fixed (on or off)
2901          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
2902          * We picked the standard core2 setting.
2903          */
2904 #define VMXON_CR0_ALWAYSON      (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2905 #define VMXON_CR4_ALWAYSON      X86_CR4_VMXE
2906         case MSR_IA32_VMX_CR0_FIXED0:
2907                 *pdata = VMXON_CR0_ALWAYSON;
2908                 break;
2909         case MSR_IA32_VMX_CR0_FIXED1:
2910                 *pdata = -1ULL;
2911                 break;
2912         case MSR_IA32_VMX_CR4_FIXED0:
2913                 *pdata = VMXON_CR4_ALWAYSON;
2914                 break;
2915         case MSR_IA32_VMX_CR4_FIXED1:
2916                 *pdata = -1ULL;
2917                 break;
2918         case MSR_IA32_VMX_VMCS_ENUM:
2919                 *pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
2920                 break;
2921         case MSR_IA32_VMX_PROCBASED_CTLS2:
2922                 *pdata = vmx_control_msr(
2923                         vmx->nested.nested_vmx_secondary_ctls_low,
2924                         vmx->nested.nested_vmx_secondary_ctls_high);
2925                 break;
2926         case MSR_IA32_VMX_EPT_VPID_CAP:
2927                 /* EPT caps in the low 32 bits, VPID caps in the high 32 bits */
2928                 *pdata = vmx->nested.nested_vmx_ept_caps |
2929                         ((u64)vmx->nested.nested_vmx_vpid_caps << 32);
2930                 break;
2931         default:
2932                 return 1;
2933         }
2934
2935         return 0;
2936 }
2937
2938 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
2939                                                  uint64_t val)
2940 {
2941         uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
2942
2943         return !(val & ~valid_bits);
2944 }
2945
2946 /*
2947  * Reads an msr value (msr_info->index) into msr_info->data.
2948  * Returns 0 on success, non-0 otherwise.
2949  * Assumes vcpu_load() was already called.
2950  */
2951 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2952 {
2953         struct shared_msr_entry *msr;
2954
2955         switch (msr_info->index) {
2956 #ifdef CONFIG_X86_64
2957         case MSR_FS_BASE:
2958                 msr_info->data = vmcs_readl(GUEST_FS_BASE);
2959                 break;
2960         case MSR_GS_BASE:
2961                 msr_info->data = vmcs_readl(GUEST_GS_BASE);
2962                 break;
2963         case MSR_KERNEL_GS_BASE:
2964                 vmx_load_host_state(to_vmx(vcpu));
2965                 msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
2966                 break;
2967 #endif
2968         case MSR_EFER:
2969                 return kvm_get_msr_common(vcpu, msr_info);
2970         case MSR_IA32_TSC:
2971                 msr_info->data = guest_read_tsc(vcpu);
2972                 break;
2973         case MSR_IA32_SYSENTER_CS:
2974                 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
2975                 break;
2976         case MSR_IA32_SYSENTER_EIP:
2977                 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
2978                 break;
2979         case MSR_IA32_SYSENTER_ESP:
2980                 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
2981                 break;
2982         case MSR_IA32_BNDCFGS:
2983                 if (!kvm_mpx_supported())
2984                         return 1;
2985                 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
2986                 break;
2987         case MSR_IA32_MCG_EXT_CTL:
2988                 if (!msr_info->host_initiated &&
2989                     !(to_vmx(vcpu)->msr_ia32_feature_control &
2990                       FEATURE_CONTROL_LMCE))
2991                         return 1;
2992                 msr_info->data = vcpu->arch.mcg_ext_ctl;
2993                 break;
2994         case MSR_IA32_FEATURE_CONTROL:
2995                 msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
2996                 break;
2997         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
2998                 if (!nested_vmx_allowed(vcpu))
2999                         return 1;
3000                 return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
3001         case MSR_IA32_XSS:
3002                 if (!vmx_xsaves_supported())
3003                         return 1;
3004                 msr_info->data = vcpu->arch.ia32_xss;
3005                 break;
3006         case MSR_TSC_AUX:
3007                 if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
3008                         return 1;
3009                 /* Otherwise falls through */
3010         default:
3011                 msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
3012                 if (msr) {
3013                         msr_info->data = msr->data;
3014                         break;
3015                 }
3016                 return kvm_get_msr_common(vcpu, msr_info);
3017         }
3018
3019         return 0;
3020 }
3021
3022 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
3023
3024 /*
3025  * Writes msr value into the appropriate "register".
3026  * Returns 0 on success, non-0 otherwise.
3027  * Assumes vcpu_load() was already called.
3028  */
3029 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3030 {
3031         struct vcpu_vmx *vmx = to_vmx(vcpu);
3032         struct shared_msr_entry *msr;
3033         int ret = 0;
3034         u32 msr_index = msr_info->index;
3035         u64 data = msr_info->data;
3036
3037         switch (msr_index) {
3038         case MSR_EFER:
3039                 ret = kvm_set_msr_common(vcpu, msr_info);
3040                 break;
3041 #ifdef CONFIG_X86_64
3042         case MSR_FS_BASE:
3043                 vmx_segment_cache_clear(vmx);
3044                 vmcs_writel(GUEST_FS_BASE, data);
3045                 break;
3046         case MSR_GS_BASE:
3047                 vmx_segment_cache_clear(vmx);
3048                 vmcs_writel(GUEST_GS_BASE, data);
3049                 break;
3050         case MSR_KERNEL_GS_BASE:
3051                 vmx_load_host_state(vmx);
3052                 vmx->msr_guest_kernel_gs_base = data;
3053                 break;
3054 #endif
3055         case MSR_IA32_SYSENTER_CS:
3056                 vmcs_write32(GUEST_SYSENTER_CS, data);
3057                 break;
3058         case MSR_IA32_SYSENTER_EIP:
3059                 vmcs_writel(GUEST_SYSENTER_EIP, data);
3060                 break;
3061         case MSR_IA32_SYSENTER_ESP:
3062                 vmcs_writel(GUEST_SYSENTER_ESP, data);
3063                 break;
3064         case MSR_IA32_BNDCFGS:
3065                 if (!kvm_mpx_supported())
3066                         return 1;
3067                 vmcs_write64(GUEST_BNDCFGS, data);
3068                 break;
3069         case MSR_IA32_TSC:
3070                 kvm_write_tsc(vcpu, msr_info);
3071                 break;
3072         case MSR_IA32_CR_PAT:
3073                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
3074                         if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3075                                 return 1;
3076                         vmcs_write64(GUEST_IA32_PAT, data);
3077                         vcpu->arch.pat = data;
3078                         break;
3079                 }
3080                 ret = kvm_set_msr_common(vcpu, msr_info);
3081                 break;
3082         case MSR_IA32_TSC_ADJUST:
3083                 ret = kvm_set_msr_common(vcpu, msr_info);
3084                 break;
3085         case MSR_IA32_MCG_EXT_CTL:
3086                 if ((!msr_info->host_initiated &&
3087                      !(to_vmx(vcpu)->msr_ia32_feature_control &
3088                        FEATURE_CONTROL_LMCE)) ||
3089                     (data & ~MCG_EXT_CTL_LMCE_EN))
3090                         return 1;
3091                 vcpu->arch.mcg_ext_ctl = data;
3092                 break;
3093         case MSR_IA32_FEATURE_CONTROL:
3094                 if (!vmx_feature_control_msr_valid(vcpu, data) ||
3095                     (to_vmx(vcpu)->msr_ia32_feature_control &
3096                      FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
3097                         return 1;
3098                 vmx->msr_ia32_feature_control = data;
3099                 if (msr_info->host_initiated && data == 0)
3100                         vmx_leave_nested(vcpu);
3101                 break;
3102         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3103                 return 1; /* they are read-only */
3104         case MSR_IA32_XSS:
3105                 if (!vmx_xsaves_supported())
3106                         return 1;
3107                 /*
3108                  * The only supported bit as of Skylake is bit 8, but
3109                  * it is not supported on KVM.
3110                  */
3111                 if (data != 0)
3112                         return 1;
3113                 vcpu->arch.ia32_xss = data;
3114                 if (vcpu->arch.ia32_xss != host_xss)
3115                         add_atomic_switch_msr(vmx, MSR_IA32_XSS,
3116                                 vcpu->arch.ia32_xss, host_xss);
3117                 else
3118                         clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
3119                 break;
3120         case MSR_TSC_AUX:
3121                 if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
3122                         return 1;
3123                 /* Check reserved bit, higher 32 bits should be zero */
3124                 if ((data >> 32) != 0)
3125                         return 1;
3126                 /* Otherwise falls through */
3127         default:
3128                 msr = find_msr_entry(vmx, msr_index);
3129                 if (msr) {
3130                         u64 old_msr_data = msr->data;
3131                         msr->data = data;
3132                         if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
3133                                 preempt_disable();
3134                                 ret = kvm_set_shared_msr(msr->index, msr->data,
3135                                                          msr->mask);
3136                                 preempt_enable();
3137                                 if (ret)
3138                                         msr->data = old_msr_data;
3139                         }
3140                         break;
3141                 }
3142                 ret = kvm_set_msr_common(vcpu, msr_info);
3143         }
3144
3145         return ret;
3146 }
3147
3148 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
3149 {
3150         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
3151         switch (reg) {
3152         case VCPU_REGS_RSP:
3153                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
3154                 break;
3155         case VCPU_REGS_RIP:
3156                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
3157                 break;
3158         case VCPU_EXREG_PDPTR:
3159                 if (enable_ept)
3160                         ept_save_pdptrs(vcpu);
3161                 break;
3162         default:
3163                 break;
3164         }
3165 }
3166
3167 static __init int cpu_has_kvm_support(void)
3168 {
3169         return cpu_has_vmx();
3170 }
3171
3172 static __init int vmx_disabled_by_bios(void)
3173 {
3174         u64 msr;
3175
3176         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
3177         if (msr & FEATURE_CONTROL_LOCKED) {
3178                 /* launched w/ TXT and VMX disabled */
3179                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
3180                         && tboot_enabled())
3181                         return 1;
3182                 /* launched w/o TXT and VMX only enabled w/ TXT */
3183                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
3184                         && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
3185                         && !tboot_enabled()) {
3186                         printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
3187                                 "activate TXT before enabling KVM\n");
3188                         return 1;
3189                 }
3190                 /* launched w/o TXT and VMX disabled */
3191                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
3192                         && !tboot_enabled())
3193                         return 1;
3194         }
3195
3196         return 0;
3197 }
3198
3199 static void kvm_cpu_vmxon(u64 addr)
3200 {
3201         intel_pt_handle_vmx(1);
3202
3203         asm volatile (ASM_VMX_VMXON_RAX
3204                         : : "a"(&addr), "m"(addr)
3205                         : "memory", "cc");
3206 }
3207
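/*
 * Per-cpu VMX enabling: IA32_FEATURE_CONTROL must allow VMXON (outside SMX,
 * or inside SMX when tboot is active) and be locked; if the BIOS left it
 * unlocked we set and lock the needed bits here, then set CR4.VMXE and, in
 * vmm_exclusive mode, execute VMXON immediately.
 */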
3208 static int hardware_enable(void)
3209 {
3210         int cpu = raw_smp_processor_id();
3211         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
3212         u64 old, test_bits;
3213
3214         if (cr4_read_shadow() & X86_CR4_VMXE)
3215                 return -EBUSY;
3216
3217         INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
3218         INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
3219         spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
3220
3221         /*
3222          * Now we can enable the vmclear operation in kdump
3223          * since the loaded_vmcss_on_cpu list on this cpu
3224          * has been initialized.
3225          *
3226          * Though the cpu is not in VMX operation now, there
3227          * is no problem in enabling the vmclear operation here,
3228          * since the loaded_vmcss_on_cpu list is still empty!
3229          */
3230         crash_enable_local_vmclear(cpu);
3231
3232         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
3233
3234         test_bits = FEATURE_CONTROL_LOCKED;
3235         test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
3236         if (tboot_enabled())
3237                 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
3238
3239         if ((old & test_bits) != test_bits) {
3240                 /* enable and lock */
3241                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
3242         }
3243         cr4_set_bits(X86_CR4_VMXE);
3244
3245         if (vmm_exclusive) {
3246                 kvm_cpu_vmxon(phys_addr);
3247                 ept_sync_global();
3248         }
3249
3250         native_store_gdt(this_cpu_ptr(&host_gdt));
3251
3252         return 0;
3253 }
3254
3255 static void vmclear_local_loaded_vmcss(void)
3256 {
3257         int cpu = raw_smp_processor_id();
3258         struct loaded_vmcs *v, *n;
3259
3260         list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
3261                                  loaded_vmcss_on_cpu_link)
3262                 __loaded_vmcs_clear(v);
3263 }
3264
3265
3266 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
3267  * tricks.
3268  */
3269 static void kvm_cpu_vmxoff(void)
3270 {
3271         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
3272
3273         intel_pt_handle_vmx(0);
3274 }
3275
3276 static void hardware_disable(void)
3277 {
3278         if (vmm_exclusive) {
3279                 vmclear_local_loaded_vmcss();
3280                 kvm_cpu_vmxoff();
3281         }
3282         cr4_clear_bits(X86_CR4_VMXE);
3283 }
3284
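/*
 * Each VMX capability MSR encodes "allowed 0-settings" in its low 32 bits
 * (bits that must be 1 in the control field) and "allowed 1-settings" in its
 * high 32 bits (bits that may be 1).  adjust_vmx_controls() starts from
 * ctl_min | ctl_opt, drops optional bits the CPU cannot set, forces the bits
 * it requires, and fails only if a required (ctl_min) bit is unsupported.
 */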
3285 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
3286                                       u32 msr, u32 *result)
3287 {
3288         u32 vmx_msr_low, vmx_msr_high;
3289         u32 ctl = ctl_min | ctl_opt;
3290
3291         rdmsr(msr, vmx_msr_low, vmx_msr_high);
3292
3293         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
3294         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
3295
3296         /* Ensure the minimum (required) set of control bits is supported. */
3297         if (ctl_min & ~ctl)
3298                 return -EIO;
3299
3300         *result = ctl;
3301         return 0;
3302 }
3303
3304 static __init bool allow_1_setting(u32 msr, u32 ctl)
3305 {
3306         u32 vmx_msr_low, vmx_msr_high;
3307
3308         rdmsr(msr, vmx_msr_low, vmx_msr_high);
3309         return vmx_msr_high & ctl;
3310 }
3311
3312 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
3313 {
3314         u32 vmx_msr_low, vmx_msr_high;
3315         u32 min, opt, min2, opt2;
3316         u32 _pin_based_exec_control = 0;
3317         u32 _cpu_based_exec_control = 0;
3318         u32 _cpu_based_2nd_exec_control = 0;
3319         u32 _vmexit_control = 0;
3320         u32 _vmentry_control = 0;
3321
3322         min = CPU_BASED_HLT_EXITING |
3323 #ifdef CONFIG_X86_64
3324               CPU_BASED_CR8_LOAD_EXITING |
3325               CPU_BASED_CR8_STORE_EXITING |
3326 #endif
3327               CPU_BASED_CR3_LOAD_EXITING |
3328               CPU_BASED_CR3_STORE_EXITING |
3329               CPU_BASED_USE_IO_BITMAPS |
3330               CPU_BASED_MOV_DR_EXITING |
3331               CPU_BASED_USE_TSC_OFFSETING |
3332               CPU_BASED_MWAIT_EXITING |
3333               CPU_BASED_MONITOR_EXITING |
3334               CPU_BASED_INVLPG_EXITING |
3335               CPU_BASED_RDPMC_EXITING;
3336
3337         opt = CPU_BASED_TPR_SHADOW |
3338               CPU_BASED_USE_MSR_BITMAPS |
3339               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
3340         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
3341                                 &_cpu_based_exec_control) < 0)
3342                 return -EIO;
3343 #ifdef CONFIG_X86_64
3344         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
3345                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
3346                                            ~CPU_BASED_CR8_STORE_EXITING;
3347 #endif
3348         if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
3349                 min2 = 0;
3350                 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3351                         SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3352                         SECONDARY_EXEC_WBINVD_EXITING |
3353                         SECONDARY_EXEC_ENABLE_VPID |
3354                         SECONDARY_EXEC_ENABLE_EPT |
3355                         SECONDARY_EXEC_UNRESTRICTED_GUEST |
3356                         SECONDARY_EXEC_PAUSE_LOOP_EXITING |
3357                         SECONDARY_EXEC_RDTSCP |
3358                         SECONDARY_EXEC_ENABLE_INVPCID |
3359                         SECONDARY_EXEC_APIC_REGISTER_VIRT |
3360                         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3361                         SECONDARY_EXEC_SHADOW_VMCS |
3362                         SECONDARY_EXEC_XSAVES |
3363                         SECONDARY_EXEC_ENABLE_PML |
3364                         SECONDARY_EXEC_PCOMMIT |
3365                         SECONDARY_EXEC_TSC_SCALING;
3366                 if (adjust_vmx_controls(min2, opt2,
3367                                         MSR_IA32_VMX_PROCBASED_CTLS2,
3368                                         &_cpu_based_2nd_exec_control) < 0)
3369                         return -EIO;
3370         }
3371 #ifndef CONFIG_X86_64
3372         if (!(_cpu_based_2nd_exec_control &
3373                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
3374                 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
3375 #endif
3376
3377         if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
3378                 _cpu_based_2nd_exec_control &= ~(
3379                                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
3380                                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3381                                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
3382
3383         if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
3384                 /* CR3 accesses and invlpg don't need to cause VM exits
3385                    when EPT is enabled */
3386                 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
3387                                              CPU_BASED_CR3_STORE_EXITING |
3388                                              CPU_BASED_INVLPG_EXITING);
3389                 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
3390                       vmx_capability.ept, vmx_capability.vpid);
3391         }
3392
3393         min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
3394 #ifdef CONFIG_X86_64
3395         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
3396 #endif
3397         opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
3398                 VM_EXIT_CLEAR_BNDCFGS;
3399         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
3400                                 &_vmexit_control) < 0)
3401                 return -EIO;
3402
3403         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
3404         opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
3405                  PIN_BASED_VMX_PREEMPTION_TIMER;
3406         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
3407                                 &_pin_based_exec_control) < 0)
3408                 return -EIO;
3409
3410         if (!(_cpu_based_2nd_exec_control &
3411                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
3412                 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
3413
3414         min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
3415         opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
3416         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
3417                                 &_vmentry_control) < 0)
3418                 return -EIO;
3419
3420         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
3421
3422         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
3423         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
3424                 return -EIO;
3425
3426 #ifdef CONFIG_X86_64
3427         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
3428         if (vmx_msr_high & (1u<<16))
3429                 return -EIO;
3430 #endif
3431
3432         /* Require Write-Back (WB) memory type for VMCS accesses. */
3433         if (((vmx_msr_high >> 18) & 15) != 6)
3434                 return -EIO;
3435
3436         vmcs_conf->size = vmx_msr_high & 0x1fff;
3437         vmcs_conf->order = get_order(vmcs_config.size);
3438         vmcs_conf->revision_id = vmx_msr_low;
3439
3440         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
3441         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
3442         vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
3443         vmcs_conf->vmexit_ctrl         = _vmexit_control;
3444         vmcs_conf->vmentry_ctrl        = _vmentry_control;
3445
3446         cpu_has_load_ia32_efer =
3447                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
3448                                 VM_ENTRY_LOAD_IA32_EFER)
3449                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
3450                                    VM_EXIT_LOAD_IA32_EFER);
3451
3452         cpu_has_load_perf_global_ctrl =
3453                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
3454                                 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
3455                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
3456                                    VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
3457
3458         /*
3459          * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
3460          * but due to the errata below it can't be used. The workaround is
3461          * to use the msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
3462          *
3463          * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
3464          *
3465          * AAK155             (model 26)
3466          * AAP115             (model 30)
3467          * AAT100             (model 37)
3468          * BC86,AAY89,BD102   (model 44)
3469          * BA97               (model 46)
3470          *
3471          */
3472         if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
3473                 switch (boot_cpu_data.x86_model) {
3474                 case 26:
3475                 case 30:
3476                 case 37:
3477                 case 44:
3478                 case 46:
3479                         cpu_has_load_perf_global_ctrl = false;
3480                         printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
3481                                         "does not work properly. Using workaround\n");
3482                         break;
3483                 default:
3484                         break;
3485                 }
3486         }
3487
3488         if (boot_cpu_has(X86_FEATURE_XSAVES))
3489                 rdmsrl(MSR_IA32_XSS, host_xss);
3490
3491         return 0;
3492 }
3493
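/*
 * Allocate a VMCS region for @cpu from its local NUMA node.  The region is
 * vmcs_config.order pages, zeroed, with the VMCS revision identifier from
 * MSR_IA32_VMX_BASIC stamped into its first 32 bits as the hardware expects.
 */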
3494 static struct vmcs *alloc_vmcs_cpu(int cpu)
3495 {
3496         int node = cpu_to_node(cpu);
3497         struct page *pages;
3498         struct vmcs *vmcs;
3499
3500         pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
3501         if (!pages)
3502                 return NULL;
3503         vmcs = page_address(pages);
3504         memset(vmcs, 0, vmcs_config.size);
3505         vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
3506         return vmcs;
3507 }
3508
3509 static struct vmcs *alloc_vmcs(void)
3510 {
3511         return alloc_vmcs_cpu(raw_smp_processor_id());
3512 }
3513
3514 static void free_vmcs(struct vmcs *vmcs)
3515 {
3516         free_pages((unsigned long)vmcs, vmcs_config.order);
3517 }
3518
3519 /*
3520  * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
3521  */
3522 static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
3523 {
3524         if (!loaded_vmcs->vmcs)
3525                 return;
3526         loaded_vmcs_clear(loaded_vmcs);
3527         free_vmcs(loaded_vmcs->vmcs);
3528         loaded_vmcs->vmcs = NULL;
3529 }
3530
3531 static void free_kvm_area(void)
3532 {
3533         int cpu;
3534
3535         for_each_possible_cpu(cpu) {
3536                 free_vmcs(per_cpu(vmxarea, cpu));
3537                 per_cpu(vmxarea, cpu) = NULL;
3538         }
3539 }
3540
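/*
 * Prune fields the host cannot shadow (e.g. GUEST_BNDCFGS without MPX) from
 * shadow_read_write_fields[], then clear those fields in the VMREAD/VMWRITE
 * bitmaps so that L1 accesses to shadowed fields hit the shadow VMCS instead
 * of causing a VM exit.
 */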
3541 static void init_vmcs_shadow_fields(void)
3542 {
3543         int i, j;
3544
3545         /* No checks for read only fields yet */
3546
3547         for (i = j = 0; i < max_shadow_read_write_fields; i++) {
3548                 switch (shadow_read_write_fields[i]) {
3549                 case GUEST_BNDCFGS:
3550                         if (!kvm_mpx_supported())
3551                                 continue;
3552                         break;
3553                 default:
3554                         break;
3555                 }
3556
3557                 if (j < i)
3558                         shadow_read_write_fields[j] =
3559                                 shadow_read_write_fields[i];
3560                 j++;
3561         }
3562         max_shadow_read_write_fields = j;
3563
3564         /* shadowed fields that the guest can access without a vmexit */
3565         for (i = 0; i < max_shadow_read_write_fields; i++) {
3566                 clear_bit(shadow_read_write_fields[i],
3567                           vmx_vmwrite_bitmap);
3568                 clear_bit(shadow_read_write_fields[i],
3569                           vmx_vmread_bitmap);
3570         }
3571         for (i = 0; i < max_shadow_read_only_fields; i++)
3572                 clear_bit(shadow_read_only_fields[i],
3573                           vmx_vmread_bitmap);
3574 }
3575
3576 static __init int alloc_kvm_area(void)
3577 {
3578         int cpu;
3579
3580         for_each_possible_cpu(cpu) {
3581                 struct vmcs *vmcs;
3582
3583                 vmcs = alloc_vmcs_cpu(cpu);
3584                 if (!vmcs) {
3585                         free_kvm_area();
3586                         return -ENOMEM;
3587                 }
3588
3589                 per_cpu(vmxarea, cpu) = vmcs;
3590         }
3591         return 0;
3592 }
3593
3594 static bool emulation_required(struct kvm_vcpu *vcpu)
3595 {
3596         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
3597 }
3598
3599 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
3600                 struct kvm_segment *save)
3601 {
3602         if (!emulate_invalid_guest_state) {
3603                 /*
3604                  * CS and SS RPL should be equal during guest entry according
3605                  * to VMX spec, but in reality it is not always so. Since vcpu
3606                  * is in the middle of the transition from real mode to
3607                  * protected mode it is safe to assume that RPL 0 is a good
3608                  * default value.
3609                  */
3610                 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
3611                         save->selector &= ~SEGMENT_RPL_MASK;
3612                 save->dpl = save->selector & SEGMENT_RPL_MASK;
3613                 save->s = 1;
3614         }
3615         vmx_set_segment(vcpu, save, seg);
3616 }
3617
3618 static void enter_pmode(struct kvm_vcpu *vcpu)
3619 {
3620         unsigned long flags;
3621         struct vcpu_vmx *vmx = to_vmx(vcpu);
3622
3623         /*
3624          * Update real mode segment cache. It may not be up-to-date if a segment
3625          * register was written while the vcpu was in guest mode.
3626          */
3627         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3628         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3629         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3630         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3631         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3632         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3633
3634         vmx->rmode.vm86_active = 0;
3635
3636         vmx_segment_cache_clear(vmx);
3637
3638         vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3639
3640         flags = vmcs_readl(GUEST_RFLAGS);
3641         flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3642         flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3643         vmcs_writel(GUEST_RFLAGS, flags);
3644
3645         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
3646                         (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
3647
3648         update_exception_bitmap(vcpu);
3649
3650         fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3651         fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3652         fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3653         fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3654         fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3655         fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3656 }
3657
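/*
 * Make a saved segment look like a 16-bit real-mode segment before loading
 * it into the guest state area: base == selector << 4 (paragraph aligned),
 * limit 0xffff, DPL 3, present, type 3, so that it passes the VM-entry
 * checks while vm86 mode is used to emulate real mode.
 */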
3658 static void fix_rmode_seg(int seg, struct kvm_segment *save)
3659 {
3660         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3661         struct kvm_segment var = *save;
3662
3663         var.dpl = 0x3;
3664         if (seg == VCPU_SREG_CS)
3665                 var.type = 0x3;
3666
3667         if (!emulate_invalid_guest_state) {
3668                 var.selector = var.base >> 4;
3669                 var.base = var.base & 0xffff0;
3670                 var.limit = 0xffff;
3671                 var.g = 0;
3672                 var.db = 0;
3673                 var.present = 1;
3674                 var.s = 1;
3675                 var.l = 0;
3676                 var.unusable = 0;
3677                 var.type = 0x3;
3678                 var.avl = 0;
3679                 if (save->base & 0xf)
3680                         printk_once(KERN_WARNING "kvm: segment base is not "
3681                                         "paragraph aligned when entering "
3682                                         "protected mode (seg=%d)", seg);
3683         }
3684
3685         vmcs_write16(sf->selector, var.selector);
3686         vmcs_write32(sf->base, var.base);
3687         vmcs_write32(sf->limit, var.limit);
3688         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
3689 }
3690
3691 static void enter_rmode(struct kvm_vcpu *vcpu)
3692 {
3693         unsigned long flags;
3694         struct vcpu_vmx *vmx = to_vmx(vcpu);
3695
3696         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3697         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3698         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3699         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3700         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3701         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3702         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3703
3704         vmx->rmode.vm86_active = 1;
3705
3706         /*
3707          * Very old userspace does not call KVM_SET_TSS_ADDR before entering
3708          * vcpu. Warn the user that an update is overdue.
3709          */
3710         if (!vcpu->kvm->arch.tss_addr)
3711                 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
3712                              "called before entering vcpu\n");
3713
3714         vmx_segment_cache_clear(vmx);
3715
3716         vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr);
3717         vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
3718         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
3719
3720         flags = vmcs_readl(GUEST_RFLAGS);
3721         vmx->rmode.save_rflags = flags;
3722
3723         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3724
3725         vmcs_writel(GUEST_RFLAGS, flags);
3726         vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
3727         update_exception_bitmap(vcpu);
3728
3729         fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3730         fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3731         fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3732         fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3733         fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3734         fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3735
3736         kvm_mmu_reset_context(vcpu);
3737 }
3738
3739 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
3740 {
3741         struct vcpu_vmx *vmx = to_vmx(vcpu);
3742         struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
3743
3744         if (!msr)
3745                 return;
3746
3747         /*
3748          * Force kernel_gs_base reloading before EFER changes, as control
3749          * of this msr depends on is_long_mode().
3750          */
3751         vmx_load_host_state(to_vmx(vcpu));
3752         vcpu->arch.efer = efer;
3753         if (efer & EFER_LMA) {
3754                 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
3755                 msr->data = efer;
3756         } else {
3757                 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
3758
3759                 msr->data = efer & ~EFER_LME;
3760         }
3761         setup_msrs(vmx);
3762 }
3763
3764 #ifdef CONFIG_X86_64
3765
3766 static void enter_lmode(struct kvm_vcpu *vcpu)
3767 {
3768         u32 guest_tr_ar;
3769
3770         vmx_segment_cache_clear(to_vmx(vcpu));
3771
3772         guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
3773         if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
3774                 pr_debug_ratelimited("%s: tss fixup for long mode. \n",
3775                                      __func__);
3776                 vmcs_write32(GUEST_TR_AR_BYTES,
3777                              (guest_tr_ar & ~VMX_AR_TYPE_MASK)
3778                              | VMX_AR_TYPE_BUSY_64_TSS);
3779         }
3780         vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3781 }
3782
3783 static void exit_lmode(struct kvm_vcpu *vcpu)
3784 {
3785         vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
3786         vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3787 }
3788
3789 #endif
3790
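/*
 * Flush the guest's cached translations: vpid_sync_context() invalidates
 * VPID-tagged linear mappings via INVVPID, and when EPT is in use an
 * additional INVEPT single-context flush drops the combined mappings for
 * the current EPT root.
 */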
3791 static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
3792 {
3793         vpid_sync_context(vpid);
3794         if (enable_ept) {
3795                 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3796                         return;
3797                 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
3798         }
3799 }
3800
3801 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
3802 {
3803         __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
3804 }
3805
3806 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
3807 {
3808         ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
3809
3810         vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
3811         vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
3812 }
3813
3814 static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
3815 {
3816         if (enable_ept && is_paging(vcpu))
3817                 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3818         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3819 }
3820
3821 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
3822 {
3823         ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
3824
3825         vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
3826         vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
3827 }
3828
3829 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
3830 {
3831         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3832
3833         if (!test_bit(VCPU_EXREG_PDPTR,
3834                       (unsigned long *)&vcpu->arch.regs_dirty))
3835                 return;
3836
3837         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3838                 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3839                 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3840                 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3841                 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3842         }
3843 }
3844
3845 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3846 {
3847         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3848
3849         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3850                 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3851                 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3852                 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3853                 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3854         }
3855
3856         __set_bit(VCPU_EXREG_PDPTR,
3857                   (unsigned long *)&vcpu->arch.regs_avail);
3858         __set_bit(VCPU_EXREG_PDPTR,
3859                   (unsigned long *)&vcpu->arch.regs_dirty);
3860 }
3861
3862 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
3863
3864 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
3865                                         unsigned long cr0,
3866                                         struct kvm_vcpu *vcpu)
3867 {
3868         if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3869                 vmx_decache_cr3(vcpu);
3870         if (!(cr0 & X86_CR0_PG)) {
3871                 /* From paging/starting to nonpaging */
3872                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
3873                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
3874                              (CPU_BASED_CR3_LOAD_EXITING |
3875                               CPU_BASED_CR3_STORE_EXITING));
3876                 vcpu->arch.cr0 = cr0;
3877                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3878         } else if (!is_paging(vcpu)) {
3879                 /* From nonpaging to paging */
3880                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
3881                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
3882                              ~(CPU_BASED_CR3_LOAD_EXITING |
3883                                CPU_BASED_CR3_STORE_EXITING));
3884                 vcpu->arch.cr0 = cr0;
3885                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3886         }
3887
3888         if (!(cr0 & X86_CR0_WP))
3889                 *hw_cr0 &= ~X86_CR0_WP;
3890 }
3891
3892 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3893 {
3894         struct vcpu_vmx *vmx = to_vmx(vcpu);
3895         unsigned long hw_cr0;
3896
3897         hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
3898         if (enable_unrestricted_guest)
3899                 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3900         else {
3901                 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
3902
3903                 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3904                         enter_pmode(vcpu);
3905
3906                 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3907                         enter_rmode(vcpu);
3908         }
3909
3910 #ifdef CONFIG_X86_64
3911         if (vcpu->arch.efer & EFER_LME) {
3912                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
3913                         enter_lmode(vcpu);
3914                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
3915                         exit_lmode(vcpu);
3916         }
3917 #endif
3918
3919         if (enable_ept)
3920                 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
3921
3922         if (!vcpu->fpu_active)
3923                 hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
3924
3925         vmcs_writel(CR0_READ_SHADOW, cr0);
3926         vmcs_writel(GUEST_CR0, hw_cr0);
3927         vcpu->arch.cr0 = cr0;
3928
3929         /* depends on vcpu->arch.cr0 to be set to a new value */
3930         vmx->emulation_required = emulation_required(vcpu);
3931 }
3932
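/*
 * Build an EPT pointer from the root HPA: bits 2:0 hold the EPT memory type
 * (WB here), bits 5:3 the page-walk length minus one (a 4-level walk), bit 6
 * the accessed/dirty enable flag, and the upper bits the physical address of
 * the PML4 table.
 */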
3933 static u64 construct_eptp(unsigned long root_hpa)
3934 {
3935         u64 eptp;
3936
3937         /* TODO: derive this value by reading the EPT capability MSR */
3938         eptp = VMX_EPT_DEFAULT_MT |
3939                 VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
3940         if (enable_ept_ad_bits)
3941                 eptp |= VMX_EPT_AD_ENABLE_BIT;
3942         eptp |= (root_hpa & PAGE_MASK);
3943
3944         return eptp;
3945 }
3946
3947 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
3948 {
3949         unsigned long guest_cr3;
3950         u64 eptp;
3951
3952         guest_cr3 = cr3;
3953         if (enable_ept) {
3954                 eptp = construct_eptp(cr3);
3955                 vmcs_write64(EPT_POINTER, eptp);
3956                 if (is_paging(vcpu) || is_guest_mode(vcpu))
3957                         guest_cr3 = kvm_read_cr3(vcpu);
3958                 else
3959                         guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr;
3960                 ept_load_pdptrs(vcpu);
3961         }
3962
3963         vmx_flush_tlb(vcpu);
3964         vmcs_writel(GUEST_CR3, guest_cr3);
3965 }
3966
3967 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3968 {
3969         /*
3970          * Pass through host's Machine Check Enable value to hw_cr4, which
3971          * is in force while we are in guest mode.  Do not let guests control
3972          * this bit, even if host CR4.MCE == 0.
3973          */
3974         unsigned long hw_cr4 =
3975                 (cr4_read_shadow() & X86_CR4_MCE) |
3976                 (cr4 & ~X86_CR4_MCE) |
3977                 (to_vmx(vcpu)->rmode.vm86_active ?
3978                  KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
3979
3980         if (cr4 & X86_CR4_VMXE) {
3981                 /*
3982                  * To use VMXON (and later other VMX instructions), a guest
3983                  * must first be able to turn on cr4.VMXE (see handle_vmon()).
3984                  * So basically the check on whether to allow nested VMX
3985                  * is here.
3986                  */
3987                 if (!nested_vmx_allowed(vcpu))
3988                         return 1;
3989         }
3990         if (to_vmx(vcpu)->nested.vmxon &&
3991             ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
3992                 return 1;
3993
3994         vcpu->arch.cr4 = cr4;
3995         if (enable_ept) {
3996                 if (!is_paging(vcpu)) {
3997                         hw_cr4 &= ~X86_CR4_PAE;
3998                         hw_cr4 |= X86_CR4_PSE;
3999                 } else if (!(cr4 & X86_CR4_PAE)) {
4000                         hw_cr4 &= ~X86_CR4_PAE;
4001                 }
4002         }
4003
4004         if (!enable_unrestricted_guest && !is_paging(vcpu))
4005                 /*
4006                  * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in
4007                  * hardware.  To emulate this behavior, SMEP/SMAP/PKU needs
4008                  * to be manually disabled when guest switches to non-paging
4009                  * mode.
4010                  *
4011                  * If !enable_unrestricted_guest, the CPU is always running
4012                  * with CR0.PG=1 and CR4 needs to be modified.
4013                  * If enable_unrestricted_guest, the CPU automatically
4014                  * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
4015                  */
4016                 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
4017
4018         vmcs_writel(CR4_READ_SHADOW, cr4);
4019         vmcs_writel(GUEST_CR4, hw_cr4);
4020         return 0;
4021 }
4022
4023 static void vmx_get_segment(struct kvm_vcpu *vcpu,
4024                             struct kvm_segment *var, int seg)
4025 {
4026         struct vcpu_vmx *vmx = to_vmx(vcpu);
4027         u32 ar;
4028
4029         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
4030                 *var = vmx->rmode.segs[seg];
4031                 if (seg == VCPU_SREG_TR
4032                     || var->selector == vmx_read_guest_seg_selector(vmx, seg))
4033                         return;
4034                 var->base = vmx_read_guest_seg_base(vmx, seg);
4035                 var->selector = vmx_read_guest_seg_selector(vmx, seg);
4036                 return;
4037         }
4038         var->base = vmx_read_guest_seg_base(vmx, seg);
4039         var->limit = vmx_read_guest_seg_limit(vmx, seg);
4040         var->selector = vmx_read_guest_seg_selector(vmx, seg);
4041         ar = vmx_read_guest_seg_ar(vmx, seg);
4042         var->unusable = (ar >> 16) & 1;
4043         var->type = ar & 15;
4044         var->s = (ar >> 4) & 1;
4045         var->dpl = (ar >> 5) & 3;
4046         /*
4047          * Some userspaces do not preserve the unusable property. Since a usable
4048          * segment has to be present according to the VMX spec, we can use the
4049          * present property to work around that userspace bug by making an
4050          * unusable segment always nonpresent. vmx_segment_access_rights()
4051          * already marks a nonpresent segment as unusable.
4052          */
4053         var->present = !var->unusable;
4054         var->avl = (ar >> 12) & 1;
4055         var->l = (ar >> 13) & 1;
4056         var->db = (ar >> 14) & 1;
4057         var->g = (ar >> 15) & 1;
4058 }
4059
4060 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
4061 {
4062         struct kvm_segment s;
4063
4064         if (to_vmx(vcpu)->rmode.vm86_active) {
4065                 vmx_get_segment(vcpu, &s, seg);
4066                 return s.base;
4067         }
4068         return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
4069 }
4070
4071 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
4072 {
4073         struct vcpu_vmx *vmx = to_vmx(vcpu);
4074
4075         if (unlikely(vmx->rmode.vm86_active))
4076                 return 0;
4077         else {
4078                 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
4079                 return VMX_AR_DPL(ar);
4080         }
4081 }
4082
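/*
 * Pack a struct kvm_segment into the VMCS access-rights (AR bytes) format:
 * type in bits 3:0, S in bit 4, DPL in bits 6:5, P in bit 7, AVL/L/D-B/G in
 * bits 12-15, with an unusable or non-present segment encoded as just the
 * "unusable" bit (1 << 16).
 */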
4083 static u32 vmx_segment_access_rights(struct kvm_segment *var)
4084 {
4085         u32 ar;
4086
4087         if (var->unusable || !var->present)
4088                 ar = 1 << 16;
4089         else {
4090                 ar = var->type & 15;
4091                 ar |= (var->s & 1) << 4;
4092                 ar |= (var->dpl & 3) << 5;
4093                 ar |= (var->present & 1) << 7;
4094                 ar |= (var->avl & 1) << 12;
4095                 ar |= (var->l & 1) << 13;
4096                 ar |= (var->db & 1) << 14;
4097                 ar |= (var->g & 1) << 15;
4098         }
4099
4100         return ar;
4101 }
4102
4103 static void vmx_set_segment(struct kvm_vcpu *vcpu,
4104                             struct kvm_segment *var, int seg)
4105 {
4106         struct vcpu_vmx *vmx = to_vmx(vcpu);
4107         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
4108
4109         vmx_segment_cache_clear(vmx);
4110
4111         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
4112                 vmx->rmode.segs[seg] = *var;
4113                 if (seg == VCPU_SREG_TR)
4114                         vmcs_write16(sf->selector, var->selector);
4115                 else if (var->s)
4116                         fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
4117                 goto out;
4118         }
4119
4120         vmcs_writel(sf->base, var->base);
4121         vmcs_write32(sf->limit, var->limit);
4122         vmcs_write16(sf->selector, var->selector);
4123
4124         /*
4125          * Fix the "Accessed" bit in the AR field of segment registers for
4126          * older qemu binaries.
4127          * The IA32 architecture specifies that at processor reset the
4128          * "Accessed" bit in the AR field of segment registers is 1, but qemu
4129          * sets it to 0 in its userland code. This causes an invalid guest
4130          * state vmexit when "unrestricted guest" mode is turned on.
4131          * A fix for this setup issue in cpu_reset has been pushed to the qemu
4132          * tree. Newer qemu binaries with that qemu fix will not need this
4133          * kvm hack.
4134          */
4135         if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
4136                 var->type |= 0x1; /* Accessed */
4137
4138         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
4139
4140 out:
4141         vmx->emulation_required = emulation_required(vcpu);
4142 }
4143
4144 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
4145 {
4146         u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
4147
4148         *db = (ar >> 14) & 1;
4149         *l = (ar >> 13) & 1;
4150 }
4151
4152 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
4153 {
4154         dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
4155         dt->address = vmcs_readl(GUEST_IDTR_BASE);
4156 }
4157
4158 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
4159 {
4160         vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
4161         vmcs_writel(GUEST_IDTR_BASE, dt->address);
4162 }
4163
4164 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
4165 {
4166         dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
4167         dt->address = vmcs_readl(GUEST_GDTR_BASE);
4168 }
4169
4170 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
4171 {
4172         vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
4173         vmcs_writel(GUEST_GDTR_BASE, dt->address);
4174 }
4175
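/*
 * The *_valid() helpers below roughly mirror the VM-entry checks on guest
 * segment registers: a segment passes the real-mode check iff its base is
 * selector << 4, its limit is 0xffff and its access rights are exactly 0xf3,
 * while the protected-mode helpers check type, S, present and DPL/RPL
 * consistency.
 */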
4176 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
4177 {
4178         struct kvm_segment var;
4179         u32 ar;
4180
4181         vmx_get_segment(vcpu, &var, seg);
4182         var.dpl = 0x3;
4183         if (seg == VCPU_SREG_CS)
4184                 var.type = 0x3;
4185         ar = vmx_segment_access_rights(&var);
4186
4187         if (var.base != (var.selector << 4))
4188                 return false;
4189         if (var.limit != 0xffff)
4190                 return false;
4191         if (ar != 0xf3)
4192                 return false;
4193
4194         return true;
4195 }
4196
4197 static bool code_segment_valid(struct kvm_vcpu *vcpu)
4198 {
4199         struct kvm_segment cs;
4200         unsigned int cs_rpl;
4201
4202         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
4203         cs_rpl = cs.selector & SEGMENT_RPL_MASK;
4204
4205         if (cs.unusable)
4206                 return false;
4207         if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
4208                 return false;
4209         if (!cs.s)
4210                 return false;
4211         if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
4212                 if (cs.dpl > cs_rpl)
4213                         return false;
4214         } else {
4215                 if (cs.dpl != cs_rpl)
4216                         return false;
4217         }
4218         if (!cs.present)
4219                 return false;
4220
4221         /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
4222         return true;
4223 }
4224
4225 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
4226 {
4227         struct kvm_segment ss;
4228         unsigned int ss_rpl;
4229
4230         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
4231         ss_rpl = ss.selector & SEGMENT_RPL_MASK;
4232
4233         if (ss.unusable)
4234                 return true;
4235         if (ss.type != 3 && ss.type != 7)
4236                 return false;
4237         if (!ss.s)
4238                 return false;
4239         if (ss.dpl != ss_rpl) /* DPL != RPL */
4240                 return false;
4241         if (!ss.present)
4242                 return false;
4243
4244         return true;
4245 }
4246
4247 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
4248 {
4249         struct kvm_segment var;
4250         unsigned int rpl;
4251
4252         vmx_get_segment(vcpu, &var, seg);
4253         rpl = var.selector & SEGMENT_RPL_MASK;
4254
4255         if (var.unusable)
4256                 return true;
4257         if (!var.s)
4258                 return false;
4259         if (!var.present)
4260                 return false;
4261         if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
4262                 if (var.dpl < rpl) /* DPL < RPL */
4263                         return false;
4264         }
4265
4266         /* TODO: Add other members to kvm_segment_field to allow checking for other access
4267          * rights flags
4268          */
4269         return true;
4270 }
4271
4272 static bool tr_valid(struct kvm_vcpu *vcpu)
4273 {
4274         struct kvm_segment tr;
4275
4276         vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
4277
4278         if (tr.unusable)
4279                 return false;
4280         if (tr.selector & SEGMENT_TI_MASK)      /* TI = 1 */
4281                 return false;
4282         if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
4283                 return false;
4284         if (!tr.present)
4285                 return false;
4286
4287         return true;
4288 }
4289
4290 static bool ldtr_valid(struct kvm_vcpu *vcpu)
4291 {
4292         struct kvm_segment ldtr;
4293
4294         vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
4295
4296         if (ldtr.unusable)
4297                 return true;
4298         if (ldtr.selector & SEGMENT_TI_MASK)    /* TI = 1 */
4299                 return false;
4300         if (ldtr.type != 2)
4301                 return false;
4302         if (!ldtr.present)
4303                 return false;
4304
4305         return true;
4306 }
4307
4308 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
4309 {
4310         struct kvm_segment cs, ss;
4311
4312         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
4313         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
4314
4315         return ((cs.selector & SEGMENT_RPL_MASK) ==
4316                  (ss.selector & SEGMENT_RPL_MASK));
4317 }
4318
4319 /*
4320  * Check if guest state is valid. Returns true if valid, false if
4321  * not.
4322  * We assume that registers are always usable.
4323  */
4324 static bool guest_state_valid(struct kvm_vcpu *vcpu)
4325 {
4326         if (enable_unrestricted_guest)
4327                 return true;
4328
4329         /* real mode guest state checks */
4330         if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
4331                 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
4332                         return false;
4333                 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
4334                         return false;
4335                 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
4336                         return false;
4337                 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
4338                         return false;
4339                 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
4340                         return false;
4341                 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
4342                         return false;
4343         } else {
4344         /* protected mode guest state checks */
4345                 if (!cs_ss_rpl_check(vcpu))
4346                         return false;
4347                 if (!code_segment_valid(vcpu))
4348                         return false;
4349                 if (!stack_segment_valid(vcpu))
4350                         return false;
4351                 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
4352                         return false;
4353                 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
4354                         return false;
4355                 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
4356                         return false;
4357                 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
4358                         return false;
4359                 if (!tr_valid(vcpu))
4360                         return false;
4361                 if (!ldtr_valid(vcpu))
4362                         return false;
4363         }
4364         /* TODO:
4365          * - Add checks on RIP
4366          * - Add checks on RFLAGS
4367          */
4368
4369         return true;
4370 }
4371
4372 static int init_rmode_tss(struct kvm *kvm)
4373 {
4374         gfn_t fn;
4375         u16 data = 0;
4376         int idx, r;
4377
4378         idx = srcu_read_lock(&kvm->srcu);
4379         fn = kvm->arch.tss_addr >> PAGE_SHIFT;
4380         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
4381         if (r < 0)
4382                 goto out;
4383         data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
4384         r = kvm_write_guest_page(kvm, fn++, &data,
4385                         TSS_IOPB_BASE_OFFSET, sizeof(u16));
4386         if (r < 0)
4387                 goto out;
4388         r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
4389         if (r < 0)
4390                 goto out;
4391         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
4392         if (r < 0)
4393                 goto out;
4394         data = ~0;
4395         r = kvm_write_guest_page(kvm, fn, &data,
4396                                  RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
4397                                  sizeof(u8));
4398 out:
4399         srcu_read_unlock(&kvm->srcu, idx);
4400         return r;
4401 }
4402
4403 static int init_rmode_identity_map(struct kvm *kvm)
4404 {
4405         int i, idx, r = 0;
4406         kvm_pfn_t identity_map_pfn;
4407         u32 tmp;
4408
4409         if (!enable_ept)
4410                 return 0;
4411
4412         /* Protect kvm->arch.ept_identity_pagetable_done. */
4413         mutex_lock(&kvm->slots_lock);
4414
4415         if (likely(kvm->arch.ept_identity_pagetable_done))
4416                 goto out2;
4417
4418         identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
4419
4420         r = alloc_identity_pagetable(kvm);
4421         if (r < 0)
4422                 goto out2;
4423
4424         idx = srcu_read_lock(&kvm->srcu);
4425         r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
4426         if (r < 0)
4427                 goto out;
4428         /* Set up identity-mapping pagetable for EPT in real mode */
4429         for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
4430                 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
4431                         _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
4432                 r = kvm_write_guest_page(kvm, identity_map_pfn,
4433                                 &tmp, i * sizeof(tmp), sizeof(tmp));
4434                 if (r < 0)
4435                         goto out;
4436         }
4437         kvm->arch.ept_identity_pagetable_done = true;
4438
4439 out:
4440         srcu_read_unlock(&kvm->srcu, idx);
4441
4442 out2:
4443         mutex_unlock(&kvm->slots_lock);
4444         return r;
4445 }
4446
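/*
 * Reset a guest segment to its real-mode power-up state: selector and
 * base 0, a 64 KiB limit, and a present, accessed, read/write data
 * segment (marked as a code segment for CS).
 */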
4447 static void seg_setup(int seg)
4448 {
4449         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
4450         unsigned int ar;
4451
4452         vmcs_write16(sf->selector, 0);
4453         vmcs_writel(sf->base, 0);
4454         vmcs_write32(sf->limit, 0xffff);
4455         ar = 0x93;
4456         if (seg == VCPU_SREG_CS)
4457                 ar |= 0x08; /* code segment */
4458
4459         vmcs_write32(sf->ar_bytes, ar);
4460 }
4461
4462 static int alloc_apic_access_page(struct kvm *kvm)
4463 {
4464         struct page *page;
4465         int r = 0;
4466
4467         mutex_lock(&kvm->slots_lock);
4468         if (kvm->arch.apic_access_page_done)
4469                 goto out;
4470         r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
4471                                     APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
4472         if (r)
4473                 goto out;
4474
4475         page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
4476         if (is_error_page(page)) {
4477                 r = -EFAULT;
4478                 goto out;
4479         }
4480
4481         /*
4482          * Do not pin the page in memory, so that memory hot-unplug
4483          * is able to migrate it.
4484          */
4485         put_page(page);
4486         kvm->arch.apic_access_page_done = true;
4487 out:
4488         mutex_unlock(&kvm->slots_lock);
4489         return r;
4490 }
4491
4492 static int alloc_identity_pagetable(struct kvm *kvm)
4493 {
4494         /* Called with kvm->slots_lock held. */
4495
4496         int r = 0;
4497
4498         BUG_ON(kvm->arch.ept_identity_pagetable_done);
4499
4500         r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
4501                                     kvm->arch.ept_identity_map_addr, PAGE_SIZE);
4502
4503         return r;
4504 }
4505
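/*
 * Reserve a VPID from the global bitmap.  Returns 0 when VPIDs are
 * disabled or the pool is exhausted; a vCPU with VPID 0 simply runs
 * without the VPID feature.
 */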
4506 static int allocate_vpid(void)
4507 {
4508         int vpid;
4509
4510         if (!enable_vpid)
4511                 return 0;
4512         spin_lock(&vmx_vpid_lock);
4513         vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
4514         if (vpid < VMX_NR_VPIDS)
4515                 __set_bit(vpid, vmx_vpid_bitmap);
4516         else
4517                 vpid = 0;
4518         spin_unlock(&vmx_vpid_lock);
4519         return vpid;
4520 }
4521
4522 static void free_vpid(int vpid)
4523 {
4524         if (!enable_vpid || vpid == 0)
4525                 return;
4526         spin_lock(&vmx_vpid_lock);
4527         __clear_bit(vpid, vmx_vpid_bitmap);
4528         spin_unlock(&vmx_vpid_lock);
4529 }
4530
4531 #define MSR_TYPE_R      1
4532 #define MSR_TYPE_W      2
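/*
 * The 4 KiB MSR bitmap is split into four 1 KiB regions:
 *   0x000: reads of MSRs 0x00000000-0x00001fff
 *   0x400: reads of MSRs 0xc0000000-0xc0001fff
 *   0x800: writes of MSRs 0x00000000-0x00001fff
 *   0xc00: writes of MSRs 0xc0000000-0xc0001fff
 * A clear bit means the corresponding access does not cause a VM exit.
 */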
4533 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
4534                                                 u32 msr, int type)
4535 {
4536         int f = sizeof(unsigned long);
4537
4538         if (!cpu_has_vmx_msr_bitmap())
4539                 return;
4540
4541         /*
4542          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
4543          * have the write-low and read-high bitmap offsets the wrong way round.
4544          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
4545          */
4546         if (msr <= 0x1fff) {
4547                 if (type & MSR_TYPE_R)
4548                         /* read-low */
4549                         __clear_bit(msr, msr_bitmap + 0x000 / f);
4550
4551                 if (type & MSR_TYPE_W)
4552                         /* write-low */
4553                         __clear_bit(msr, msr_bitmap + 0x800 / f);
4554
4555         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
4556                 msr &= 0x1fff;
4557                 if (type & MSR_TYPE_R)
4558                         /* read-high */
4559                         __clear_bit(msr, msr_bitmap + 0x400 / f);
4560
4561                 if (type & MSR_TYPE_W)
4562                         /* write-high */
4563                         __clear_bit(msr, msr_bitmap + 0xc00 / f);
4564
4565         }
4566 }
4567
4568 static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
4569                                                 u32 msr, int type)
4570 {
4571         int f = sizeof(unsigned long);
4572
4573         if (!cpu_has_vmx_msr_bitmap())
4574                 return;
4575
4576         /*
4577          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
4578          * have the write-low and read-high bitmap offsets the wrong way round.
4579          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
4580          */
4581         if (msr <= 0x1fff) {
4582                 if (type & MSR_TYPE_R)
4583                         /* read-low */
4584                         __set_bit(msr, msr_bitmap + 0x000 / f);
4585
4586                 if (type & MSR_TYPE_W)
4587                         /* write-low */
4588                         __set_bit(msr, msr_bitmap + 0x800 / f);
4589
4590         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
4591                 msr &= 0x1fff;
4592                 if (type & MSR_TYPE_R)
4593                         /* read-high */
4594                         __set_bit(msr, msr_bitmap + 0x400 / f);
4595
4596                 if (type & MSR_TYPE_W)
4597                         /* write-high */
4598                         __set_bit(msr, msr_bitmap + 0xc00 / f);
4599
4600         }
4601 }
4602
4603 /*
4604  * If an MSR is allowed by L0, we should check whether it is allowed by L1.
4605  * The corresponding bit will be cleared only if both L0 and L1 allow it.
4606  */
4607 static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
4608                                                unsigned long *msr_bitmap_nested,
4609                                                u32 msr, int type)
4610 {
4611         int f = sizeof(unsigned long);
4612
4613         if (!cpu_has_vmx_msr_bitmap()) {
4614                 WARN_ON(1);
4615                 return;
4616         }
4617
4618         /*
4619          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
4620          * have the write-low and read-high bitmap offsets the wrong way round.
4621          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
4622          */
4623         if (msr <= 0x1fff) {
4624                 if (type & MSR_TYPE_R &&
4625                    !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
4626                         /* read-low */
4627                         __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
4628
4629                 if (type & MSR_TYPE_W &&
4630                    !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
4631                         /* write-low */
4632                         __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
4633
4634         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
4635                 msr &= 0x1fff;
4636                 if (type & MSR_TYPE_R &&
4637                    !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
4638                         /* read-high */
4639                         __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
4640
4641                 if (type & MSR_TYPE_W &&
4642                    !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
4643                         /* write-high */
4644                         __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
4645
4646         }
4647 }
4648
4649 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
4650 {
4651         if (!longmode_only)
4652                 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
4653                                                 msr, MSR_TYPE_R | MSR_TYPE_W);
4654         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
4655                                                 msr, MSR_TYPE_R | MSR_TYPE_W);
4656 }
4657
4658 static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
4659 {
4660         __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4661                         msr, MSR_TYPE_R);
4662         __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4663                         msr, MSR_TYPE_R);
4664 }
4665
4666 static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
4667 {
4668         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4669                         msr, MSR_TYPE_R);
4670         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4671                         msr, MSR_TYPE_R);
4672 }
4673
4674 static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
4675 {
4676         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4677                         msr, MSR_TYPE_W);
4678         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4679                         msr, MSR_TYPE_W);
4680 }
4681
4682 static bool vmx_get_enable_apicv(void)
4683 {
4684         return enable_apicv;
4685 }
4686
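/*
 * If L1 posted an interrupt for L2 (pi_pending), fold the PIR bits of
 * L1's posted-interrupt descriptor into L2's virtual-APIC page and bump
 * the guest interrupt status (RVI) if a higher vector became pending.
 */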
4687 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
4688 {
4689         struct vcpu_vmx *vmx = to_vmx(vcpu);
4690         int max_irr;
4691         void *vapic_page;
4692         u16 status;
4693
4694         if (vmx->nested.pi_desc &&
4695             vmx->nested.pi_pending) {
4696                 vmx->nested.pi_pending = false;
4697                 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
4698                         return 0;
4699
4700                 max_irr = find_last_bit(
4701                         (unsigned long *)vmx->nested.pi_desc->pir, 256);
4702
4703                 if (max_irr == 256)
4704                         return 0;
4705
4706                 vapic_page = kmap(vmx->nested.virtual_apic_page);
4707                 if (!vapic_page) {
4708                         WARN_ON(1);
4709                         return -ENOMEM;
4710                 }
4711                 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
4712                 kunmap(vmx->nested.virtual_apic_page);
4713
4714                 status = vmcs_read16(GUEST_INTR_STATUS);
4715                 if ((u8)max_irr > ((u8)status & 0xff)) {
4716                         status &= ~0xff;
4717                         status |= (u8)max_irr;
4718                         vmcs_write16(GUEST_INTR_STATUS, status);
4719                 }
4720         }
4721         return 0;
4722 }
4723
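/*
 * Send the posted-interrupt notification IPI if the target vcpu is
 * currently executing in guest mode; return false so that the caller
 * falls back to kicking the vcpu otherwise.
 */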
4724 static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
4725 {
4726 #ifdef CONFIG_SMP
4727         if (vcpu->mode == IN_GUEST_MODE) {
4728                 struct vcpu_vmx *vmx = to_vmx(vcpu);
4729
4730                 /*
4731                  * Currently, we don't support urgent interrupts;
4732                  * all interrupts are recognized as non-urgent
4733                  * interrupts, so we cannot post interrupts when
4734                  * 'SN' is set.
4735                  *
4736                  * If the vcpu is in guest mode, it is running
4737                  * rather than scheduled out and waiting in the run
4738                  * queue. Being scheduled out is currently the only
4739                  * case in which 'SN' is set, so warn if 'SN' is
4740                  * set while the vcpu is in guest mode.
4741                  */
4742                 WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
4743
4744                 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
4745                                 POSTED_INTR_VECTOR);
4746                 return true;
4747         }
4748 #endif
4749         return false;
4750 }
4751
4752 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
4753                                                 int vector)
4754 {
4755         struct vcpu_vmx *vmx = to_vmx(vcpu);
4756
4757         if (is_guest_mode(vcpu) &&
4758             vector == vmx->nested.posted_intr_nv) {
4759                 /* the PIR and ON have been set by L1. */
4760                 kvm_vcpu_trigger_posted_interrupt(vcpu);
4761                 /*
4762                  * If a posted intr is not recognized by hardware,
4763                  * we will deliver it in the next vmentry.
4764                  */
4765                 vmx->nested.pi_pending = true;
4766                 kvm_make_request(KVM_REQ_EVENT, vcpu);
4767                 return 0;
4768         }
4769         return -1;
4770 }
4771 /*
4772  * Send an interrupt to a vcpu via the posted-interrupt mechanism:
4773  * 1. If the target vcpu is running (non-root mode), send the posted-interrupt
4774  * notification and the hardware will sync the PIR to the vIRR atomically.
4775  * 2. If the target vcpu isn't running (root mode), kick it so that it picks up
4776  * the interrupt from the PIR at its next vmentry.
4777  */
4778 static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
4779 {
4780         struct vcpu_vmx *vmx = to_vmx(vcpu);
4781         int r;
4782
4783         r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
4784         if (!r)
4785                 return;
4786
4787         if (pi_test_and_set_pir(vector, &vmx->pi_desc))
4788                 return;
4789
4790         r = pi_test_and_set_on(&vmx->pi_desc);
4791         kvm_make_request(KVM_REQ_EVENT, vcpu);
4792         if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
4793                 kvm_vcpu_kick(vcpu);
4794 }
4795
4796 static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
4797 {
4798         struct vcpu_vmx *vmx = to_vmx(vcpu);
4799
4800         if (!pi_test_and_clear_on(&vmx->pi_desc))
4801                 return;
4802
4803         kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
4804 }
4805
4806 /*
4807  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
4808  * will not change in the lifetime of the guest.
4809  * Note that host-state that does change is set elsewhere. E.g., host-state
4810  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
4811  */
4812 static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4813 {
4814         u32 low32, high32;
4815         unsigned long tmpl;
4816         struct desc_ptr dt;
4817         unsigned long cr4;
4818
4819         vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
4820         vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
4821
4822         /* Save the most likely value for this task's CR4 in the VMCS. */
4823         cr4 = cr4_read_shadow();
4824         vmcs_writel(HOST_CR4, cr4);                     /* 22.2.3, 22.2.5 */
4825         vmx->host_state.vmcs_host_cr4 = cr4;
4826
4827         vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
4828 #ifdef CONFIG_X86_64
4829         /*
4830          * Load null selectors, so we can avoid reloading them in
4831          * __vmx_load_host_state(), in case userspace uses the null selectors
4832          * too (the expected case).
4833          */
4834         vmcs_write16(HOST_DS_SELECTOR, 0);
4835         vmcs_write16(HOST_ES_SELECTOR, 0);
4836 #else
4837         vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4838         vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4839 #endif
4840         vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4841         vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
4842
4843         native_store_idt(&dt);
4844         vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
4845         vmx->host_idt_base = dt.address;
4846
4847         vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
4848
4849         rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
4850         vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
4851         rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
4852         vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
4853
4854         if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
4855                 rdmsr(MSR_IA32_CR_PAT, low32, high32);
4856                 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
4857         }
4858 }
4859
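/*
 * Decide which CR4 bits the guest may own (read and write without a VM
 * exit) and program the CR4 guest/host mask with the complement.
 */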
4860 static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4861 {
4862         vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
4863         if (enable_ept)
4864                 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
4865         if (is_guest_mode(&vmx->vcpu))
4866                 vmx->vcpu.arch.cr4_guest_owned_bits &=
4867                         ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
4868         vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
4869 }
4870
4871 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4872 {
4873         u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
4874
4875         if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4876                 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
4877         /* Enable the preemption timer dynamically */
4878         pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
4879         return pin_based_exec_ctrl;
4880 }
4881
4882 static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4883 {
4884         struct vcpu_vmx *vmx = to_vmx(vcpu);
4885
4886         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
4887         if (cpu_has_secondary_exec_ctrls()) {
4888                 if (kvm_vcpu_apicv_active(vcpu))
4889                         vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
4890                                       SECONDARY_EXEC_APIC_REGISTER_VIRT |
4891                                       SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4892                 else
4893                         vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
4894                                         SECONDARY_EXEC_APIC_REGISTER_VIRT |
4895                                         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4896         }
4897
4898         if (cpu_has_vmx_msr_bitmap())
4899                 vmx_set_msr_bitmap(vcpu);
4900 }
4901
4902 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
4903 {
4904         u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
4905
4906         if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4907                 exec_control &= ~CPU_BASED_MOV_DR_EXITING;
4908
4909         if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
4910                 exec_control &= ~CPU_BASED_TPR_SHADOW;
4911 #ifdef CONFIG_X86_64
4912                 exec_control |= CPU_BASED_CR8_STORE_EXITING |
4913                                 CPU_BASED_CR8_LOAD_EXITING;
4914 #endif
4915         }
4916         if (!enable_ept)
4917                 exec_control |= CPU_BASED_CR3_STORE_EXITING |
4918                                 CPU_BASED_CR3_LOAD_EXITING  |
4919                                 CPU_BASED_INVLPG_EXITING;
4920         return exec_control;
4921 }
4922
4923 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
4924 {
4925         u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
4926         if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu))
4927                 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
4928         if (vmx->vpid == 0)
4929                 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
4930         if (!enable_ept) {
4931                 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
4932                 enable_unrestricted_guest = 0;
4933                 /* Enabling INVPCID for non-EPT guests may cause a performance regression. */
4934                 exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
4935         }
4936         if (!enable_unrestricted_guest)
4937                 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
4938         if (!ple_gap)
4939                 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
4940         if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4941                 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
4942                                   SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4943         exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
4944         /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
4945            (handle_vmptrld).
4946            We can NOT enable shadow_vmcs here because we don't yet have
4947            a current VMCS12.
4948         */
4949         exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
4950
4951         if (!enable_pml)
4952                 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
4953
4954         /* Currently, we allow the L1 guest to run the pcommit instruction directly. */
4955         exec_control &= ~SECONDARY_EXEC_PCOMMIT;
4956
4957         return exec_control;
4958 }
4959
4960 static void ept_set_mmio_spte_mask(void)
4961 {
4962         /*
4963          * EPT Misconfigurations can be generated if the value of bits 2:0
4964          * of an EPT paging-structure entry is 110b (write/execute).
4965          * Also, magic bits (0x3ull << 62) are set to quickly identify an
4966          * MMIO spte.
4967          */
4968         kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
4969 }
4970
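/* No bits in IA32_XSS currently require intercepting XSAVES/XRSTORS. */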
4971 #define VMX_XSS_EXIT_BITMAP 0
4972 /*
4973  * Sets up the vmcs for emulated real mode.
4974  */
4975 static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
4976 {
4977 #ifdef CONFIG_X86_64
4978         unsigned long a;
4979 #endif
4980         int i;
4981
4982         /* I/O */
4983         vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
4984         vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
4985
4986         if (enable_shadow_vmcs) {
4987                 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
4988                 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
4989         }
4990         if (cpu_has_vmx_msr_bitmap())
4991                 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
4992
4993         vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
4994
4995         /* Control */
4996         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
4997         vmx->hv_deadline_tsc = -1;
4998
4999         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
5000
5001         if (cpu_has_secondary_exec_ctrls())
5002                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
5003                                 vmx_secondary_exec_control(vmx));
5004
5005         if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
5006                 vmcs_write64(EOI_EXIT_BITMAP0, 0);
5007                 vmcs_write64(EOI_EXIT_BITMAP1, 0);
5008                 vmcs_write64(EOI_EXIT_BITMAP2, 0);
5009                 vmcs_write64(EOI_EXIT_BITMAP3, 0);
5010
5011                 vmcs_write16(GUEST_INTR_STATUS, 0);
5012
5013                 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
5014                 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
5015         }
5016
5017         if (ple_gap) {
5018                 vmcs_write32(PLE_GAP, ple_gap);
5019                 vmx->ple_window = ple_window;
5020                 vmx->ple_window_dirty = true;
5021         }
5022
5023         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
5024         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
5025         vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
5026
5027         vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
5028         vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
5029         vmx_set_constant_host_state(vmx);
5030 #ifdef CONFIG_X86_64
5031         rdmsrl(MSR_FS_BASE, a);
5032         vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
5033         rdmsrl(MSR_GS_BASE, a);
5034         vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
5035 #else
5036         vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
5037         vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
5038 #endif
5039
5040         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
5041         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
5042         vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
5043         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
5044         vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
5045
5046         if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
5047                 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
5048
5049         for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
5050                 u32 index = vmx_msr_index[i];
5051                 u32 data_low, data_high;
5052                 int j = vmx->nmsrs;
5053
5054                 if (rdmsr_safe(index, &data_low, &data_high) < 0)
5055                         continue;
5056                 if (wrmsr_safe(index, data_low, data_high) < 0)
5057                         continue;
5058                 vmx->guest_msrs[j].index = i;
5059                 vmx->guest_msrs[j].data = 0;
5060                 vmx->guest_msrs[j].mask = -1ull;
5061                 ++vmx->nmsrs;
5062         }
5063
5064
5065         vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
5066
5067         /* 22.2.1, 20.8.1 */
5068         vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
5069
5070         vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
5071         set_cr4_guest_host_mask(vmx);
5072
5073         if (vmx_xsaves_supported())
5074                 vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
5075
5076         return 0;
5077 }
5078
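/*
 * Bring the vcpu to the architectural reset state: CS selector 0xf000
 * with base 0xffff0000, RIP 0xfff0, RFLAGS 0x02, and caching disabled
 * through CR0.CD/CR0.NW.
 */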
5079 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
5080 {
5081         struct vcpu_vmx *vmx = to_vmx(vcpu);
5082         struct msr_data apic_base_msr;
5083         u64 cr0;
5084
5085         vmx->rmode.vm86_active = 0;
5086
5087         vmx->soft_vnmi_blocked = 0;
5088
5089         vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
5090         kvm_set_cr8(vcpu, 0);
5091
5092         if (!init_event) {
5093                 apic_base_msr.data = APIC_DEFAULT_PHYS_BASE |
5094                                      MSR_IA32_APICBASE_ENABLE;
5095                 if (kvm_vcpu_is_reset_bsp(vcpu))
5096                         apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
5097                 apic_base_msr.host_initiated = true;
5098                 kvm_set_apic_base(vcpu, &apic_base_msr);
5099         }
5100
5101         vmx_segment_cache_clear(vmx);
5102
5103         seg_setup(VCPU_SREG_CS);
5104         vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
5105         vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);
5106
5107         seg_setup(VCPU_SREG_DS);
5108         seg_setup(VCPU_SREG_ES);
5109         seg_setup(VCPU_SREG_FS);
5110         seg_setup(VCPU_SREG_GS);
5111         seg_setup(VCPU_SREG_SS);
5112
5113         vmcs_write16(GUEST_TR_SELECTOR, 0);
5114         vmcs_writel(GUEST_TR_BASE, 0);
5115         vmcs_write32(GUEST_TR_LIMIT, 0xffff);
5116         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
5117
5118         vmcs_write16(GUEST_LDTR_SELECTOR, 0);
5119         vmcs_writel(GUEST_LDTR_BASE, 0);
5120         vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
5121         vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
5122
5123         if (!init_event) {
5124                 vmcs_write32(GUEST_SYSENTER_CS, 0);
5125                 vmcs_writel(GUEST_SYSENTER_ESP, 0);
5126                 vmcs_writel(GUEST_SYSENTER_EIP, 0);
5127                 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
5128         }
5129
5130         vmcs_writel(GUEST_RFLAGS, 0x02);
5131         kvm_rip_write(vcpu, 0xfff0);
5132
5133         vmcs_writel(GUEST_GDTR_BASE, 0);
5134         vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
5135
5136         vmcs_writel(GUEST_IDTR_BASE, 0);
5137         vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
5138
5139         vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
5140         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
5141         vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
5142
5143         setup_msrs(vmx);
5144
5145         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
5146
5147         if (cpu_has_vmx_tpr_shadow() && !init_event) {
5148                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
5149                 if (cpu_need_tpr_shadow(vcpu))
5150                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
5151                                      __pa(vcpu->arch.apic->regs));
5152                 vmcs_write32(TPR_THRESHOLD, 0);
5153         }
5154
5155         kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
5156
5157         if (kvm_vcpu_apicv_active(vcpu))
5158                 memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
5159
5160         if (vmx->vpid != 0)
5161                 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
5162
5163         cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
5164         vmx->vcpu.arch.cr0 = cr0;
5165         vmx_set_cr0(vcpu, cr0); /* enter rmode */
5166         vmx_set_cr4(vcpu, 0);
5167         vmx_set_efer(vcpu, 0);
5168         vmx_fpu_activate(vcpu);
5169         update_exception_bitmap(vcpu);
5170
5171         vpid_sync_context(vmx->vpid);
5172 }
5173
5174 /*
5175  * In nested virtualization, check if L1 asked to exit on external interrupts.
5176  * For most existing hypervisors, this will always return true.
5177  */
5178 static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
5179 {
5180         return get_vmcs12(vcpu)->pin_based_vm_exec_control &
5181                 PIN_BASED_EXT_INTR_MASK;
5182 }
5183
5184 /*
5185  * In nested virtualization, check if L1 has set
5186  * VM_EXIT_ACK_INTR_ON_EXIT
5187  */
5188 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
5189 {
5190         return get_vmcs12(vcpu)->vm_exit_controls &
5191                 VM_EXIT_ACK_INTR_ON_EXIT;
5192 }
5193
5194 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
5195 {
5196         return get_vmcs12(vcpu)->pin_based_vm_exec_control &
5197                 PIN_BASED_NMI_EXITING;
5198 }
5199
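/*
 * Ask the CPU to exit as soon as the guest can accept an interrupt, by
 * setting the interrupt-window exiting control.
 */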
5200 static void enable_irq_window(struct kvm_vcpu *vcpu)
5201 {
5202         u32 cpu_based_vm_exec_control;
5203
5204         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5205         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
5206         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
5207 }
5208
5209 static void enable_nmi_window(struct kvm_vcpu *vcpu)
5210 {
5211         u32 cpu_based_vm_exec_control;
5212
5213         if (!cpu_has_virtual_nmis() ||
5214             vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
5215                 enable_irq_window(vcpu);
5216                 return;
5217         }
5218
5219         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5220         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
5221         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
5222 }
5223
5224 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
5225 {
5226         struct vcpu_vmx *vmx = to_vmx(vcpu);
5227         uint32_t intr;
5228         int irq = vcpu->arch.interrupt.nr;
5229
5230         trace_kvm_inj_virq(irq);
5231
5232         ++vcpu->stat.irq_injections;
5233         if (vmx->rmode.vm86_active) {
5234                 int inc_eip = 0;
5235                 if (vcpu->arch.interrupt.soft)
5236                         inc_eip = vcpu->arch.event_exit_inst_len;
5237                 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
5238                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5239                 return;
5240         }
5241         intr = irq | INTR_INFO_VALID_MASK;
5242         if (vcpu->arch.interrupt.soft) {
5243                 intr |= INTR_TYPE_SOFT_INTR;
5244                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
5245                              vmx->vcpu.arch.event_exit_inst_len);
5246         } else
5247                 intr |= INTR_TYPE_EXT_INTR;
5248         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
5249 }
5250
5251 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
5252 {
5253         struct vcpu_vmx *vmx = to_vmx(vcpu);
5254
5255         if (is_guest_mode(vcpu))
5256                 return;
5257
5258         if (!cpu_has_virtual_nmis()) {
5259                 /*
5260                  * Tracking the NMI-blocked state in software is built upon
5261                  * finding the next open IRQ window. This, in turn, depends on
5262                  * well-behaving guests: They have to keep IRQs disabled at
5263                  * least as long as the NMI handler runs. Otherwise we may
5264                  * cause NMI nesting, maybe breaking the guest. But as this is
5265                  * highly unlikely, we can live with the residual risk.
5266                  */
5267                 vmx->soft_vnmi_blocked = 1;
5268                 vmx->vnmi_blocked_time = 0;
5269         }
5270
5271         ++vcpu->stat.nmi_injections;
5272         vmx->nmi_known_unmasked = false;
5273         if (vmx->rmode.vm86_active) {
5274                 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
5275                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5276                 return;
5277         }
5278         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
5279                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
5280 }
5281
5282 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
5283 {
5284         if (!cpu_has_virtual_nmis())
5285                 return to_vmx(vcpu)->soft_vnmi_blocked;
5286         if (to_vmx(vcpu)->nmi_known_unmasked)
5287                 return false;
5288         return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
5289 }
5290
5291 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5292 {
5293         struct vcpu_vmx *vmx = to_vmx(vcpu);
5294
5295         if (!cpu_has_virtual_nmis()) {
5296                 if (vmx->soft_vnmi_blocked != masked) {
5297                         vmx->soft_vnmi_blocked = masked;
5298                         vmx->vnmi_blocked_time = 0;
5299                 }
5300         } else {
5301                 vmx->nmi_known_unmasked = !masked;
5302                 if (masked)
5303                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5304                                       GUEST_INTR_STATE_NMI);
5305                 else
5306                         vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
5307                                         GUEST_INTR_STATE_NMI);
5308         }
5309 }
5310
5311 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
5312 {
5313         if (to_vmx(vcpu)->nested.nested_run_pending)
5314                 return 0;
5315
5316         if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
5317                 return 0;
5318
5319         return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5320                   (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
5321                    | GUEST_INTR_STATE_NMI));
5322 }
5323
5324 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
5325 {
5326         return (!to_vmx(vcpu)->nested.nested_run_pending &&
5327                 vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
5328                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5329                         (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
5330 }
5331
5332 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
5333 {
5334         int ret;
5335
5336         ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
5337                                     PAGE_SIZE * 3);
5338         if (ret)
5339                 return ret;
5340         kvm->arch.tss_addr = addr;
5341         return init_rmode_tss(kvm);
5342 }
5343
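/*
 * Exception vectors that, while the guest runs in vm86 real mode, are
 * handled by handle_rmode_exception() rather than reported to userspace.
 */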
5344 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
5345 {
5346         switch (vec) {
5347         case BP_VECTOR:
5348                 /*
5349                  * Update instruction length as we may reinject the exception
5350                  * from user space while in guest debugging mode.
5351                  */
5352                 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
5353                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5354                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5355                         return false;
5356                 /* fall through */
5357         case DB_VECTOR:
5358                 if (vcpu->guest_debug &
5359                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5360                         return false;
5361                 /* fall through */
5362         case DE_VECTOR:
5363         case OF_VECTOR:
5364         case BR_VECTOR:
5365         case UD_VECTOR:
5366         case DF_VECTOR:
5367         case SS_VECTOR:
5368         case GP_VECTOR:
5369         case MF_VECTOR:
5370                 return true;
5371         break;
5372         }
5373         return false;
5374 }
5375
5376 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
5377                                   int vec, u32 err_code)
5378 {
5379         /*
5380          * An instruction with the address-size override prefix (opcode 0x67)
5381          * causes a #SS fault with error code 0 in VM86 mode.
5382          */
5383         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
5384                 if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
5385                         if (vcpu->arch.halt_request) {
5386                                 vcpu->arch.halt_request = 0;
5387                                 return kvm_vcpu_halt(vcpu);
5388                         }
5389                         return 1;
5390                 }
5391                 return 0;
5392         }
5393
5394         /*
5395          * Forward all other exceptions that are valid in real mode.
5396          * FIXME: Breaks guest debugging in real mode, needs to be fixed with
5397          *        the required debugging infrastructure rework.
5398          */
5399         kvm_queue_exception(vcpu, vec);
5400         return 1;
5401 }
5402
5403 /*
5404  * Trigger machine check on the host. We assume all the MSRs are already set up
5405  * by the CPU and that we still run on the same CPU as the MCE occurred on.
5406  * We pass a fake environment to the machine check handler because we want
5407  * the guest to be always treated like user space, no matter what context
5408  * it used internally.
5409  */
5410 static void kvm_machine_check(void)
5411 {
5412 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
5413         struct pt_regs regs = {
5414                 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
5415                 .flags = X86_EFLAGS_IF,
5416         };
5417
5418         do_machine_check(&regs, 0);
5419 #endif
5420 }
5421
5422 static int handle_machine_check(struct kvm_vcpu *vcpu)
5423 {
5424         /* already handled by vcpu_run */
5425         return 1;
5426 }
5427
5428 static int handle_exception(struct kvm_vcpu *vcpu)
5429 {
5430         struct vcpu_vmx *vmx = to_vmx(vcpu);
5431         struct kvm_run *kvm_run = vcpu->run;
5432         u32 intr_info, ex_no, error_code;
5433         unsigned long cr2, rip, dr6;
5434         u32 vect_info;
5435         enum emulation_result er;
5436
5437         vect_info = vmx->idt_vectoring_info;
5438         intr_info = vmx->exit_intr_info;
5439
5440         if (is_machine_check(intr_info))
5441                 return handle_machine_check(vcpu);
5442
5443         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
5444                 return 1;  /* already handled by vmx_vcpu_run() */
5445
5446         if (is_no_device(intr_info)) {
5447                 vmx_fpu_activate(vcpu);
5448                 return 1;
5449         }
5450
5451         if (is_invalid_opcode(intr_info)) {
5452                 if (is_guest_mode(vcpu)) {
5453                         kvm_queue_exception(vcpu, UD_VECTOR);
5454                         return 1;
5455                 }
5456                 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
5457                 if (er != EMULATE_DONE)
5458                         kvm_queue_exception(vcpu, UD_VECTOR);
5459                 return 1;
5460         }
5461
5462         error_code = 0;
5463         if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
5464                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
5465
5466         /*
5467          * A #PF with PFEC.RSVD = 1 indicates the guest is accessing
5468          * MMIO; in that case it is better to report an internal error.
5469          * See the comments in vmx_handle_exit.
5470          */
5471         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
5472             !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
5473                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5474                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
5475                 vcpu->run->internal.ndata = 3;
5476                 vcpu->run->internal.data[0] = vect_info;
5477                 vcpu->run->internal.data[1] = intr_info;
5478                 vcpu->run->internal.data[2] = error_code;
5479                 return 0;
5480         }
5481
5482         if (is_page_fault(intr_info)) {
5483                 /* EPT won't cause page fault directly */
5484                 BUG_ON(enable_ept);
5485                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
5486                 trace_kvm_page_fault(cr2, error_code);
5487
5488                 if (kvm_event_needs_reinjection(vcpu))
5489                         kvm_mmu_unprotect_page_virt(vcpu, cr2);
5490                 return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
5491         }
5492
5493         ex_no = intr_info & INTR_INFO_VECTOR_MASK;
5494
5495         if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
5496                 return handle_rmode_exception(vcpu, ex_no, error_code);
5497
5498         switch (ex_no) {
5499         case AC_VECTOR:
5500                 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
5501                 return 1;
5502         case DB_VECTOR:
5503                 dr6 = vmcs_readl(EXIT_QUALIFICATION);
5504                 if (!(vcpu->guest_debug &
5505                       (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
5506                         vcpu->arch.dr6 &= ~15;
5507                         vcpu->arch.dr6 |= dr6 | DR6_RTM;
5508                         if (!(dr6 & ~DR6_RESERVED)) /* icebp */
5509                                 skip_emulated_instruction(vcpu);
5510
5511                         kvm_queue_exception(vcpu, DB_VECTOR);
5512                         return 1;
5513                 }
5514                 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
5515                 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
5516                 /* fall through */
5517         case BP_VECTOR:
5518                 /*
5519                  * Update instruction length as we may reinject #BP from
5520                  * user space while in guest debugging mode. Reading it for
5521                  * #DB as well causes no harm, it is not used in that case.
5522                  */
5523                 vmx->vcpu.arch.event_exit_inst_len =
5524                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5525                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
5526                 rip = kvm_rip_read(vcpu);
5527                 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
5528                 kvm_run->debug.arch.exception = ex_no;
5529                 break;
5530         default:
5531                 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
5532                 kvm_run->ex.exception = ex_no;
5533                 kvm_run->ex.error_code = error_code;
5534                 break;
5535         }
5536         return 0;
5537 }
5538
5539 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
5540 {
5541         ++vcpu->stat.irq_exits;
5542         return 1;
5543 }
5544
5545 static int handle_triple_fault(struct kvm_vcpu *vcpu)
5546 {
5547         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5548         return 0;
5549 }
5550
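/*
 * String and IN operations go through the emulator; a plain OUT takes
 * the fast path via kvm_fast_pio_out() using the port and size decoded
 * from the exit qualification.
 */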
5551 static int handle_io(struct kvm_vcpu *vcpu)
5552 {
5553         unsigned long exit_qualification;
5554         int size, in, string;
5555         unsigned port;
5556
5557         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5558         string = (exit_qualification & 16) != 0;
5559         in = (exit_qualification & 8) != 0;
5560
5561         ++vcpu->stat.io_exits;
5562
5563         if (string || in)
5564                 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
5565
5566         port = exit_qualification >> 16;
5567         size = (exit_qualification & 7) + 1;
5568         skip_emulated_instruction(vcpu);
5569
5570         return kvm_fast_pio_out(vcpu, size, port);
5571 }
5572
5573 static void
5574 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5575 {
5576         /*
5577          * Patch in the VMCALL instruction:
5578          */
5579         hypercall[0] = 0x0f;
5580         hypercall[1] = 0x01;
5581         hypercall[2] = 0xc1;
5582 }
5583
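/*
 * Check that a CR0 value written by a nested guest keeps the bits VMX
 * operation requires to be set; PE and PG may be clear only when the L2
 * guest runs with "unrestricted guest" enabled.
 */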
5584 static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
5585 {
5586         unsigned long always_on = VMXON_CR0_ALWAYSON;
5587         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5588
5589         if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
5590                 SECONDARY_EXEC_UNRESTRICTED_GUEST &&
5591             nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
5592                 always_on &= ~(X86_CR0_PE | X86_CR0_PG);
5593         return (val & always_on) == always_on;
5594 }
5595
5596 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
5597 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
5598 {
5599         if (is_guest_mode(vcpu)) {
5600                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5601                 unsigned long orig_val = val;
5602
5603                 /*
5604                  * We get here when L2 changed cr0 in a way that did not change
5605                  * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
5606                  * but did change L0 shadowed bits. So we first calculate the
5607                  * effective cr0 value that L1 would like to write into the
5608                  * hardware. It consists of the L2-owned bits from the new
5609                  * value combined with the L1-owned bits from L1's guest_cr0.
5610                  */
5611                 val = (val & ~vmcs12->cr0_guest_host_mask) |
5612                         (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
5613
5614                 if (!nested_cr0_valid(vcpu, val))
5615                         return 1;
5616
5617                 if (kvm_set_cr0(vcpu, val))
5618                         return 1;
5619                 vmcs_writel(CR0_READ_SHADOW, orig_val);
5620                 return 0;
5621         } else {
5622                 if (to_vmx(vcpu)->nested.vmxon &&
5623                     ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
5624                         return 1;
5625                 return kvm_set_cr0(vcpu, val);
5626         }
5627 }
5628
5629 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
5630 {
5631         if (is_guest_mode(vcpu)) {
5632                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5633                 unsigned long orig_val = val;
5634
5635                 /* analogously to handle_set_cr0 */
5636                 val = (val & ~vmcs12->cr4_guest_host_mask) |
5637                         (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
5638                 if (kvm_set_cr4(vcpu, val))
5639                         return 1;
5640                 vmcs_writel(CR4_READ_SHADOW, orig_val);
5641                 return 0;
5642         } else
5643                 return kvm_set_cr4(vcpu, val);
5644 }
5645
5646 /* called to set cr0 as appropriate for clts instruction exit. */
5647 static void handle_clts(struct kvm_vcpu *vcpu)
5648 {
5649         if (is_guest_mode(vcpu)) {
5650                 /*
5651                  * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
5652                  * but we did (!fpu_active). We need to keep GUEST_CR0.TS on,
5653                  * just pretend it's off (also in arch.cr0 for fpu_activate).
5654                  */
5655                 vmcs_writel(CR0_READ_SHADOW,
5656                         vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
5657                 vcpu->arch.cr0 &= ~X86_CR0_TS;
5658         } else
5659                 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
5660 }
5661
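/*
 * Dispatch a control-register exit: decode the access type, CR number
 * and GPR from the exit qualification and emulate mov to/from CR0, CR3,
 * CR4 and CR8, as well as CLTS and LMSW.
 */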
5662 static int handle_cr(struct kvm_vcpu *vcpu)
5663 {
5664         unsigned long exit_qualification, val;
5665         int cr;
5666         int reg;
5667         int err;
5668
5669         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5670         cr = exit_qualification & 15;
5671         reg = (exit_qualification >> 8) & 15;
5672         switch ((exit_qualification >> 4) & 3) {
5673         case 0: /* mov to cr */
5674                 val = kvm_register_readl(vcpu, reg);
5675                 trace_kvm_cr_write(cr, val);
5676                 switch (cr) {
5677                 case 0:
5678                         err = handle_set_cr0(vcpu, val);
5679                         kvm_complete_insn_gp(vcpu, err);
5680                         return 1;
5681                 case 3:
5682                         err = kvm_set_cr3(vcpu, val);
5683                         kvm_complete_insn_gp(vcpu, err);
5684                         return 1;
5685                 case 4:
5686                         err = handle_set_cr4(vcpu, val);
5687                         kvm_complete_insn_gp(vcpu, err);
5688                         return 1;
5689                 case 8: {
5690                                 u8 cr8_prev = kvm_get_cr8(vcpu);
5691                                 u8 cr8 = (u8)val;
5692                                 err = kvm_set_cr8(vcpu, cr8);
5693                                 kvm_complete_insn_gp(vcpu, err);
5694                                 if (lapic_in_kernel(vcpu))
5695                                         return 1;
5696                                 if (cr8_prev <= cr8)
5697                                         return 1;
5698                                 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
5699                                 return 0;
5700                         }
5701                 }
5702                 break;
5703         case 2: /* clts */
5704                 handle_clts(vcpu);
5705                 trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
5706                 skip_emulated_instruction(vcpu);
5707                 vmx_fpu_activate(vcpu);
5708                 return 1;
5709         case 1: /*mov from cr*/
5710                 switch (cr) {
5711                 case 3:
5712                         val = kvm_read_cr3(vcpu);
5713                         kvm_register_write(vcpu, reg, val);
5714                         trace_kvm_cr_read(cr, val);
5715                         skip_emulated_instruction(vcpu);
5716                         return 1;
5717                 case 8:
5718                         val = kvm_get_cr8(vcpu);
5719                         kvm_register_write(vcpu, reg, val);
5720                         trace_kvm_cr_read(cr, val);
5721                         skip_emulated_instruction(vcpu);
5722                         return 1;
5723                 }
5724                 break;
5725         case 3: /* lmsw */
5726                 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5727                 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
5728                 kvm_lmsw(vcpu, val);
5729
5730                 skip_emulated_instruction(vcpu);
5731                 return 1;
5732         default:
5733                 break;
5734         }
5735         vcpu->run->exit_reason = 0;
5736         vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
5737                (int)(exit_qualification >> 4) & 3, cr);
5738         return 0;
5739 }
5740
5741 static int handle_dr(struct kvm_vcpu *vcpu)
5742 {
5743         unsigned long exit_qualification;
5744         int dr, dr7, reg;
5745
5746         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5747         dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
5748
5749         /* First, if DR does not exist, trigger UD */
5750         if (!kvm_require_dr(vcpu, dr))
5751                 return 1;
5752
5753         /* Do not handle if CPL > 0; the access will trigger a #GP on re-entry */
5754         if (!kvm_require_cpl(vcpu, 0))
5755                 return 1;
5756         dr7 = vmcs_readl(GUEST_DR7);
5757         if (dr7 & DR7_GD) {
5758                 /*
5759                  * As the vm-exit takes precedence over the debug trap, we
5760                  * need to emulate the latter, either for the host or the
5761                  * guest debugging itself.
5762                  */
5763                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5764                         vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
5765                         vcpu->run->debug.arch.dr7 = dr7;
5766                         vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5767                         vcpu->run->debug.arch.exception = DB_VECTOR;
5768                         vcpu->run->exit_reason = KVM_EXIT_DEBUG;
5769                         return 0;
5770                 } else {
5771                         vcpu->arch.dr6 &= ~15;
5772                         vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
5773                         kvm_queue_exception(vcpu, DB_VECTOR);
5774                         return 1;
5775                 }
5776         }
5777
5778         if (vcpu->guest_debug == 0) {
5779                 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
5780                                 CPU_BASED_MOV_DR_EXITING);
5781
5782                 /*
5783                  * No more DR vmexits; force a reload of the debug registers
5784                  * and reenter on this instruction.  The next vmexit will
5785                  * retrieve the full state of the debug registers.
5786                  */
5787                 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5788                 return 1;
5789         }
5790
5791         reg = DEBUG_REG_ACCESS_REG(exit_qualification);
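        /* Exit qualification bit 4: 1 = MOV from DR (read), 0 = MOV to DR (write). */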
5792         if (exit_qualification & TYPE_MOV_FROM_DR) {
5793                 unsigned long val;
5794
5795                 if (kvm_get_dr(vcpu, dr, &val))
5796                         return 1;
5797                 kvm_register_write(vcpu, reg, val);
5798         } else
5799                 if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
5800                         return 1;
5801
5802         skip_emulated_instruction(vcpu);
5803         return 1;
5804 }
5805
5806 static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
5807 {
5808         return vcpu->arch.dr6;
5809 }
5810
5811 static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
5812 {
5813 }
5814
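/*
 * Called after the guest has been running with MOV-DR exits disabled
 * (KVM_DEBUGREG_WONT_EXIT): read back the debug registers the guest may
 * have modified and re-enable MOV-DR exiting.
 */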
5815 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
5816 {
5817         get_debugreg(vcpu->arch.db[0], 0);
5818         get_debugreg(vcpu->arch.db[1], 1);
5819         get_debugreg(vcpu->arch.db[2], 2);
5820         get_debugreg(vcpu->arch.db[3], 3);
5821         get_debugreg(vcpu->arch.dr6, 6);
5822         vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
5823
5824         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
5825         vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
5826 }
5827
5828 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
5829 {
5830         vmcs_writel(GUEST_DR7, val);
5831 }
5832
5833 static int handle_cpuid(struct kvm_vcpu *vcpu)
5834 {
5835         kvm_emulate_cpuid(vcpu);
5836         return 1;
5837 }
5838
5839 static int handle_rdmsr(struct kvm_vcpu *vcpu)
5840 {
5841         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
5842         struct msr_data msr_info;
5843
5844         msr_info.index = ecx;
5845         msr_info.host_initiated = false;
5846         if (vmx_get_msr(vcpu, &msr_info)) {
5847                 trace_kvm_msr_read_ex(ecx);
5848                 kvm_inject_gp(vcpu, 0);
5849                 return 1;
5850         }
5851
5852         trace_kvm_msr_read(ecx, msr_info.data);
5853
5854         /* FIXME: handling of bits 32:63 of rax, rdx */
5855         vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
5856         vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
5857         skip_emulated_instruction(vcpu);
5858         return 1;
5859 }
5860
5861 static int handle_wrmsr(struct kvm_vcpu *vcpu)
5862 {
5863         struct msr_data msr;
5864         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
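        /* WRMSR supplies the 64-bit value in EDX:EAX (RDX = high, RAX = low). */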
5865         u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
5866                 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
5867
5868         msr.data = data;
5869         msr.index = ecx;
5870         msr.host_initiated = false;
5871         if (kvm_set_msr(vcpu, &msr) != 0) {
5872                 trace_kvm_msr_write_ex(ecx, data);
5873                 kvm_inject_gp(vcpu, 0);
5874                 return 1;
5875         }
5876
5877         trace_kvm_msr_write(ecx, data);
5878         skip_emulated_instruction(vcpu);
5879         return 1;
5880 }
5881
5882 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
5883 {
5884         kvm_make_request(KVM_REQ_EVENT, vcpu);
5885         return 1;
5886 }
5887
5888 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
5889 {
5890         u32 cpu_based_vm_exec_control;
5891
5892         /* clear pending irq */
5893         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5894         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
5895         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
5896
5897         kvm_make_request(KVM_REQ_EVENT, vcpu);
5898
5899         ++vcpu->stat.irq_window_exits;
5900         return 1;
5901 }
5902
5903 static int handle_halt(struct kvm_vcpu *vcpu)
5904 {
5905         return kvm_emulate_halt(vcpu);
5906 }
5907
5908 static int handle_vmcall(struct kvm_vcpu *vcpu)
5909 {
5910         return kvm_emulate_hypercall(vcpu);
5911 }
5912
5913 static int handle_invd(struct kvm_vcpu *vcpu)
5914 {
5915         return emulate_instruction(vcpu, 0) == EMULATE_DONE;
5916 }
5917
5918 static int handle_invlpg(struct kvm_vcpu *vcpu)
5919 {
5920         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5921
5922         kvm_mmu_invlpg(vcpu, exit_qualification);
5923         skip_emulated_instruction(vcpu);
5924         return 1;
5925 }
5926
5927 static int handle_rdpmc(struct kvm_vcpu *vcpu)
5928 {
5929         int err;
5930
5931         err = kvm_rdpmc(vcpu);
5932         kvm_complete_insn_gp(vcpu, err);
5933
5934         return 1;
5935 }
5936
5937 static int handle_wbinvd(struct kvm_vcpu *vcpu)
5938 {
5939         kvm_emulate_wbinvd(vcpu);
5940         return 1;
5941 }
5942
5943 static int handle_xsetbv(struct kvm_vcpu *vcpu)
5944 {
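        /* XSETBV takes the XCR index in ECX and the new value in EDX:EAX. */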
5945         u64 new_bv = kvm_read_edx_eax(vcpu);
5946         u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
5947
5948         if (kvm_set_xcr(vcpu, index, new_bv) == 0)
5949                 skip_emulated_instruction(vcpu);
5950         return 1;
5951 }
5952
5953 static int handle_xsaves(struct kvm_vcpu *vcpu)
5954 {
5955         skip_emulated_instruction(vcpu);
5956         WARN(1, "this should never happen\n");
5957         return 1;
5958 }
5959
5960 static int handle_xrstors(struct kvm_vcpu *vcpu)
5961 {
5962         skip_emulated_instruction(vcpu);
5963         WARN(1, "this should never happen\n");
5964         return 1;
5965 }
5966
5967 static int handle_apic_access(struct kvm_vcpu *vcpu)
5968 {
5969         if (likely(fasteoi)) {
5970                 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5971                 int access_type, offset;
5972
5973                 access_type = exit_qualification & APIC_ACCESS_TYPE;
5974                 offset = exit_qualification & APIC_ACCESS_OFFSET;
5975                 /*
5976                  * A sane guest writes EOI with a plain MOV and the written
5977                  * value does not matter, so short-circuit that case here and
5978                  * avoid the heavy instruction-emulation path.
5979                  */
5980                 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
5981                     (offset == APIC_EOI)) {
5982                         kvm_lapic_set_eoi(vcpu);
5983                         skip_emulated_instruction(vcpu);
5984                         return 1;
5985                 }
5986         }
5987         return emulate_instruction(vcpu, 0) == EMULATE_DONE;
5988 }
5989
5990 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
5991 {
5992         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5993         int vector = exit_qualification & 0xff;
5994
5995         /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
5996         kvm_apic_set_eoi_accelerated(vcpu, vector);
5997         return 1;
5998 }
5999
6000 static int handle_apic_write(struct kvm_vcpu *vcpu)
6001 {
6002         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6003         u32 offset = exit_qualification & 0xfff;
6004
6005         /* APIC-write VM exit is trap-like and thus no need to adjust IP */
6006         kvm_apic_write_nodecode(vcpu, offset);
6007         return 1;
6008 }
6009
6010 static int handle_task_switch(struct kvm_vcpu *vcpu)
6011 {
6012         struct vcpu_vmx *vmx = to_vmx(vcpu);
6013         unsigned long exit_qualification;
6014         bool has_error_code = false;
6015         u32 error_code = 0;
6016         u16 tss_selector;
6017         int reason, type, idt_v, idt_index;
6018
6019         idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
6020         idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
6021         type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
6022
6023         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6024
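        /*
         * Exit qualification: bits 15:0 hold the new TSS selector and
         * bits 31:30 the task-switch source (0 = CALL, 1 = IRET, 2 = JMP,
         * 3 = task gate in the IDT).
         */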
6025         reason = (u32)exit_qualification >> 30;
6026         if (reason == TASK_SWITCH_GATE && idt_v) {
6027                 switch (type) {
6028                 case INTR_TYPE_NMI_INTR:
6029                         vcpu->arch.nmi_injected = false;
6030                         vmx_set_nmi_mask(vcpu, true);
6031                         break;
6032                 case INTR_TYPE_EXT_INTR:
6033                 case INTR_TYPE_SOFT_INTR:
6034                         kvm_clear_interrupt_queue(vcpu);
6035                         break;
6036                 case INTR_TYPE_HARD_EXCEPTION:
6037                         if (vmx->idt_vectoring_info &
6038                             VECTORING_INFO_DELIVER_CODE_MASK) {
6039                                 has_error_code = true;
6040                                 error_code =
6041                                         vmcs_read32(IDT_VECTORING_ERROR_CODE);
6042                         }
6043                         /* fall through */
6044                 case INTR_TYPE_SOFT_EXCEPTION:
6045                         kvm_clear_exception_queue(vcpu);
6046                         break;
6047                 default:
6048                         break;
6049                 }
6050         }
6051         tss_selector = exit_qualification;
6052
6053         if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
6054                        type != INTR_TYPE_EXT_INTR &&
6055                        type != INTR_TYPE_NMI_INTR))
6056                 skip_emulated_instruction(vcpu);
6057
6058         if (kvm_task_switch(vcpu, tss_selector,
6059                             type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
6060                             has_error_code, error_code) == EMULATE_FAIL) {
6061                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6062                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
6063                 vcpu->run->internal.ndata = 0;
6064                 return 0;
6065         }
6066
6067         /*
6068          * TODO: What about debug traps on tss switch?
6069          *       Are we supposed to inject them and update dr6?
6070          */
6071
6072         return 1;
6073 }
6074
6075 static int handle_ept_violation(struct kvm_vcpu *vcpu)
6076 {
6077         unsigned long exit_qualification;
6078         gpa_t gpa;
6079         u32 error_code;
6080         int gla_validity;
6081
6082         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6083
6084         gla_validity = (exit_qualification >> 7) & 0x3;
6085         if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
6086                 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
6087                 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
6088                         (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
6089                         vmcs_readl(GUEST_LINEAR_ADDRESS));
6090                 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
6091                         (long unsigned int)exit_qualification);
6092                 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
6093                 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
6094                 return 0;
6095         }
6096
6097         /*
6098          * If the EPT violation happened while executing IRET from an NMI,
6099          * the "blocked by NMI" bit has to be set before the next VM entry.
6100          * There are errata that may cause this bit to not be set by
6101          * hardware: AAK134, BY25.
6102          */
6103         if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
6104                         cpu_has_virtual_nmis() &&
6105                         (exit_qualification & INTR_INFO_UNBLOCK_NMI))
6106                 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
6107
6108         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
6109         trace_kvm_page_fault(gpa, exit_qualification);
6110
6111         /* Is it a write fault? */
6112         error_code = exit_qualification & PFERR_WRITE_MASK;
6113         /* Is it a fetch fault? */
6114         error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK;
6115         /* Is the EPT page-table entry present? */
6116         error_code |= (exit_qualification >> 3) & PFERR_PRESENT_MASK;
6117
6118         vcpu->arch.exit_qualification = exit_qualification;
6119
6120         return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
6121 }
6122
6123 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
6124 {
6125         int ret;
6126         gpa_t gpa;
6127
6128         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
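        /*
         * Fast MMIO path: a zero-length write on KVM_FAST_MMIO_BUS matches
         * wildcard ioeventfds, so the access can be completed without
         * decoding and emulating the instruction.
         */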
6129         if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
6130                 skip_emulated_instruction(vcpu);
6131                 trace_kvm_fast_mmio(gpa);
6132                 return 1;
6133         }
6134
6135         ret = handle_mmio_page_fault(vcpu, gpa, true);
6136         if (likely(ret == RET_MMIO_PF_EMULATE))
6137                 return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
6138                                               EMULATE_DONE;
6139
6140         if (unlikely(ret == RET_MMIO_PF_INVALID))
6141                 return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
6142
6143         if (unlikely(ret == RET_MMIO_PF_RETRY))
6144                 return 1;
6145
6146         /* It is a genuine EPT misconfig */
6147         WARN_ON(1);
6148
6149         vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
6150         vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
6151
6152         return 0;
6153 }
6154
6155 static int handle_nmi_window(struct kvm_vcpu *vcpu)
6156 {
6157         u32 cpu_based_vm_exec_control;
6158
6159         /* clear pending NMI */
6160         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
6161         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
6162         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
6163         ++vcpu->stat.nmi_window_exits;
6164         kvm_make_request(KVM_REQ_EVENT, vcpu);
6165
6166         return 1;
6167 }
6168
6169 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
6170 {
6171         struct vcpu_vmx *vmx = to_vmx(vcpu);
6172         enum emulation_result err = EMULATE_DONE;
6173         int ret = 1;
6174         u32 cpu_exec_ctrl;
6175         bool intr_window_requested;
6176         unsigned count = 130;
6177
6178         cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
6179         intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
6180
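        /*
         * Keep emulating while the guest state remains invalid, but bound
         * the number of instructions handled per exit so pending events,
         * signals and rescheduling are serviced regularly.
         */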
6181         while (vmx->emulation_required && count-- != 0) {
6182                 if (intr_window_requested && vmx_interrupt_allowed(vcpu))
6183                         return handle_interrupt_window(&vmx->vcpu);
6184
6185                 if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
6186                         return 1;
6187
6188                 err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
6189
6190                 if (err == EMULATE_USER_EXIT) {
6191                         ++vcpu->stat.mmio_exits;
6192                         ret = 0;
6193                         goto out;
6194                 }
6195
6196                 if (err != EMULATE_DONE) {
6197                         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6198                         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
6199                         vcpu->run->internal.ndata = 0;
6200                         return 0;
6201                 }
6202
6203                 if (vcpu->arch.halt_request) {
6204                         vcpu->arch.halt_request = 0;
6205                         ret = kvm_vcpu_halt(vcpu);
6206                         goto out;
6207                 }
6208
6209                 if (signal_pending(current))
6210                         goto out;
6211                 if (need_resched())
6212                         schedule();
6213         }
6214
6215 out:
6216         return ret;
6217 }
6218
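/*
 * Grow the PLE window: ple_window_grow acts as a multiplier when it is
 * smaller than ple_window and as an increment otherwise; values below 1
 * disable growing.  The old value is clamped to ple_window_actual_max first.
 */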
6219 static int __grow_ple_window(int val)
6220 {
6221         if (ple_window_grow < 1)
6222                 return ple_window;
6223
6224         val = min(val, ple_window_actual_max);
6225
6226         if (ple_window_grow < ple_window)
6227                 val *= ple_window_grow;
6228         else
6229                 val += ple_window_grow;
6230
6231         return val;
6232 }
6233
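/*
 * Shrink the PLE window: the modifier divides the window when it is smaller
 * than ple_window and is subtracted otherwise; the result never drops below
 * the given minimum.
 */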
6234 static int __shrink_ple_window(int val, int modifier, int minimum)
6235 {
6236         if (modifier < 1)
6237                 return ple_window;
6238
6239         if (modifier < ple_window)
6240                 val /= modifier;
6241         else
6242                 val -= modifier;
6243
6244         return max(val, minimum);
6245 }
6246
6247 static void grow_ple_window(struct kvm_vcpu *vcpu)
6248 {
6249         struct vcpu_vmx *vmx = to_vmx(vcpu);
6250         int old = vmx->ple_window;
6251
6252         vmx->ple_window = __grow_ple_window(old);
6253
6254         if (vmx->ple_window != old)
6255                 vmx->ple_window_dirty = true;
6256
6257         trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old);
6258 }
6259
6260 static void shrink_ple_window(struct kvm_vcpu *vcpu)
6261 {
6262         struct vcpu_vmx *vmx = to_vmx(vcpu);
6263         int old = vmx->ple_window;
6264
6265         vmx->ple_window = __shrink_ple_window(old,
6266                                               ple_window_shrink, ple_window);
6267
6268         if (vmx->ple_window != old)
6269                 vmx->ple_window_dirty = true;
6270
6271         trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old);
6272 }
6273
6274 /*
6275  * ple_window_actual_max is computed to be one grow_ple_window() below
6276  * ple_window_max. (See __grow_ple_window for the reason.)
6277  * This prevents overflows, because ple_window_max is int.
6278  * ple_window_max is effectively rounded down to a multiple of ple_window_grow
6279  * in this process.
6280  * ple_window_max is also prevented from setting vmx->ple_window < ple_window.
6281  */
6282 static void update_ple_window_actual_max(void)
6283 {
6284         ple_window_actual_max =
6285                         __shrink_ple_window(max(ple_window_max, ple_window),
6286                                             ple_window_grow, INT_MIN);
6287 }
6288
6289 /*
6290  * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
6291  */
6292 static void wakeup_handler(void)
6293 {
6294         struct kvm_vcpu *vcpu;
6295         int cpu = smp_processor_id();
6296
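        /*
         * Kick every vCPU blocked on this CPU whose posted-interrupt
         * descriptor has the ON (outstanding notification) bit set.
         */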
6297         spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
6298         list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
6299                         blocked_vcpu_list) {
6300                 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
6301
6302                 if (pi_test_on(pi_desc) == 1)
6303                         kvm_vcpu_kick(vcpu);
6304         }
6305         spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
6306 }
6307
6308 static __init int hardware_setup(void)
6309 {
6310         int r = -ENOMEM, i, msr;
6311
6312         rdmsrl_safe(MSR_EFER, &host_efer);
6313
6314         for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
6315                 kvm_define_shared_msr(i, vmx_msr_index[i]);
6316
6317         vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
6318         if (!vmx_io_bitmap_a)
6319                 return r;
6320
6321         vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
6322         if (!vmx_io_bitmap_b)
6323                 goto out;
6324
6325         vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
6326         if (!vmx_msr_bitmap_legacy)
6327                 goto out1;
6328
6329         vmx_msr_bitmap_legacy_x2apic =
6330                                 (unsigned long *)__get_free_page(GFP_KERNEL);
6331         if (!vmx_msr_bitmap_legacy_x2apic)
6332                 goto out2;
6333
6334         vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
6335         if (!vmx_msr_bitmap_longmode)
6336                 goto out3;
6337
6338         vmx_msr_bitmap_longmode_x2apic =
6339                                 (unsigned long *)__get_free_page(GFP_KERNEL);
6340         if (!vmx_msr_bitmap_longmode_x2apic)
6341                 goto out4;
6342
6343         if (nested) {
6344                 vmx_msr_bitmap_nested =
6345                         (unsigned long *)__get_free_page(GFP_KERNEL);
6346                 if (!vmx_msr_bitmap_nested)
6347                         goto out5;
6348         }
6349
6350         vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
6351         if (!vmx_vmread_bitmap)
6352                 goto out6;
6353
6354         vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
6355         if (!vmx_vmwrite_bitmap)
6356                 goto out7;
6357
6358         memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
6359         memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
6360
6361         /*
6362          * Allow direct access to the PC debug port (it is often used for I/O
6363          * delays, but the vmexits simply slow things down).
6364          */
6365         memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
6366         clear_bit(0x80, vmx_io_bitmap_a);
6367
6368         memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
6369
6370         memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
6371         memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
6372         if (nested)
6373                 memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
6374
6375         if (setup_vmcs_config(&vmcs_config) < 0) {
6376                 r = -EIO;
6377                 goto out8;
6378         }
6379
6380         if (boot_cpu_has(X86_FEATURE_NX))
6381                 kvm_enable_efer_bits(EFER_NX);
6382
6383         if (!cpu_has_vmx_vpid())
6384                 enable_vpid = 0;
6385         if (!cpu_has_vmx_shadow_vmcs())
6386                 enable_shadow_vmcs = 0;
6387         if (enable_shadow_vmcs)
6388                 init_vmcs_shadow_fields();
6389
6390         if (!cpu_has_vmx_ept() ||
6391             !cpu_has_vmx_ept_4levels()) {
6392                 enable_ept = 0;
6393                 enable_unrestricted_guest = 0;
6394                 enable_ept_ad_bits = 0;
6395         }
6396
6397         if (!cpu_has_vmx_ept_ad_bits())
6398                 enable_ept_ad_bits = 0;
6399
6400         if (!cpu_has_vmx_unrestricted_guest())
6401                 enable_unrestricted_guest = 0;
6402
6403         if (!cpu_has_vmx_flexpriority())
6404                 flexpriority_enabled = 0;
6405
6406         /*
6407          * set_apic_access_page_addr() is used to reload apic access
6408          * page upon invalidation.  No need to do anything if not
6409          * using the APIC_ACCESS_ADDR VMCS field.
6410          */
6411         if (!flexpriority_enabled)
6412                 kvm_x86_ops->set_apic_access_page_addr = NULL;
6413
6414         if (!cpu_has_vmx_tpr_shadow())
6415                 kvm_x86_ops->update_cr8_intercept = NULL;
6416
6417         if (enable_ept && !cpu_has_vmx_ept_2m_page())
6418                 kvm_disable_largepages();
6419
6420         if (!cpu_has_vmx_ple())
6421                 ple_gap = 0;
6422
6423         if (!cpu_has_vmx_apicv())
6424                 enable_apicv = 0;
6425
6426         if (cpu_has_vmx_tsc_scaling()) {
6427                 kvm_has_tsc_control = true;
6428                 kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
6429                 kvm_tsc_scaling_ratio_frac_bits = 48;
6430         }
6431
6432         vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
6433         vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
6434         vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
6435         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
6436         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
6437         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
6438         vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
6439
6440         memcpy(vmx_msr_bitmap_legacy_x2apic,
6441                         vmx_msr_bitmap_legacy, PAGE_SIZE);
6442         memcpy(vmx_msr_bitmap_longmode_x2apic,
6443                         vmx_msr_bitmap_longmode, PAGE_SIZE);
6444
6445         set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
6446
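        /*
         * MSRs 0x800 - 0x8ff map the x2APIC register space.  Let reads go
         * straight through by default; interception is re-enabled below only
         * for the registers KVM must still see (APIC ID, TMCCT).
         */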
6447         for (msr = 0x800; msr <= 0x8ff; msr++)
6448                 vmx_disable_intercept_msr_read_x2apic(msr);
6449
6450         /* According to the SDM, in x2apic mode the whole ID register is used,
6451          * but KVM only uses the highest eight bits, so keep intercepting it. */
6452         vmx_enable_intercept_msr_read_x2apic(0x802);
6453         /* TMCCT */
6454         vmx_enable_intercept_msr_read_x2apic(0x839);
6455         /* TPR */
6456         vmx_disable_intercept_msr_write_x2apic(0x808);
6457         /* EOI */
6458         vmx_disable_intercept_msr_write_x2apic(0x80b);
6459         /* SELF-IPI */
6460         vmx_disable_intercept_msr_write_x2apic(0x83f);
6461
6462         if (enable_ept) {
6463                 kvm_mmu_set_mask_ptes(0ull,
6464                         (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
6465                         (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
6466                         0ull, VMX_EPT_EXECUTABLE_MASK);
6467                 ept_set_mmio_spte_mask();
6468                 kvm_enable_tdp();
6469         } else
6470                 kvm_disable_tdp();
6471
6472         update_ple_window_actual_max();
6473
6474         /*
6475          * Only enable PML when hardware supports PML feature, and both EPT
6476          * and EPT A/D bit features are enabled -- PML depends on them to work.
6477          */
6478         if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
6479                 enable_pml = 0;
6480
6481         if (!enable_pml) {
6482                 kvm_x86_ops->slot_enable_log_dirty = NULL;
6483                 kvm_x86_ops->slot_disable_log_dirty = NULL;
6484                 kvm_x86_ops->flush_log_dirty = NULL;
6485                 kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
6486         }
6487
6488         if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
6489                 u64 vmx_msr;
6490
6491                 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
6492                 cpu_preemption_timer_multi =
6493                          vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
6494         } else {
6495                 kvm_x86_ops->set_hv_timer = NULL;
6496                 kvm_x86_ops->cancel_hv_timer = NULL;
6497         }
6498
6499         kvm_set_posted_intr_wakeup_handler(wakeup_handler);
6500
6501         kvm_mce_cap_supported |= MCG_LMCE_P;
6502
6503         return alloc_kvm_area();
6504
6505 out8:
6506         free_page((unsigned long)vmx_vmwrite_bitmap);
6507 out7:
6508         free_page((unsigned long)vmx_vmread_bitmap);
6509 out6:
6510         if (nested)
6511                 free_page((unsigned long)vmx_msr_bitmap_nested);
6512 out5:
6513         free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
6514 out4:
6515         free_page((unsigned long)vmx_msr_bitmap_longmode);
6516 out3:
6517         free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
6518 out2:
6519         free_page((unsigned long)vmx_msr_bitmap_legacy);
6520 out1:
6521         free_page((unsigned long)vmx_io_bitmap_b);
6522 out:
6523         free_page((unsigned long)vmx_io_bitmap_a);
6524
6525         return r;
6526 }
6527
6528 static __exit void hardware_unsetup(void)
6529 {
6530         free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
6531         free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
6532         free_page((unsigned long)vmx_msr_bitmap_legacy);
6533         free_page((unsigned long)vmx_msr_bitmap_longmode);
6534         free_page((unsigned long)vmx_io_bitmap_b);
6535         free_page((unsigned long)vmx_io_bitmap_a);
6536         free_page((unsigned long)vmx_vmwrite_bitmap);
6537         free_page((unsigned long)vmx_vmread_bitmap);
6538         if (nested)
6539                 free_page((unsigned long)vmx_msr_bitmap_nested);
6540
6541         free_kvm_area();
6542 }
6543
6544 /*
6545  * Indicate that a vcpu is busy-waiting on a spinlock. We do not enable plain
6546  * PAUSE exiting, so we only get here on CPUs with PAUSE-loop exiting (PLE).
6547  */
6548 static int handle_pause(struct kvm_vcpu *vcpu)
6549 {
6550         if (ple_gap)
6551                 grow_ple_window(vcpu);
6552
6553         skip_emulated_instruction(vcpu);
6554         kvm_vcpu_on_spin(vcpu);
6555
6556         return 1;
6557 }
6558
6559 static int handle_nop(struct kvm_vcpu *vcpu)
6560 {
6561         skip_emulated_instruction(vcpu);
6562         return 1;
6563 }
6564
6565 static int handle_mwait(struct kvm_vcpu *vcpu)
6566 {
6567         printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
6568         return handle_nop(vcpu);
6569 }
6570
6571 static int handle_monitor_trap(struct kvm_vcpu *vcpu)
6572 {
6573         return 1;
6574 }
6575
6576 static int handle_monitor(struct kvm_vcpu *vcpu)
6577 {
6578         printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
6579         return handle_nop(vcpu);
6580 }
6581
6582 /*
6583  * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
6584  * We could reuse a single VMCS for all the L2 guests, but we also want the
6585  * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
6586  * allows keeping them loaded on the processor, and in the future will allow
6587  * optimizations where prepare_vmcs02 doesn't need to set all the fields on
6588  * every entry if they never change.
6589  * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
6590  * (>=0) with a vmcs02 for each recently loaded vmcs12, most recent first.
6591  *
6592  * The following functions allocate and free a vmcs02 in this pool.
6593  */
6594
6595 /* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
6596 static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
6597 {
6598         struct vmcs02_list *item;
6599         list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
6600                 if (item->vmptr == vmx->nested.current_vmptr) {
6601                         list_move(&item->list, &vmx->nested.vmcs02_pool);
6602                         return &item->vmcs02;
6603                 }
6604
6605         if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
6606                 /* Recycle the least recently used VMCS. */
6607                 item = list_last_entry(&vmx->nested.vmcs02_pool,
6608                                        struct vmcs02_list, list);
6609                 item->vmptr = vmx->nested.current_vmptr;
6610                 list_move(&item->list, &vmx->nested.vmcs02_pool);
6611                 return &item->vmcs02;
6612         }
6613
6614         /* Create a new VMCS */
6615         item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
6616         if (!item)
6617                 return NULL;
6618         item->vmcs02.vmcs = alloc_vmcs();
6619         if (!item->vmcs02.vmcs) {
6620                 kfree(item);
6621                 return NULL;
6622         }
6623         loaded_vmcs_init(&item->vmcs02);
6624         item->vmptr = vmx->nested.current_vmptr;
6625         list_add(&(item->list), &(vmx->nested.vmcs02_pool));
6626         vmx->nested.vmcs02_num++;
6627         return &item->vmcs02;
6628 }
6629
6630 /* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
6631 static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
6632 {
6633         struct vmcs02_list *item;
6634         list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
6635                 if (item->vmptr == vmptr) {
6636                         free_loaded_vmcs(&item->vmcs02);
6637                         list_del(&item->list);
6638                         kfree(item);
6639                         vmx->nested.vmcs02_num--;
6640                         return;
6641                 }
6642 }
6643
6644 /*
6645  * Free all VMCSs saved for this vcpu, except the one pointed by
6646  * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
6647  * must be &vmx->vmcs01.
6648  */
6649 static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
6650 {
6651         struct vmcs02_list *item, *n;
6652
6653         WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
6654         list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
6655                 /*
6656                  * Something will leak if the above WARN triggers.  Better than
6657                  * a use-after-free.
6658                  */
6659                 if (vmx->loaded_vmcs == &item->vmcs02)
6660                         continue;
6661
6662                 free_loaded_vmcs(&item->vmcs02);
6663                 list_del(&item->list);
6664                 kfree(item);
6665                 vmx->nested.vmcs02_num--;
6666         }
6667 }
6668
6669 /*
6670  * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
6671  * set the success or error code of an emulated VMX instruction, as specified
6672  * by Vol 2B, VMX Instruction Reference, "Conventions".
6673  */
6674 static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
6675 {
6676         vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
6677                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
6678                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
6679 }
6680
6681 static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
6682 {
6683         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
6684                         & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
6685                             X86_EFLAGS_SF | X86_EFLAGS_OF))
6686                         | X86_EFLAGS_CF);
6687 }
6688
6689 static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
6690                                         u32 vm_instruction_error)
6691 {
6692         if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
6693                 /*
6694                  * failValid writes the error number to the current VMCS, which
6695                  * can't be done there isn't a current VMCS.
6696                  * can't be done if there isn't a current VMCS.
6697                 nested_vmx_failInvalid(vcpu);
6698                 return;
6699         }
6700         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
6701                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
6702                             X86_EFLAGS_SF | X86_EFLAGS_OF))
6703                         | X86_EFLAGS_ZF);
6704         get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
6705         /*
6706          * We don't need to force a shadow sync because
6707          * VM_INSTRUCTION_ERROR is not shadowed
6708          */
6709 }
6710
6711 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
6712 {
6713         /* TODO: not to reset guest simply here. */
6714         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
6715         pr_warn("kvm: nested vmx abort, indicator %d\n", indicator);
6716 }
6717
6718 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
6719 {
6720         struct vcpu_vmx *vmx =
6721                 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
6722
6723         vmx->nested.preemption_timer_expired = true;
6724         kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
6725         kvm_vcpu_kick(&vmx->vcpu);
6726
6727         return HRTIMER_NORESTART;
6728 }
6729
6730 /*
6731  * Decode the memory-address operand of a vmx instruction, as recorded on an
6732  * exit caused by such an instruction (run by a guest hypervisor).
6733  * On success, returns 0. When the operand is invalid, returns 1 and throws
6734  * On success, returns 0. When the operand is invalid, returns 1 and injects
6735  * a #UD or #GP exception.
6736 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
6737                                  unsigned long exit_qualification,
6738                                  u32 vmx_instruction_info, bool wr, gva_t *ret)
6739 {
6740         gva_t off;
6741         bool exn;
6742         struct kvm_segment s;
6743
6744         /*
6745          * According to Vol. 3B, "Information for VM Exits Due to Instruction
6746          * Execution", on an exit, vmx_instruction_info holds most of the
6747          * addressing components of the operand. Only the displacement part
6748          * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
6749          * For how an actual address is calculated from all these components,
6750          * refer to Vol. 1, "Operand Addressing".
6751          */
6752         int  scaling = vmx_instruction_info & 3;
6753         int  addr_size = (vmx_instruction_info >> 7) & 7;
6754         bool is_reg = vmx_instruction_info & (1u << 10);
6755         int  seg_reg = (vmx_instruction_info >> 15) & 7;
6756         int  index_reg = (vmx_instruction_info >> 18) & 0xf;
6757         bool index_is_valid = !(vmx_instruction_info & (1u << 22));
6758         int  base_reg       = (vmx_instruction_info >> 23) & 0xf;
6759         bool base_is_valid  = !(vmx_instruction_info & (1u << 27));
6760
6761         if (is_reg) {
6762                 kvm_queue_exception(vcpu, UD_VECTOR);
6763                 return 1;
6764         }
6765
6766         /* Addr = segment_base + offset */
6767         /* offset = base + [index * scale] + displacement */
6768         off = exit_qualification; /* holds the displacement */
6769         if (base_is_valid)
6770                 off += kvm_register_read(vcpu, base_reg);
6771         if (index_is_valid)
6772                 off += kvm_register_read(vcpu, index_reg)<<scaling;
6773         vmx_get_segment(vcpu, &s, seg_reg);
6774         *ret = s.base + off;
6775
6776         if (addr_size == 1) /* 32 bit */
6777                 *ret &= 0xffffffff;
6778
6779         /* Checks for #GP/#SS exceptions. */
6780         exn = false;
6781         if (is_protmode(vcpu)) {
6782                 /* Protected mode: apply checks for segment validity in the
6783                  * following order:
6784                  * - segment type check (#GP(0) may be thrown)
6785                  * - usability check (#GP(0)/#SS(0))
6786                  * - limit check (#GP(0)/#SS(0))
6787                  */
6788                 if (wr)
6789                         /* #GP(0) if the destination operand is located in a
6790                          * read-only data segment or any code segment.
6791                          */
6792                         exn = ((s.type & 0xa) == 0 || (s.type & 8));
6793                 else
6794                         /* #GP(0) if the source operand is located in an
6795                          * execute-only code segment
6796                          */
6797                         exn = ((s.type & 0xa) == 8);
6798         }
6799         if (exn) {
6800                 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
6801                 return 1;
6802         }
6803         if (is_long_mode(vcpu)) {
6804                 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
6805                  * non-canonical form. This is the only check done in long mode.
6806                  */
6807                 exn = is_noncanonical_address(*ret);
6808         } else if (is_protmode(vcpu)) {
6809                 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
6810                  */
6811                 exn = (s.unusable != 0);
6812                 /* Protected mode: #GP(0)/#SS(0) if the memory
6813                  * operand is outside the segment limit.
6814                  */
6815                 exn = exn || (off + sizeof(u64) > s.limit);
6816         }
6817         if (exn) {
6818                 kvm_queue_exception_e(vcpu,
6819                                       seg_reg == VCPU_SREG_SS ?
6820                                                 SS_VECTOR : GP_VECTOR,
6821                                       0);
6822                 return 1;
6823         }
6824
6825         return 0;
6826 }
6827
6828 /*
6829  * This function performs various checks on the vmptr operand, including:
6830  * - that it is 4KB aligned
6831  * - that no bits beyond the physical address width are set
6832  * Returns 0 on success, 1 otherwise.
6833  * (Intel SDM Section 30.3)
6834  */
6835 static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
6836                                   gpa_t *vmpointer)
6837 {
6838         gva_t gva;
6839         gpa_t vmptr;
6840         struct x86_exception e;
6841         struct page *page;
6842         struct vcpu_vmx *vmx = to_vmx(vcpu);
6843         int maxphyaddr = cpuid_maxphyaddr(vcpu);
6844
6845         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
6846                         vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
6847                 return 1;
6848
6849         if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
6850                                 sizeof(vmptr), &e)) {
6851                 kvm_inject_page_fault(vcpu, &e);
6852                 return 1;
6853         }
6854
6855         switch (exit_reason) {
6856         case EXIT_REASON_VMON:
6857                 /*
6858                  * SDM 3: 24.11.5
6859                  * The first 4 bytes of the VMXON region contain the supported
6860                  * VMCS revision identifier.
6861                  *
6862                  * Note: IA32_VMX_BASIC[48] will never be 1 for the nested
6863                  * case; if it were, it would limit the region address to a
6864                  * 32-bit physical address width.
6865                  *
6866                  */
6867                 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6868                         nested_vmx_failInvalid(vcpu);
6869                         skip_emulated_instruction(vcpu);
6870                         return 1;
6871                 }
6872
6873                 page = nested_get_page(vcpu, vmptr);
6874                 if (page == NULL ||
6875                     *(u32 *)kmap(page) != VMCS12_REVISION) {
6876                         nested_vmx_failInvalid(vcpu);
6877                         kunmap(page);
6878                         skip_emulated_instruction(vcpu);
6879                         return 1;
6880                 }
6881                 kunmap(page);
6882                 vmx->nested.vmxon_ptr = vmptr;
6883                 break;
6884         case EXIT_REASON_VMCLEAR:
6885                 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6886                         nested_vmx_failValid(vcpu,
6887                                              VMXERR_VMCLEAR_INVALID_ADDRESS);
6888                         skip_emulated_instruction(vcpu);
6889                         return 1;
6890                 }
6891
6892                 if (vmptr == vmx->nested.vmxon_ptr) {
6893                         nested_vmx_failValid(vcpu,
6894                                              VMXERR_VMCLEAR_VMXON_POINTER);
6895                         skip_emulated_instruction(vcpu);
6896                         return 1;
6897                 }
6898                 break;
6899         case EXIT_REASON_VMPTRLD:
6900                 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6901                         nested_vmx_failValid(vcpu,
6902                                              VMXERR_VMPTRLD_INVALID_ADDRESS);
6903                         skip_emulated_instruction(vcpu);
6904                         return 1;
6905                 }
6906
6907                 if (vmptr == vmx->nested.vmxon_ptr) {
6908                         nested_vmx_failValid(vcpu,
6909                                              VMXERR_VMCLEAR_VMXON_POINTER);
6910                         skip_emulated_instruction(vcpu);
6911                         return 1;
6912                 }
6913                 break;
6914         default:
6915                 return 1; /* shouldn't happen */
6916         }
6917
6918         if (vmpointer)
6919                 *vmpointer = vmptr;
6920         return 0;
6921 }
6922
6923 /*
6924  * Emulate the VMXON instruction.
6925  * Currently, we just remember that VMX is active, and do not save or even
6926  * inspect the argument to VMXON (the so-called "VMXON pointer") because we
6927  * do not currently need to store anything in that guest-allocated memory
6928  * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their
6929  * argument is different from the VMXON pointer (which the spec says they do).
6930  */
6931 static int handle_vmon(struct kvm_vcpu *vcpu)
6932 {
6933         struct kvm_segment cs;
6934         struct vcpu_vmx *vmx = to_vmx(vcpu);
6935         struct vmcs *shadow_vmcs;
6936         const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
6937                 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
6938
6939         /* The Intel VMX Instruction Reference lists a bunch of bits that
6940          * are prerequisite to running VMXON, most notably cr4.VMXE must be
6941          * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
6942          * Otherwise, we should fail with #UD. We test these now:
6943          */
6944         if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
6945             !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
6946             (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
6947                 kvm_queue_exception(vcpu, UD_VECTOR);
6948                 return 1;
6949         }
6950
6951         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
6952         if (is_long_mode(vcpu) && !cs.l) {
6953                 kvm_queue_exception(vcpu, UD_VECTOR);
6954                 return 1;
6955         }
6956
6957         if (vmx_get_cpl(vcpu)) {
6958                 kvm_inject_gp(vcpu, 0);
6959                 return 1;
6960         }
6961
6962         if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
6963                 return 1;
6964
6965         if (vmx->nested.vmxon) {
6966                 nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
6967                 skip_emulated_instruction(vcpu);
6968                 return 1;
6969         }
6970
6971         if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
6972                         != VMXON_NEEDED_FEATURES) {
6973                 kvm_inject_gp(vcpu, 0);
6974                 return 1;
6975         }
6976
6977         if (enable_shadow_vmcs) {
6978                 shadow_vmcs = alloc_vmcs();
6979                 if (!shadow_vmcs)
6980                         return -ENOMEM;
6981                 /* mark vmcs as shadow */
6982                 shadow_vmcs->revision_id |= (1u << 31);
6983                 /* init shadow vmcs */
6984                 vmcs_clear(shadow_vmcs);
6985                 vmx->nested.current_shadow_vmcs = shadow_vmcs;
6986         }
6987
6988         INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
6989         vmx->nested.vmcs02_num = 0;
6990
6991         hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
6992                      HRTIMER_MODE_REL);
6993         vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
6994
6995         vmx->nested.vmxon = true;
6996
6997         skip_emulated_instruction(vcpu);
6998         nested_vmx_succeed(vcpu);
6999         return 1;
7000 }
7001
7002 /*
7003  * Intel's VMX Instruction Reference specifies a common set of prerequisites
7004  * for running VMX instructions (except VMXON, whose prerequisites are
7005  * slightly different). It also specifies what exception to inject otherwise.
7006  */
7007 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
7008 {
7009         struct kvm_segment cs;
7010         struct vcpu_vmx *vmx = to_vmx(vcpu);
7011
7012         if (!vmx->nested.vmxon) {
7013                 kvm_queue_exception(vcpu, UD_VECTOR);
7014                 return 0;
7015         }
7016
7017         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
7018         if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
7019             (is_long_mode(vcpu) && !cs.l)) {
7020                 kvm_queue_exception(vcpu, UD_VECTOR);
7021                 return 0;
7022         }
7023
7024         if (vmx_get_cpl(vcpu)) {
7025                 kvm_inject_gp(vcpu, 0);
7026                 return 0;
7027         }
7028
7029         return 1;
7030 }
7031
7032 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
7033 {
7034         if (vmx->nested.current_vmptr == -1ull)
7035                 return;
7036
7037         /* current_vmptr and current_vmcs12 are always set/reset together */
7038         if (WARN_ON(vmx->nested.current_vmcs12 == NULL))
7039                 return;
7040
7041         if (enable_shadow_vmcs) {
7042                 /* copy to memory all shadowed fields in case
7043                    they were modified */
7044                 copy_shadow_to_vmcs12(vmx);
7045                 vmx->nested.sync_shadow_vmcs = false;
7046                 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
7047                                 SECONDARY_EXEC_SHADOW_VMCS);
7048                 vmcs_write64(VMCS_LINK_POINTER, -1ull);
7049         }
7050         vmx->nested.posted_intr_nv = -1;
7051         kunmap(vmx->nested.current_vmcs12_page);
7052         nested_release_page(vmx->nested.current_vmcs12_page);
7053         vmx->nested.current_vmptr = -1ull;
7054         vmx->nested.current_vmcs12 = NULL;
7055 }
7056
7057 /*
7058  * Free whatever needs to be freed from vmx->nested when L1 goes down, or
7059  * just stops using VMX.
7060  */
7061 static void free_nested(struct vcpu_vmx *vmx)
7062 {
7063         if (!vmx->nested.vmxon)
7064                 return;
7065
7066         vmx->nested.vmxon = false;
7067         free_vpid(vmx->nested.vpid02);
7068         nested_release_vmcs12(vmx);
7069         if (enable_shadow_vmcs)
7070                 free_vmcs(vmx->nested.current_shadow_vmcs);
7071         /* Unpin physical memory we referred to in current vmcs02 */
7072         if (vmx->nested.apic_access_page) {
7073                 nested_release_page(vmx->nested.apic_access_page);
7074                 vmx->nested.apic_access_page = NULL;
7075         }
7076         if (vmx->nested.virtual_apic_page) {
7077                 nested_release_page(vmx->nested.virtual_apic_page);
7078                 vmx->nested.virtual_apic_page = NULL;
7079         }
7080         if (vmx->nested.pi_desc_page) {
7081                 kunmap(vmx->nested.pi_desc_page);
7082                 nested_release_page(vmx->nested.pi_desc_page);
7083                 vmx->nested.pi_desc_page = NULL;
7084                 vmx->nested.pi_desc = NULL;
7085         }
7086
7087         nested_free_all_saved_vmcss(vmx);
7088 }
7089
7090 /* Emulate the VMXOFF instruction */
7091 static int handle_vmoff(struct kvm_vcpu *vcpu)
7092 {
7093         if (!nested_vmx_check_permission(vcpu))
7094                 return 1;
7095         free_nested(to_vmx(vcpu));
7096         skip_emulated_instruction(vcpu);
7097         nested_vmx_succeed(vcpu);
7098         return 1;
7099 }
7100
7101 /* Emulate the VMCLEAR instruction */
7102 static int handle_vmclear(struct kvm_vcpu *vcpu)
7103 {
7104         struct vcpu_vmx *vmx = to_vmx(vcpu);
7105         gpa_t vmptr;
7106         struct vmcs12 *vmcs12;
7107         struct page *page;
7108
7109         if (!nested_vmx_check_permission(vcpu))
7110                 return 1;
7111
7112         if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
7113                 return 1;
7114
7115         if (vmptr == vmx->nested.current_vmptr)
7116                 nested_release_vmcs12(vmx);
7117
7118         page = nested_get_page(vcpu, vmptr);
7119         if (page == NULL) {
7120                 /*
7121                  * For accurate processor emulation, VMCLEAR beyond available
7122                  * physical memory should do nothing at all. However, it is
7123                  * possible that a nested vmx bug, not a guest hypervisor bug,
7124                  * resulted in this case, so let's shut down before doing any
7125                  * more damage:
7126                  */
7127                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
7128                 return 1;
7129         }
7130         vmcs12 = kmap(page);
7131         vmcs12->launch_state = 0;
7132         kunmap(page);
7133         nested_release_page(page);
7134
7135         nested_free_vmcs02(vmx, vmptr);
7136
7137         skip_emulated_instruction(vcpu);
7138         nested_vmx_succeed(vcpu);
7139         return 1;
7140 }
7141
7142 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
7143
7144 /* Emulate the VMLAUNCH instruction */
7145 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
7146 {
7147         return nested_vmx_run(vcpu, true);
7148 }
7149
7150 /* Emulate the VMRESUME instruction */
7151 static int handle_vmresume(struct kvm_vcpu *vcpu)
7152 {
7153
7154         return nested_vmx_run(vcpu, false);
7155 }
7156
7157 enum vmcs_field_type {
7158         VMCS_FIELD_TYPE_U16 = 0,
7159         VMCS_FIELD_TYPE_U64 = 1,
7160         VMCS_FIELD_TYPE_U32 = 2,
7161         VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
7162 };
7163
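/*
 * Bits 14:13 of a VMCS field encoding give its width; encodings with bit 0
 * set name the high half of a 64-bit field and are accessed as 32 bits.
 */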
7164 static inline int vmcs_field_type(unsigned long field)
7165 {
7166         if (0x1 & field)        /* the *_HIGH fields are all 32 bit */
7167                 return VMCS_FIELD_TYPE_U32;
7168         return (field >> 13) & 0x3 ;
7169 }
7170
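/* Field type 1 (bits 11:10 of the encoding) marks read-only VM-exit information fields. */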
7171 static inline int vmcs_field_readonly(unsigned long field)
7172 {
7173         return (((field >> 10) & 0x3) == 1);
7174 }
7175
7176 /*
7177  * Read a vmcs12 field. Since these can have varying lengths and we return
7178  * one type, we chose the biggest type (u64) and zero-extend the return value
7179  * to that size. Note that the caller, handle_vmread, might need to use only
7180  * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
7181  * 64-bit fields are to be returned).
7182  */
7183 static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
7184                                   unsigned long field, u64 *ret)
7185 {
7186         short offset = vmcs_field_to_offset(field);
7187         char *p;
7188
7189         if (offset < 0)
7190                 return offset;
7191
7192         p = ((char *)(get_vmcs12(vcpu))) + offset;
7193
7194         switch (vmcs_field_type(field)) {
7195         case VMCS_FIELD_TYPE_NATURAL_WIDTH:
7196                 *ret = *((natural_width *)p);
7197                 return 0;
7198         case VMCS_FIELD_TYPE_U16:
7199                 *ret = *((u16 *)p);
7200                 return 0;
7201         case VMCS_FIELD_TYPE_U32:
7202                 *ret = *((u32 *)p);
7203                 return 0;
7204         case VMCS_FIELD_TYPE_U64:
7205                 *ret = *((u64 *)p);
7206                 return 0;
7207         default:
7208                 WARN_ON(1);
7209                 return -ENOENT;
7210         }
7211 }
7212
7213
7214 static inline int vmcs12_write_any(struct kvm_vcpu *vcpu,
7215                                    unsigned long field, u64 field_value){
7216         short offset = vmcs_field_to_offset(field);
7217         char *p = ((char *) get_vmcs12(vcpu)) + offset;
7218         if (offset < 0)
7219                 return offset;
7220
7221         switch (vmcs_field_type(field)) {
7222         case VMCS_FIELD_TYPE_U16:
7223                 *(u16 *)p = field_value;
7224                 return 0;
7225         case VMCS_FIELD_TYPE_U32:
7226                 *(u32 *)p = field_value;
7227                 return 0;
7228         case VMCS_FIELD_TYPE_U64:
7229                 *(u64 *)p = field_value;
7230                 return 0;
7231         case VMCS_FIELD_TYPE_NATURAL_WIDTH:
7232                 *(natural_width *)p = field_value;
7233                 return 0;
7234         default:
7235                 WARN_ON(1);
7236                 return -ENOENT;
7237         }
7238
7239 }
7240
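     /*
      * Copy the fields that L1 may have changed with VMWRITE (the read/write
      * shadow fields) out of the hardware shadow VMCS back into the software
      * vmcs12.  The shadow VMCS is temporarily loaded as the current VMCS, so
      * preemption stays disabled for the duration.
      */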
7241 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
7242 {
7243         int i;
7244         unsigned long field;
7245         u64 field_value;
7246         struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
7247         const unsigned long *fields = shadow_read_write_fields;
7248         const int num_fields = max_shadow_read_write_fields;
7249
7250         preempt_disable();
7251
7252         vmcs_load(shadow_vmcs);
7253
7254         for (i = 0; i < num_fields; i++) {
7255                 field = fields[i];
7256                 switch (vmcs_field_type(field)) {
7257                 case VMCS_FIELD_TYPE_U16:
7258                         field_value = vmcs_read16(field);
7259                         break;
7260                 case VMCS_FIELD_TYPE_U32:
7261                         field_value = vmcs_read32(field);
7262                         break;
7263                 case VMCS_FIELD_TYPE_U64:
7264                         field_value = vmcs_read64(field);
7265                         break;
7266                 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
7267                         field_value = vmcs_readl(field);
7268                         break;
7269                 default:
7270                         WARN_ON(1);
7271                         continue;
7272                 }
7273                 vmcs12_write_any(&vmx->vcpu, field, field_value);
7274         }
7275
7276         vmcs_clear(shadow_vmcs);
7277         vmcs_load(vmx->loaded_vmcs->vmcs);
7278
7279         preempt_enable();
7280 }
7281
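     /*
      * Propagate both the read/write and the read-only fields from vmcs12 into
      * the shadow VMCS, so that L1's VMREADs observe up-to-date values without
      * causing a VM exit.
      */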
7282 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
7283 {
7284         const unsigned long *fields[] = {
7285                 shadow_read_write_fields,
7286                 shadow_read_only_fields
7287         };
7288         const int max_fields[] = {
7289                 max_shadow_read_write_fields,
7290                 max_shadow_read_only_fields
7291         };
7292         int i, q;
7293         unsigned long field;
7294         u64 field_value = 0;
7295         struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
7296
7297         vmcs_load(shadow_vmcs);
7298
7299         for (q = 0; q < ARRAY_SIZE(fields); q++) {
7300                 for (i = 0; i < max_fields[q]; i++) {
7301                         field = fields[q][i];
7302                         vmcs12_read_any(&vmx->vcpu, field, &field_value);
7303
7304                         switch (vmcs_field_type(field)) {
7305                         case VMCS_FIELD_TYPE_U16:
7306                                 vmcs_write16(field, (u16)field_value);
7307                                 break;
7308                         case VMCS_FIELD_TYPE_U32:
7309                                 vmcs_write32(field, (u32)field_value);
7310                                 break;
7311                         case VMCS_FIELD_TYPE_U64:
7312                                 vmcs_write64(field, (u64)field_value);
7313                                 break;
7314                         case VMCS_FIELD_TYPE_NATURAL_WIDTH:
7315                                 vmcs_writel(field, (long)field_value);
7316                                 break;
7317                         default:
7318                                 WARN_ON(1);
7319                                 break;
7320                         }
7321                 }
7322         }
7323
7324         vmcs_clear(shadow_vmcs);
7325         vmcs_load(vmx->loaded_vmcs->vmcs);
7326 }
7327
7328 /*
7329  * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
7330  * used before) all generate the same failure when it is missing.
7331  */
7332 static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
7333 {
7334         struct vcpu_vmx *vmx = to_vmx(vcpu);
7335         if (vmx->nested.current_vmptr == -1ull) {
7336                 nested_vmx_failInvalid(vcpu);
7337                 skip_emulated_instruction(vcpu);
7338                 return 0;
7339         }
7340         return 1;
7341 }
7342
7343 static int handle_vmread(struct kvm_vcpu *vcpu)
7344 {
7345         unsigned long field;
7346         u64 field_value;
7347         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7348         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
7349         gva_t gva = 0;
7350
7351         if (!nested_vmx_check_permission(vcpu) ||
7352             !nested_vmx_check_vmcs12(vcpu))
7353                 return 1;
7354
7355         /* Decode instruction info and find the field to read */
7356         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
7357         /* Read the field, zero-extended to a u64 field_value */
7358         if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
7359                 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
7360                 skip_emulated_instruction(vcpu);
7361                 return 1;
7362         }
7363         /*
7364          * Now copy part of this value to register or memory, as requested.
7365          * Note that the number of bits actually copied is 32 or 64 depending
7366          * on the guest's mode (32 or 64 bit), not on the given field's length.
7367          */
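             /*
              * Bit 10 of the instruction info selects a register (set) or
              * memory (clear) destination operand.
              */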
7368         if (vmx_instruction_info & (1u << 10)) {
7369                 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
7370                         field_value);
7371         } else {
7372                 if (get_vmx_mem_address(vcpu, exit_qualification,
7373                                 vmx_instruction_info, true, &gva))
7374                         return 1;
7375                 /* _system ok, as nested_vmx_check_permission verified cpl=0 */
7376                 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
7377                              &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
7378         }
7379
7380         nested_vmx_succeed(vcpu);
7381         skip_emulated_instruction(vcpu);
7382         return 1;
7383 }
7384
7385
7386 static int handle_vmwrite(struct kvm_vcpu *vcpu)
7387 {
7388         unsigned long field;
7389         gva_t gva;
7390         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7391         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
7392         /* The value to write might be 32 or 64 bits, depending on L1's long
7393          * mode, and eventually we need to write that into a field of several
7394          * possible lengths. The code below first zero-extends the value to 64
7395          * bit (field_value), and then copies only the appropriate number of
7396          * bits into the vmcs12 field.
7397          */
7398         u64 field_value = 0;
7399         struct x86_exception e;
7400
7401         if (!nested_vmx_check_permission(vcpu) ||
7402             !nested_vmx_check_vmcs12(vcpu))
7403                 return 1;
7404
7405         if (vmx_instruction_info & (1u << 10))
7406                 field_value = kvm_register_readl(vcpu,
7407                         (((vmx_instruction_info) >> 3) & 0xf));
7408         else {
7409                 if (get_vmx_mem_address(vcpu, exit_qualification,
7410                                 vmx_instruction_info, false, &gva))
7411                         return 1;
7412                 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
7413                            &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
7414                         kvm_inject_page_fault(vcpu, &e);
7415                         return 1;
7416                 }
7417         }
7418
7419
7420         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
7421         if (vmcs_field_readonly(field)) {
7422                 nested_vmx_failValid(vcpu,
7423                         VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
7424                 skip_emulated_instruction(vcpu);
7425                 return 1;
7426         }
7427
7428         if (vmcs12_write_any(vcpu, field, field_value) < 0) {
7429                 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
7430                 skip_emulated_instruction(vcpu);
7431                 return 1;
7432         }
7433
7434         nested_vmx_succeed(vcpu);
7435         skip_emulated_instruction(vcpu);
7436         return 1;
7437 }
7438
7439 /* Emulate the VMPTRLD instruction */
7440 static int handle_vmptrld(struct kvm_vcpu *vcpu)
7441 {
7442         struct vcpu_vmx *vmx = to_vmx(vcpu);
7443         gpa_t vmptr;
7444
7445         if (!nested_vmx_check_permission(vcpu))
7446                 return 1;
7447
7448         if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
7449                 return 1;
7450
7451         if (vmx->nested.current_vmptr != vmptr) {
7452                 struct vmcs12 *new_vmcs12;
7453                 struct page *page;
7454                 page = nested_get_page(vcpu, vmptr);
7455                 if (page == NULL) {
7456                         nested_vmx_failInvalid(vcpu);
7457                         skip_emulated_instruction(vcpu);
7458                         return 1;
7459                 }
7460                 new_vmcs12 = kmap(page);
7461                 if (new_vmcs12->revision_id != VMCS12_REVISION) {
7462                         kunmap(page);
7463                         nested_release_page_clean(page);
7464                         nested_vmx_failValid(vcpu,
7465                                 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
7466                         skip_emulated_instruction(vcpu);
7467                         return 1;
7468                 }
7469
7470                 nested_release_vmcs12(vmx);
7471                 vmx->nested.current_vmptr = vmptr;
7472                 vmx->nested.current_vmcs12 = new_vmcs12;
7473                 vmx->nested.current_vmcs12_page = page;
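                     /*
                      * With VMCS shadowing enabled, the CPU satisfies L1's
                      * VMREAD/VMWRITE from the shadow VMCS without exiting;
                      * sync_shadow_vmcs requests that the shadow be refreshed
                      * from vmcs12 before the vCPU next enters the guest.
                      */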
7474                 if (enable_shadow_vmcs) {
7475                         vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
7476                                       SECONDARY_EXEC_SHADOW_VMCS);
7477                         vmcs_write64(VMCS_LINK_POINTER,
7478                                      __pa(vmx->nested.current_shadow_vmcs));
7479                         vmx->nested.sync_shadow_vmcs = true;
7480                 }
7481         }
7482
7483         nested_vmx_succeed(vcpu);
7484         skip_emulated_instruction(vcpu);
7485         return 1;
7486 }
7487
7488 /* Emulate the VMPTRST instruction */
7489 static int handle_vmptrst(struct kvm_vcpu *vcpu)
7490 {
7491         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7492         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
7493         gva_t vmcs_gva;
7494         struct x86_exception e;
7495
7496         if (!nested_vmx_check_permission(vcpu))
7497                 return 1;
7498
7499         if (get_vmx_mem_address(vcpu, exit_qualification,
7500                         vmx_instruction_info, true, &vmcs_gva))
7501                 return 1;
7502         /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
7503         if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
7504                                  (void *)&to_vmx(vcpu)->nested.current_vmptr,
7505                                  sizeof(u64), &e)) {
7506                 kvm_inject_page_fault(vcpu, &e);
7507                 return 1;
7508         }
7509         nested_vmx_succeed(vcpu);
7510         skip_emulated_instruction(vcpu);
7511         return 1;
7512 }
7513
7514 /* Emulate the INVEPT instruction */
7515 static int handle_invept(struct kvm_vcpu *vcpu)
7516 {
7517         struct vcpu_vmx *vmx = to_vmx(vcpu);
7518         u32 vmx_instruction_info, types;
7519         unsigned long type;
7520         gva_t gva;
7521         struct x86_exception e;
7522         struct {
7523                 u64 eptp, gpa;
7524         } operand;
7525
7526         if (!(vmx->nested.nested_vmx_secondary_ctls_high &
7527               SECONDARY_EXEC_ENABLE_EPT) ||
7528             !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
7529                 kvm_queue_exception(vcpu, UD_VECTOR);
7530                 return 1;
7531         }
7532
7533         if (!nested_vmx_check_permission(vcpu))
7534                 return 1;
7535
7536         if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
7537                 kvm_queue_exception(vcpu, UD_VECTOR);
7538                 return 1;
7539         }
7540
7541         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
7542         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
7543
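             /*
              * The bits above VMX_EPT_EXTENT_SHIFT in the EPT capabilities
              * advertise the supported INVEPT types; keep only the
              * single-context (1) and global (2) extent bits.
              */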
7544         types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
7545
7546         if (!(types & (1UL << type))) {
7547                 nested_vmx_failValid(vcpu,
7548                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7549                 skip_emulated_instruction(vcpu);
7550                 return 1;
7551         }
7552
7553         /* According to the Intel VMX instruction reference, the memory
7554          * operand is read even if it isn't needed (e.g., for type==global)
7555          */
7556         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
7557                         vmx_instruction_info, false, &gva))
7558                 return 1;
7559         if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
7560                                 sizeof(operand), &e)) {
7561                 kvm_inject_page_fault(vcpu, &e);
7562                 return 1;
7563         }
7564
7565         switch (type) {
7566         case VMX_EPT_EXTENT_GLOBAL:
7567                 kvm_mmu_sync_roots(vcpu);
7568                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
7569                 nested_vmx_succeed(vcpu);
7570                 break;
7571         default:
7572                 /* Trap single context invalidation invept calls */
7573                 BUG_ON(1);
7574                 break;
7575         }
7576
7577         skip_emulated_instruction(vcpu);
7578         return 1;
7579 }
7580
7581 static int handle_invvpid(struct kvm_vcpu *vcpu)
7582 {
7583         struct vcpu_vmx *vmx = to_vmx(vcpu);
7584         u32 vmx_instruction_info;
7585         unsigned long type, types;
7586         gva_t gva;
7587         struct x86_exception e;
7588         int vpid;
7589
7590         if (!(vmx->nested.nested_vmx_secondary_ctls_high &
7591               SECONDARY_EXEC_ENABLE_VPID) ||
7592                         !(vmx->nested.nested_vmx_vpid_caps & VMX_VPID_INVVPID_BIT)) {
7593                 kvm_queue_exception(vcpu, UD_VECTOR);
7594                 return 1;
7595         }
7596
7597         if (!nested_vmx_check_permission(vcpu))
7598                 return 1;
7599
7600         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
7601         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
7602
7603         types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
7604
7605         if (!(types & (1UL << type))) {
7606                 nested_vmx_failValid(vcpu,
7607                         VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7608                 skip_emulated_instruction(vcpu);
7609                 return 1;
7610         }
7611
7612         /* According to the Intel VMX instruction reference, the memory
7613          * operand is read even if it isn't needed (e.g., for type==global)
7614          */
7615         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
7616                         vmx_instruction_info, false, &gva))
7617                 return 1;
7618         if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
7619                                 sizeof(u32), &e)) {
7620                 kvm_inject_page_fault(vcpu, &e);
7621                 return 1;
7622         }
7623
7624         switch (type) {
7625         case VMX_VPID_EXTENT_SINGLE_CONTEXT:
7626                 /*
7627                  * Old versions of KVM use the single-context version so we
7628                  * have to support it; just treat it the same as all-context.
7629                  */
7630         case VMX_VPID_EXTENT_ALL_CONTEXT:
7631                 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
7632                 nested_vmx_succeed(vcpu);
7633                 break;
7634         default:
7635                 /* Trap individual address invalidation invvpid calls */
7636                 BUG_ON(1);
7637                 break;
7638         }
7639
7640         skip_emulated_instruction(vcpu);
7641         return 1;
7642 }
7643
7644 static int handle_pml_full(struct kvm_vcpu *vcpu)
7645 {
7646         unsigned long exit_qualification;
7647
7648         trace_kvm_pml_full(vcpu->vcpu_id);
7649
7650         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7651
7652         /*
7653          * A PML-buffer-full exit happened while executing IRET from an NMI;
7654          * the "blocked by NMI" bit has to be set before the next VM entry.
7655          */
7656         if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
7657                         cpu_has_virtual_nmis() &&
7658                         (exit_qualification & INTR_INFO_UNBLOCK_NMI))
7659                 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
7660                                 GUEST_INTR_STATE_NMI);
7661
7662         /*
7663          * PML buffer already flushed at beginning of VMEXIT. Nothing to do
7664          * here, and there's no userspace involvement needed for PML.
7665          */
7666         return 1;
7667 }
7668
7669 static int handle_pcommit(struct kvm_vcpu *vcpu)
7670 {
7671         /* We never intercept the PCOMMIT instruction for the L1 guest. */
7672         WARN_ON(1);
7673         return 1;
7674 }
7675
7676 static int handle_preemption_timer(struct kvm_vcpu *vcpu)
7677 {
7678         kvm_lapic_expired_hv_timer(vcpu);
7679         return 1;
7680 }
7681
7682 /*
7683  * The exit handlers return 1 if the exit was handled fully and guest execution
7684  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
7685  * to be done to userspace and return 0.
7686  */
7687 static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
7688         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
7689         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
7690         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
7691         [EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
7692         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
7693         [EXIT_REASON_CR_ACCESS]               = handle_cr,
7694         [EXIT_REASON_DR_ACCESS]               = handle_dr,
7695         [EXIT_REASON_CPUID]                   = handle_cpuid,
7696         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
7697         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
7698         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
7699         [EXIT_REASON_HLT]                     = handle_halt,
7700         [EXIT_REASON_INVD]                    = handle_invd,
7701         [EXIT_REASON_INVLPG]                  = handle_invlpg,
7702         [EXIT_REASON_RDPMC]                   = handle_rdpmc,
7703         [EXIT_REASON_VMCALL]                  = handle_vmcall,
7704         [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
7705         [EXIT_REASON_VMLAUNCH]                = handle_vmlaunch,
7706         [EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
7707         [EXIT_REASON_VMPTRST]                 = handle_vmptrst,
7708         [EXIT_REASON_VMREAD]                  = handle_vmread,
7709         [EXIT_REASON_VMRESUME]                = handle_vmresume,
7710         [EXIT_REASON_VMWRITE]                 = handle_vmwrite,
7711         [EXIT_REASON_VMOFF]                   = handle_vmoff,
7712         [EXIT_REASON_VMON]                    = handle_vmon,
7713         [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
7714         [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
7715         [EXIT_REASON_APIC_WRITE]              = handle_apic_write,
7716         [EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
7717         [EXIT_REASON_WBINVD]                  = handle_wbinvd,
7718         [EXIT_REASON_XSETBV]                  = handle_xsetbv,
7719         [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
7720         [EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
7721         [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
7722         [EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
7723         [EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
7724         [EXIT_REASON_MWAIT_INSTRUCTION]       = handle_mwait,
7725         [EXIT_REASON_MONITOR_TRAP_FLAG]       = handle_monitor_trap,
7726         [EXIT_REASON_MONITOR_INSTRUCTION]     = handle_monitor,
7727         [EXIT_REASON_INVEPT]                  = handle_invept,
7728         [EXIT_REASON_INVVPID]                 = handle_invvpid,
7729         [EXIT_REASON_XSAVES]                  = handle_xsaves,
7730         [EXIT_REASON_XRSTORS]                 = handle_xrstors,
7731         [EXIT_REASON_PML_FULL]                = handle_pml_full,
7732         [EXIT_REASON_PCOMMIT]                 = handle_pcommit,
7733         [EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
7734 };
7735
7736 static const int kvm_vmx_max_exit_handlers =
7737         ARRAY_SIZE(kvm_vmx_exit_handlers);
7738
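     /*
      * Return true if the I/O instruction should cause an exit from L2 to L1,
      * i.e., if L1 asked (via the I/O bitmaps or the unconditional I/O exiting
      * control) to intercept accesses to this port.
      */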
7739 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
7740                                        struct vmcs12 *vmcs12)
7741 {
7742         unsigned long exit_qualification;
7743         gpa_t bitmap, last_bitmap;
7744         unsigned int port;
7745         int size;
7746         u8 b;
7747
7748         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
7749                 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
7750
7751         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7752
7753         port = exit_qualification >> 16;
7754         size = (exit_qualification & 7) + 1;
7755
7756         last_bitmap = (gpa_t)-1;
7757         b = -1;
7758
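             /*
              * io_bitmap_a covers ports 0x0000-0x7fff and io_bitmap_b covers
              * ports 0x8000-0xffff, one bit per port; a set bit means the
              * access must exit to L1.
              */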
7759         while (size > 0) {
7760                 if (port < 0x8000)
7761                         bitmap = vmcs12->io_bitmap_a;
7762                 else if (port < 0x10000)
7763                         bitmap = vmcs12->io_bitmap_b;
7764                 else
7765                         return true;
7766                 bitmap += (port & 0x7fff) / 8;
7767
7768                 if (last_bitmap != bitmap)
7769                         if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
7770                                 return true;
7771                 if (b & (1 << (port & 7)))
7772                         return true;
7773
7774                 port++;
7775                 size--;
7776                 last_bitmap = bitmap;
7777         }
7778
7779         return false;
7780 }
7781
7782 /*
7783  * Return 1 if we should exit from L2 to L1 to handle an MSR access,
7784  * rather than handle it ourselves in L0. I.e., check whether L1 expressed
7785  * disinterest in the current event (read or write a specific MSR) by using an
7786  * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
7787  */
7788 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
7789         struct vmcs12 *vmcs12, u32 exit_reason)
7790 {
7791         u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
7792         gpa_t bitmap;
7793
7794         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
7795                 return true;
7796
7797         /*
7798          * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
7799          * for the four combinations of read/write and low/high MSR numbers.
7800          * First we need to figure out which of the four to use:
7801          */
7802         bitmap = vmcs12->msr_bitmap;
7803         if (exit_reason == EXIT_REASON_MSR_WRITE)
7804                 bitmap += 2048;
7805         if (msr_index >= 0xc0000000) {
7806                 msr_index -= 0xc0000000;
7807                 bitmap += 1024;
7808         }
7809
7810         /* Then read the msr_index'th bit from this bitmap: */
7811         if (msr_index < 1024*8) {
7812                 unsigned char b;
7813                 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
7814                         return true;
7815                 return 1 & (b >> (msr_index & 7));
7816         } else
7817                 return true; /* let L1 handle the wrong parameter */
7818 }
7819
7820 /*
7821  * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
7822  * rather than handle it ourselves in L0. I.e., check if L1 wanted to
7823  * intercept (via guest_host_mask etc.) the current event.
7824  */
7825 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
7826         struct vmcs12 *vmcs12)
7827 {
7828         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7829         int cr = exit_qualification & 15;
7830         int reg = (exit_qualification >> 8) & 15;
7831         unsigned long val = kvm_register_readl(vcpu, reg);
7832
7833         switch ((exit_qualification >> 4) & 3) {
7834         case 0: /* mov to cr */
7835                 switch (cr) {
7836                 case 0:
7837                         if (vmcs12->cr0_guest_host_mask &
7838                             (val ^ vmcs12->cr0_read_shadow))
7839                                 return true;
7840                         break;
7841                 case 3:
7842                         if ((vmcs12->cr3_target_count >= 1 &&
7843                                         vmcs12->cr3_target_value0 == val) ||
7844                                 (vmcs12->cr3_target_count >= 2 &&
7845                                         vmcs12->cr3_target_value1 == val) ||
7846                                 (vmcs12->cr3_target_count >= 3 &&
7847                                         vmcs12->cr3_target_value2 == val) ||
7848                                 (vmcs12->cr3_target_count >= 4 &&
7849                                         vmcs12->cr3_target_value3 == val))
7850                                 return false;
7851                         if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
7852                                 return true;
7853                         break;
7854                 case 4:
7855                         if (vmcs12->cr4_guest_host_mask &
7856                             (vmcs12->cr4_read_shadow ^ val))
7857                                 return true;
7858                         break;
7859                 case 8:
7860                         if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
7861                                 return true;
7862                         break;
7863                 }
7864                 break;
7865         case 2: /* clts */
7866                 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
7867                     (vmcs12->cr0_read_shadow & X86_CR0_TS))
7868                         return true;
7869                 break;
7870         case 1: /* mov from cr */
7871                 switch (cr) {
7872                 case 3:
7873                         if (vmcs12->cpu_based_vm_exec_control &
7874                             CPU_BASED_CR3_STORE_EXITING)
7875                                 return true;
7876                         break;
7877                 case 8:
7878                         if (vmcs12->cpu_based_vm_exec_control &
7879                             CPU_BASED_CR8_STORE_EXITING)
7880                                 return true;
7881                         break;
7882                 }
7883                 break;
7884         case 3: /* lmsw */
7885                 /*
7886                  * lmsw can change bits 1..3 of cr0, and only set bit 0 of
7887                  * cr0. Other attempted changes are ignored, with no exit.
7888                  */
7889                 if (vmcs12->cr0_guest_host_mask & 0xe &
7890                     (val ^ vmcs12->cr0_read_shadow))
7891                         return true;
7892                 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
7893                     !(vmcs12->cr0_read_shadow & 0x1) &&
7894                     (val & 0x1))
7895                         return true;
7896                 break;
7897         }
7898         return false;
7899 }
7900
7901 /*
7902  * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
7903  * should handle it ourselves in L0 (and then continue L2). Only call this
7904  * when in is_guest_mode (L2).
7905  */
7906 static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
7907 {
7908         u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
7909         struct vcpu_vmx *vmx = to_vmx(vcpu);
7910         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7911         u32 exit_reason = vmx->exit_reason;
7912
7913         trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
7914                                 vmcs_readl(EXIT_QUALIFICATION),
7915                                 vmx->idt_vectoring_info,
7916                                 intr_info,
7917                                 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
7918                                 KVM_ISA_VMX);
7919
7920         if (vmx->nested.nested_run_pending)
7921                 return false;
7922
7923         if (unlikely(vmx->fail)) {
7924                 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
7925                                     vmcs_read32(VM_INSTRUCTION_ERROR));
7926                 return true;
7927         }
7928
7929         switch (exit_reason) {
7930         case EXIT_REASON_EXCEPTION_NMI:
7931                 if (!is_exception(intr_info))
7932                         return false;
7933                 else if (is_page_fault(intr_info))
7934                         return enable_ept;
7935                 else if (is_no_device(intr_info) &&
7936                          !(vmcs12->guest_cr0 & X86_CR0_TS))
7937                         return false;
7938                 else if (is_debug(intr_info) &&
7939                          vcpu->guest_debug &
7940                          (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
7941                         return false;
7942                 else if (is_breakpoint(intr_info) &&
7943                          vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
7944                         return false;
7945                 return vmcs12->exception_bitmap &
7946                                 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
7947         case EXIT_REASON_EXTERNAL_INTERRUPT:
7948                 return false;
7949         case EXIT_REASON_TRIPLE_FAULT:
7950                 return true;
7951         case EXIT_REASON_PENDING_INTERRUPT:
7952                 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
7953         case EXIT_REASON_NMI_WINDOW:
7954                 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
7955         case EXIT_REASON_TASK_SWITCH:
7956                 return true;
7957         case EXIT_REASON_CPUID:
7958                 if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
7959                         return false;
7960                 return true;
7961         case EXIT_REASON_HLT:
7962                 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
7963         case EXIT_REASON_INVD:
7964                 return true;
7965         case EXIT_REASON_INVLPG:
7966                 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
7967         case EXIT_REASON_RDPMC:
7968                 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
7969         case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
7970                 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
7971         case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
7972         case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
7973         case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
7974         case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
7975         case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
7976         case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
7977                 /*
7978                  * VMX instructions trap unconditionally. This allows L1 to
7979                  * emulate them for its L2 guest, i.e., allows 3-level nesting!
7980                  */
7981                 return true;
7982         case EXIT_REASON_CR_ACCESS:
7983                 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
7984         case EXIT_REASON_DR_ACCESS:
7985                 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
7986         case EXIT_REASON_IO_INSTRUCTION:
7987                 return nested_vmx_exit_handled_io(vcpu, vmcs12);
7988         case EXIT_REASON_MSR_READ:
7989         case EXIT_REASON_MSR_WRITE:
7990                 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
7991         case EXIT_REASON_INVALID_STATE:
7992                 return true;
7993         case EXIT_REASON_MWAIT_INSTRUCTION:
7994                 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
7995         case EXIT_REASON_MONITOR_TRAP_FLAG:
7996                 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
7997         case EXIT_REASON_MONITOR_INSTRUCTION:
7998                 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
7999         case EXIT_REASON_PAUSE_INSTRUCTION:
8000                 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
8001                         nested_cpu_has2(vmcs12,
8002                                 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
8003         case EXIT_REASON_MCE_DURING_VMENTRY:
8004                 return false;
8005         case EXIT_REASON_TPR_BELOW_THRESHOLD:
8006                 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
8007         case EXIT_REASON_APIC_ACCESS:
8008                 return nested_cpu_has2(vmcs12,
8009                         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
8010         case EXIT_REASON_APIC_WRITE:
8011         case EXIT_REASON_EOI_INDUCED:
8012                 /* apic_write and eoi_induced should exit unconditionally. */
8013                 return true;
8014         case EXIT_REASON_EPT_VIOLATION:
8015                 /*
8016                  * L0 always deals with the EPT violation. If nested EPT is
8017                  * used, and the nested mmu code discovers that the address is
8018                  * missing in the guest EPT table (EPT12), the EPT violation
8019                  * will be injected with nested_ept_inject_page_fault()
8020                  */
8021                 return false;
8022         case EXIT_REASON_EPT_MISCONFIG:
8023                 /*
8024                  * L2 never directly uses L1's EPT, but rather L0's own EPT
8025                  * table (shadow on EPT) or a merged EPT table that L0 built
8026                  * (EPT on EPT). So any problems with the structure of the
8027                  * table are L0's fault.
8028                  */
8029                 return false;
8030         case EXIT_REASON_WBINVD:
8031                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
8032         case EXIT_REASON_XSETBV:
8033                 return true;
8034         case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
8035                 /*
8036                  * This should never happen, since it is not possible to
8037                  * set XSS to a non-zero value---neither in L1 nor in L2.
8038                  * If it were, XSS would have to be checked against
8039                  * the XSS exit bitmap in vmcs12.
8040                  */
8041                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
8042         case EXIT_REASON_PCOMMIT:
8043                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_PCOMMIT);
8044         default:
8045                 return true;
8046         }
8047 }
8048
8049 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
8050 {
8051         *info1 = vmcs_readl(EXIT_QUALIFICATION);
8052         *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
8053 }
8054
8055 static int vmx_create_pml_buffer(struct vcpu_vmx *vmx)
8056 {
8057         struct page *pml_pg;
8058
8059         pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
8060         if (!pml_pg)
8061                 return -ENOMEM;
8062
8063         vmx->pml_pg = pml_pg;
8064
8065         vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
8066         vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
8067
8068         return 0;
8069 }
8070
8071 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
8072 {
8073         if (vmx->pml_pg) {
8074                 __free_page(vmx->pml_pg);
8075                 vmx->pml_pg = NULL;
8076         }
8077 }
8078
8079 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
8080 {
8081         struct vcpu_vmx *vmx = to_vmx(vcpu);
8082         u64 *pml_buf;
8083         u16 pml_idx;
8084
8085         pml_idx = vmcs_read16(GUEST_PML_INDEX);
8086
8087         /* Do nothing if PML buffer is empty */
8088         if (pml_idx == (PML_ENTITY_NUM - 1))
8089                 return;
8090
8091         /* PML index always points to next available PML buffer entity */
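             /* A completely full buffer leaves the index underflowed to 0xffff. */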
8092         if (pml_idx >= PML_ENTITY_NUM)
8093                 pml_idx = 0;
8094         else
8095                 pml_idx++;
8096
8097         pml_buf = page_address(vmx->pml_pg);
8098         for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
8099                 u64 gpa;
8100
8101                 gpa = pml_buf[pml_idx];
8102                 WARN_ON(gpa & (PAGE_SIZE - 1));
8103                 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
8104         }
8105
8106         /* reset PML index */
8107         vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
8108 }
8109
8110 /*
8111  * Flush all vcpus' PML buffers and record the logged GPAs in dirty_bitmap.
8112  * Called before reporting dirty_bitmap to userspace.
8113  */
8114 static void kvm_flush_pml_buffers(struct kvm *kvm)
8115 {
8116         int i;
8117         struct kvm_vcpu *vcpu;
8118         /*
8119          * We only need to kick each vcpu out of guest mode here, as the PML
8120          * buffer is flushed at the beginning of every VMEXIT; only vcpus
8121          * currently running in guest mode can have unflushed GPAs in their
8122          * PML buffers.
8123          */
8124         kvm_for_each_vcpu(i, vcpu, kvm)
8125                 kvm_vcpu_kick(vcpu);
8126 }
8127
8128 static void vmx_dump_sel(char *name, uint32_t sel)
8129 {
8130         pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
8131                name, vmcs_read32(sel),
8132                vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
8133                vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
8134                vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
8135 }
8136
8137 static void vmx_dump_dtsel(char *name, uint32_t limit)
8138 {
8139         pr_err("%s                           limit=0x%08x, base=0x%016lx\n",
8140                name, vmcs_read32(limit),
8141                vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
8142 }
8143
8144 static void dump_vmcs(void)
8145 {
8146         u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
8147         u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
8148         u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
8149         u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
8150         u32 secondary_exec_control = 0;
8151         unsigned long cr4 = vmcs_readl(GUEST_CR4);
8152         u64 efer = vmcs_read64(GUEST_IA32_EFER);
8153         int i, n;
8154
8155         if (cpu_has_secondary_exec_ctrls())
8156                 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
8157
8158         pr_err("*** Guest State ***\n");
8159         pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
8160                vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
8161                vmcs_readl(CR0_GUEST_HOST_MASK));
8162         pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
8163                cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
8164         pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
8165         if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
8166             (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
8167         {
8168                 pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
8169                        vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
8170                 pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
8171                        vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
8172         }
8173         pr_err("RSP = 0x%016lx  RIP = 0x%016lx\n",
8174                vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
8175         pr_err("RFLAGS=0x%08lx         DR7 = 0x%016lx\n",
8176                vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
8177         pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
8178                vmcs_readl(GUEST_SYSENTER_ESP),
8179                vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
8180         vmx_dump_sel("CS:  ", GUEST_CS_SELECTOR);
8181         vmx_dump_sel("DS:  ", GUEST_DS_SELECTOR);
8182         vmx_dump_sel("SS:  ", GUEST_SS_SELECTOR);
8183         vmx_dump_sel("ES:  ", GUEST_ES_SELECTOR);
8184         vmx_dump_sel("FS:  ", GUEST_FS_SELECTOR);
8185         vmx_dump_sel("GS:  ", GUEST_GS_SELECTOR);
8186         vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
8187         vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
8188         vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
8189         vmx_dump_sel("TR:  ", GUEST_TR_SELECTOR);
8190         if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
8191             (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
8192                 pr_err("EFER =     0x%016llx  PAT = 0x%016llx\n",
8193                        efer, vmcs_read64(GUEST_IA32_PAT));
8194         pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
8195                vmcs_read64(GUEST_IA32_DEBUGCTL),
8196                vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
8197         if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
8198                 pr_err("PerfGlobCtl = 0x%016llx\n",
8199                        vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
8200         if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
8201                 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
8202         pr_err("Interruptibility = %08x  ActivityState = %08x\n",
8203                vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
8204                vmcs_read32(GUEST_ACTIVITY_STATE));
8205         if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
8206                 pr_err("InterruptStatus = %04x\n",
8207                        vmcs_read16(GUEST_INTR_STATUS));
8208
8209         pr_err("*** Host State ***\n");
8210         pr_err("RIP = 0x%016lx  RSP = 0x%016lx\n",
8211                vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
8212         pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
8213                vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
8214                vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
8215                vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
8216                vmcs_read16(HOST_TR_SELECTOR));
8217         pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
8218                vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
8219                vmcs_readl(HOST_TR_BASE));
8220         pr_err("GDTBase=%016lx IDTBase=%016lx\n",
8221                vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
8222         pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
8223                vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
8224                vmcs_readl(HOST_CR4));
8225         pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
8226                vmcs_readl(HOST_IA32_SYSENTER_ESP),
8227                vmcs_read32(HOST_IA32_SYSENTER_CS),
8228                vmcs_readl(HOST_IA32_SYSENTER_EIP));
8229         if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
8230                 pr_err("EFER = 0x%016llx  PAT = 0x%016llx\n",
8231                        vmcs_read64(HOST_IA32_EFER),
8232                        vmcs_read64(HOST_IA32_PAT));
8233         if (vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
8234                 pr_err("PerfGlobCtl = 0x%016llx\n",
8235                        vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
8236
8237         pr_err("*** Control State ***\n");
8238         pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
8239                pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
8240         pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
8241         pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
8242                vmcs_read32(EXCEPTION_BITMAP),
8243                vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
8244                vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
8245         pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
8246                vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
8247                vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
8248                vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
8249         pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
8250                vmcs_read32(VM_EXIT_INTR_INFO),
8251                vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
8252                vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
8253         pr_err("        reason=%08x qualification=%016lx\n",
8254                vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
8255         pr_err("IDTVectoring: info=%08x errcode=%08x\n",
8256                vmcs_read32(IDT_VECTORING_INFO_FIELD),
8257                vmcs_read32(IDT_VECTORING_ERROR_CODE));
8258         pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
8259         if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
8260                 pr_err("TSC Multiplier = 0x%016llx\n",
8261                        vmcs_read64(TSC_MULTIPLIER));
8262         if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
8263                 pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
8264         if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
8265                 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
8266         if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
8267                 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
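             /* CR3-target value fields have consecutive encodings, two apart. */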
8268         n = vmcs_read32(CR3_TARGET_COUNT);
8269         for (i = 0; i + 1 < n; i += 2)
8270                 pr_err("CR3 target%u=%016lx target%u=%016lx\n",
8271                        i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
8272                        i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
8273         if (i < n)
8274                 pr_err("CR3 target%u=%016lx\n",
8275                        i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
8276         if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
8277                 pr_err("PLE Gap=%08x Window=%08x\n",
8278                        vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
8279         if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
8280                 pr_err("Virtual processor ID = 0x%04x\n",
8281                        vmcs_read16(VIRTUAL_PROCESSOR_ID));
8282 }
8283
8284 /*
8285  * The guest has exited.  See if we can fix it or if we need userspace
8286  * assistance.
8287  */
8288 static int vmx_handle_exit(struct kvm_vcpu *vcpu)
8289 {
8290         struct vcpu_vmx *vmx = to_vmx(vcpu);
8291         u32 exit_reason = vmx->exit_reason;
8292         u32 vectoring_info = vmx->idt_vectoring_info;
8293
8294         trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
8295
8296         /*
8297          * Flush the PML buffer of logged GPAs so that dirty_bitmap is kept
8298          * up to date. Another benefit: in kvm_vm_ioctl_get_dirty_log, before
8299          * querying dirty_bitmap, we only need to kick all vcpus out of guest
8300          * mode, since a vcpu back in root mode must already have had its PML
8301          * buffer flushed.
8302          */
8303         if (enable_pml)
8304                 vmx_flush_pml_buffer(vcpu);
8305
8306         /* If guest state is invalid, start emulating */
8307         if (vmx->emulation_required)
8308                 return handle_invalid_guest_state(vcpu);
8309
8310         if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
8311                 nested_vmx_vmexit(vcpu, exit_reason,
8312                                   vmcs_read32(VM_EXIT_INTR_INFO),
8313                                   vmcs_readl(EXIT_QUALIFICATION));
8314                 return 1;
8315         }
8316
8317         if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
8318                 dump_vmcs();
8319                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
8320                 vcpu->run->fail_entry.hardware_entry_failure_reason
8321                         = exit_reason;
8322                 return 0;
8323         }
8324
8325         if (unlikely(vmx->fail)) {
8326                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
8327                 vcpu->run->fail_entry.hardware_entry_failure_reason
8328                         = vmcs_read32(VM_INSTRUCTION_ERROR);
8329                 return 0;
8330         }
8331
8332         /*
8333          * Note:
8334          * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by an
8335          * event delivery, since that indicates the guest is accessing MMIO.
8336          * The vm-exit would simply be triggered again after returning to the
8337          * guest, causing an infinite loop.
8338          */
8339         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
8340                         (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
8341                         exit_reason != EXIT_REASON_EPT_VIOLATION &&
8342                         exit_reason != EXIT_REASON_TASK_SWITCH)) {
8343                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
8344                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
8345                 vcpu->run->internal.ndata = 2;
8346                 vcpu->run->internal.data[0] = vectoring_info;
8347                 vcpu->run->internal.data[1] = exit_reason;
8348                 return 0;
8349         }
8350
8351         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
8352             !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
8353                                         get_vmcs12(vcpu))))) {
8354                 if (vmx_interrupt_allowed(vcpu)) {
8355                         vmx->soft_vnmi_blocked = 0;
8356                 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
8357                            vcpu->arch.nmi_pending) {
8358                         /*
8359                          * This CPU doesn't support us in finding the end of an
8360                          * NMI-blocked window if the guest runs with IRQs
8361                          * disabled. So we pull the trigger after 1 s of
8362                          * futile waiting, but inform the user about this.
8363                          */
8364                         printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
8365                                "state on VCPU %d after 1 s timeout\n",
8366                                __func__, vcpu->vcpu_id);
8367                         vmx->soft_vnmi_blocked = 0;
8368                 }
8369         }
8370
8371         if (exit_reason < kvm_vmx_max_exit_handlers
8372             && kvm_vmx_exit_handlers[exit_reason])
8373                 return kvm_vmx_exit_handlers[exit_reason](vcpu);
8374         else {
8375                 WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
8376                 kvm_queue_exception(vcpu, UD_VECTOR);
8377                 return 1;
8378         }
8379 }
8380
8381 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
8382 {
8383         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8384
8385         if (is_guest_mode(vcpu) &&
8386                 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
8387                 return;
8388
8389         if (irr == -1 || tpr < irr) {
8390                 vmcs_write32(TPR_THRESHOLD, 0);
8391                 return;
8392         }
8393
8394         vmcs_write32(TPR_THRESHOLD, irr);
8395 }
8396
8397 static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
8398 {
8399         u32 sec_exec_control;
8400
8401         /*
8402          * There is no point in enabling virtualized x2APIC mode without
8403          * enabling APICv.
8404          */
8405         if (!cpu_has_vmx_virtualize_x2apic_mode() ||
8406                                 !kvm_vcpu_apicv_active(vcpu))
8407                 return;
8408
8409         if (!cpu_need_tpr_shadow(vcpu))
8410                 return;
8411
8412         sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
8413
8414         if (set) {
8415                 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
8416                 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
8417         } else {
8418                 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
8419                 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
8420         }
8421         vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
8422
8423         vmx_set_msr_bitmap(vcpu);
8424 }
8425
8426 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
8427 {
8428         struct vcpu_vmx *vmx = to_vmx(vcpu);
8429
8430         /*
8431          * Currently we do not handle the nested case where L2 has an
8432          * APIC access page of its own; that page is still pinned.
8433          * Hence, we skip the case where the VCPU is in guest mode _and_
8434          * L1 prepared an APIC access page for L2.
8435          *
8436          * For the case where L1 and L2 share the same APIC access page
8437          * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear
8438          * in the vmcs12), this function will only update either the vmcs01
8439          * or the vmcs02.  If the former, the vmcs02 will be updated by
8440          * prepare_vmcs02.  If the latter, the vmcs01 will be updated in
8441          * the next L2->L1 exit.
8442          */
8443         if (!is_guest_mode(vcpu) ||
8444             !nested_cpu_has2(vmx->nested.current_vmcs12,
8445                              SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
8446                 vmcs_write64(APIC_ACCESS_ADDR, hpa);
8447 }
8448
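     /*
      * GUEST_INTR_STATUS keeps the requesting vector (RVI) in its low byte and
      * the in-service vector (SVI) in its high byte; the two helpers below each
      * update one of the bytes.
      */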
8449 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
8450 {
8451         u16 status;
8452         u8 old;
8453
8454         if (max_isr == -1)
8455                 max_isr = 0;
8456
8457         status = vmcs_read16(GUEST_INTR_STATUS);
8458         old = status >> 8;
8459         if (max_isr != old) {
8460                 status &= 0xff;
8461                 status |= max_isr << 8;
8462                 vmcs_write16(GUEST_INTR_STATUS, status);
8463         }
8464 }
8465
8466 static void vmx_set_rvi(int vector)
8467 {
8468         u16 status;
8469         u8 old;
8470
8471         if (vector == -1)
8472                 vector = 0;
8473
8474         status = vmcs_read16(GUEST_INTR_STATUS);
8475         old = (u8)status & 0xff;
8476         if ((u8)vector != old) {
8477                 status &= ~0xff;
8478                 status |= (u8)vector;
8479                 vmcs_write16(GUEST_INTR_STATUS, status);
8480         }
8481 }
8482
8483 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
8484 {
8485         if (!is_guest_mode(vcpu)) {
8486                 vmx_set_rvi(max_irr);
8487                 return;
8488         }
8489
8490         if (max_irr == -1)
8491                 return;
8492
8493         /*
8494          * In guest mode.  If a vmexit is needed, vmx_check_nested_events
8495          * handles it.
8496          */
8497         if (nested_exit_on_intr(vcpu))
8498                 return;
8499
8500         /*
8501          * Else, fall back to pre-APICv interrupt injection since L2
8502          * is run without virtual interrupt delivery.
8503          */
8504         if (!kvm_event_needs_reinjection(vcpu) &&
8505             vmx_interrupt_allowed(vcpu)) {
8506                 kvm_queue_interrupt(vcpu, max_irr, false);
8507                 vmx_inject_irq(vcpu);
8508         }
8509 }
8510
8511 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
8512 {
8513         if (!kvm_vcpu_apicv_active(vcpu))
8514                 return;
8515
8516         vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
8517         vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
8518         vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
8519         vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
8520 }
8521
8522 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
8523 {
8524         u32 exit_intr_info;
8525
8526         if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
8527               || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
8528                 return;
8529
8530         vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
8531         exit_intr_info = vmx->exit_intr_info;
8532
8533         /* Handle machine checks before interrupts are enabled */
8534         if (is_machine_check(exit_intr_info))
8535                 kvm_machine_check();
8536
8537         /* We need to handle NMIs before interrupts are enabled */
8538         if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
8539             (exit_intr_info & INTR_INFO_VALID_MASK)) {
8540                 kvm_before_handle_nmi(&vmx->vcpu);
8541                 asm("int $2");
8542                 kvm_after_handle_nmi(&vmx->vcpu);
8543         }
8544 }
8545
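/*
 * When the exit was caused by an external interrupt, the CPU has already
 * acknowledged it (ack-interrupt-on-exit) and recorded its vector in the
 * exit interruption info, so dispatch directly to the host IDT handler for
 * that vector instead of merely re-enabling interrupts.
 */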
8546 static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
8547 {
8548         u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
8549         register void *__sp asm(_ASM_SP);
8550
8551         /*
8552          * If an external interrupt exists, the IF bit is set in rflags/eflags
8553          * on the interrupt stack frame, so interrupts will be re-enabled on
8554          * return from the interrupt handler.
8555          */
8556         if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
8557                         == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
8558                 unsigned int vector;
8559                 unsigned long entry;
8560                 gate_desc *desc;
8561                 struct vcpu_vmx *vmx = to_vmx(vcpu);
8562 #ifdef CONFIG_X86_64
8563                 unsigned long tmp;
8564 #endif
8565
8566                 vector =  exit_intr_info & INTR_INFO_VECTOR_MASK;
8567                 desc = (gate_desc *)vmx->host_idt_base + vector;
8568                 entry = gate_offset(*desc);
8569                 asm volatile(
8570 #ifdef CONFIG_X86_64
8571                         "mov %%" _ASM_SP ", %[sp]\n\t"
8572                         "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
8573                         "push $%c[ss]\n\t"
8574                         "push %[sp]\n\t"
8575 #endif
8576                         "pushf\n\t"
8577                         "orl $0x200, (%%" _ASM_SP ")\n\t"
8578                         __ASM_SIZE(push) " $%c[cs]\n\t"
8579                         "call *%[entry]\n\t"
8580                         :
8581 #ifdef CONFIG_X86_64
8582                         [sp]"=&r"(tmp),
8583 #endif
8584                         "+r"(__sp)
8585                         :
8586                         [entry]"r"(entry),
8587                         [ss]"i"(__KERNEL_DS),
8588                         [cs]"i"(__KERNEL_CS)
8589                         );
8590         } else
8591                 local_irq_enable();
8592 }
8593
8594 static bool vmx_has_high_real_mode_segbase(void)
8595 {
8596         return enable_unrestricted_guest || emulate_invalid_guest_state;
8597 }
8598
8599 static bool vmx_mpx_supported(void)
8600 {
8601         return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
8602                 (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
8603 }
8604
8605 static bool vmx_xsaves_supported(void)
8606 {
8607         return vmcs_config.cpu_based_2nd_exec_ctrl &
8608                 SECONDARY_EXEC_XSAVES;
8609 }
8610
8611 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
8612 {
8613         u32 exit_intr_info;
8614         bool unblock_nmi;
8615         u8 vector;
8616         bool idtv_info_valid;
8617
8618         idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
8619
8620         if (cpu_has_virtual_nmis()) {
8621                 if (vmx->nmi_known_unmasked)
8622                         return;
8623                 /*
8624                  * Can't use vmx->exit_intr_info since we're not sure what
8625                  * the exit reason is.
8626                  */
8627                 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
8628                 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
8629                 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
8630                 /*
8631                  * SDM 3: 27.7.1.2 (September 2008)
8632                  * Re-set bit "block by NMI" before VM entry if vmexit caused by
8633                  * a guest IRET fault.
8634                  * SDM 3: 23.2.2 (September 2008)
8635                  * Bit 12 is undefined in any of the following cases:
8636                  *  If the VM exit sets the valid bit in the IDT-vectoring
8637                  *   information field.
8638                  *  If the VM exit is due to a double fault.
8639                  */
8640                 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
8641                     vector != DF_VECTOR && !idtv_info_valid)
8642                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
8643                                       GUEST_INTR_STATE_NMI);
8644                 else
8645                         vmx->nmi_known_unmasked =
8646                                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
8647                                   & GUEST_INTR_STATE_NMI);
8648         } else if (unlikely(vmx->soft_vnmi_blocked))
8649                 vmx->vnmi_blocked_time +=
8650                         ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
8651 }
8652
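/*
 * Re-queue any event (NMI, exception or interrupt) whose delivery was cut
 * short by the VM exit, based on the IDT-vectoring information recorded by
 * the CPU, so that it is re-injected on the next VM entry.
 */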
8653 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
8654                                       u32 idt_vectoring_info,
8655                                       int instr_len_field,
8656                                       int error_code_field)
8657 {
8658         u8 vector;
8659         int type;
8660         bool idtv_info_valid;
8661
8662         idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
8663
8664         vcpu->arch.nmi_injected = false;
8665         kvm_clear_exception_queue(vcpu);
8666         kvm_clear_interrupt_queue(vcpu);
8667
8668         if (!idtv_info_valid)
8669                 return;
8670
8671         kvm_make_request(KVM_REQ_EVENT, vcpu);
8672
8673         vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
8674         type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
8675
8676         switch (type) {
8677         case INTR_TYPE_NMI_INTR:
8678                 vcpu->arch.nmi_injected = true;
8679                 /*
8680                  * SDM 3: 27.7.1.2 (September 2008)
8681                  * Clear bit "block by NMI" before VM entry if a NMI
8682                  * delivery faulted.
8683                  */
8684                 vmx_set_nmi_mask(vcpu, false);
8685                 break;
8686         case INTR_TYPE_SOFT_EXCEPTION:
8687                 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
8688                 /* fall through */
8689         case INTR_TYPE_HARD_EXCEPTION:
8690                 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
8691                         u32 err = vmcs_read32(error_code_field);
8692                         kvm_requeue_exception_e(vcpu, vector, err);
8693                 } else
8694                         kvm_requeue_exception(vcpu, vector);
8695                 break;
8696         case INTR_TYPE_SOFT_INTR:
8697                 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
8698                 /* fall through */
8699         case INTR_TYPE_EXT_INTR:
8700                 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
8701                 break;
8702         default:
8703                 break;
8704         }
8705 }
8706
8707 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
8708 {
8709         __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
8710                                   VM_EXIT_INSTRUCTION_LEN,
8711                                   IDT_VECTORING_ERROR_CODE);
8712 }
8713
8714 static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
8715 {
8716         __vmx_complete_interrupts(vcpu,
8717                                   vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
8718                                   VM_ENTRY_INSTRUCTION_LEN,
8719                                   VM_ENTRY_EXCEPTION_ERROR_CODE);
8720
8721         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
8722 }
8723
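/*
 * Use the VMCS MSR autoload/autostore lists to switch the perf MSRs
 * reported by perf_guest_get_msrs(); entries whose host and guest values
 * match need no switching and are removed from the lists.
 */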
8724 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
8725 {
8726         int i, nr_msrs;
8727         struct perf_guest_switch_msr *msrs;
8728
8729         msrs = perf_guest_get_msrs(&nr_msrs);
8730
8731         if (!msrs)
8732                 return;
8733
8734         for (i = 0; i < nr_msrs; i++)
8735                 if (msrs[i].host == msrs[i].guest)
8736                         clear_atomic_switch_msr(vmx, msrs[i].msr);
8737                 else
8738                         add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
8739                                         msrs[i].host);
8740 }
8741
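/*
 * Convert the hv_deadline_tsc deadline into a VMX-preemption timer value
 * (remaining TSC ticks shifted right by the preemption timer rate) and
 * program it; a deadline already in the past becomes an immediate exit (0).
 */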
8742 void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
8743 {
8744         struct vcpu_vmx *vmx = to_vmx(vcpu);
8745         u64 tscl;
8746         u32 delta_tsc;
8747
8748         if (vmx->hv_deadline_tsc == -1)
8749                 return;
8750
8751         tscl = rdtsc();
8752         if (vmx->hv_deadline_tsc > tscl)
8753                 /* guaranteed to be a 32-bit value because it was checked in set_hv_timer */
8754                 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
8755                         cpu_preemption_timer_multi);
8756         else
8757                 delta_tsc = 0;
8758
8759         vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
8760 }
8761
8762 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
8763 {
8764         struct vcpu_vmx *vmx = to_vmx(vcpu);
8765         unsigned long debugctlmsr, cr4;
8766
8767         /* Record the guest's net vcpu time for enforced NMI injections. */
8768         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
8769                 vmx->entry_time = ktime_get();
8770
8771         /* Don't enter VMX if guest state is invalid; let the exit handler
8772            start emulation until we arrive back at a valid state */
8773         if (vmx->emulation_required)
8774                 return;
8775
8776         if (vmx->ple_window_dirty) {
8777                 vmx->ple_window_dirty = false;
8778                 vmcs_write32(PLE_WINDOW, vmx->ple_window);
8779         }
8780
8781         if (vmx->nested.sync_shadow_vmcs) {
8782                 copy_vmcs12_to_shadow(vmx);
8783                 vmx->nested.sync_shadow_vmcs = false;
8784         }
8785
8786         if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
8787                 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
8788         if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
8789                 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
8790
8791         cr4 = cr4_read_shadow();
8792         if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
8793                 vmcs_writel(HOST_CR4, cr4);
8794                 vmx->host_state.vmcs_host_cr4 = cr4;
8795         }
8796
8797         /* When single-stepping over STI and MOV SS, we must clear the
8798          * corresponding interruptibility bits in the guest state. Otherwise
8799          * vmentry fails as it then expects bit 14 (BS) in pending debug
8800          * exceptions to be set, but that's not correct for the guest debugging
8801          * case. */
8802         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
8803                 vmx_set_interrupt_shadow(vcpu, 0);
8804
8805         if (vmx->guest_pkru_valid)
8806                 __write_pkru(vmx->guest_pkru);
8807
8808         atomic_switch_perf_msrs(vmx);
8809         debugctlmsr = get_debugctlmsr();
8810
8811         vmx_arm_hv_timer(vcpu);
8812
8813         vmx->__launched = vmx->loaded_vmcs->launched;
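	/*
	 * The asm below saves the host registers, loads the guest registers
	 * from vcpu->arch.regs, executes VMLAUNCH or VMRESUME depending on
	 * "launched", and on VM exit stores the guest registers back before
	 * restoring the host rbp/rdx.  %0 is the vcpu_vmx pointer (rcx).
	 */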
8814         asm(
8815                 /* Store host registers */
8816                 "push %%" _ASM_DX "; push %%" _ASM_BP ";"
8817                 "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
8818                 "push %%" _ASM_CX " \n\t"
8819                 "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
8820                 "je 1f \n\t"
8821                 "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
8822                 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
8823                 "1: \n\t"
8824                 /* Reload cr2 if changed */
8825                 "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
8826                 "mov %%cr2, %%" _ASM_DX " \n\t"
8827                 "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
8828                 "je 2f \n\t"
8829                 "mov %%" _ASM_AX", %%cr2 \n\t"
8830                 "2: \n\t"
8831                 /* Check if vmlaunch or vmresume is needed */
8832                 "cmpl $0, %c[launched](%0) \n\t"
8833                 /* Load guest registers.  Don't clobber flags. */
8834                 "mov %c[rax](%0), %%" _ASM_AX " \n\t"
8835                 "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
8836                 "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
8837                 "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
8838                 "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
8839                 "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
8840 #ifdef CONFIG_X86_64
8841                 "mov %c[r8](%0),  %%r8  \n\t"
8842                 "mov %c[r9](%0),  %%r9  \n\t"
8843                 "mov %c[r10](%0), %%r10 \n\t"
8844                 "mov %c[r11](%0), %%r11 \n\t"
8845                 "mov %c[r12](%0), %%r12 \n\t"
8846                 "mov %c[r13](%0), %%r13 \n\t"
8847                 "mov %c[r14](%0), %%r14 \n\t"
8848                 "mov %c[r15](%0), %%r15 \n\t"
8849 #endif
8850                 "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
8851
8852                 /* Enter guest mode */
8853                 "jne 1f \n\t"
8854                 __ex(ASM_VMX_VMLAUNCH) "\n\t"
8855                 "jmp 2f \n\t"
8856                 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
8857                 "2: "
8858                 /* Save guest registers, load host registers, keep flags */
8859                 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
8860                 "pop %0 \n\t"
8861                 "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
8862                 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
8863                 __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
8864                 "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
8865                 "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
8866                 "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
8867                 "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
8868 #ifdef CONFIG_X86_64
8869                 "mov %%r8,  %c[r8](%0) \n\t"
8870                 "mov %%r9,  %c[r9](%0) \n\t"
8871                 "mov %%r10, %c[r10](%0) \n\t"
8872                 "mov %%r11, %c[r11](%0) \n\t"
8873                 "mov %%r12, %c[r12](%0) \n\t"
8874                 "mov %%r13, %c[r13](%0) \n\t"
8875                 "mov %%r14, %c[r14](%0) \n\t"
8876                 "mov %%r15, %c[r15](%0) \n\t"
8877 #endif
8878                 "mov %%cr2, %%" _ASM_AX "   \n\t"
8879                 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
8880
8881                 "pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
8882                 "setbe %c[fail](%0) \n\t"
8883                 ".pushsection .rodata \n\t"
8884                 ".global vmx_return \n\t"
8885                 "vmx_return: " _ASM_PTR " 2b \n\t"
8886                 ".popsection"
8887               : : "c"(vmx), "d"((unsigned long)HOST_RSP),
8888                 [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
8889                 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
8890                 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
8891                 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
8892                 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
8893                 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
8894                 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
8895                 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
8896                 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
8897                 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
8898 #ifdef CONFIG_X86_64
8899                 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
8900                 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
8901                 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
8902                 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
8903                 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
8904                 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
8905                 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
8906                 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
8907 #endif
8908                 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
8909                 [wordsize]"i"(sizeof(ulong))
8910               : "cc", "memory"
8911 #ifdef CONFIG_X86_64
8912                 , "rax", "rbx", "rdi", "rsi"
8913                 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
8914 #else
8915                 , "eax", "ebx", "edi", "esi"
8916 #endif
8917               );
8918
8919         /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
8920         if (debugctlmsr)
8921                 update_debugctlmsr(debugctlmsr);
8922
8923 #ifndef CONFIG_X86_64
8924         /*
8925          * The sysexit path does not restore ds/es, so we must set them to
8926          * a reasonable value ourselves.
8927          *
8928          * We can't defer this to vmx_load_host_state() since that function
8929          * may be executed in interrupt context, which saves and restores segments
8930          * around it, nullifying its effect.
8931          */
8932         loadsegment(ds, __USER_DS);
8933         loadsegment(es, __USER_DS);
8934 #endif
8935
8936         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
8937                                   | (1 << VCPU_EXREG_RFLAGS)
8938                                   | (1 << VCPU_EXREG_PDPTR)
8939                                   | (1 << VCPU_EXREG_SEGMENTS)
8940                                   | (1 << VCPU_EXREG_CR3));
8941         vcpu->arch.regs_dirty = 0;
8942
8943         vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
8944
8945         vmx->loaded_vmcs->launched = 1;
8946
8947         vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
8948
8949         /*
8950          * eager fpu is enabled if PKEY is supported and CR4 has been
8951          * switched back to the host value, so it is safe to read the guest
8952          * PKRU from the current XSAVE area.
8953          */
8954         if (boot_cpu_has(X86_FEATURE_OSPKE)) {
8955                 vmx->guest_pkru = __read_pkru();
8956                 if (vmx->guest_pkru != vmx->host_pkru) {
8957                         vmx->guest_pkru_valid = true;
8958                         __write_pkru(vmx->host_pkru);
8959                 } else
8960                         vmx->guest_pkru_valid = false;
8961         }
8962
8963         /*
8964          * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
8965          * we did not inject a still-pending event to L1 now because of
8966          * nested_run_pending, we need to re-enable this bit.
8967          */
8968         if (vmx->nested.nested_run_pending)
8969                 kvm_make_request(KVM_REQ_EVENT, vcpu);
8970
8971         vmx->nested.nested_run_pending = 0;
8972
8973         vmx_complete_atomic_exit(vmx);
8974         vmx_recover_nmi_blocking(vmx);
8975         vmx_complete_interrupts(vmx);
8976 }
8977
8978 static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
8979 {
8980         struct vcpu_vmx *vmx = to_vmx(vcpu);
8981         int cpu;
8982
8983         if (vmx->loaded_vmcs == &vmx->vmcs01)
8984                 return;
8985
8986         cpu = get_cpu();
8987         vmx->loaded_vmcs = &vmx->vmcs01;
8988         vmx_vcpu_put(vcpu);
8989         vmx_vcpu_load(vcpu, cpu);
8990         vcpu->cpu = cpu;
8991         put_cpu();
8992 }
8993
8994 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
8995 {
8996         struct vcpu_vmx *vmx = to_vmx(vcpu);
8997
8998         if (enable_pml)
8999                 vmx_destroy_pml_buffer(vmx);
9000         free_vpid(vmx->vpid);
9001         leave_guest_mode(vcpu);
9002         vmx_load_vmcs01(vcpu);
9003         free_nested(vmx);
9004         free_loaded_vmcs(vmx->loaded_vmcs);
9005         kfree(vmx->guest_msrs);
9006         kvm_vcpu_uninit(vcpu);
9007         kmem_cache_free(kvm_vcpu_cache, vmx);
9008 }
9009
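/*
 * Allocate and initialize a new vcpu: its VPID, guest MSR save area, vmcs01,
 * optional APIC access page, EPT identity map, nested VMX state and, if
 * enabled, the PML buffer.  Errors unwind the allocations in reverse order.
 */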
9010 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
9011 {
9012         int err;
9013         struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
9014         int cpu;
9015
9016         if (!vmx)
9017                 return ERR_PTR(-ENOMEM);
9018
9019         vmx->vpid = allocate_vpid();
9020
9021         err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
9022         if (err)
9023                 goto free_vcpu;
9024
9025         vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
9026         BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
9027                      > PAGE_SIZE);
9028
9029         err = -ENOMEM;
9030         if (!vmx->guest_msrs) {
9031                 goto uninit_vcpu;
9032         }
9033
9034         vmx->loaded_vmcs = &vmx->vmcs01;
9035         vmx->loaded_vmcs->vmcs = alloc_vmcs();
9036         if (!vmx->loaded_vmcs->vmcs)
9037                 goto free_msrs;
9038         if (!vmm_exclusive)
9039                 kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
9040         loaded_vmcs_init(vmx->loaded_vmcs);
9041         if (!vmm_exclusive)
9042                 kvm_cpu_vmxoff();
9043
9044         cpu = get_cpu();
9045         vmx_vcpu_load(&vmx->vcpu, cpu);
9046         vmx->vcpu.cpu = cpu;
9047         err = vmx_vcpu_setup(vmx);
9048         vmx_vcpu_put(&vmx->vcpu);
9049         put_cpu();
9050         if (err)
9051                 goto free_vmcs;
9052         if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
9053                 err = alloc_apic_access_page(kvm);
9054                 if (err)
9055                         goto free_vmcs;
9056         }
9057
9058         if (enable_ept) {
9059                 if (!kvm->arch.ept_identity_map_addr)
9060                         kvm->arch.ept_identity_map_addr =
9061                                 VMX_EPT_IDENTITY_PAGETABLE_ADDR;
9062                 err = init_rmode_identity_map(kvm);
9063                 if (err)
9064                         goto free_vmcs;
9065         }
9066
9067         if (nested) {
9068                 nested_vmx_setup_ctls_msrs(vmx);
9069                 vmx->nested.vpid02 = allocate_vpid();
9070         }
9071
9072         vmx->nested.posted_intr_nv = -1;
9073         vmx->nested.current_vmptr = -1ull;
9074         vmx->nested.current_vmcs12 = NULL;
9075
9076         /*
9077          * If PML is turned on, failure to enable PML simply results in failure
9078          * to create the vcpu, so we can simplify the PML logic (by avoiding
9079          * having to deal with cases such as PML being enabled on only some of
9080          * the guest's vcpus, etc.).
9081          */
9082         if (enable_pml) {
9083                 err = vmx_create_pml_buffer(vmx);
9084                 if (err)
9085                         goto free_vmcs;
9086         }
9087
9088         vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
9089
9090         return &vmx->vcpu;
9091
9092 free_vmcs:
9093         free_vpid(vmx->nested.vpid02);
9094         free_loaded_vmcs(vmx->loaded_vmcs);
9095 free_msrs:
9096         kfree(vmx->guest_msrs);
9097 uninit_vcpu:
9098         kvm_vcpu_uninit(&vmx->vcpu);
9099 free_vcpu:
9100         free_vpid(vmx->vpid);
9101         kmem_cache_free(kvm_vcpu_cache, vmx);
9102         return ERR_PTR(err);
9103 }
9104
9105 static void __init vmx_check_processor_compat(void *rtn)
9106 {
9107         struct vmcs_config vmcs_conf;
9108
9109         *(int *)rtn = 0;
9110         if (setup_vmcs_config(&vmcs_conf) < 0)
9111                 *(int *)rtn = -EIO;
9112         if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
9113                 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
9114                                 smp_processor_id());
9115                 *(int *)rtn = -EIO;
9116         }
9117 }
9118
9119 static int get_ept_level(void)
9120 {
9121         return VMX_EPT_DEFAULT_GAW + 1;
9122 }
9123
9124 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
9125 {
9126         u8 cache;
9127         u64 ipat = 0;
9128
9129         /* For the VT-d and EPT combination:
9130          * 1. MMIO: always map as UC
9131          * 2. EPT with VT-d:
9132          *   a. VT-d without the snooping control feature: cache correctness
9133          *      cannot be guaranteed, so try to trust the guest.
9134          *   b. VT-d with the snooping control feature: the VT-d engine's
9135          *      snooping control guarantees cache correctness, so map as WB
9136          *      to stay consistent with the host (same as case 3).
9137          * 3. EPT without VT-d: always map as WB and set IPAT=1 to stay
9138          *    consistent with the host MTRRs.
9139          */
9140         if (is_mmio) {
9141                 cache = MTRR_TYPE_UNCACHABLE;
9142                 goto exit;
9143         }
9144
9145         if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
9146                 ipat = VMX_EPT_IPAT_BIT;
9147                 cache = MTRR_TYPE_WRBACK;
9148                 goto exit;
9149         }
9150
9151         if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
9152                 ipat = VMX_EPT_IPAT_BIT;
9153                 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
9154                         cache = MTRR_TYPE_WRBACK;
9155                 else
9156                         cache = MTRR_TYPE_UNCACHABLE;
9157                 goto exit;
9158         }
9159
9160         cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
9161
9162 exit:
9163         return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
9164 }
9165
9166 static int vmx_get_lpage_level(void)
9167 {
9168         if (enable_ept && !cpu_has_vmx_ept_1g_page())
9169                 return PT_DIRECTORY_LEVEL;
9170         else
9171                 /* Both shadow paging and EPT support 1GB pages */
9172                 return PT_PDPE_LEVEL;
9173 }
9174
9175 static void vmcs_set_secondary_exec_control(u32 new_ctl)
9176 {
9177         /*
9178          * These bits in the secondary execution controls field
9179          * are dynamic, the others are mostly based on the hypervisor
9180          * architecture and the guest's CPUID.  Do not touch the
9181          * dynamic bits.
9182          */
9183         u32 mask =
9184                 SECONDARY_EXEC_SHADOW_VMCS |
9185                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
9186                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
9187
9188         u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
9189
9190         vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
9191                      (new_ctl & ~mask) | (cur_ctl & mask));
9192 }
9193
9194 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
9195 {
9196         struct kvm_cpuid_entry2 *best;
9197         struct vcpu_vmx *vmx = to_vmx(vcpu);
9198         u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx);
9199
9200         if (vmx_rdtscp_supported()) {
9201                 bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu);
9202                 if (!rdtscp_enabled)
9203                         secondary_exec_ctl &= ~SECONDARY_EXEC_RDTSCP;
9204
9205                 if (nested) {
9206                         if (rdtscp_enabled)
9207                                 vmx->nested.nested_vmx_secondary_ctls_high |=
9208                                         SECONDARY_EXEC_RDTSCP;
9209                         else
9210                                 vmx->nested.nested_vmx_secondary_ctls_high &=
9211                                         ~SECONDARY_EXEC_RDTSCP;
9212                 }
9213         }
9214
9215         /* Expose INVPCID only when PCID is also exposed */
9216         best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
9217         if (vmx_invpcid_supported() &&
9218             (!best || !(best->ebx & bit(X86_FEATURE_INVPCID)) ||
9219             !guest_cpuid_has_pcid(vcpu))) {
9220                 secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID;
9221
9222                 if (best)
9223                         best->ebx &= ~bit(X86_FEATURE_INVPCID);
9224         }
9225
9226         if (cpu_has_secondary_exec_ctrls())
9227                 vmcs_set_secondary_exec_control(secondary_exec_ctl);
9228
9229         if (static_cpu_has(X86_FEATURE_PCOMMIT) && nested) {
9230                 if (guest_cpuid_has_pcommit(vcpu))
9231                         vmx->nested.nested_vmx_secondary_ctls_high |=
9232                                 SECONDARY_EXEC_PCOMMIT;
9233                 else
9234                         vmx->nested.nested_vmx_secondary_ctls_high &=
9235                                 ~SECONDARY_EXEC_PCOMMIT;
9236         }
9237
9238         if (nested_vmx_allowed(vcpu))
9239                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
9240                         FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
9241         else
9242                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
9243                         ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
9244 }
9245
9246 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
9247 {
9248         if (func == 1 && nested)
9249                 entry->ecx |= bit(X86_FEATURE_VMX);
9250 }
9251
9252 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
9253                 struct x86_exception *fault)
9254 {
9255         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
9256         u32 exit_reason;
9257
9258         if (fault->error_code & PFERR_RSVD_MASK)
9259                 exit_reason = EXIT_REASON_EPT_MISCONFIG;
9260         else
9261                 exit_reason = EXIT_REASON_EPT_VIOLATION;
9262         nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification);
9263         vmcs12->guest_physical_address = fault->address;
9264 }
9265
9266 /* Callbacks for nested_ept_init_mmu_context: */
9267
9268 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
9269 {
9270         /* return the page table to be shadowed - in our case, EPT12 */
9271         return get_vmcs12(vcpu)->ept_pointer;
9272 }
9273
9274 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
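/*
 * Point the MMU at a shadow-EPT context that walks L1's EPT tables (EPT12)
 * and reflects EPT faults back to L1 as EPT violation/misconfiguration exits.
 */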
9275 {
9276         WARN_ON(mmu_is_nested(vcpu));
9277         kvm_init_shadow_ept_mmu(vcpu,
9278                         to_vmx(vcpu)->nested.nested_vmx_ept_caps &
9279                         VMX_EPT_EXECUTE_ONLY_BIT);
9280         vcpu->arch.mmu.set_cr3           = vmx_set_cr3;
9281         vcpu->arch.mmu.get_cr3           = nested_ept_get_cr3;
9282         vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
9283
9284         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
9285 }
9286
9287 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
9288 {
9289         vcpu->arch.walk_mmu = &vcpu->arch.mmu;
9290 }
9291
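/*
 * Decide whether a page fault with this error code must be reflected to L1:
 * the PFEC mask/match pair selects a subset of faults, and the PF bit in the
 * exception bitmap says whether that subset or its complement causes a
 * vmexit, hence the XOR below.
 */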
9292 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
9293                                             u16 error_code)
9294 {
9295         bool inequality, bit;
9296
9297         bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
9298         inequality =
9299                 (error_code & vmcs12->page_fault_error_code_mask) !=
9300                  vmcs12->page_fault_error_code_match;
9301         return inequality ^ bit;
9302 }
9303
9304 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
9305                 struct x86_exception *fault)
9306 {
9307         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
9308
9309         WARN_ON(!is_guest_mode(vcpu));
9310
9311         if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code))
9312                 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
9313                                   vmcs_read32(VM_EXIT_INTR_INFO),
9314                                   vmcs_readl(EXIT_QUALIFICATION));
9315         else
9316                 kvm_inject_page_fault(vcpu, fault);
9317 }
9318
9319 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
9320                                         struct vmcs12 *vmcs12)
9321 {
9322         struct vcpu_vmx *vmx = to_vmx(vcpu);
9323         int maxphyaddr = cpuid_maxphyaddr(vcpu);
9324
9325         if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
9326                 if (!PAGE_ALIGNED(vmcs12->apic_access_addr) ||
9327                     vmcs12->apic_access_addr >> maxphyaddr)
9328                         return false;
9329
9330                 /*
9331                  * Translate L1 physical address to host physical
9332                  * address for vmcs02. Keep the page pinned, so this
9333                  * physical address remains valid. We keep a reference
9334                  * to it so we can release it later.
9335                  */
9336                 if (vmx->nested.apic_access_page) /* shouldn't happen */
9337                         nested_release_page(vmx->nested.apic_access_page);
9338                 vmx->nested.apic_access_page =
9339                         nested_get_page(vcpu, vmcs12->apic_access_addr);
9340         }
9341
9342         if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
9343                 if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr) ||
9344                     vmcs12->virtual_apic_page_addr >> maxphyaddr)
9345                         return false;
9346
9347                 if (vmx->nested.virtual_apic_page) /* shouldn't happen */
9348                         nested_release_page(vmx->nested.virtual_apic_page);
9349                 vmx->nested.virtual_apic_page =
9350                         nested_get_page(vcpu, vmcs12->virtual_apic_page_addr);
9351
9352                 /*
9353                  * Failing the vm entry is _not_ what the processor does
9354                  * but it's basically the only possibility we have.
9355                  * We could still enter the guest if CR8 load exits are
9356                  * enabled, CR8 store exits are enabled, and virtualize APIC
9357                  * access is disabled; in this case the processor would never
9358                  * use the TPR shadow and we could simply clear the bit from
9359                  * the execution control.  But such a configuration is useless,
9360                  * so let's keep the code simple.
9361                  */
9362                 if (!vmx->nested.virtual_apic_page)
9363                         return false;
9364         }
9365
9366         if (nested_cpu_has_posted_intr(vmcs12)) {
9367                 if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64) ||
9368                     vmcs12->posted_intr_desc_addr >> maxphyaddr)
9369                         return false;
9370
9371                 if (vmx->nested.pi_desc_page) { /* shouldn't happen */
9372                         kunmap(vmx->nested.pi_desc_page);
9373                         nested_release_page(vmx->nested.pi_desc_page);
9374                 }
9375                 vmx->nested.pi_desc_page =
9376                         nested_get_page(vcpu, vmcs12->posted_intr_desc_addr);
9377                 if (!vmx->nested.pi_desc_page)
9378                         return false;
9379
9380                 vmx->nested.pi_desc =
9381                         (struct pi_desc *)kmap(vmx->nested.pi_desc_page);
9382                 if (!vmx->nested.pi_desc) {
9383                         nested_release_page_clean(vmx->nested.pi_desc_page);
9384                         return false;
9385                 }
9386                 vmx->nested.pi_desc =
9387                         (struct pi_desc *)((void *)vmx->nested.pi_desc +
9388                         (unsigned long)(vmcs12->posted_intr_desc_addr &
9389                         (PAGE_SIZE - 1)));
9390         }
9391
9392         return true;
9393 }
9394
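/*
 * Emulate L1's VMX-preemption timer with an hrtimer: the timer value is
 * converted to nanoseconds as (value << rate) * 10^6 / virtual_tsc_khz, and
 * very short timeouts fire immediately since hrtimer_start does not
 * guarantee that.
 */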
9395 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
9396 {
9397         u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
9398         struct vcpu_vmx *vmx = to_vmx(vcpu);
9399
9400         if (vcpu->arch.virtual_tsc_khz == 0)
9401                 return;
9402
9403         /* Make sure short timeouts reliably trigger an immediate vmexit.
9404          * hrtimer_start does not guarantee this. */
9405         if (preemption_timeout <= 1) {
9406                 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
9407                 return;
9408         }
9409
9410         preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
9411         preemption_timeout *= 1000000;
9412         do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
9413         hrtimer_start(&vmx->nested.preemption_timer,
9414                       ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
9415 }
9416
9417 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
9418                                                 struct vmcs12 *vmcs12)
9419 {
9420         int maxphyaddr;
9421         u64 addr;
9422
9423         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
9424                 return 0;
9425
9426         if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) {
9427                 WARN_ON(1);
9428                 return -EINVAL;
9429         }
9430         maxphyaddr = cpuid_maxphyaddr(vcpu);
9431
9432         if (!PAGE_ALIGNED(vmcs12->msr_bitmap) ||
9433            ((addr + PAGE_SIZE) >> maxphyaddr))
9434                 return -EINVAL;
9435
9436         return 0;
9437 }
9438
9439 /*
9440  * Merge L0's and L1's MSR bitmaps; return false to indicate that
9441  * we do not want to use the hardware MSR bitmap.
9442  */
9443 static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
9444                                                struct vmcs12 *vmcs12)
9445 {
9446         int msr;
9447         struct page *page;
9448         unsigned long *msr_bitmap;
9449
9450         if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
9451                 return false;
9452
9453         page = nested_get_page(vcpu, vmcs12->msr_bitmap);
9454         if (!page) {
9455                 WARN_ON(1);
9456                 return false;
9457         }
9458         msr_bitmap = (unsigned long *)kmap(page);
9459         if (!msr_bitmap) {
9460                 nested_release_page_clean(page);
9461                 WARN_ON(1);
9462                 return false;
9463         }
9464
9465         if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
9466                 if (nested_cpu_has_apic_reg_virt(vmcs12))
9467                         for (msr = 0x800; msr <= 0x8ff; msr++)
9468                                 nested_vmx_disable_intercept_for_msr(
9469                                         msr_bitmap,
9470                                         vmx_msr_bitmap_nested,
9471                                         msr, MSR_TYPE_R);
9472                 /* TPR is allowed */
9473                 nested_vmx_disable_intercept_for_msr(msr_bitmap,
9474                                 vmx_msr_bitmap_nested,
9475                                 APIC_BASE_MSR + (APIC_TASKPRI >> 4),
9476                                 MSR_TYPE_R | MSR_TYPE_W);
9477                 if (nested_cpu_has_vid(vmcs12)) {
9478                         /* EOI and self-IPI are allowed */
9479                         nested_vmx_disable_intercept_for_msr(
9480                                 msr_bitmap,
9481                                 vmx_msr_bitmap_nested,
9482                                 APIC_BASE_MSR + (APIC_EOI >> 4),
9483                                 MSR_TYPE_W);
9484                         nested_vmx_disable_intercept_for_msr(
9485                                 msr_bitmap,
9486                                 vmx_msr_bitmap_nested,
9487                                 APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
9488                                 MSR_TYPE_W);
9489                 }
9490         } else {
9491                 /*
9492                  * Enable the read intercept for all x2APIC
9493                  * MSRs.  We should not rely on vmcs12 for any
9494                  * optimizations here; it may have been modified
9495                  * by L1.
9496                  */
9497                 for (msr = 0x800; msr <= 0x8ff; msr++)
9498                         __vmx_enable_intercept_for_msr(
9499                                 vmx_msr_bitmap_nested,
9500                                 msr,
9501                                 MSR_TYPE_R);
9502
9503                 __vmx_enable_intercept_for_msr(
9504                                 vmx_msr_bitmap_nested,
9505                                 APIC_BASE_MSR + (APIC_TASKPRI >> 4),
9506                                 MSR_TYPE_W);
9507                 __vmx_enable_intercept_for_msr(
9508                                 vmx_msr_bitmap_nested,
9509                                 APIC_BASE_MSR + (APIC_EOI >> 4),
9510                                 MSR_TYPE_W);
9511                 __vmx_enable_intercept_for_msr(
9512                                 vmx_msr_bitmap_nested,
9513                                 APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
9514                                 MSR_TYPE_W);
9515         }
9516         kunmap(page);
9517         nested_release_page_clean(page);
9518
9519         return true;
9520 }
9521
9522 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
9523                                            struct vmcs12 *vmcs12)
9524 {
9525         if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
9526             !nested_cpu_has_apic_reg_virt(vmcs12) &&
9527             !nested_cpu_has_vid(vmcs12) &&
9528             !nested_cpu_has_posted_intr(vmcs12))
9529                 return 0;
9530
9531         /*
9532          * If virtualize x2apic mode is enabled,
9533          * virtualize apic access must be disabled.
9534          */
9535         if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
9536             nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
9537                 return -EINVAL;
9538
9539         /*
9540          * If virtual interrupt delivery is enabled,
9541          * we must exit on external interrupts.
9542          */
9543         if (nested_cpu_has_vid(vmcs12) &&
9544            !nested_exit_on_intr(vcpu))
9545                 return -EINVAL;
9546
9547         /*
9548          * Bits 15:8 must be zero in posted_intr_nv;
9549          * the descriptor address has already been checked
9550          * in nested_get_vmcs12_pages.
9551          */
9552         if (nested_cpu_has_posted_intr(vmcs12) &&
9553            (!nested_cpu_has_vid(vmcs12) ||
9554             !nested_exit_intr_ack_set(vcpu) ||
9555             vmcs12->posted_intr_nv & 0xff00))
9556                 return -EINVAL;
9557
9558         /* tpr shadow is needed by all apicv features. */
9559         if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
9560                 return -EINVAL;
9561
9562         return 0;
9563 }
9564
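/*
 * Validate one MSR switch area described by a count/address field pair: the
 * address must be 16-byte aligned and the whole array must fit below the
 * guest's physical address width.
 */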
9565 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
9566                                        unsigned long count_field,
9567                                        unsigned long addr_field)
9568 {
9569         int maxphyaddr;
9570         u64 count, addr;
9571
9572         if (vmcs12_read_any(vcpu, count_field, &count) ||
9573             vmcs12_read_any(vcpu, addr_field, &addr)) {
9574                 WARN_ON(1);
9575                 return -EINVAL;
9576         }
9577         if (count == 0)
9578                 return 0;
9579         maxphyaddr = cpuid_maxphyaddr(vcpu);
9580         if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
9581             (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
9582                 pr_warn_ratelimited(
9583                         "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
9584                         addr_field, maxphyaddr, count, addr);
9585                 return -EINVAL;
9586         }
9587         return 0;
9588 }
9589
9590 static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
9591                                                 struct vmcs12 *vmcs12)
9592 {
9593         if (vmcs12->vm_exit_msr_load_count == 0 &&
9594             vmcs12->vm_exit_msr_store_count == 0 &&
9595             vmcs12->vm_entry_msr_load_count == 0)
9596                 return 0; /* Fast path */
9597         if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
9598                                         VM_EXIT_MSR_LOAD_ADDR) ||
9599             nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
9600                                         VM_EXIT_MSR_STORE_ADDR) ||
9601             nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
9602                                         VM_ENTRY_MSR_LOAD_ADDR))
9603                 return -EINVAL;
9604         return 0;
9605 }
9606
9607 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
9608                                        struct vmx_msr_entry *e)
9609 {
9610         /* x2APIC MSR accesses are not allowed */
9611         if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
9612                 return -EINVAL;
9613         if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
9614             e->index == MSR_IA32_UCODE_REV)
9615                 return -EINVAL;
9616         if (e->reserved != 0)
9617                 return -EINVAL;
9618         return 0;
9619 }
9620
9621 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
9622                                      struct vmx_msr_entry *e)
9623 {
9624         if (e->index == MSR_FS_BASE ||
9625             e->index == MSR_GS_BASE ||
9626             e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
9627             nested_vmx_msr_check_common(vcpu, e))
9628                 return -EINVAL;
9629         return 0;
9630 }
9631
9632 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
9633                                       struct vmx_msr_entry *e)
9634 {
9635         if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
9636             nested_vmx_msr_check_common(vcpu, e))
9637                 return -EINVAL;
9638         return 0;
9639 }
9640
9641 /*
9642  * Load the guest's/host's MSRs at nested entry/exit.
9643  * Return 0 on success, or the 1-based index of the failing entry on failure.
9644  */
9645 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
9646 {
9647         u32 i;
9648         struct vmx_msr_entry e;
9649         struct msr_data msr;
9650
9651         msr.host_initiated = false;
9652         for (i = 0; i < count; i++) {
9653                 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
9654                                         &e, sizeof(e))) {
9655                         pr_warn_ratelimited(
9656                                 "%s cannot read MSR entry (%u, 0x%08llx)\n",
9657                                 __func__, i, gpa + i * sizeof(e));
9658                         goto fail;
9659                 }
9660                 if (nested_vmx_load_msr_check(vcpu, &e)) {
9661                         pr_warn_ratelimited(
9662                                 "%s check failed (%u, 0x%x, 0x%x)\n",
9663                                 __func__, i, e.index, e.reserved);
9664                         goto fail;
9665                 }
9666                 msr.index = e.index;
9667                 msr.data = e.value;
9668                 if (kvm_set_msr(vcpu, &msr)) {
9669                         pr_warn_ratelimited(
9670                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
9671                                 __func__, i, e.index, e.value);
9672                         goto fail;
9673                 }
9674         }
9675         return 0;
9676 fail:
9677         return i + 1;
9678 }
9679
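/*
 * Store the requested MSRs into the VM-exit MSR-store area supplied by the
 * guest, validating each entry; return 0 on success or -EINVAL on the first
 * invalid or inaccessible entry.
 */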
9680 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
9681 {
9682         u32 i;
9683         struct vmx_msr_entry e;
9684
9685         for (i = 0; i < count; i++) {
9686                 struct msr_data msr_info;
9687                 if (kvm_vcpu_read_guest(vcpu,
9688                                         gpa + i * sizeof(e),
9689                                         &e, 2 * sizeof(u32))) {
9690                         pr_warn_ratelimited(
9691                                 "%s cannot read MSR entry (%u, 0x%08llx)\n",
9692                                 __func__, i, gpa + i * sizeof(e));
9693                         return -EINVAL;
9694                 }
9695                 if (nested_vmx_store_msr_check(vcpu, &e)) {
9696                         pr_warn_ratelimited(
9697                                 "%s check failed (%u, 0x%x, 0x%x)\n",
9698                                 __func__, i, e.index, e.reserved);
9699                         return -EINVAL;
9700                 }
9701                 msr_info.host_initiated = false;
9702                 msr_info.index = e.index;
9703                 if (kvm_get_msr(vcpu, &msr_info)) {
9704                         pr_warn_ratelimited(
9705                                 "%s cannot read MSR (%u, 0x%x)\n",
9706                                 __func__, i, e.index);
9707                         return -EINVAL;
9708                 }
9709                 if (kvm_vcpu_write_guest(vcpu,
9710                                          gpa + i * sizeof(e) +
9711                                              offsetof(struct vmx_msr_entry, value),
9712                                          &msr_info.data, sizeof(msr_info.data))) {
9713                         pr_warn_ratelimited(
9714                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
9715                                 __func__, i, e.index, msr_info.data);
9716                         return -EINVAL;
9717                 }
9718         }
9719         return 0;
9720 }
9721
9722 /*
9723  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
9724  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
9725  * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
9726  * guest in a way that will both be appropriate to L1's requests, and our
9727  * needs. In addition to modifying the active vmcs (which is vmcs02), this
9728  * function also has additional necessary side-effects, like setting various
9729  * vcpu->arch fields.
9730  */
9731 static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
9732 {
9733         struct vcpu_vmx *vmx = to_vmx(vcpu);
9734         u32 exec_control;
9735
9736         vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
9737         vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
9738         vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
9739         vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
9740         vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
9741         vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
9742         vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
9743         vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
9744         vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
9745         vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
9746         vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
9747         vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
9748         vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
9749         vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
9750         vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
9751         vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
9752         vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
9753         vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
9754         vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
9755         vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
9756         vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
9757         vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
9758         vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
9759         vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
9760         vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
9761         vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
9762         vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
9763         vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
9764         vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
9765         vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
9766         vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
9767         vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
9768         vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
9769         vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
9770         vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
9771         vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
9772
9773         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
9774                 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
9775                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
9776         } else {
9777                 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
9778                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
9779         }
9780         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
9781                 vmcs12->vm_entry_intr_info_field);
9782         vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
9783                 vmcs12->vm_entry_exception_error_code);
9784         vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
9785                 vmcs12->vm_entry_instruction_len);
9786         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
9787                 vmcs12->guest_interruptibility_info);
9788         vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
9789         vmx_set_rflags(vcpu, vmcs12->guest_rflags);
9790         vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
9791                 vmcs12->guest_pending_dbg_exceptions);
9792         vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
9793         vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
9794
9795         if (nested_cpu_has_xsaves(vmcs12))
9796                 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
9797         vmcs_write64(VMCS_LINK_POINTER, -1ull);
9798
9799         exec_control = vmcs12->pin_based_vm_exec_control;
9800         exec_control |= vmcs_config.pin_based_exec_ctrl;
9801         exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
9802
9803         if (nested_cpu_has_posted_intr(vmcs12)) {
9804                 /*
9805                  * Note that we use L0's vector here and in
9806                  * vmx_deliver_nested_posted_interrupt.
9807                  */
9808                 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
9809                 vmx->nested.pi_pending = false;
9810                 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
9811                 vmcs_write64(POSTED_INTR_DESC_ADDR,
9812                         page_to_phys(vmx->nested.pi_desc_page) +
9813                         (unsigned long)(vmcs12->posted_intr_desc_addr &
9814                         (PAGE_SIZE - 1)));
9815         } else
9816                 exec_control &= ~PIN_BASED_POSTED_INTR;
9817
9818         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
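        /*
         * Illustrative note (not part of the original source): the pin-based
         * controls used while L2 runs are effectively
         *
         *   (vmcs12->pin_based_vm_exec_control | vmcs_config.pin_based_exec_ctrl)
         *       & ~PIN_BASED_VMX_PREEMPTION_TIMER  [& ~PIN_BASED_POSTED_INTR]
         *
         * i.e. the union of L0's and L1's wishes, with L1's preemption timer
         * emulated via the hrtimer started just below and posted interrupts
         * dropped unless L1 actually uses them.
         */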
9819
9820         vmx->nested.preemption_timer_expired = false;
9821         if (nested_cpu_has_preemption_timer(vmcs12))
9822                 vmx_start_preemption_timer(vcpu);
9823
9824         /*
9825          * Whether page-faults are trapped is determined by a combination of
9826          * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
9827          * If enable_ept, L0 doesn't care about page faults and we should
9828          * set all of these to L1's desires. However, if !enable_ept, L0 does
9829          * care about (at least some) page faults, and because it is not easy
9830          * (if at all possible?) to merge L0 and L1's desires, we simply ask
9831          * to exit on each and every L2 page fault. This is done by setting
9832          * MASK=MATCH=0 and (see below) EB.PF=1.
9833          * Note that below we don't need special code to set EB.PF beyond the
9834          * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
9835          * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
9836          * !enable_ept, EB.PF is 1, so the "or" will always be 1.
9837          *
9838          * A problem with this approach (when !enable_ept) is that L1 may be
9839          * injected with more page faults than it asked for. This could have
9840          * caused problems, but in practice existing hypervisors don't care.
9841          * To fix this, we will need to emulate the PFEC checking (on the L1
9842          * page tables), using walk_addr(), when injecting PFs to L1.
9843          */
9844         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
9845                 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
9846         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
9847                 enable_ept ? vmcs12->page_fault_error_code_match : 0);
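        /*
         * Illustrative note (not part of the original source): per the SDM, a
         * page fault with error code PFEC causes a VM exit iff
         *
         *     ((PFEC & PFEC_MASK) == PFEC_MATCH) == EXCEPTION_BITMAP.PF
         *
         * With MASK == MATCH == 0 the left-hand side is always true, so in the
         * !enable_ept case (where the exception-bitmap merge forces EB.PF = 1)
         * every L2 page fault exits to L0, exactly as described above.
         */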
9848
9849         if (cpu_has_secondary_exec_ctrls()) {
9850                 exec_control = vmx_secondary_exec_control(vmx);
9851
9852                 /* Take the following fields only from vmcs12 */
9853                 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
9854                                   SECONDARY_EXEC_RDTSCP |
9855                                   SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
9856                                   SECONDARY_EXEC_APIC_REGISTER_VIRT |
9857                                   SECONDARY_EXEC_PCOMMIT);
9858                 if (nested_cpu_has(vmcs12,
9859                                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
9860                         exec_control |= vmcs12->secondary_vm_exec_control;
9861
9862                 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
9863                         /*
9864                          * If translation failed, no matter: This feature asks
9865                          * to exit when accessing the given address, and if it
9866                          * can never be accessed, this feature won't do
9867                          * anything anyway.
9868                          */
9869                         if (!vmx->nested.apic_access_page)
9870                                 exec_control &=
9871                                   ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
9872                         else
9873                                 vmcs_write64(APIC_ACCESS_ADDR,
9874                                   page_to_phys(vmx->nested.apic_access_page));
9875                 } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) &&
9876                             cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
9877                         exec_control |=
9878                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
9879                         kvm_vcpu_reload_apic_access_page(vcpu);
9880                 }
9881
9882                 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
9883                         vmcs_write64(EOI_EXIT_BITMAP0,
9884                                 vmcs12->eoi_exit_bitmap0);
9885                         vmcs_write64(EOI_EXIT_BITMAP1,
9886                                 vmcs12->eoi_exit_bitmap1);
9887                         vmcs_write64(EOI_EXIT_BITMAP2,
9888                                 vmcs12->eoi_exit_bitmap2);
9889                         vmcs_write64(EOI_EXIT_BITMAP3,
9890                                 vmcs12->eoi_exit_bitmap3);
9891                         vmcs_write16(GUEST_INTR_STATUS,
9892                                 vmcs12->guest_intr_status);
9893                 }
9894
9895                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
9896         }
9897
9898
9899         /*
9900          * Set host-state according to L0's settings (vmcs12 is irrelevant here).
9901          * Some constant fields are set here by vmx_set_constant_host_state().
9902          * Other fields are different per CPU, and will be set later when
9903          * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
9904          */
9905         vmx_set_constant_host_state(vmx);
9906
9907         /*
9908          * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
9909          * entry, but only if the current (host) sp changed from the value
9910          * we wrote last (vmx->host_rsp). This cache is no longer relevant
9911          * if we switch vmcs, and rather than hold a separate cache per vmcs,
9912          * here we just force the write to happen on entry.
9913          */
9914         vmx->host_rsp = 0;
9915
9916         exec_control = vmx_exec_control(vmx); /* L0's desires */
9917         exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
9918         exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
9919         exec_control &= ~CPU_BASED_TPR_SHADOW;
9920         exec_control |= vmcs12->cpu_based_vm_exec_control;
9921
9922         if (exec_control & CPU_BASED_TPR_SHADOW) {
9923                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
9924                                 page_to_phys(vmx->nested.virtual_apic_page));
9925                 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
9926         }
9927
9928         if (cpu_has_vmx_msr_bitmap() &&
9929             exec_control & CPU_BASED_USE_MSR_BITMAPS) {
9930                 nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
9931                 /* MSR_BITMAP will be set by the following vmx_set_efer() call. */
9932         } else
9933                 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
9934
9935         /*
9936          * Merging of IO bitmap not currently supported.
9937          * Rather, exit every time.
9938          */
9939         exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
9940         exec_control |= CPU_BASED_UNCOND_IO_EXITING;
9941
9942         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
9943
9944         /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
9945          * bitwise-or of what L1 wants to trap for L2, and what we want to
9946          * trap. Note that CR0.TS also needs updating - we do this later.
9947          */
9948         update_exception_bitmap(vcpu);
9949         vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
9950         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
9951
9952         /* L2->L1 exit controls are emulated - the hardware exit is to L0 so
9953          * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
9954          * bits are further modified by vmx_set_efer() below.
9955          */
9956         vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
9957
9958         /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
9959          * emulated by vmx_set_efer(), below.
9960          */
9961         vm_entry_controls_init(vmx, 
9962                 (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
9963                         ~VM_ENTRY_IA32E_MODE) |
9964                 (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
9965
9966         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) {
9967                 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
9968                 vcpu->arch.pat = vmcs12->guest_ia32_pat;
9969         } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
9970                 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
9971
9972
9973         set_cr4_guest_host_mask(vmx);
9974
9975         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
9976                 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
9977
9978         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
9979                 vmcs_write64(TSC_OFFSET,
9980                         vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
9981         else
9982                 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
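        /*
         * Illustrative sketch (not part of the original source), ignoring TSC
         * scaling: with the offsets written above, L2 observes
         *
         *     L2_TSC = host_TSC + vmcs01_tsc_offset + vmcs12->tsc_offset
         *
         * i.e. L0's offset for L1 plus L1's additional offset for L2; without
         * CPU_BASED_USE_TSC_OFFSETING, L2 simply sees the same TSC as L1.
         */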
9983
9984         if (enable_vpid) {
9985                 /*
9986                  * There is no direct mapping between vpid02 and vpid12; vpid02
9987                  * is per-vCPU, owned by L0, and reused whenever the value of
9988                  * vpid12 changes (with one INVVPID during nested vmentry).
9989                  * vpid12 is allocated by L1 for L2, so it does not influence
9990                  * the global bitmap (used for vpid01 and vpid02 allocation),
9991                  * even if L1 spawns a lot of nested vCPUs.
9992                  */
9993                 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
9994                         vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
9995                         if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
9996                                 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
9997                                 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
9998                         }
9999                 } else {
10000                         vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
10001                         vmx_flush_tlb(vcpu);
10002                 }
10003
10004         }
10005
10006         if (nested_cpu_has_ept(vmcs12)) {
10007                 kvm_mmu_unload(vcpu);
10008                 nested_ept_init_mmu_context(vcpu);
10009         }
10010
10011         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
10012                 vcpu->arch.efer = vmcs12->guest_ia32_efer;
10013         else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
10014                 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
10015         else
10016                 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
10017         /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
10018         vmx_set_efer(vcpu, vcpu->arch.efer);
10019
10020         /*
10021          * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly with a modified
10022          * TS bit (for lazy fpu) and with the bits we consider mandatory set.
10023          * The CR0_READ_SHADOW is what L2 should expect to read given the
10024          * specifications made by L1; it's not enough to take
10025          * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
10026          * have more bits set than L1 expected.
10027          */
10028         vmx_set_cr0(vcpu, vmcs12->guest_cr0);
10029         vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
10030
10031         vmx_set_cr4(vcpu, vmcs12->guest_cr4);
10032         vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
10033
10034         /* shadow page tables on either EPT or shadow page tables */
10035         kvm_set_cr3(vcpu, vmcs12->guest_cr3);
10036         kvm_mmu_reset_context(vcpu);
10037
10038         if (!enable_ept)
10039                 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
10040
10041         /*
10042          * L1 may access L2's PDPTRs, so save them in order to construct vmcs12
10043          */
10044         if (enable_ept) {
10045                 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
10046                 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
10047                 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
10048                 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
10049         }
10050
10051         kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
10052         kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
10053 }
10054
10055 /*
10056  * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
10057  * for running an L2 nested guest.
10058  */
10059 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
10060 {
10061         struct vmcs12 *vmcs12;
10062         struct vcpu_vmx *vmx = to_vmx(vcpu);
10063         int cpu;
10064         struct loaded_vmcs *vmcs02;
10065         bool ia32e;
10066         u32 msr_entry_idx;
10067
10068         if (!nested_vmx_check_permission(vcpu) ||
10069             !nested_vmx_check_vmcs12(vcpu))
10070                 return 1;
10071
10072         skip_emulated_instruction(vcpu);
10073         vmcs12 = get_vmcs12(vcpu);
10074
10075         if (enable_shadow_vmcs)
10076                 copy_shadow_to_vmcs12(vmx);
10077
10078         /*
10079          * The nested entry process starts with enforcing various prerequisites
10080          * on vmcs12 as required by the Intel SDM, acting appropriately when
10081          * they fail: as the SDM explains, some conditions should cause the
10082          * instruction to fail, while others will cause the instruction to seem
10083          * to succeed, but return an EXIT_REASON_INVALID_STATE.
10084          * To speed up the normal (success) code path, we should avoid checking
10085          * for misconfigurations which will anyway be caught by the processor
10086          * when using the merged vmcs02.
10087          */
10088         if (vmcs12->launch_state == launch) {
10089                 nested_vmx_failValid(vcpu,
10090                         launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
10091                                : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
10092                 return 1;
10093         }
10094
10095         if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
10096             vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) {
10097                 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
10098                 return 1;
10099         }
10100
10101         if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
10102                 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
10103                 return 1;
10104         }
10105
10106         if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
10107                 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
10108                 return 1;
10109         }
10110
10111         if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) {
10112                 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
10113                 return 1;
10114         }
10115
10116         if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
10117                 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
10118                 return 1;
10119         }
10120
10121         if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
10122                                 vmx->nested.nested_vmx_true_procbased_ctls_low,
10123                                 vmx->nested.nested_vmx_procbased_ctls_high) ||
10124             !vmx_control_verify(vmcs12->secondary_vm_exec_control,
10125                                 vmx->nested.nested_vmx_secondary_ctls_low,
10126                                 vmx->nested.nested_vmx_secondary_ctls_high) ||
10127             !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
10128                                 vmx->nested.nested_vmx_pinbased_ctls_low,
10129                                 vmx->nested.nested_vmx_pinbased_ctls_high) ||
10130             !vmx_control_verify(vmcs12->vm_exit_controls,
10131                                 vmx->nested.nested_vmx_true_exit_ctls_low,
10132                                 vmx->nested.nested_vmx_exit_ctls_high) ||
10133             !vmx_control_verify(vmcs12->vm_entry_controls,
10134                                 vmx->nested.nested_vmx_true_entry_ctls_low,
10135                                 vmx->nested.nested_vmx_entry_ctls_high))
10136         {
10137                 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
10138                 return 1;
10139         }
10140
10141         if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
10142             ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
10143                 nested_vmx_failValid(vcpu,
10144                         VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
10145                 return 1;
10146         }
10147
10148         if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
10149             ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
10150                 nested_vmx_entry_failure(vcpu, vmcs12,
10151                         EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
10152                 return 1;
10153         }
10154         if (vmcs12->vmcs_link_pointer != -1ull) {
10155                 nested_vmx_entry_failure(vcpu, vmcs12,
10156                         EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
10157                 return 1;
10158         }
10159
10160         /*
10161          * If the load IA32_EFER VM-entry control is 1, the following checks
10162          * are performed on the field for the IA32_EFER MSR:
10163          * - Bits reserved in the IA32_EFER MSR must be 0.
10164          * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
10165          *   the IA-32e mode guest VM-exit control. It must also be identical
10166          *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
10167          *   CR0.PG) is 1.
10168          */
10169         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
10170                 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
10171                 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
10172                     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
10173                     ((vmcs12->guest_cr0 & X86_CR0_PG) &&
10174                      ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
10175                         nested_vmx_entry_failure(vcpu, vmcs12,
10176                                 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
10177                         return 1;
10178                 }
10179         }
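        /*
         * Hypothetical example (not part of the original source): a vmcs12
         * describing a 64-bit L2 with paging enabled must be self-consistent,
         * e.g.
         *
         *     vm_entry_controls |= VM_ENTRY_LOAD_IA32_EFER | VM_ENTRY_IA32E_MODE;
         *     guest_ia32_efer    = EFER_LMA | EFER_LME;
         *     guest_cr0         |= X86_CR0_PG;
         *
         * Any mismatch (say, LMA set but VM_ENTRY_IA32E_MODE clear) trips the
         * check above and results in an EXIT_REASON_INVALID_STATE entry failure.
         */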
10180
10181         /*
10182          * If the load IA32_EFER VM-exit control is 1, bits reserved in the
10183          * IA32_EFER MSR must be 0 in the field for that register. In addition,
10184          * the values of the LMA and LME bits in the field must each be that of
10185          * the host address-space size VM-exit control.
10186          */
10187         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
10188                 ia32e = (vmcs12->vm_exit_controls &
10189                          VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
10190                 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
10191                     ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
10192                     ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
10193                         nested_vmx_entry_failure(vcpu, vmcs12,
10194                                 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
10195                         return 1;
10196                 }
10197         }
10198
10199         /*
10200          * We're finally done with prerequisite checking, and can start with
10201          * the nested entry.
10202          */
10203
10204         vmcs02 = nested_get_current_vmcs02(vmx);
10205         if (!vmcs02)
10206                 return -ENOMEM;
10207
10208         enter_guest_mode(vcpu);
10209
10210         vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
10211
10212         if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
10213                 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
10214
10215         cpu = get_cpu();
10216         vmx->loaded_vmcs = vmcs02;
10217         vmx_vcpu_put(vcpu);
10218         vmx_vcpu_load(vcpu, cpu);
10219         vcpu->cpu = cpu;
10220         put_cpu();
10221
10222         vmx_segment_cache_clear(vmx);
10223
10224         prepare_vmcs02(vcpu, vmcs12);
10225
10226         msr_entry_idx = nested_vmx_load_msr(vcpu,
10227                                             vmcs12->vm_entry_msr_load_addr,
10228                                             vmcs12->vm_entry_msr_load_count);
10229         if (msr_entry_idx) {
10230                 leave_guest_mode(vcpu);
10231                 vmx_load_vmcs01(vcpu);
10232                 nested_vmx_entry_failure(vcpu, vmcs12,
10233                                 EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
10234                 return 1;
10235         }
10236
10237         vmcs12->launch_state = 1;
10238
10239         if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
10240                 return kvm_vcpu_halt(vcpu);
10241
10242         vmx->nested.nested_run_pending = 1;
10243
10244         /*
10245          * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
10246          * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
10247          * returned as far as L1 is concerned. It will only return (and set
10248          * the success flag) when L2 exits (see nested_vmx_vmexit()).
10249          */
10250         return 1;
10251 }
10252
10253 /*
10254  * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
10255  * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
10256  * This function returns the new value we should put in vmcs12.guest_cr0.
10257  * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
10258  *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
10259  *     available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
10260  *     didn't trap the bit, because if L1 did, so would L0).
10261  *  2. Bits that L1 asked to trap (and therefore L0 also did) could not have
10262  *     been modified by L2, and L1 knows it. So just leave the old value of
10263  *     the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
10264  *     isn't relevant, because if L0 traps this bit it can set it to anything.
10265  *  3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
10266  *     changed these bits, and therefore they need to be updated, but L0
10267  *     didn't necessarily allow them to be changed in GUEST_CR0 - and rather
10268  *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
10269  */
10270 static inline unsigned long
10271 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
10272 {
10273         return
10274         /*1*/   (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
10275         /*2*/   (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
10276         /*3*/   (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
10277                         vcpu->arch.cr0_guest_owned_bits));
10278 }
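/*
 * Worked example (illustrative only, not part of the original source): suppose
 * that after prepare_vmcs02() the guest owns only CR0.TS
 * (cr0_guest_owned_bits == X86_CR0_TS) and that L1's cr0_guest_host_mask
 * covers every bit except TS and MP.  Then:
 *  - TS is taken from vmcs02 GUEST_CR0 (case 1: neither L0 nor L1 trap it),
 *  - every bit L1 traps is taken from vmcs12->guest_cr0 (case 2),
 *  - MP, trapped by L0 but not by L1, is taken from vmcs02 CR0_READ_SHADOW
 *    (case 3).
 */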
10279
10280 static inline unsigned long
10281 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
10282 {
10283         return
10284         /*1*/   (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
10285         /*2*/   (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
10286         /*3*/   (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
10287                         vcpu->arch.cr4_guest_owned_bits));
10288 }
10289
10290 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
10291                                        struct vmcs12 *vmcs12)
10292 {
10293         u32 idt_vectoring;
10294         unsigned int nr;
10295
10296         if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) {
10297                 nr = vcpu->arch.exception.nr;
10298                 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
10299
10300                 if (kvm_exception_is_soft(nr)) {
10301                         vmcs12->vm_exit_instruction_len =
10302                                 vcpu->arch.event_exit_inst_len;
10303                         idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
10304                 } else
10305                         idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
10306
10307                 if (vcpu->arch.exception.has_error_code) {
10308                         idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
10309                         vmcs12->idt_vectoring_error_code =
10310                                 vcpu->arch.exception.error_code;
10311                 }
10312
10313                 vmcs12->idt_vectoring_info_field = idt_vectoring;
10314         } else if (vcpu->arch.nmi_injected) {
10315                 vmcs12->idt_vectoring_info_field =
10316                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
10317         } else if (vcpu->arch.interrupt.pending) {
10318                 nr = vcpu->arch.interrupt.nr;
10319                 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
10320
10321                 if (vcpu->arch.interrupt.soft) {
10322                         idt_vectoring |= INTR_TYPE_SOFT_INTR;
10323                         vmcs12->vm_entry_instruction_len =
10324                                 vcpu->arch.event_exit_inst_len;
10325                 } else
10326                         idt_vectoring |= INTR_TYPE_EXT_INTR;
10327
10328                 vmcs12->idt_vectoring_info_field = idt_vectoring;
10329         }
10330 }
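/*
 * Illustrative note (not part of the original source): idt_vectoring_info uses
 * the usual VMX event-information layout; e.g. a #GP (vector 13) that carries
 * an error code would be encoded by the function above as
 *
 *     13 | INTR_TYPE_HARD_EXCEPTION | VECTORING_INFO_DELIVER_CODE_MASK |
 *          VECTORING_INFO_VALID_MASK
 *
 * i.e. vector in bits 7:0, type in bits 10:8, "deliver error code" in bit 11
 * and "valid" in bit 31.
 */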
10331
10332 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
10333 {
10334         struct vcpu_vmx *vmx = to_vmx(vcpu);
10335
10336         if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
10337             vmx->nested.preemption_timer_expired) {
10338                 if (vmx->nested.nested_run_pending)
10339                         return -EBUSY;
10340                 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
10341                 return 0;
10342         }
10343
10344         if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
10345                 if (vmx->nested.nested_run_pending ||
10346                     vcpu->arch.interrupt.pending)
10347                         return -EBUSY;
10348                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
10349                                   NMI_VECTOR | INTR_TYPE_NMI_INTR |
10350                                   INTR_INFO_VALID_MASK, 0);
10351                 /*
10352                  * The NMI-triggered VM exit counts as injection:
10353                  * clear this one and block further NMIs.
10354                  */
10355                 vcpu->arch.nmi_pending = 0;
10356                 vmx_set_nmi_mask(vcpu, true);
10357                 return 0;
10358         }
10359
10360         if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
10361             nested_exit_on_intr(vcpu)) {
10362                 if (vmx->nested.nested_run_pending)
10363                         return -EBUSY;
10364                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
10365                 return 0;
10366         }
10367
10368         return vmx_complete_nested_posted_interrupt(vcpu);
10369 }
10370
10371 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
10372 {
10373         ktime_t remaining =
10374                 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
10375         u64 value;
10376
10377         if (ktime_to_ns(remaining) <= 0)
10378                 return 0;
10379
10380         value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
10381         do_div(value, 1000000);
10382         return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
10383 }
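/*
 * Illustrative sketch (not part of the original source): the conversion above
 * is, roughly,
 *
 *     remaining_ns * virtual_tsc_khz / 1000000         -> guest TSC ticks left
 *     ticks >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE -> timer units
 *
 * since the emulated VMX-preemption timer counts down once every
 * 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE guest TSC ticks.
 */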
10384
10385 /*
10386  * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
10387  * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
10388  * and this function updates it to reflect the changes to the guest state while
10389  * L2 was running (and perhaps made some exits which were handled directly by L0
10390  * without going back to L1), and to reflect the exit reason.
10391  * Note that we do not have to copy here all VMCS fields, just those that
10392  * could have been changed by the L2 guest or the exit - i.e., the guest-state and
10393  * exit-information fields only. Other fields are modified by L1 with VMWRITE,
10394  * which already writes to vmcs12 directly.
10395  */
10396 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10397                            u32 exit_reason, u32 exit_intr_info,
10398                            unsigned long exit_qualification)
10399 {
10400         /* update guest state fields: */
10401         vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
10402         vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
10403
10404         vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
10405         vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
10406         vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
10407
10408         vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
10409         vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
10410         vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
10411         vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
10412         vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
10413         vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
10414         vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
10415         vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
10416         vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
10417         vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
10418         vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
10419         vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
10420         vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
10421         vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
10422         vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
10423         vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
10424         vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
10425         vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
10426         vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
10427         vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
10428         vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
10429         vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
10430         vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
10431         vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
10432         vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
10433         vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
10434         vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
10435         vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
10436         vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
10437         vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
10438         vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
10439         vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
10440         vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
10441         vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
10442         vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
10443         vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
10444
10445         vmcs12->guest_interruptibility_info =
10446                 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
10447         vmcs12->guest_pending_dbg_exceptions =
10448                 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
10449         if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
10450                 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
10451         else
10452                 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
10453
10454         if (nested_cpu_has_preemption_timer(vmcs12)) {
10455                 if (vmcs12->vm_exit_controls &
10456                     VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
10457                         vmcs12->vmx_preemption_timer_value =
10458                                 vmx_get_preemption_timer_value(vcpu);
10459                 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
10460         }
10461
10462         /*
10463          * In some cases (usually, nested EPT), L2 is allowed to change its
10464          * own CR3 without exiting. If it has changed it, we must keep it.
10465          * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
10466          * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
10467          *
10468          * Additionally, restore L2's PDPTRs to vmcs12.
10469          */
10470         if (enable_ept) {
10471                 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
10472                 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
10473                 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
10474                 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
10475                 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
10476         }
10477
10478         if (nested_cpu_has_vid(vmcs12))
10479                 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
10480
10481         vmcs12->vm_entry_controls =
10482                 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
10483                 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
10484
10485         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
10486                 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
10487                 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
10488         }
10489
10490         /* TODO: These cannot have changed unless we have MSR bitmaps and
10491          * the relevant bit asks not to trap the change */
10492         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
10493                 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
10494         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
10495                 vmcs12->guest_ia32_efer = vcpu->arch.efer;
10496         vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
10497         vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
10498         vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
10499         if (kvm_mpx_supported())
10500                 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
10501         if (nested_cpu_has_xsaves(vmcs12))
10502                 vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
10503
10504         /* update exit information fields: */
10505
10506         vmcs12->vm_exit_reason = exit_reason;
10507         vmcs12->exit_qualification = exit_qualification;
10508
10509         vmcs12->vm_exit_intr_info = exit_intr_info;
10510         if ((vmcs12->vm_exit_intr_info &
10511              (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
10512             (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK))
10513                 vmcs12->vm_exit_intr_error_code =
10514                         vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
10515         vmcs12->idt_vectoring_info_field = 0;
10516         vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
10517         vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
10518
10519         if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
10520                 /* vm_entry_intr_info_field is cleared on exit. Emulate this
10521                  * instead of reading the real value. */
10522                 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
10523
10524                 /*
10525                  * Transfer the event that L0 or L1 may have wanted to inject into
10526                  * L2 to IDT_VECTORING_INFO_FIELD.
10527                  */
10528                 vmcs12_save_pending_event(vcpu, vmcs12);
10529         }
10530
10531         /*
10532          * Drop what we picked up for L2 via vmx_complete_interrupts. It is
10533          * preserved above and would only end up incorrectly in L1.
10534          */
10535         vcpu->arch.nmi_injected = false;
10536         kvm_clear_exception_queue(vcpu);
10537         kvm_clear_interrupt_queue(vcpu);
10538 }
10539
10540 /*
10541  * Part of what we need to do when the nested L2 guest exits and we want to
10542  * run its L1 parent is to reset L1's guest state to the host state specified
10543  * in vmcs12.
10544  * This function is to be called not only on normal nested exit, but also on
10545  * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
10546  * Failures During or After Loading Guest State").
10547  * This function should be called when the active VMCS is L1's (vmcs01).
10548  */
10549 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
10550                                    struct vmcs12 *vmcs12)
10551 {
10552         struct kvm_segment seg;
10553
10554         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
10555                 vcpu->arch.efer = vmcs12->host_ia32_efer;
10556         else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
10557                 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
10558         else
10559                 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
10560         vmx_set_efer(vcpu, vcpu->arch.efer);
10561
10562         kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
10563         kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
10564         vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
10565         /*
10566          * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
10567          * actually changed, because it depends on the current state of
10568          * fpu_active (which may have changed).
10569          * Note that vmx_set_cr0 refers to the efer value set above.
10570          */
10571         vmx_set_cr0(vcpu, vmcs12->host_cr0);
10572         /*
10573          * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
10574          * to apply the same changes to L1's vmcs. We just set cr0 correctly,
10575          * but we also need to update cr0_guest_host_mask and exception_bitmap.
10576          */
10577         update_exception_bitmap(vcpu);
10578         vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
10579         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
10580
10581         /*
10582          * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01
10583          * (KVM doesn't change it) - no reason to call set_cr4_guest_host_mask().
10584          */
10585         vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
10586         kvm_set_cr4(vcpu, vmcs12->host_cr4);
10587
10588         nested_ept_uninit_mmu_context(vcpu);
10589
10590         kvm_set_cr3(vcpu, vmcs12->host_cr3);
10591         kvm_mmu_reset_context(vcpu);
10592
10593         if (!enable_ept)
10594                 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
10595
10596         if (enable_vpid) {
10597                 /*
10598                  * Trivially support vpid by letting L2s share their parent
10599                  * L1's vpid. TODO: move to a more elaborate solution, giving
10600                  * each L2 its own vpid and exposing the vpid feature to L1.
10601                  */
10602                 vmx_flush_tlb(vcpu);
10603         }
10604
10605
10606         vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
10607         vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
10608         vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
10609         vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
10610         vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
10611
10612         /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
10613         if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
10614                 vmcs_write64(GUEST_BNDCFGS, 0);
10615
10616         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
10617                 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
10618                 vcpu->arch.pat = vmcs12->host_ia32_pat;
10619         }
10620         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
10621                 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
10622                         vmcs12->host_ia32_perf_global_ctrl);
10623
10624         /* Set L1 segment info according to Intel SDM section 27.5.2,
10625          * "Loading Host Segment and Descriptor-Table Registers". */
10626         seg = (struct kvm_segment) {
10627                 .base = 0,
10628                 .limit = 0xFFFFFFFF,
10629                 .selector = vmcs12->host_cs_selector,
10630                 .type = 11,
10631                 .present = 1,
10632                 .s = 1,
10633                 .g = 1
10634         };
10635         if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
10636                 seg.l = 1;
10637         else
10638                 seg.db = 1;
10639         vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
10640         seg = (struct kvm_segment) {
10641                 .base = 0,
10642                 .limit = 0xFFFFFFFF,
10643                 .type = 3,
10644                 .present = 1,
10645                 .s = 1,
10646                 .db = 1,
10647                 .g = 1
10648         };
10649         seg.selector = vmcs12->host_ds_selector;
10650         vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
10651         seg.selector = vmcs12->host_es_selector;
10652         vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
10653         seg.selector = vmcs12->host_ss_selector;
10654         vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
10655         seg.selector = vmcs12->host_fs_selector;
10656         seg.base = vmcs12->host_fs_base;
10657         vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
10658         seg.selector = vmcs12->host_gs_selector;
10659         seg.base = vmcs12->host_gs_base;
10660         vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
10661         seg = (struct kvm_segment) {
10662                 .base = vmcs12->host_tr_base,
10663                 .limit = 0x67,
10664                 .selector = vmcs12->host_tr_selector,
10665                 .type = 11,
10666                 .present = 1
10667         };
10668         vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
10669
10670         kvm_set_dr(vcpu, 7, 0x400);
10671         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
10672
10673         if (cpu_has_vmx_msr_bitmap())
10674                 vmx_set_msr_bitmap(vcpu);
10675
10676         if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
10677                                 vmcs12->vm_exit_msr_load_count))
10678                 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
10679 }
10680
10681 /*
10682  * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
10683  * and modify vmcs12 to make it see what it would expect to see there if
10684  * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
10685  */
10686 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
10687                               u32 exit_intr_info,
10688                               unsigned long exit_qualification)
10689 {
10690         struct vcpu_vmx *vmx = to_vmx(vcpu);
10691         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
10692
10693         /* trying to cancel vmlaunch/vmresume is a bug */
10694         WARN_ON_ONCE(vmx->nested.nested_run_pending);
10695
10696         leave_guest_mode(vcpu);
10697         prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
10698                        exit_qualification);
10699
10700         if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
10701                                  vmcs12->vm_exit_msr_store_count))
10702                 nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
10703
10704         vmx_load_vmcs01(vcpu);
10705
10706         if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
10707             && nested_exit_intr_ack_set(vcpu)) {
10708                 int irq = kvm_cpu_get_interrupt(vcpu);
10709                 WARN_ON(irq < 0);
10710                 vmcs12->vm_exit_intr_info = irq |
10711                         INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
10712         }
10713
10714         trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
10715                                        vmcs12->exit_qualification,
10716                                        vmcs12->idt_vectoring_info_field,
10717                                        vmcs12->vm_exit_intr_info,
10718                                        vmcs12->vm_exit_intr_error_code,
10719                                        KVM_ISA_VMX);
10720
10721         vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
10722         vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
10723         vmx_segment_cache_clear(vmx);
10724
10725         /* if no vmcs02 cache requested, remove the one we used */
10726         if (VMCS02_POOL_SIZE == 0)
10727                 nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
10728
10729         load_vmcs12_host_state(vcpu, vmcs12);
10730
10731         /* Update TSC_OFFSET if TSC was changed while L2 ran */
10732         vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
10733
10734         /* This is needed for the same reason it was needed in prepare_vmcs02 */
10735         vmx->host_rsp = 0;
10736
10737         /* Unpin physical memory we referred to in vmcs02 */
10738         if (vmx->nested.apic_access_page) {
10739                 nested_release_page(vmx->nested.apic_access_page);
10740                 vmx->nested.apic_access_page = NULL;
10741         }
10742         if (vmx->nested.virtual_apic_page) {
10743                 nested_release_page(vmx->nested.virtual_apic_page);
10744                 vmx->nested.virtual_apic_page = NULL;
10745         }
10746         if (vmx->nested.pi_desc_page) {
10747                 kunmap(vmx->nested.pi_desc_page);
10748                 nested_release_page(vmx->nested.pi_desc_page);
10749                 vmx->nested.pi_desc_page = NULL;
10750                 vmx->nested.pi_desc = NULL;
10751         }
10752
10753         /*
10754          * While L2 was running, an mmu_notifier may have forced a reload of the
10755          * page's hpa in the L2 vmcs. Reload it for L1 before entering L1.
10756          */
10757         kvm_vcpu_reload_apic_access_page(vcpu);
10758
10759         /*
10760          * Exiting from L2 to L1, we're now back to L1 which thinks it just
10761          * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
10762          * success or failure flag accordingly.
10763          */
10764         if (unlikely(vmx->fail)) {
10765                 vmx->fail = 0;
10766                 nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
10767         } else
10768                 nested_vmx_succeed(vcpu);
10769         if (enable_shadow_vmcs)
10770                 vmx->nested.sync_shadow_vmcs = true;
10771
10772         /* in case we halted in L2 */
10773         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
10774 }
10775
10776 /*
10777  * Forcibly leave nested mode in order to be able to reset the VCPU later on.
10778  */
10779 static void vmx_leave_nested(struct kvm_vcpu *vcpu)
10780 {
10781         if (is_guest_mode(vcpu))
10782                 nested_vmx_vmexit(vcpu, -1, 0, 0);
10783         free_nested(to_vmx(vcpu));
10784 }
10785
10786 /*
10787  * L1's failure to enter L2 is a subset of a normal exit, as explained in
10788  * 23.7 "VM-entry failures during or after loading guest state" (this also
10789  * lists the acceptable exit-reason and exit-qualification parameters).
10790  * It should only be called before L2 has actually started to run, and when
10791  * vmcs01 is current (it doesn't leave_guest_mode() or switch VMCSs).
10792  */
10793 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
10794                         struct vmcs12 *vmcs12,
10795                         u32 reason, unsigned long qualification)
10796 {
10797         load_vmcs12_host_state(vcpu, vmcs12);
10798         vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
10799         vmcs12->exit_qualification = qualification;
10800         nested_vmx_succeed(vcpu);
10801         if (enable_shadow_vmcs)
10802                 to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
10803 }
10804
10805 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
10806                                struct x86_instruction_info *info,
10807                                enum x86_intercept_stage stage)
10808 {
10809         return X86EMUL_CONTINUE;
10810 }
10811
10812 #ifdef CONFIG_X86_64
10813 /* (a << shift) / divisor; returns 1 on overflow, otherwise 0 */
10814 static inline int u64_shl_div_u64(u64 a, unsigned int shift,
10815                                   u64 divisor, u64 *result)
10816 {
10817         u64 low = a << shift, high = a >> (64 - shift);
10818
10819         /* Avoid overflow on the divq below */
10820         if (high >= divisor)
10821                 return 1;
10822
10823         /* low holds the result, high holds the remainder, which is discarded */
10824         asm("divq %2\n\t" : "=a" (low), "=d" (high) :
10825             "rm" (divisor), "0" (low), "1" (high));
10826         *result = low;
10827
10828         return 0;
10829 }
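/*
 * Usage sketch (illustrative only, not part of the original source): computing
 * (delta << kvm_tsc_scaling_ratio_frac_bits) / tsc_scaling_ratio without
 * losing the high bits of the 128-bit intermediate value:
 *
 *     u64 host_units;
 *     if (u64_shl_div_u64(delta, kvm_tsc_scaling_ratio_frac_bits,
 *                         vcpu->arch.tsc_scaling_ratio, &host_units))
 *             return -ERANGE;   (the quotient would not fit in 64 bits)
 *
 * which mirrors how vmx_set_hv_timer() below uses it.
 */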
10830
10831 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
10832 {
10833         struct vcpu_vmx *vmx = to_vmx(vcpu);
10834         u64 tscl = rdtsc(), delta_tsc;
10835
10836         delta_tsc = guest_deadline_tsc - kvm_read_l1_tsc(vcpu, tscl);
10837
10838         /* Convert to host delta tsc if tsc scaling is enabled */
10839         if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
10840                         u64_shl_div_u64(delta_tsc,
10841                                 kvm_tsc_scaling_ratio_frac_bits,
10842                                 vcpu->arch.tsc_scaling_ratio,
10843                                 &delta_tsc))
10844                 return -ERANGE;
10845
10846         /*
10847          * If the delta tsc doesn't fit in 32 bits after shifting by
10848          * cpu_preemption_timer_multi, we can't use the preemption timer.
10849          * It's possible that it fits on later vmentries, but checking
10850          * on every vmentry is costly so we just use an hrtimer.
10851          */
10852         if (delta_tsc >> (cpu_preemption_timer_multi + 32))
10853                 return -ERANGE;
10854
10855         vmx->hv_deadline_tsc = tscl + delta_tsc;
10856         vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
10857                         PIN_BASED_VMX_PREEMPTION_TIMER);
10858         return 0;
10859 }
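/*
 * Illustrative note (not part of the original source): the VMX-preemption
 * timer field is 32 bits wide and decrements once every
 * 2^cpu_preemption_timer_multi host TSC ticks, so the largest programmable
 * delta is roughly ((1ULL << 32) - 1) << cpu_preemption_timer_multi; the
 * "delta_tsc >> (cpu_preemption_timer_multi + 32)" check above rejects
 * anything larger.
 */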
10860
10861 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
10862 {
10863         struct vcpu_vmx *vmx = to_vmx(vcpu);
10864         vmx->hv_deadline_tsc = -1;
10865         vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
10866                         PIN_BASED_VMX_PREEMPTION_TIMER);
10867 }
10868 #endif
10869
10870 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
10871 {
10872         if (ple_gap)
10873                 shrink_ple_window(vcpu);
10874 }
10875
10876 static void vmx_slot_enable_log_dirty(struct kvm *kvm,
10877                                      struct kvm_memory_slot *slot)
10878 {
10879         kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
10880         kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
10881 }
10882
10883 static void vmx_slot_disable_log_dirty(struct kvm *kvm,
10884                                        struct kvm_memory_slot *slot)
10885 {
10886         kvm_mmu_slot_set_dirty(kvm, slot);
10887 }
10888
10889 static void vmx_flush_log_dirty(struct kvm *kvm)
10890 {
10891         kvm_flush_pml_buffers(kvm);
10892 }
10893
10894 static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
10895                                            struct kvm_memory_slot *memslot,
10896                                            gfn_t offset, unsigned long mask)
10897 {
10898         kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
10899 }
10900
10901 /*
10902  * This routine does the following things for a vCPU which is about
10903  * to be blocked, if VT-d PI is enabled:
10904  * - Store the vCPU on the wakeup list, so that when an interrupt
10905  *   arrives we can find the right vCPU to wake up.
10906  * - Change the Posted-Interrupt descriptor as follows:
10907  *      'NDST' <-- vcpu->pre_pcpu
10908  *      'NV' <-- POSTED_INTR_WAKEUP_VECTOR
10909  * - If 'ON' is set during this process, meaning at least one
10910  *   interrupt is posted for this vCPU, we cannot block it; in
10911  *   that case return 1, otherwise return 0.
10912  *
10913  */
10914 static int pi_pre_block(struct kvm_vcpu *vcpu)
10915 {
10916         unsigned long flags;
10917         unsigned int dest;
10918         struct pi_desc old, new;
10919         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
10920
10921         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
10922                 !irq_remapping_cap(IRQ_POSTING_CAP))
10923                 return 0;
10924
10925         vcpu->pre_pcpu = vcpu->cpu;
10926         spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
10927                           vcpu->pre_pcpu), flags);
10928         list_add_tail(&vcpu->blocked_vcpu_list,
10929                       &per_cpu(blocked_vcpu_on_cpu,
10930                       vcpu->pre_pcpu));
10931         spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
10932                                vcpu->pre_pcpu), flags);
10933
10934         do {
10935                 old.control = new.control = pi_desc->control;
10936
10937                 /*
10938                  * We should not block the vCPU if
10939                  * an interrupt is posted for it.
10940                  */
10941                 if (pi_test_on(pi_desc) == 1) {
10942                         spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
10943                                           vcpu->pre_pcpu), flags);
10944                         list_del(&vcpu->blocked_vcpu_list);
10945                         spin_unlock_irqrestore(
10946                                         &per_cpu(blocked_vcpu_on_cpu_lock,
10947                                         vcpu->pre_pcpu), flags);
10948                         vcpu->pre_pcpu = -1;
10949
10950                         return 1;
10951                 }
10952
10953                 /* SN must still be clear; notifications must not be suppressed while blocking. */
10954                 WARN(pi_desc->sn == 1,
10955                      "Warning: SN field of posted-interrupts is set before blocking\n");
10956
10957                 /*
10958                  * The vCPU can be preempted during this process, so
10959                  * vcpu->cpu may differ from pre_pcpu.  Use pre_pcpu as
10960                  * the destination of the wakeup notification: the vCPU
10961                  * sits on pre_pcpu's blocked-vCPU list, so the wakeup
10962                  * handler running on that CPU finds the right vCPU when
10963                  * an interrupt is posted while the vCPU is blocked.
10964                  */
10965                 dest = cpu_physical_id(vcpu->pre_pcpu);
10966
10967                 if (x2apic_enabled())
10968                         new.ndst = dest;
10969                 else
10970                         new.ndst = (dest << 8) & 0xFF00;
10971
10972                 /* set 'NV' to 'wakeup vector' */
10973                 new.nv = POSTED_INTR_WAKEUP_VECTOR;
10974         } while (cmpxchg(&pi_desc->control, old.control,
10975                         new.control) != old.control);
10976
10977         return 0;
10978 }
10979
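/*
 * Called when the vCPU is about to block (e.g. after HLT).  pi_pre_block()
 * above redirects posted interrupts to the wakeup vector; as an illustration
 * (assuming xAPIC mode and a pre_pcpu whose physical APIC ID is 5), the
 * descriptor ends up with NDST = (5 << 8) & 0xFF00 = 0x0500 and
 * NV = POSTED_INTR_WAKEUP_VECTOR, so an interrupt posted while the vCPU is
 * blocked is delivered to CPU 5, whose wakeup handler walks its
 * blocked_vcpu_on_cpu list and kicks the vCPU.  In addition, if the VMX
 * preemption timer is emulating the APIC timer, switch to the hrtimer-based
 * emulation while blocked, since the preemption timer only counts down in
 * guest mode.
 */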
10980 static int vmx_pre_block(struct kvm_vcpu *vcpu)
10981 {
10982         if (pi_pre_block(vcpu))
10983                 return 1;
10984
10985         if (kvm_lapic_hv_timer_in_use(vcpu))
10986                 kvm_lapic_switch_to_sw_timer(vcpu);
10987
10988         return 0;
10989 }
10990
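/*
 * Revert the posted-interrupt descriptor to its normal running state:
 * point 'NDST' back at the CPU the vCPU now runs on, clear 'SN' so
 * notifications are no longer suppressed, restore 'NV' to the regular
 * notification vector, and take the vCPU off the per-CPU wakeup list
 * it was placed on by pi_pre_block().
 */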
10991 static void pi_post_block(struct kvm_vcpu *vcpu)
10992 {
10993         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
10994         struct pi_desc old, new;
10995         unsigned int dest;
10996         unsigned long flags;
10997
10998         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
10999                 !irq_remapping_cap(IRQ_POSTING_CAP))
11000                 return;
11001
11002         do {
11003                 old.control = new.control = pi_desc->control;
11004
11005                 dest = cpu_physical_id(vcpu->cpu);
11006
11007                 if (x2apic_enabled())
11008                         new.ndst = dest;
11009                 else
11010                         new.ndst = (dest << 8) & 0xFF00;
11011
11012                 /* Allow posting non-urgent interrupts */
11013                 new.sn = 0;
11014
11015                 /* set 'NV' to 'notification vector' */
11016                 new.nv = POSTED_INTR_VECTOR;
11017         } while (cmpxchg(&pi_desc->control, old.control,
11018                         new.control) != old.control);
11019
11020         if (vcpu->pre_pcpu != -1) {
11021                 spin_lock_irqsave(
11022                         &per_cpu(blocked_vcpu_on_cpu_lock,
11023                         vcpu->pre_pcpu), flags);
11024                 list_del(&vcpu->blocked_vcpu_list);
11025                 spin_unlock_irqrestore(
11026                         &per_cpu(blocked_vcpu_on_cpu_lock,
11027                         vcpu->pre_pcpu), flags);
11028                 vcpu->pre_pcpu = -1;
11029         }
11030 }
11031
11032 static void vmx_post_block(struct kvm_vcpu *vcpu)
11033 {
11034         if (kvm_x86_ops->set_hv_timer)
11035                 kvm_lapic_switch_to_hv_timer(vcpu);
11036
11037         pi_post_block(vcpu);
11038 }
11039
11040 /*
11041  * vmx_update_pi_irte - set IRTE for Posted-Interrupts
11042  *
11043  * @kvm: the VM whose interrupt routing is being updated
11044  * @host_irq: host irq number of the interrupt
11045  * @guest_irq: gsi of the interrupt as seen by the guest
11046  * @set: true to switch the IRTE to posted mode, false to remapped mode
11047  * returns 0 on success, < 0 on failure
11048  */
11049 static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
11050                               uint32_t guest_irq, bool set)
11051 {
11052         struct kvm_kernel_irq_routing_entry *e;
11053         struct kvm_irq_routing_table *irq_rt;
11054         struct kvm_lapic_irq irq;
11055         struct kvm_vcpu *vcpu;
11056         struct vcpu_data vcpu_info;
11057         int idx, ret = -EINVAL;
11058
11059         if (!kvm_arch_has_assigned_device(kvm) ||
11060                 !irq_remapping_cap(IRQ_POSTING_CAP))
11061                 return 0;
11062
11063         idx = srcu_read_lock(&kvm->irq_srcu);
11064         irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
11065         BUG_ON(guest_irq >= irq_rt->nr_rt_entries);
11066
11067         hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
11068                 if (e->type != KVM_IRQ_ROUTING_MSI)
11069                         continue;
11070                 /*
11071                  * VT-d PI cannot post multicast or broadcast
11072                  * interrupts to a vCPU, so interrupt remapping is
11073                  * still used for that kind of interrupt.
11074                  *
11075                  * Lowest-priority interrupts are only supported when
11076                  * they target a single CPU, e.g. because the user
11077                  * configured the affinity via /proc/irq or irqbalance
11078                  * pinned the interrupt to a single CPU.
11079                  *
11080                  * Full lowest-priority support may be added later.
11081                  */
11082
11083                 kvm_set_msi_irq(e, &irq);
11084                 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
11085                         /*
11086                          * Make sure the IRTE is in remapped mode if
11087                          * we don't handle it in posted mode.
11088                          */
11089                         ret = irq_set_vcpu_affinity(host_irq, NULL);
11090                         if (ret < 0) {
11091                                 printk(KERN_INFO
11092                                    "failed to fall back to remapped mode, irq: %u\n",
11093                                    host_irq);
11094                                 goto out;
11095                         }
11096
11097                         continue;
11098                 }
11099
11100                 vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
11101                 vcpu_info.vector = irq.vector;
11102
11103                 trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi,
11104                                 vcpu_info.vector, vcpu_info.pi_desc_addr, set);
11105
11106                 if (set) {
11107                         ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
11108                 } else {
11109                         /* suppress notification event before unposting */
11110                         pi_set_sn(vcpu_to_pi_desc(vcpu));
11111                         ret = irq_set_vcpu_affinity(host_irq, NULL);
11112                         pi_clear_sn(vcpu_to_pi_desc(vcpu));
11113                 }
11114
11115                 if (ret < 0) {
11116                         printk(KERN_INFO "%s: failed to update PI IRTE\n",
11117                                         __func__);
11118                         goto out;
11119                 }
11120         }
11121
11122         ret = 0;
11123 out:
11124         srcu_read_unlock(&kvm->irq_srcu, idx);
11125         return ret;
11126 }
11127
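/*
 * Allow the guest to set FEATURE_CONTROL_LMCE in IA32_FEATURE_CONTROL
 * only if userspace has enabled MCG_LMCE_P in this vCPU's MCG_CAP.
 */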
11128 static void vmx_setup_mce(struct kvm_vcpu *vcpu)
11129 {
11130         if (vcpu->arch.mcg_cap & MCG_LMCE_P)
11131                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
11132                         FEATURE_CONTROL_LMCE;
11133         else
11134                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
11135                         ~FEATURE_CONTROL_LMCE;
11136 }
11137
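/*
 * The table of callbacks that hooks the VMX-specific implementation into
 * the generic x86 side of KVM; vmx_init() below passes it to kvm_init().
 */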
11138 static struct kvm_x86_ops vmx_x86_ops = {
11139         .cpu_has_kvm_support = cpu_has_kvm_support,
11140         .disabled_by_bios = vmx_disabled_by_bios,
11141         .hardware_setup = hardware_setup,
11142         .hardware_unsetup = hardware_unsetup,
11143         .check_processor_compatibility = vmx_check_processor_compat,
11144         .hardware_enable = hardware_enable,
11145         .hardware_disable = hardware_disable,
11146         .cpu_has_accelerated_tpr = report_flexpriority,
11147         .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
11148
11149         .vcpu_create = vmx_create_vcpu,
11150         .vcpu_free = vmx_free_vcpu,
11151         .vcpu_reset = vmx_vcpu_reset,
11152
11153         .prepare_guest_switch = vmx_save_host_state,
11154         .vcpu_load = vmx_vcpu_load,
11155         .vcpu_put = vmx_vcpu_put,
11156
11157         .update_bp_intercept = update_exception_bitmap,
11158         .get_msr = vmx_get_msr,
11159         .set_msr = vmx_set_msr,
11160         .get_segment_base = vmx_get_segment_base,
11161         .get_segment = vmx_get_segment,
11162         .set_segment = vmx_set_segment,
11163         .get_cpl = vmx_get_cpl,
11164         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
11165         .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
11166         .decache_cr3 = vmx_decache_cr3,
11167         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
11168         .set_cr0 = vmx_set_cr0,
11169         .set_cr3 = vmx_set_cr3,
11170         .set_cr4 = vmx_set_cr4,
11171         .set_efer = vmx_set_efer,
11172         .get_idt = vmx_get_idt,
11173         .set_idt = vmx_set_idt,
11174         .get_gdt = vmx_get_gdt,
11175         .set_gdt = vmx_set_gdt,
11176         .get_dr6 = vmx_get_dr6,
11177         .set_dr6 = vmx_set_dr6,
11178         .set_dr7 = vmx_set_dr7,
11179         .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
11180         .cache_reg = vmx_cache_reg,
11181         .get_rflags = vmx_get_rflags,
11182         .set_rflags = vmx_set_rflags,
11183
11184         .get_pkru = vmx_get_pkru,
11185
11186         .fpu_activate = vmx_fpu_activate,
11187         .fpu_deactivate = vmx_fpu_deactivate,
11188
11189         .tlb_flush = vmx_flush_tlb,
11190
11191         .run = vmx_vcpu_run,
11192         .handle_exit = vmx_handle_exit,
11193         .skip_emulated_instruction = skip_emulated_instruction,
11194         .set_interrupt_shadow = vmx_set_interrupt_shadow,
11195         .get_interrupt_shadow = vmx_get_interrupt_shadow,
11196         .patch_hypercall = vmx_patch_hypercall,
11197         .set_irq = vmx_inject_irq,
11198         .set_nmi = vmx_inject_nmi,
11199         .queue_exception = vmx_queue_exception,
11200         .cancel_injection = vmx_cancel_injection,
11201         .interrupt_allowed = vmx_interrupt_allowed,
11202         .nmi_allowed = vmx_nmi_allowed,
11203         .get_nmi_mask = vmx_get_nmi_mask,
11204         .set_nmi_mask = vmx_set_nmi_mask,
11205         .enable_nmi_window = enable_nmi_window,
11206         .enable_irq_window = enable_irq_window,
11207         .update_cr8_intercept = update_cr8_intercept,
11208         .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
11209         .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
11210         .get_enable_apicv = vmx_get_enable_apicv,
11211         .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
11212         .load_eoi_exitmap = vmx_load_eoi_exitmap,
11213         .hwapic_irr_update = vmx_hwapic_irr_update,
11214         .hwapic_isr_update = vmx_hwapic_isr_update,
11215         .sync_pir_to_irr = vmx_sync_pir_to_irr,
11216         .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
11217
11218         .set_tss_addr = vmx_set_tss_addr,
11219         .get_tdp_level = get_ept_level,
11220         .get_mt_mask = vmx_get_mt_mask,
11221
11222         .get_exit_info = vmx_get_exit_info,
11223
11224         .get_lpage_level = vmx_get_lpage_level,
11225
11226         .cpuid_update = vmx_cpuid_update,
11227
11228         .rdtscp_supported = vmx_rdtscp_supported,
11229         .invpcid_supported = vmx_invpcid_supported,
11230
11231         .set_supported_cpuid = vmx_set_supported_cpuid,
11232
11233         .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
11234
11235         .read_tsc_offset = vmx_read_tsc_offset,
11236         .write_tsc_offset = vmx_write_tsc_offset,
11237         .adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
11238         .read_l1_tsc = vmx_read_l1_tsc,
11239
11240         .set_tdp_cr3 = vmx_set_cr3,
11241
11242         .check_intercept = vmx_check_intercept,
11243         .handle_external_intr = vmx_handle_external_intr,
11244         .mpx_supported = vmx_mpx_supported,
11245         .xsaves_supported = vmx_xsaves_supported,
11246
11247         .check_nested_events = vmx_check_nested_events,
11248
11249         .sched_in = vmx_sched_in,
11250
11251         .slot_enable_log_dirty = vmx_slot_enable_log_dirty,
11252         .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
11253         .flush_log_dirty = vmx_flush_log_dirty,
11254         .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
11255
11256         .pre_block = vmx_pre_block,
11257         .post_block = vmx_post_block,
11258
11259         .pmu_ops = &intel_pmu_ops,
11260
11261         .update_pi_irte = vmx_update_pi_irte,
11262
11263 #ifdef CONFIG_X86_64
11264         .set_hv_timer = vmx_set_hv_timer,
11265         .cancel_hv_timer = vmx_cancel_hv_timer,
11266 #endif
11267
11268         .setup_mce = vmx_setup_mce,
11269 };
11270
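/*
 * Module init: register the VMX backend with the KVM core.  When kexec is
 * configured, also publish crash_vmclear_local_loaded_vmcss so that VMCSs
 * still loaded on a crashing CPU are cleared before the crash kernel runs.
 */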
11271 static int __init vmx_init(void)
11272 {
11273         int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
11274                      __alignof__(struct vcpu_vmx), THIS_MODULE);
11275         if (r)
11276                 return r;
11277
11278 #ifdef CONFIG_KEXEC_CORE
11279         rcu_assign_pointer(crash_vmclear_loaded_vmcss,
11280                            crash_vmclear_local_loaded_vmcss);
11281 #endif
11282
11283         return 0;
11284 }
11285
11286 static void __exit vmx_exit(void)
11287 {
11288 #ifdef CONFIG_KEXEC_CORE
11289         RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
11290         synchronize_rcu();
11291 #endif
11292
11293         kvm_exit();
11294 }
11295
11296 module_init(vmx_init)
11297 module_exit(vmx_exit)