arch/x86/kvm/cpuid.h
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>

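/*
 * Implemented in cpuid.c: kvm_update_cpuid() refreshes state derived from
 * the vCPU's CPUID table (for example the cached MAXPHYADDR used below),
 * kvm_find_cpuid_entry() looks up a guest CPUID leaf by function and index,
 * the ioctl helpers back the KVM_GET_SUPPORTED_CPUID, KVM_SET_CPUID{,2} and
 * KVM_GET_CPUID2 interfaces, and kvm_cpuid() emulates the CPUID instruction
 * for the guest.
 */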
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

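/*
 * cpuid_query_maxphyaddr() above recomputes the guest's physical address
 * width from its CPUID entries; cpuid_maxphyaddr() just returns the value
 * cached in vcpu->arch.maxphyaddr by kvm_update_cpuid().
 */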
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.maxphyaddr;
}

static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        if (!static_cpu_has(X86_FEATURE_XSAVE))
                return false;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}
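
/*
 * Illustrative usage sketch (not part of this header): a CR4 write handler
 * could use the helper above to refuse CR4.OSXSAVE when the guest's CPUID
 * does not advertise XSAVE:
 *
 *        if ((cr4 & X86_CR4_OSXSAVE) && !guest_cpuid_has_xsave(vcpu))
 *                return 1;
 */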

static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        return best && (best->edx & bit(X86_FEATURE_MTRR));
}

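/*
 * Several of the helpers below (TSC_ADJUST, SMEP, SMAP, FSGSBASE, RTM, MPX,
 * PCOMMIT) test bits from the structured extended feature flags leaf,
 * CPUID.(EAX=7,ECX=0):EBX.
 */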
static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
        return best && (best->edx & bit(X86_FEATURE_LM));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
        return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}

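/*
 * Leaf 0 reports the CPU vendor string in EBX/EDX/ECX; comparing EBX alone
 * ("Auth" for "AuthenticAMD") is how this helper identifies an AMD guest.
 */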
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0, 0);
        return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
        return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        return best && (best->ebx & bit(X86_FEATURE_RTM));
}

static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        return best && (best->ebx & bit(X86_FEATURE_MPX));
}

static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
}

static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
        return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}

/*
 * NRIPS is provided through CPUID function 0x8000000a, EDX bit 3.
 */
#define BIT_NRIPS       3

static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);

        /*
         * NRIPS is a scattered cpuid feature, so we can't use
         * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
         * position 8, not 3).
         */
        return best && (best->edx & bit(BIT_NRIPS));
}
#undef BIT_NRIPS

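/*
 * Family, model and stepping are decoded from CPUID.1.EAX with the common
 * x86_family()/x86_model()/x86_stepping() helpers; -1 is returned when the
 * guest has no leaf 1 entry.
 */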
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_stepping(best->eax);
}
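
/*
 * Illustrative sketch (not part of this header): the vendor and
 * family/model/stepping helpers are typically combined for model-specific
 * checks, e.g.:
 *
 *        if (guest_cpuid_is_amd(vcpu) && guest_cpuid_family(vcpu) == 0x15)
 *                ... apply a hypothetical model-specific workaround ...
 */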

#endif