/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/fpu/internal.h> /* For use_eager_fpu.  Ugh! */
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}
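/*
 * Illustrative note: for each extended state component i, CPUID.0xD.i
 * returns the component's size in EAX and, for the standard (non-compacted)
 * format, its fixed offset in EBX.  In the compacted format components are
 * packed one after another, which is why the loop above uses the running
 * total "ret" as the offset when "compacted" is true.
 */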
bool kvm_mpx_supported(void)
{
	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		 && kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);
u64 kvm_supported_xcr0(void)
{
	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

	if (!kvm_mpx_supported())
		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	return xcr0;
}
#define F(x) bit(X86_FEATURE_##x)
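/* e.g. F(NX) expands to bit(X86_FEATURE_NX), the feature's bit within its
 * 32-bit capability word. */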
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return 0;

	/* Update OSXSAVE bit */
	if (cpu_has_xsave && best->function == 0x1) {
		best->ecx &= ~F(OSXSAVE);
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= F(OSXSAVE);
	}

	if (apic) {
		if (best->ecx & F(TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best) {
		vcpu->arch.guest_supported_xcr0 = 0;
		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	} else {
		/* CPUID.0xD.0: EAX/EDX hold the low/high halves of the
		 * supported XCR0 mask. */
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			kvm_supported_xcr0();
		vcpu->arch.guest_xstate_size = best->ebx =
			xstate_required_size(vcpu->arch.xcr0, false);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	vcpu->arch.eager_fpu = use_eager_fpu();
	if (vcpu->arch.eager_fpu)
		kvm_x86_ops->fpu_activate(vcpu);

	/*
	 * The existing code assumes virtual address is 48-bit in the canonical
	 * address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best && ((best->eax & 0xff00) >> 8) != 48 &&
		((best->eax & 0xff00) >> 8) != 0)
		return -EINVAL;

	/* Update physical-address width */
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

	kvm_pmu_refresh(vcpu);
	return 0;
}
static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
		entry->edx &= ~F(NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	/* convert the legacy entry format to kvm_cpuid_entry2 */
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
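/*
 * Illustrative userspace sketch (an assumption, not part of this file) of
 * how the legacy ioctl above is reached:
 *
 *	struct {
 *		struct kvm_cpuid cpuid;
 *		struct kvm_cpuid_entry entries[N];
 *	} c = { .cpuid.nent = N };
 *
 *	// fill c.entries[0..N-1], then:
 *	ioctl(vcpu_fd, KVM_SET_CPUID, &c);
 */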
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);
out:
	return r;
}
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	r = 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}
static void cpuid_mask(u32 *word, int wordnum)
{
	*word &= boot_cpu_data.x86_capability[wordnum];
}
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	/* cpuid_count() executes CPUID on the host with EAX=function and
	 * ECX=index. */
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}
static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
				   u32 func, u32 index, int *nent, int maxnent)
{
	switch (func) {
	case 0:
		entry->eax = 1;		/* only one leaf currently */
		++*nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++*nent;
		break;
	default:
		break;
	}

	entry->function = func;
	entry->index = index;

	return 0;
}
static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
				 u32 index, int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;

	/* cpuid 1.edx */
	const u32 kvm_supported_word0_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_supported_word1_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_supported_word4_x86_features =
		/* NOTE: MONITOR (and MWAIT) are emulated as NOP,
		 * but *not* advertised to guests via CPUID ! */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_supported_word5_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 7.0.ebx */
	const u32 kvm_supported_word9_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
		F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(PCOMMIT);

	/* cpuid 0xD.1.eax */
	const u32 kvm_supported_word10_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
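	/*
	 * Note on the masking scheme: each kvm_supported_wordN constant above
	 * is a fixed whitelist of bits KVM can virtualize; cpuid_mask() then
	 * clears whatever the host CPU itself lacks.  A feature is reported
	 * to the guest only if it passes both filters, e.g.:
	 *
	 *	entry->edx &= kvm_supported_word0_x86_features;
	 *	cpuid_mask(&entry->edx, 0);
	 */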
	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xd);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		cpuid_mask(&entry->edx, 0);
		entry->ecx &= kvm_supported_word4_x86_features;
		cpuid_mask(&entry->ecx, 4);
		/* we support x2apic emulation even if host does not support
		 * it since we emulate x2apic in software */
		entry->ecx |= F(X2APIC);
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
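	/*
	 * Illustrative note: the low byte of CPUID.2's EAX ("times" above)
	 * architecturally tells software how many times CPUID must be
	 * executed with EAX=2 to collect all cache/TLB descriptors, which is
	 * why this leaf is modeled as stateful.
	 */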
	/* function 4 has additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
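	/*
	 * Illustrative note: bits 4:0 of CPUID.4.n's EAX encode the cache
	 * type, and a value of 0 ("null, no more caches") terminates the
	 * enumeration; that is the loop exit condition above.
	 */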
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 7: {
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* Mask ebx against host capability word 9 */
		if (index == 0) {
			entry->ebx &= kvm_supported_word9_x86_features;
			cpuid_mask(&entry->ebx, 9);
			/* TSC_ADJUST is emulated */
			entry->ebx |= F(TSC_ADJUST);
		} else
			entry->ebx = 0;
		entry->eax = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	}
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
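	/*
	 * The cpuid10_eax/cpuid10_edx unions come from the host perf code and
	 * mirror the architectural layout of CPUID.0xA: EAX packs version_id,
	 * num_counters, bit_width and mask_length into four byte-wide fields.
	 */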
	/* function 0xb has additional index. */
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
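	/*
	 * Illustrative note: bits 15:8 of CPUID.0xB.n's ECX encode the
	 * topology level type (SMT, core, ...); a level type of 0 marks an
	 * invalid sub-leaf and ends the enumeration above.
	 */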
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->ebx = xstate_required_size(supported, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported >> 32;
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		if (!supported)
			break;

		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_supported_word10_x86_features;
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
			++i;
		}
		break;
	}
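	/*
	 * Illustrative note: sub-leaf 0 of CPUID.0xD reports the supported
	 * XCR0 mask (EAX = low 32 bits, EDX = high 32 bits), sub-leaf 1
	 * reports XSAVE instruction extensions, and sub-leaves >= 2 describe
	 * one state component each, hence the three-way handling above.
	 */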
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
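	/*
	 * Illustrative guest-side sketch (an assumption, not code from this
	 * file): a guest detects KVM by issuing CPUID with EAX = 0x40000000
	 * (KVM_CPUID_SIGNATURE) and comparing EBX/ECX/EDX against the
	 * "KVMKVMKVM\0\0\0" signature laid out above.
	 */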
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		cpuid_mask(&entry->edx, 1);
		entry->ecx &= kvm_supported_word6_x86_features;
		cpuid_mask(&entry->ecx, 6);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		/*
		 * If no separate guest physical address size is reported,
		 * fall back to the host physical address size.
		 */
		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->ebx = entry->edx = 0;
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_supported_word5_x86_features;
		cpuid_mask(&entry->edx, 5);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}
static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
			u32 idx, int *nent, int maxnent, unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);

	return __do_cpuid_ent(entry, func, idx, nent, maxnent);
}
struct kvm_cpuid_param {
	u32 func;
	u32 idx;
	bool has_leaf_count;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to give ourselves satisfied only with the emulated side. /me
	 * sad.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}

	return false;
}
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0, .has_leaf_count = true },
		{ .func = 0x80000000, .has_leaf_count = true },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
		{ .func = KVM_CPUID_SIGNATURE },
		{ .func = KVM_CPUID_FEATURES },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
				&nent, cpuid->nent, type);

		if (r)
			goto out_free;

		if (!ent->has_leaf_count)
			continue;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
				     &nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
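/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 *
 *	struct {
 *		struct kvm_cpuid2 cpuid;
 *		struct kvm_cpuid_entry2 entries[N];
 *	} c = { .cpuid.nent = N };
 *
 *	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, &c);	// system /dev/kvm fd
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, &c);		// feed result to a vCPU
 */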
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = i + 1; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}
/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
	u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
						  u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *maxlevel;

	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (!maxlevel)
			return NULL;
	}
	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}
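/*
 * Illustrative note: per the CPUID specification, executing CPUID with EAX
 * above the highest supported leaf returns the data of the highest supported
 * standard leaf rather than all zeroes; check_cpuid_limit() reproduces that
 * quirk for the guest.
 */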
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, function, index);

	if (!best)
		best = check_cpuid_limit(vcpu, function, index);

	/*
	 * Perfmon not yet supported for L2 guest.
	 */
	if (is_guest_mode(vcpu) && function == 0xa)
		best = NULL;

	if (best) {
		*eax = best->eax;
		*ebx = best->ebx;
		*ecx = best->ecx;
		*edx = best->edx;
	} else
		*eax = *ebx = *ecx = *edx = 0;
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx);
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 function, eax, ebx, ecx, edx;

	function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
	kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);