/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/kvm_host.h>
#include <asm/mmu_context.h>
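
/*
 * Each VCPU caches a host ASID per CPU for guest kernel mode and guest
 * user mode. These helpers return the cached value for the current CPU,
 * masked down to the ASID bits the CPU actually implements.
 */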
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}
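
/*
 * Look up (and if necessary fault in) the host page backing guest
 * physical frame @gfn, caching the resulting pfn in the guest_pmap
 * table. Returns 0 on success or -EFAULT if no host page could be
 * obtained.
 */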
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
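
/*
 * Note: in this trap-and-emulate implementation the guest's KSEG0 is
 * remapped into the user address range (see KVM_GUEST_KSEG0), so guest
 * "unmapped" addresses still arrive here as ordinary guest virtual
 * addresses from the host's point of view.
 */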
/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
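
/*
 * A MIPS host TLB entry maps an even/odd pair of virtual pages, so the
 * fault handler below must back both halves of the pair (gfn and
 * gfn ^ 1) with host pages before writing the entry.
 */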
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	/* Both pages of the even/odd pair are accessed, so bound-check gfn | 1 */
	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
	pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
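
/*
 * Handle a TLB miss in a guest mapped segment: the guest TLB entry that
 * covers the address has already been looked up by the caller. Back its
 * two physical pages with host pages and mirror the entry into the host
 * TLB, combining the host pfns with the guest entry's D and V bits.
 */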
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0])
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1])
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[
			mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[
			mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT];
	}

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb->tlb_lo[0] & ENTRYLO_D) |
		(tlb->tlb_lo[0] & ENTRYLO_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb->tlb_lo[1] & ENTRYLO_D) |
		(tlb->tlb_lo[1] & ENTRYLO_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
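
/*
 * Allocate a fresh ASID for @mm on @cpu by bumping the per-CPU ASID
 * cache. The cached value carries a "version" in the bits above the
 * hardware ASID mask; when the masked part wraps to zero the TLB is
 * flushed and a new ASID cycle begins.
 */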
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
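
/*
 * Worked example, assuming an 8-bit hardware ASID and an increment of 1:
 * if asid_cache(cpu) is 0x1ff, the increment yields 0x200, whose masked
 * ASID field (0x200 & 0xff) is zero, so the TLB is flushed and 0x200
 * starts a new version. Only if the counter wraps the whole word to
 * zero is it reset to asid_first_version(cpu).
 */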

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}
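
/*
 * On vcpu_load, the cached guest ASIDs are revalidated against the
 * current CPU's asid_cache: if the version bits (above the hardware
 * ASID mask) differ, the cached ASIDs belong to an old cycle and fresh
 * ones must be allocated for both guest kernel and guest user mode.
 */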
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
			vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
			vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n",
			  cpu, vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid, we need to set it to what it should be
		 * based on the mode of the Guest (Kernel/User).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}
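
/*
 * On vcpu_put, EntryHi is saved so the guest ASID can be restored on
 * the next vcpu_load; the host's own context for this mm is then
 * restored, dropping it first if its ASID version has expired.
 */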
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
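
/*
 * Fetch the guest instruction at @opc for emulation. For mapped guest
 * segments this probes the host TLB and, on a miss, walks the guest TLB
 * and faults the mapping in before reading through the guest virtual
 * address; for guest KSEG0 it translates to a host physical address and
 * reads through CKSEG0.
 */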
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	unsigned long va = (unsigned long)opc;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, va);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = va & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index]);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
		inst = *(u32 *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
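
/*
 * A minimal usage sketch (hypothetical caller; the real callers live in
 * the instruction emulation paths):
 *
 *	u32 inst = kvm_get_inst(opc, vcpu);
 *	if (inst == KVM_INVALID_INST)
 *		return EMULATE_FAIL;
 */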