/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
#include "timing.h"
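
/*
 * Host TLB1 indexing is reversed: shadow entry 0 lives in the highest
 * hardware entry, below anything the host has reserved for itself.
 */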
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

struct id {
	unsigned long val;
	struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS,guestTID,guestPR) --> ID of physical cpu.
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};

/*
 * This table provides reversed mappings of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps last used shadow ID on local core.
 * The valid range of shadow ID is [1..255] */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

/*
 * Allocate a free shadow id and set up a valid sid mapping in the given
 * entry.  A mapping is only valid when vcpu_id_table and pcpu_id_table
 * match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = ++(__get_cpu_var(pcpu_last_used_sid));
	if (sid < NUM_TIDS) {
		__get_cpu_var(pcpu_sids).entry[sid] = entry;
		entry->val = sid;
		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}

/*
 * Check if the given entry contains a valid shadow id mapping.
 * An ID mapping is considered valid only if
 * both vcpu and pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
		return entry->val;
	return -1;
}

/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
	__get_cpu_var(pcpu_last_used_sid) = 0;
	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
}

/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
			       struct kvmppc_vcpu_e500 *vcpu_e500,
			       int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map guest (vcpu,AS,ID,PR) to a physical core shadow id.
 * This function first looks up whether a valid mapping exists;
 * if not, it creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
					unsigned int as, unsigned int gid,
					unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}

/* Map guest pid to shadow.
 * We use PID to keep the shadow of the current guest non-zero PID,
 * and use PID1 to keep the shadow of the guest zero PID,
 * so that a guest tlbe with TID=0 can be accessed at any time. */
void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}
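
/* Dump the guest TLB contents to the kernel log (debug aid). */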
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}
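
/*
 * Round-robin victim selection for guest TLB0; gtlb_nv[0] is the
 * next-victim hint that is also reported to the guest in MAS0[NV].
 */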
static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}
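
/*
 * On SMP hosts, force the M (memory coherence) bit on so that shadow
 * mappings remain coherent if the vcpu migrates to another core.
 */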
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow tlb entry into the host TLB.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	asm volatile("isync; tlbwe" : : : "memory");
	local_irq_restore(flags);
}

/* esel is index into set, not whole array */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel, struct tlbe *stlbe)
{
	if (tlbsel == 0) {
		__write_host_tlbe(stlbe, MAS0_TLBSEL(0) | MAS0_ESEL(esel));
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(esel)));
	}

	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}
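
/*
 * Map the vcpu's magic (shared) page into the guest, using the host
 * TLB1 entry at tlbcam_index, which tlb1_max_shadow_size() keeps
 * reserved for this purpose.
 */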
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas3 = (pfn << PAGE_SHIFT) |
		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas7 = pfn >> (32 - PAGE_SHIFT);

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Shadow PID may be expired on local core */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
}
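
/*
 * Invalidate the host shadow state backing a guest TLB entry: either
 * the specific host TLB0 entry, or the whole shadow PID if the entry
 * is in TLB1 or is not mapped on this host CPU.
 */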
static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts, pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case (for TLB0) we do a local invalidation
		 * of the specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU, or
		 * if we're invalidating a TLB1 entry, we invalidate the
		 * entire shadow PID.
		 */
		if (tlbsel == 1 ||
		    (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a TLB0 entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search host TLB0 to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}
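
/*
 * TLB0 is set-associative: the set is selected by the low EPN bits,
 * and entries are stored as a flat (set * ways) array.
 */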
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
	int set_base;

	set_base = (addr >> PAGE_SHIFT) & (sets - 1);
	set_base *= ways;

	return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
	int sets = KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

	return tlb0_set_base(addr, sets, KVM_E500_TLB0_WAY_NUM);
}

static int htlb0_set_base(gva_t addr)
{
	return tlb0_set_base(addr, host_tlb_params[0].sets,
			     host_tlb_params[0].ways);
}

static unsigned int get_tlb_esel(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel)
{
	unsigned int esel = get_tlb_esel_bit(vcpu_e500);

	if (tlbsel == 0) {
		esel &= KVM_E500_TLB0_WAY_NUM_MASK;
		esel += gtlb0_set_base(vcpu_e500, vcpu_e500->mas2);
	} else {
		esel &= vcpu_e500->gtlb_size[tlbsel] - 1;
	}

	return esel;
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_size[tlbsel];
	unsigned int set_base;
	int i;

	if (tlbsel == 0) {
		set_base = gtlb0_set_base(vcpu_e500, eaddr);
		size = KVM_E500_TLB0_WAY_NUM;
	} else {
		set_base = 0;
	}

	for (i = 0; i < size; i++) {
		struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}
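
/*
 * A tlbe_ref pins the host page backing a shadow mapping and tracks
 * whether it may be dirty, so that teardown can release the page with
 * the matching kvm_release_pfn_*() call.
 */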
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct tlbe *gtlbe,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		ref->flags |= E500_TLB_DIRTY;
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		if (ref->flags & E500_TLB_DIRTY)
			kvm_release_pfn_dirty(ref->pfn);
		else
			kvm_release_pfn_clean(ref->pfn);

		ref->flags = 0;
	}
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel = 0;
	int i;

	for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
		kvmppc_e500_ref_release(ref);
	}
}

static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel = 1;
	int i;

	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->tlb_refs[stlbsel][i];
		kvmppc_e500_ref_release(ref);
	}

	clear_tlb_privs(vcpu_e500);
}
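
/*
 * Load the guest-visible MAS registers the way the hardware would on
 * a real TLB miss, so the guest's miss handler only has to tlbwe.
 */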
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}

/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
					   struct tlbe *gtlbe, int tsize,
					   struct tlbe_ref *ref,
					   u64 gvaddr, struct tlbe *stlbe)
{
	pfn_t pfn = ref->pfn;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force TS=1 IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
}

/* sesel is an index into the entire array, not just the set */
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int sesel,
	struct tlbe *stlbe, struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn, hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */
			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */
			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			kvm_release_pfn_clean(pfn);
			return;
		}
	}

	/* Drop old ref and setup new one. */
	kvmppc_e500_ref_release(ref);
	kvmppc_e500_ref_setup(ref, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe);
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
				int esel, struct tlbe *stlbe)
{
	struct tlbe *gtlbe;
	struct tlbe_ref *ref;
	int sesel = esel & (host_tlb_params[0].ways - 1);
	int sesel_base;
	gva_t ea;

	gtlbe = &vcpu_e500->gtlb_arch[0][esel];
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	ea = get_tlb_eaddr(gtlbe);
	sesel_base = htlb0_set_base(ea);

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, sesel_base + sesel, stlbe, ref);

	return sesel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
{
	struct tlbe_ref *ref;
	unsigned int victim;

	victim = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	ref = &vcpu_e500->tlb_refs[1][victim];
	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1,
			       victim, stlbe, ref);

	return victim;
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Recalc shadow pid since MSR changes */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	gtlbe->mas1 = 0;

	return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}
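
/*
 * Emulate tlbivax.  The low bits of the effective address encode the
 * operation: EA bit 2 requests invalidate-all, EA bit 3 selects the TLB.
 */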
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe;
	int tlbsel, esel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| ((vcpu_e500->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

/* sesel is index into the set, not the whole array */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct tlbe *gtlbe,
			struct tlbe *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe),
				   get_cur_pr(&vcpu_e500->vcpu), 0);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe;
	int tlbsel, esel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	if (get_tlb_v(gtlbe))
		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		struct tlbe stlbe;
		int stlbsel, sesel;
		u64 eaddr;
		u64 raddr;

		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);

			break;

		case 1:
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe, &stlbe);
			break;

		default:
			BUG();
		}

		write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}
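
/* Translate a guest effective address through the indexed guest TLB entry. */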
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct tlbe *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel & (host_tlb_params[0].ways - 1);
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
					&priv->ref, eaddr, &stlbe);
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
					     gtlbe, &stlbe);
		break;
	}

	default:
		BUG();
		break;
	}

	write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
			gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->gtlb_arch[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->gtlb_arch[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_arch[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[0] == NULL)
		goto err;

	vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_arch[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[1] == NULL)
		goto err;

	vcpu_e500->tlb_refs[0] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[0])
		goto err;

	vcpu_e500->tlb_refs[1] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[1])
		goto err;

	/* gtlb_priv[] holds struct tlbe_priv, so size the allocation by
	 * that type rather than by the tlbe_ref it happens to contain. */
	vcpu_e500->gtlb_priv[0] =
		kzalloc(sizeof(struct tlbe_priv) * vcpu_e500->gtlb_size[0],
			GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[0])
		goto err;

	vcpu_e500->gtlb_priv[1] =
		kzalloc(sizeof(struct tlbe_priv) * vcpu_e500->gtlb_size[1],
			GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[1])
		goto err;

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
		goto err;

	/* Init TLB configuration register */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];

	return 0;

err:
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);
	kfree(vcpu_e500->gtlb_arch[0]);
	kfree(vcpu_e500->gtlb_arch[1]);
	return -ENOMEM;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	clear_tlb_refs(vcpu_e500);
	kvmppc_e500_id_table_free(vcpu_e500);
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);
	kfree(vcpu_e500->gtlb_arch[1]);
	kfree(vcpu_e500->gtlb_arch[0]);
}