/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
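/*
 * Illustrative C sketch of what the macro above computes (not kernel
 * code; assumes ULONG_SIZE == 8 on ppc64):
 *
 *   // byte offset of the checkpointed GPR 'reg' image in the vcpu struct
 *   size_t vcpu_gprs_tm(int reg) { return VCPU_GPR_TM + reg * 8; }
 */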
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
        std     r0, PPC_LR_STKOFF(r1)
        LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
        mtmsrd  r0,1                    /* clear RI in MSR */
        ld      r4, HSTATE_KVM_VCPU(r13)
        /* Back from guest - restore host state and return to caller */

        /* Restore host DABR and DABRX */
        ld      r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

        ld      r3,PACA_SPRG_VDSO(r13)
        mtspr   SPRN_SPRG_VDSO_WRITE,r3
        /* Reload the host's PMU registers */
        ld      r3, PACALPPACAPTR(r13)  /* is the host using the PMU? */
        lbz     r4, LPPACA_PMCINUSE(r3)
        beq     23f                     /* skip if not */
        ld      r3, HSTATE_MMCR0(r13)
        andi.   r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
        lwz     r3, HSTATE_PMC1(r13)
        lwz     r4, HSTATE_PMC2(r13)
        lwz     r5, HSTATE_PMC3(r13)
        lwz     r6, HSTATE_PMC4(r13)
        lwz     r8, HSTATE_PMC5(r13)
        lwz     r9, HSTATE_PMC6(r13)
        ld      r3, HSTATE_MMCR0(r13)
        ld      r4, HSTATE_MMCR1(r13)
        ld      r5, HSTATE_MMCRA(r13)
        ld      r6, HSTATE_SIAR(r13)
        ld      r7, HSTATE_SDAR(r13)
        ld      r8, HSTATE_MMCR2(r13)
        ld      r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        /*
         * Reload DEC.  HDEC interrupts were disabled when
         * we reloaded the host's LPCR value.
         */
        ld      r3, HSTATE_DECEXP(r13)
        /*
         * For external and machine check interrupts, we need
         * to call the Linux handler to process the interrupt.
         * We do that by jumping to absolute address 0x500 for
         * external interrupts, or the machine_check_fwnmi label
         * for machine checks (since firmware might have patched
         * the vector area at 0x200).  The [h]rfid at the end of the
         * handler will return to the book3s_hv_interrupts.S code.
         * For other interrupts we do the rfid to get back
         * to the book3s_hv_interrupts.S code here.
         */
        ld      r8, 112+PPC_LR_STKOFF(r1)
        ld      r7, HSTATE_HOST_MSR(r13)
        cmpwi   cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
        cmpwi   cr2, r12, BOOK3S_INTERRUPT_HMI
        beq     cr2, 14f                /* HMI check */
        /* RFI into the highmem handler, or branch to interrupt handler */
        mtmsrd  r6, 1                   /* Clear RI in MSR */
        beq     cr1, 13f                /* machine check */

        /* On POWER7, we have external interrupts set to use HSRR0/1 */
11:     mtspr   SPRN_HSRR0, r8

13:     b       machine_check_fwnmi

14:     mtspr   SPRN_HSRR0, r8
        b       hmi_exception_after_realmode
kvmppc_primary_no_guest:
        /* We handle this much like a ceded vcpu */
        /* put the HDEC into the DEC, since HDEC interrupts don't wake us */

        /* set our bit in napping_threads */
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r7, HSTATE_PTID(r13)
        addi    r6, r5, VCORE_NAPPING_THREADS

        /* order napping_threads update vs testing entry_exit_map */
        lwz     r7, VCORE_ENTRY_EXIT(r5)
        bge     kvm_novcpu_exit         /* another thread already exiting */
        li      r3, NAPPING_NOVCPU
        stb     r3, HSTATE_NAPPING(r13)

        li      r3, 0                   /* Don't wake on privileged (OS) doorbell */
        ld      r1, HSTATE_HOST_R1(r13)
        ld      r5, HSTATE_KVM_VCORE(r13)
        stb     r0, HSTATE_NAPPING(r13)
        stb     r0, HSTATE_HWTHREAD_REQ(r13)

        /* check the wake reason */
        bl      kvmppc_check_wake_reason

        /* see if any other thread is already exiting */
        lwz     r0, VCORE_ENTRY_EXIT(r5)

        /* clear our bit in napping_threads */
        lbz     r7, HSTATE_PTID(r13)
        addi    r6, r5, VCORE_NAPPING_THREADS

        /* See if the wake reason means we need to exit */

        /* See if our timeslice has expired (HDEC is negative) */
        li      r12, BOOK3S_INTERRUPT_HV_DECREMENTER

        /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
        ld      r4, HSTATE_KVM_VCPU(r13)
        beq     kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r4, VCPU_TB_RMENTRY
        bl      kvmhv_start_timing
/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
        .globl  kvm_start_guest
        /* Set runlatch bit the minute you wake up from nap */

        li      r0,KVM_HWTHREAD_IN_KVM
        stb     r0,HSTATE_HWTHREAD_STATE(r13)

        /* NV GPR values from power7_idle() will no longer be valid */
        stb     r0,PACA_NAPSTATELOST(r13)

        /* were we napping due to cede? */
        lbz     r0,HSTATE_NAPPING(r13)
        cmpwi   r0,NAPPING_CEDE
        cmpwi   r0,NAPPING_NOVCPU
        beq     kvm_novcpu_wakeup

        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,STACK_FRAME_OVERHEAD
        /*
         * We weren't napping due to cede, so this must be a secondary
         * thread being woken up to run a guest, or being woken up due
         * to a stray IPI.  (Or due to some machine check or hypervisor
         * maintenance interrupt while the core is in KVM.)
         */

        /* Check the wake reason in SRR1 to see why we got here */
        bl      kvmppc_check_wake_reason

        /* get vcpu pointer, NULL if we have no vcpu to run */
        ld      r4,HSTATE_KVM_VCPU(r13)
        /* if we have no vcpu to run, go back to sleep */
kvm_secondary_got_guest:

        /* Set HSTATE_DSCR(r13) to something sensible */
        ld      r6, PACA_DSCR(r13)
        std     r6, HSTATE_DSCR(r13)

        /* Order load of vcore, ptid etc. after load of vcpu */

        /* Back from the guest, go back to nap */
        /* Clear our vcpu pointer so we don't come back in early */
        /*
         * Once we clear HSTATE_KVM_VCPU(r13), the code in
         * kvmppc_run_core() is going to assume that all our vcpu
         * state is visible in memory.  This lwsync makes sure
         * that that is true.
         */
        std     r0, HSTATE_KVM_VCPU(r13)
        /*
         * At this point we have finished executing in the guest.
         * We need to wait for hwthread_req to become zero, since
         * we may not turn on the MMU while hwthread_req is non-zero.
         * While waiting we also need to check if we get given a vcpu to run.
         */
        lbz     r3, HSTATE_HWTHREAD_REQ(r13)
        li      r0, KVM_HWTHREAD_IN_KERNEL
        stb     r0, HSTATE_HWTHREAD_STATE(r13)
        /* need to recheck hwthread_req after a barrier, to avoid race */
        lbz     r3, HSTATE_HWTHREAD_REQ(r13)
        /*
         * We jump to power7_wakeup_loss, which will return to the caller
         * of power7_nap in the powernv cpu offline loop.  The value we
         * put in r3 becomes the return value for power7_nap.
         */
        rlwimi  r4, r3, 0, LPCR_PECE0 | LPCR_PECE1

        ld      r4, HSTATE_KVM_VCPU(r13)
        b       kvm_secondary_got_guest

54:     li      r0, KVM_HWTHREAD_IN_KVM
        stb     r0, HSTATE_HWTHREAD_STATE(r13)
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_hv_entry
kvmppc_hv_entry:

        /* Required state:
         *
         * R4 = vcpu pointer (or NULL)
         * all other volatile GPRS = free
         */
        std     r0, PPC_LR_STKOFF(r1)

        /* Save R1 in the PACA */
        std     r1, HSTATE_HOST_R1(r13)

        li      r6, KVM_GUEST_MODE_HOST_HV
        stb     r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        /* Store initial timestamp */
        addi    r3, r4, VCPU_TB_RMENTRY
        bl      kvmhv_start_timing
        /*
         * POWER7/POWER8 host -> guest partition switch code.
         * We don't have to lock against concurrent tlbies,
         * but we do have to coordinate across hardware threads.
         */
        /* Set bit in entry map iff exit map is zero. */
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r6, HSTATE_PTID(r13)
        addi    r9, r5, VCORE_ENTRY_EXIT
        cmpwi   r3, 0x100               /* any threads starting to exit? */
        bge     secondary_too_late      /* if so we're too late to the party */
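        /*
         * A sketch of the entry/exit accounting above, in C-style pseudocode
         * (illustrative; per the comments in this file, the entry map lives
         * in bits 0-7 and the exit map in bits 8-15 of
         * vcore->entry_exit_map; the real update is done with lwarx/stwcx.):
         *
         *   u32 map = vc->entry_exit_map;
         *   if (map >= 0x100)                // a thread already started exiting
         *       goto secondary_too_late;     // don't enter the guest at all
         *   vc->entry_exit_map = map | (1u << ptid);  // mark ourselves entered
         */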
        /* Primary thread switches to guest partition. */
        ld      r9,VCORE_KVM(r5)        /* pointer to struct kvm */
        li      r0,LPID_RSVD            /* switch to reserved LPID */
        mtspr   SPRN_SDR1,r6            /* switch to partition page table */

        /* See if we need to flush the TLB */
        lhz     r6,PACAPACAINDEX(r13)   /* test_bit(cpu, need_tlb_flush) */
        clrldi  r7,r6,64-6              /* extract bit number (6 bits) */
        srdi    r6,r6,6                 /* doubleword number */
        sldi    r6,r6,3                 /* address offset */
        addi    r6,r6,KVM_NEED_FLUSH    /* dword in kvm->arch.need_tlb_flush */
23:     ldarx   r7,0,r6                 /* if set, clear the bit */

        /* Flush the TLB of any entries for this LPID */
        /* use arch 2.07S as a proxy for POWER8 */
        li      r6,512                  /* POWER8 has 512 sets */
        li      r6,128                  /* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
        li      r7,0x800                /* IS field = 0b10 */
        /* Add timebase offset onto timebase */
22:     ld      r8,VCORE_TB_OFFSET(r5)
        mftb    r6                      /* current host timebase */
        mtspr   SPRN_TBU40,r8           /* update upper 40 bits */
        mftb    r7                      /* check if lower 24 bits overflowed */
        addis   r8,r8,0x100             /* if so, increment upper 40 bits */
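        /*
         * Sketch of the timebase-offset update above (illustrative C, not
         * the kernel's).  SPRN_TBU40 writes only the upper 40 bits of the
         * timebase, so a carry out of the free-running low 24 bits while we
         * were updating must be re-applied:
         *
         *   u64 tb = mftb();
         *   u64 new_tb = tb + vc->tb_offset;
         *   mtspr(SPRN_TBU40, new_tb);                  // sets upper 40 bits
         *   if ((mftb() & 0xffffff) < (tb & 0xffffff))  // low bits wrapped?
         *       mtspr(SPRN_TBU40, new_tb + 0x1000000);  // bump upper 40 bits
         */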
        /* Load guest PCR value to select appropriate compat mode */
37:     ld      r7, VCORE_PCR(r5)

        /* DPDES is shared between threads */
        ld      r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

        stb     r0,VCORE_IN_GUEST(r5)   /* signal secondaries to continue */

        /* Secondary threads wait for primary to have done partition switch */
20:     lbz     r0,VCORE_IN_GUEST(r5)

10:     ld      r8,VCORE_LPCR(r5)

        /* Check if HDEC expires soon */
        cmpwi   r3,512                  /* 1 microsecond */
        li      r12,BOOK3S_INTERRUPT_HV_DECREMENTER

        /* Do we have a guest vcpu to run? */
        beq     kvmppc_primary_no_guest
        /* Load up guest SLB entries */
        lwz     r5,VCPU_SLB_MAX(r4)
1:      ld      r8,VCPU_SLB_E(r6)
        addi    r6,r6,VCPU_SLB_SIZE

        /* Increment yield count if they have a VPA */
        li      r6, LPPACA_YIELDCOUNT
        stb     r6, VCPU_VPA_DIRTY(r4)
        /* Save purr/spurr */
        std     r5,HSTATE_PURR(r13)
        std     r6,HSTATE_SPURR(r13)

        /* Set partition DABR */
        /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
        lwz     r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

        /* Turn on TM/FP/VSX/VMX so we can restore them. */
        oris    r5, r5, (MSR_VEC | MSR_VSX)@h

        /*
         * The user may change these outside of a transaction, so they must
         * always be context switched.
         */
        ld      r5, VCPU_TFHAR(r4)
        ld      r6, VCPU_TFIAR(r4)
        ld      r7, VCPU_TEXASR(r4)
        mtspr   SPRN_TEXASR, r7

        rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
        beq     skip_tm                 /* TM not active in guest */
        /*
         * Make sure the failure summary is set, otherwise we'll program check
         * when we trechkpt.  It's possible that this might not have been set
         * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
         * host.
         */
        oris    r7, r7, (TEXASR_FS)@h
        mtspr   SPRN_TEXASR, r7
        /*
         * We need to load up the checkpointed state for the guest.
         * We need to do this early as it will blow away any GPRs, VSRs and
         * some SPRs.
         */
        addi    r3, r31, VCPU_FPRS_TM
        addi    r3, r31, VCPU_VRS_TM
        lwz     r7, VCPU_VRSAVE_TM(r4)
        mtspr   SPRN_VRSAVE, r7

        ld      r5, VCPU_LR_TM(r4)
        lwz     r6, VCPU_CR_TM(r4)
        ld      r7, VCPU_CTR_TM(r4)
        ld      r8, VCPU_AMR_TM(r4)
        ld      r9, VCPU_TAR_TM(r4)
        /*
         * Load up PPR and DSCR values but don't put them in the actual SPRs
         * till the last moment to avoid running with userspace PPR and DSCR
         * for too long.
         */
        ld      r29, VCPU_DSCR_TM(r4)
        ld      r30, VCPU_PPR_TM(r4)

        std     r2, PACATMSCRATCH(r13)  /* Save TOC */

        /* Clear the MSR RI since r1, r13 are all going to be foobar. */
        /* Load GPRs r0-r28 */
        ld      reg, VCPU_GPRS_TM(reg)(r31)

        /* Load final GPRs */
        ld      29, VCPU_GPRS_TM(29)(r31)
        ld      30, VCPU_GPRS_TM(30)(r31)
        ld      31, VCPU_GPRS_TM(31)(r31)

        /* TM checkpointed state is now setup.  All GPRs are now volatile. */

        /* Now let's get back the state we need. */
        ld      r29, HSTATE_DSCR(r13)
        ld      r4, HSTATE_KVM_VCPU(r13)
        ld      r1, HSTATE_HOST_R1(r13)
        ld      r2, PACATMSCRATCH(r13)

        /* Set the MSR RI since we have our registers back. */
        /* Load guest PMU registers */
        /* R4 is live here (vcpu pointer) */
        sldi    r3, r3, 31              /* MMCR0_FC (freeze counters) bit */
        mtspr   SPRN_MMCR0, r3          /* freeze all counters, disable ints */
        andi.   r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
        lwz     r3, VCPU_PMC(r4)        /* always load up guest PMU registers */
        lwz     r5, VCPU_PMC + 4(r4)    /* to prevent information leak */
        lwz     r6, VCPU_PMC + 8(r4)
        lwz     r7, VCPU_PMC + 12(r4)
        lwz     r8, VCPU_PMC + 16(r4)
        lwz     r9, VCPU_PMC + 20(r4)
        ld      r5, VCPU_MMCR + 8(r4)
        ld      r6, VCPU_MMCR + 16(r4)
        ld      r5, VCPU_MMCR + 24(r4)
        lwz     r7, VCPU_PMC + 24(r4)
        lwz     r8, VCPU_PMC + 28(r4)
        ld      r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        /* Load up FP, VMX and VSX registers */

        ld      r14, VCPU_GPR(R14)(r4)
        ld      r15, VCPU_GPR(R15)(r4)
        ld      r16, VCPU_GPR(R16)(r4)
        ld      r17, VCPU_GPR(R17)(r4)
        ld      r18, VCPU_GPR(R18)(r4)
        ld      r19, VCPU_GPR(R19)(r4)
        ld      r20, VCPU_GPR(R20)(r4)
        ld      r21, VCPU_GPR(R21)(r4)
        ld      r22, VCPU_GPR(R22)(r4)
        ld      r23, VCPU_GPR(R23)(r4)
        ld      r24, VCPU_GPR(R24)(r4)
        ld      r25, VCPU_GPR(R25)(r4)
        ld      r26, VCPU_GPR(R26)(r4)
        ld      r27, VCPU_GPR(R27)(r4)
        ld      r28, VCPU_GPR(R28)(r4)
        ld      r29, VCPU_GPR(R29)(r4)
        ld      r30, VCPU_GPR(R30)(r4)
        ld      r31, VCPU_GPR(R31)(r4)
        /* Switch DSCR to guest value */

        /* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
        rldimi  r8, r0, MSR_TM_LG, 63-MSR_TM_LG
        /* Load up POWER8-specific registers */
        lwz     r6, VCPU_PSPB(r4)
        ld      r6, VCPU_DAWRX(r4)
        ld      r7, VCPU_CIABR(r4)
        ld      r8, VCPU_EBBHR(r4)
        ld      r5, VCPU_EBBRR(r4)
        ld      r6, VCPU_BESCR(r4)
        ld      r7, VCPU_CSIGR(r4)
        ld      r5, VCPU_TCSCR(r4)
        lwz     r7, VCPU_GUEST_PID(r4)
        /*
         * Set the decrementer to the guest decrementer.
         */
        ld      r8,VCPU_DEC_EXPIRES(r4)
        /* r8 is a host timebase value here, convert to guest TB */
        ld      r5,HSTATE_KVM_VCORE(r13)
        ld      r6,VCORE_TB_OFFSET(r5)

        ld      r5, VCPU_SPRG0(r4)
        ld      r6, VCPU_SPRG1(r4)
        ld      r7, VCPU_SPRG2(r4)
        ld      r8, VCPU_SPRG3(r4)

        /* Load up DAR and DSISR */
        lwz     r6, VCPU_DSISR(r4)

        /* Restore AMR and UAMOR, set AMOR to all 1s */

        /* Restore state of CTRL run bit; assume 1 on entry */
kvmppc_cede_reentry:            /* r4 = vcpu, r13 = paca */

deliver_guest_interrupt:
        /* r11 = vcpu->arch.msr & ~MSR_HV */
        rldicl  r11, r11, 63 - MSR_HV_LG, 1
        rotldi  r11, r11, 1 + MSR_HV_LG
        /* Check if we can deliver an external or decrementer interrupt now */
        ld      r0, VCPU_PENDING_EXC(r4)
        rldicl  r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
        andi.   r8, r11, MSR_EE
        /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
        rldimi  r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
        li      r0, BOOK3S_INTERRUPT_EXTERNAL
        li      r0, BOOK3S_INTERRUPT_DECREMENTER
12:     mtspr   SPRN_SRR0, r10
        bl      kvmppc_msr_interrupt
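        /*
         * Illustrative C sketch of the delivery decision above (not the
         * kernel's actual C; helper names are made up):
         *
         *   if (vcpu->arch.pending_exceptions &
         *                   (1ul << BOOK3S_IRQPRIO_EXTERNAL_LEVEL))
         *       lpcr |= LPCR_MER;  // guest takes the external interrupt
         *                          // as soon as it enables MSR_EE
         *   if (deliver_now) {     // e.g. 0x500 external, 0x900 decrementer
         *       srr0 = guest_pc; srr1 = guest_msr;       // "12:" above
         *       guest_pc  = vector;
         *       guest_msr = kvmppc_msr_interrupt(guest_msr);
         *   }
         */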
        /*
         * R10: value for HSRR0
         * R11: value for HSRR1
         */
        stb     r0,VCPU_CEDED(r4)       /* cancel cede */

        /* Activate guest mode, so faults get handled by KVM */
        li      r9, KVM_GUEST_MODE_GUEST_HV
        stb     r9, HSTATE_IN_GUEST(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        /* Accumulate timing */
        addi    r3, r4, VCPU_TB_GUEST
        bl      kvmhv_accumulate_time
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        ld      r1, VCPU_GPR(R1)(r4)
        ld      r2, VCPU_GPR(R2)(r4)
        ld      r3, VCPU_GPR(R3)(r4)
        ld      r5, VCPU_GPR(R5)(r4)
        ld      r6, VCPU_GPR(R6)(r4)
        ld      r7, VCPU_GPR(R7)(r4)
        ld      r8, VCPU_GPR(R8)(r4)
        ld      r9, VCPU_GPR(R9)(r4)
        ld      r10, VCPU_GPR(R10)(r4)
        ld      r11, VCPU_GPR(R11)(r4)
        ld      r12, VCPU_GPR(R12)(r4)
        ld      r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        ld      r0, VCPU_GPR(R0)(r4)
        ld      r4, VCPU_GPR(R4)(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r4, VCPU_TB_RMEXIT
        bl      kvmhv_accumulate_time
11:     b       kvmhv_switch_to_host

        ld      r4, HSTATE_KVM_VCPU(r13)
        addi    r3, r4, VCPU_TB_RMEXIT
        bl      kvmhv_accumulate_time
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
        .globl  kvmppc_interrupt_hv
        /*
         * R12 = interrupt vector
         * guest CR, R12 saved in shadow VCPU SCRATCH1/0
         * guest R13 saved in SPRN_SCRATCH0
         */
        std     r9, HSTATE_SCRATCH2(r13)
        lbz     r9, HSTATE_IN_GUEST(r13)
        cmpwi   r9, KVM_GUEST_MODE_HOST_HV
        beq     kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        cmpwi   r9, KVM_GUEST_MODE_GUEST
        ld      r9, HSTATE_SCRATCH2(r13)
        beq     kvmppc_interrupt_pr

        /* We're now back in the host but in guest MMU context */
        li      r9, KVM_GUEST_MODE_HOST_HV
        stb     r9, HSTATE_IN_GUEST(r13)
        ld      r9, HSTATE_KVM_VCPU(r13)

        std     r0, VCPU_GPR(R0)(r9)
        std     r1, VCPU_GPR(R1)(r9)
        std     r2, VCPU_GPR(R2)(r9)
        std     r3, VCPU_GPR(R3)(r9)
        std     r4, VCPU_GPR(R4)(r9)
        std     r5, VCPU_GPR(R5)(r9)
        std     r6, VCPU_GPR(R6)(r9)
        std     r7, VCPU_GPR(R7)(r9)
        std     r8, VCPU_GPR(R8)(r9)
        ld      r0, HSTATE_SCRATCH2(r13)
        std     r0, VCPU_GPR(R9)(r9)
        std     r10, VCPU_GPR(R10)(r9)
        std     r11, VCPU_GPR(R11)(r9)
        ld      r3, HSTATE_SCRATCH0(r13)
        lwz     r4, HSTATE_SCRATCH1(r13)
        std     r3, VCPU_GPR(R12)(r9)
        ld      r3, HSTATE_CFAR(r13)
        std     r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        ld      r4, HSTATE_PPR(r13)
        std     r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        /* Restore R1/R2 so we can handle faults */
        ld      r1, HSTATE_HOST_R1(r13)

        mfspr   r10, SPRN_SRR0
        mfspr   r11, SPRN_SRR1
        std     r10, VCPU_SRR0(r9)
        std     r11, VCPU_SRR1(r9)
        andi.   r0, r12, 2              /* need to read HSRR0/1? */
        mfspr   r10, SPRN_HSRR0
        mfspr   r11, SPRN_HSRR1
1:      std     r10, VCPU_PC(r9)
        std     r11, VCPU_MSR(r9)
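        /*
         * The "andi. r0, r12, 2" above keys off the trap number: the
         * first-level handlers use a distinct encoding (bit 1 of r12 set)
         * for interrupts delivered via HSRR0/1 rather than SRR0/1.
         * Roughly (illustrative C, encoding inferred from the test above):
         *
         *   u64 pc, msr;
         *   if (trap & 2) { pc = mfspr(SPRN_HSRR0); msr = mfspr(SPRN_HSRR1); }
         *   else          { pc = mfspr(SPRN_SRR0);  msr = mfspr(SPRN_SRR1);  }
         *   vcpu->arch.pc = pc; vcpu->arch.msr = msr;  // VCPU_PC / VCPU_MSR
         */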
        std     r3, VCPU_GPR(R13)(r9)
        stw     r12, VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r9, VCPU_TB_RMINTR
        bl      kvmhv_accumulate_time
        ld      r5, VCPU_GPR(R5)(r9)
        ld      r6, VCPU_GPR(R6)(r9)
        ld      r7, VCPU_GPR(R7)(r9)
        ld      r8, VCPU_GPR(R8)(r9)
        /* Save HEIR (HV emulation assist reg) in emul_inst
           if this is an HEI (HV emulation interrupt, e40) */
        li      r3,KVM_INST_FETCH_FAILED
        stw     r3,VCPU_LAST_INST(r9)
        cmpwi   r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:     stw     r3,VCPU_HEIR(r9)

        /* these are volatile across C function calls */
        std     r3, VCPU_CTR(r9)
        stw     r4, VCPU_XER(r9)
        /* If this is a page table miss then see if it's theirs or ours */
        cmpwi   r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
        cmpwi   r12, BOOK3S_INTERRUPT_H_INST_STORAGE

        /* See if this is a leftover HDEC interrupt */
        cmpwi   r12,BOOK3S_INTERRUPT_HV_DECREMENTER
        bge     fast_guest_return

        /* See if this is an hcall we can handle in real mode */
        cmpwi   r12,BOOK3S_INTERRUPT_SYSCALL
        beq     hcall_try_real_mode
        /* External interrupt ? */
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
        bne+    guest_exit_cont

        /* External interrupt, first check for host_ipi.  If this is
         * set, we know the host wants us out so let's do it now
         */

        /* Check if any CPU is heading out to the host, if so head out too */
        ld      r5, HSTATE_KVM_VCORE(r13)
        lwz     r0, VCORE_ENTRY_EXIT(r5)
        blt     deliver_guest_interrupt
guest_exit_cont:                /* r9 = vcpu, r12 = trap, r13 = paca */
        /* Save more register state */
        std     r6, VCPU_DAR(r9)
        stw     r7, VCPU_DSISR(r9)
        /* don't overwrite fault_dar/fault_dsisr if HDSI */
        cmpwi   r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
        std     r6, VCPU_FAULT_DAR(r9)
        stw     r7, VCPU_FAULT_DSISR(r9)

        /* See if it is a machine check */
        cmpwi   r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        beq     machine_check_realmode
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r9, VCPU_TB_RMEXIT
        bl      kvmhv_accumulate_time

        /* Save guest CTRL register, set runlatch to 1 */
6:      mfspr   r6,SPRN_CTRLF
        stw     r6,VCPU_CTRL(r9)
        /* Read the guest SLB and save it away */
        lwz     r0,VCPU_SLB_NR(r9)      /* number of entries in SLB */
        andis.  r0,r8,SLB_ESID_V@h
        add     r8,r8,r6                /* put index in */
        std     r8,VCPU_SLB_E(r7)
        std     r3,VCPU_SLB_V(r7)
        addi    r7,r7,VCPU_SLB_SIZE
        stw     r5,VCPU_SLB_MAX(r9)
        /*
         * Save the guest PURR/SPURR
         */
        ld      r8,VCPU_SPURR(r9)
        std     r5,VCPU_PURR(r9)
        std     r6,VCPU_SPURR(r9)

        /*
         * Restore host PURR/SPURR and add guest times
         * so that the time in the guest gets accounted.
         */
        ld      r3,HSTATE_PURR(r13)
        ld      r4,HSTATE_SPURR(r13)

        /* r5 is a guest timebase value here, convert to host TB */
        ld      r3,HSTATE_KVM_VCORE(r13)
        ld      r4,VCORE_TB_OFFSET(r3)
        std     r5,VCPU_DEC_EXPIRES(r9)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        /* Save POWER8-specific registers */
        std     r5, VCPU_IAMR(r9)
        stw     r6, VCPU_PSPB(r9)
        std     r7, VCPU_FSCR(r9)
        std     r6, VCPU_VTB(r9)
        std     r7, VCPU_TAR(r9)
        mfspr   r8, SPRN_EBBHR
        std     r8, VCPU_EBBHR(r9)
        mfspr   r5, SPRN_EBBRR
        mfspr   r6, SPRN_BESCR
        mfspr   r7, SPRN_CSIGR
        std     r5, VCPU_EBBRR(r9)
        std     r6, VCPU_BESCR(r9)
        std     r7, VCPU_CSIGR(r9)
        std     r8, VCPU_TACR(r9)
        mfspr   r5, SPRN_TCSCR
        std     r5, VCPU_TCSCR(r9)
        std     r6, VCPU_ACOP(r9)
        stw     r7, VCPU_GUEST_PID(r9)
        std     r8, VCPU_WORT(r9)
        /* Save and reset AMR and UAMOR before turning on the MMU */
        std     r6,VCPU_UAMOR(r9)

        /* Switch DSCR back to host value */
        ld      r7, HSTATE_DSCR(r13)
        std     r8, VCPU_DSCR(r9)
        /* Save non-volatile GPRs */
        std     r14, VCPU_GPR(R14)(r9)
        std     r15, VCPU_GPR(R15)(r9)
        std     r16, VCPU_GPR(R16)(r9)
        std     r17, VCPU_GPR(R17)(r9)
        std     r18, VCPU_GPR(R18)(r9)
        std     r19, VCPU_GPR(R19)(r9)
        std     r20, VCPU_GPR(R20)(r9)
        std     r21, VCPU_GPR(R21)(r9)
        std     r22, VCPU_GPR(R22)(r9)
        std     r23, VCPU_GPR(R23)(r9)
        std     r24, VCPU_GPR(R24)(r9)
        std     r25, VCPU_GPR(R25)(r9)
        std     r26, VCPU_GPR(R26)(r9)
        std     r27, VCPU_GPR(R27)(r9)
        std     r28, VCPU_GPR(R28)(r9)
        std     r29, VCPU_GPR(R29)(r9)
        std     r30, VCPU_GPR(R30)(r9)
        std     r31, VCPU_GPR(R31)(r9)
        mfspr   r3, SPRN_SPRG0
        mfspr   r4, SPRN_SPRG1
        mfspr   r5, SPRN_SPRG2
        mfspr   r6, SPRN_SPRG3
        std     r3, VCPU_SPRG0(r9)
        std     r4, VCPU_SPRG1(r9)
        std     r5, VCPU_SPRG2(r9)
        std     r6, VCPU_SPRG3(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
        rldimi  r8, r0, MSR_TM_LG, 63-MSR_TM_LG
        rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
        beq     1f                      /* TM not active in guest. */

        li      r3, TM_CAUSE_KVM_RESCHED

        /* Clear the MSR RI since r1, r13 are all going to be foobar. */

        /* All GPRs are volatile at this point. */
        /* Temporarily store r13 and r9 so we have some regs to play with */
        std     r9, PACATMSCRATCH(r13)
        ld      r9, HSTATE_KVM_VCPU(r13)

        /* Get a few more GPRs free. */
        std     r29, VCPU_GPRS_TM(29)(r9)
        std     r30, VCPU_GPRS_TM(30)(r9)
        std     r31, VCPU_GPRS_TM(31)(r9)

        /* Save away PPR and DSCR soon so don't run with user values. */
        mfspr   r30, SPRN_DSCR
        ld      r29, HSTATE_DSCR(r13)
        mtspr   SPRN_DSCR, r29
        /* Save all but r9, r13 & r29-r31 */
        .if (reg != 9) && (reg != 13)
        std     reg, VCPU_GPRS_TM(reg)(r9)

        /* ... now save r13 */
        std     r4, VCPU_GPRS_TM(13)(r9)
        /* ... and save r9 */
        ld      r4, PACATMSCRATCH(r13)
        std     r4, VCPU_GPRS_TM(9)(r9)
        /* Reload stack pointer and TOC. */
        ld      r1, HSTATE_HOST_R1(r13)

        /* Set MSR RI now we have r1 and r13 back. */

        /* Save away checkpointed SPRs. */
        std     r31, VCPU_PPR_TM(r9)
        std     r30, VCPU_DSCR_TM(r9)
        std     r5, VCPU_LR_TM(r9)
        stw     r6, VCPU_CR_TM(r9)
        std     r7, VCPU_CTR_TM(r9)
        std     r8, VCPU_AMR_TM(r9)
        std     r10, VCPU_TAR_TM(r9)

        /* Restore r12 as trap number. */
        lwz     r12, VCPU_TRAP(r9)

        addi    r3, r9, VCPU_FPRS_TM
        addi    r3, r9, VCPU_VRS_TM
        mfspr   r6, SPRN_VRSAVE
        stw     r6, VCPU_VRSAVE_TM(r9)
        /*
         * We need to save these SPRs after the treclaim so that the software
         * error code is recorded correctly in the TEXASR.  Also the user may
         * change these outside of a transaction, so they must always be
         * context switched.
         */
        mfspr   r5, SPRN_TFHAR
        mfspr   r6, SPRN_TFIAR
        mfspr   r7, SPRN_TEXASR
        std     r5, VCPU_TFHAR(r9)
        std     r6, VCPU_TFIAR(r9)
        std     r7, VCPU_TEXASR(r9)
        /* Increment yield count if they have a VPA */
        ld      r8, VCPU_VPA(r9)        /* do they have a VPA? */
        li      r4, LPPACA_YIELDCOUNT
        stb     r3, VCPU_VPA_DIRTY(r9)
        /* Save PMU registers if requested */
        /* r8 and cr0.eq are live here */
        /*
         * POWER8 seems to have a hardware bug where setting
         * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
         * when some counters are already negative doesn't seem
         * to cause a performance monitor alert (and hence interrupt).
         * The effect of this is that when saving the PMU state,
         * if there is no PMU alert pending when we read MMCR0
         * before freezing the counters, but one becomes pending
         * before we read the counters, we lose it.
         * To work around this, we need a way to freeze the counters
         * before reading MMCR0.  Normally, freezing the counters
         * is done by writing MMCR0 (to set MMCR0[FC]) which
         * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
         * we can also freeze the counters using MMCR2, by writing
         * 1s to all the counter freeze condition bits (there are
         * 9 bits each for 6 counters).
         */
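        /*
         * A sketch of the resulting save order (illustrative C; mfspr/mtspr
         * shown as helpers, not kernel code):
         *
         *   host_mmcr2 = mfspr(SPRN_MMCR2);
         *   mtspr(SPRN_MMCR2, ~0ull);     // freeze via the condition bits
         *   mmcr0 = mfspr(SPRN_MMCR0);    // PMAO can no longer be lost
         *   mtspr(SPRN_MMCR0, MMCR0_FC);  // conventional freeze
         *   // ... then read the PMCs, MMCRA, SIAR, SDAR, etc. ...
         */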
        li      r3, -1                  /* set all freeze bits */
        mfspr   r10, SPRN_MMCR2
        mtspr   SPRN_MMCR2, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        sldi    r3, r3, 31              /* MMCR0_FC (freeze counters) bit */
        mfspr   r4, SPRN_MMCR0          /* save MMCR0 */
        mtspr   SPRN_MMCR0, r3          /* freeze all counters, disable ints */
        mfspr   r6, SPRN_MMCRA
        /* Clear MMCRA in order to disable SDAR updates */
        mtspr   SPRN_MMCRA, r7
        beq     21f                     /* if no VPA, save PMU stuff anyway */
        lbz     r7, LPPACA_PMCINUSE(r8)
        cmpwi   r7, 0                   /* did they ask for PMU stuff to be saved? */
        std     r3, VCPU_MMCR(r9)       /* if not, set saved MMCR0 to FC */
21:     mfspr   r5, SPRN_MMCR1
        std     r4, VCPU_MMCR(r9)
        std     r5, VCPU_MMCR + 8(r9)
        std     r6, VCPU_MMCR + 16(r9)
        std     r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        std     r7, VCPU_SIAR(r9)
        std     r8, VCPU_SDAR(r9)
        stw     r3, VCPU_PMC(r9)
        stw     r4, VCPU_PMC + 4(r9)
        stw     r5, VCPU_PMC + 8(r9)
        stw     r6, VCPU_PMC + 12(r9)
        stw     r7, VCPU_PMC + 16(r9)
        stw     r8, VCPU_PMC + 20(r9)
        mfspr   r6, SPRN_SPMC1
        mfspr   r7, SPRN_SPMC2
        mfspr   r8, SPRN_MMCRS
        std     r5, VCPU_SIER(r9)
        stw     r6, VCPU_PMC + 24(r9)
        stw     r7, VCPU_PMC + 28(r9)
        std     r8, VCPU_MMCR + 32(r9)
        mtspr   SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#ifndef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING

kvmhv_do_exit:                  /* r12 = trap, r13 = paca */
        /*
         * POWER7/POWER8 guest -> host partition switch code.
         * We don't have to lock against tlbies but we do
         * have to coordinate the hardware threads.
         */
        /* Set our bit in the threads-exiting-guest map in the 0xff00
           bits of vcore->entry_exit_map */
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r4, HSTATE_PTID(r13)
        addi    r6, r5, VCORE_ENTRY_EXIT
        isync                   /* order stwcx. vs. reading napping_threads */

        /*
         * At this point we have an interrupt that we have to pass
         * up to the kernel or qemu; we can't handle it in real mode.
         * Thus we have to do a partition switch, so we have to
         * collect the other threads, if we are the first thread
         * to take an interrupt.  To do this, we send a message or
         * IPI to all the threads that have their bit set in the entry
         * map in vcore->entry_exit_map (other than ourselves).
         * However, we don't need to bother if this is an HDEC
         * interrupt, since the other threads will already be on their
         * way here in that case.
         */
        cmpwi   r3,0x100                /* Are we the first here? */
        cmpwi   r12,BOOK3S_INTERRUPT_HV_DECREMENTER
        andc.   r3,r3,r0                /* no sense IPI'ing ourselves */

        /* Order entry/exit update vs. IPIs */
        mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
        ld      r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
        stbcix  r0,r7,r8                /* trigger the IPI */
        addi    r6,r6,PACA_SIZE
#ifndef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING

kvmhv_switch_to_host:
        /* Secondary threads wait for primary to do partition switch */
43:     ld      r5,HSTATE_KVM_VCORE(r13)
        ld      r4,VCORE_KVM(r5)        /* pointer to struct kvm */
        lbz     r3,HSTATE_PTID(r13)
13:     lbz     r3,VCORE_IN_GUEST(r5)
        /* Primary thread waits for all the secondaries to exit guest */
15:     lwz     r3,VCORE_ENTRY_EXIT(r5)

        /* Primary thread switches back to host partition */
        ld      r6,KVM_HOST_SDR1(r4)
        lwz     r7,KVM_HOST_LPID(r4)
        li      r8,LPID_RSVD            /* switch to reserved LPID */
        mtspr   SPRN_SDR1,r6            /* switch to partition page table */
        /* DPDES is shared between threads */
        mfspr   r7, SPRN_DPDES
        std     r7, VCORE_DPDES(r5)
        /* clear DPDES so we don't get guest doorbells in the host */
        mtspr   SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        /* Subtract timebase offset from timebase */
        ld      r8,VCORE_TB_OFFSET(r5)
        mftb    r6                      /* current guest timebase */
        mtspr   SPRN_TBU40,r8           /* update upper 40 bits */
        mftb    r7                      /* check if lower 24 bits overflowed */
        addis   r8,r8,0x100             /* if so, increment upper 40 bits */

17:     ld      r0, VCORE_PCR(r5)
        /* Signal secondary CPUs to continue */
        stb     r0,VCORE_IN_GUEST(r5)
        lis     r8,0x7fff               /* MAX_INT@h */
16:     ld      r8,KVM_HOST_LPCR(r4)
        /* load host SLB entries */
        ld      r8,PACA_SLBSHADOWPTR(r13)

        .rept   SLB_NUM_BOLTED
        li      r3, SLBSHADOW_SAVEAREA
        andis.  r7,r5,SLB_ESID_V@h
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        /* Finish timing, if we have a vcpu */
        ld      r4, HSTATE_KVM_VCPU(r13)
        bl      kvmhv_accumulate_time

        /* Unset guest mode */
        li      r0, KVM_GUEST_MODE_NONE
        stb     r0, HSTATE_IN_GUEST(r13)

        ld      r0, 112+PPC_LR_STKOFF(r1)
/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
        mfspr   r6, SPRN_HDSISR
        /* HPTE not found fault or protection fault? */
        andis.  r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
        beq     1f                      /* if not, send it to the guest */
        andi.   r0, r11, MSR_DR         /* data relocation enabled? */
        PPC_SLBFEE_DOT(R5, R0)          /* if so, look up SLB */
        bne     1f                      /* if no SLB entry found */
4:      std     r4, VCPU_FAULT_DAR(r9)
        stw     r6, VCPU_FAULT_DSISR(r9)
        /* Search the hash table. */
        mr      r3, r9                  /* vcpu pointer */
        li      r7, 1                   /* data fault */
        bl      kvmppc_hpte_hv_fault
        ld      r9, HSTATE_KVM_VCPU(r13)
        ld      r11, VCPU_MSR(r9)
        li      r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
        cmpdi   r3, 0                   /* retry the instruction */
        cmpdi   r3, -1                  /* handle in kernel mode */
        cmpdi   r3, -2                  /* MMIO emulation; need instr word */
        /* Synthesize a DSI for the guest */
        ld      r4, VCPU_FAULT_DAR(r9)
1:      mtspr   SPRN_DAR, r4
        mtspr   SPRN_DSISR, r6
        mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
        li      r10, BOOK3S_INTERRUPT_DATA_STORAGE
        bl      kvmppc_msr_interrupt
fast_interrupt_c_return:
6:      ld      r7, VCPU_CTR(r9)
        lwz     r8, VCPU_XER(r9)
3:      ld      r5, VCPU_KVM(r9)        /* not relocated, use VRMA */
        ld      r5, KVM_VRMA_SLB_V(r5)

        /* If this is for emulated MMIO, load the instruction word */
2:      li      r8, KVM_INST_FETCH_FAILED       /* In case lwz faults */

        /* Set guest mode to 'jump over instruction' so if lwz faults
         * we'll just continue at the next IP. */
        li      r0, KVM_GUEST_MODE_SKIP
        stb     r0, HSTATE_IN_GUEST(r13)
        /* Do the access with MSR:DR enabled */
        ori     r4, r3, MSR_DR          /* Enable paging for data */

        /* Store the result */
        stw     r8, VCPU_LAST_INST(r9)

        /* Unset guest mode. */
        li      r0, KVM_GUEST_MODE_HOST_HV
        stb     r0, HSTATE_IN_GUEST(r13)
/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
        andis.  r0, r11, SRR1_ISI_NOPT@h
        andi.   r0, r11, MSR_IR         /* instruction relocation enabled? */
        PPC_SLBFEE_DOT(R5, R0)          /* if so, look up SLB */
        bne     1f                      /* if no SLB entry found */
        /* Search the hash table. */
        mr      r3, r9                  /* vcpu pointer */
        li      r7, 0                   /* instruction fault */
        bl      kvmppc_hpte_hv_fault
        ld      r9, HSTATE_KVM_VCPU(r13)
        ld      r11, VCPU_MSR(r9)
        li      r12, BOOK3S_INTERRUPT_H_INST_STORAGE
        cmpdi   r3, 0                   /* retry the instruction */
        beq     fast_interrupt_c_return
        cmpdi   r3, -1                  /* handle in kernel mode */

        /* Synthesize an ISI for the guest */
1:      mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
        li      r10, BOOK3S_INTERRUPT_INST_STORAGE
        bl      kvmppc_msr_interrupt
        b       fast_interrupt_c_return

3:      ld      r6, VCPU_KVM(r9)        /* not relocated, use VRMA */
        ld      r5, KVM_VRMA_SLB_V(r6)
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
        ld      r3,VCPU_GPR(R3)(r9)
        /* sc 1 from userspace - reflect to guest syscall */
        bne     sc_1_fast_return
        cmpldi  r3,hcall_real_table_end - hcall_real_table
        /* See if this hcall is enabled for in-kernel handling */
        srdi    r0, r3, 8               /* r0 = (r3 / 4) >> 6 */
        sldi    r0, r0, 3               /* index into kvm->arch.enabled_hcalls[] */
        ld      r0, KVM_ENABLED_HCALLS(r4)
        rlwinm  r4, r3, 32-2, 0x3f      /* r4 = (r3 / 4) & 0x3f */
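        /*
         * The index math above walks kvm->arch.enabled_hcalls, a bitmap
         * with one bit per hcall number divided by 4 (hcall numbers are
         * multiples of 4).  In C (illustrative sketch, not kernel code):
         *
         *   unsigned int n = hcall / 4;
         *   u64 word = enabled_hcalls[n >> 6];     // srdi/sldi above
         *   bool enabled = (word >> (n & 63)) & 1; // rlwinm above
         */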
        /* Get pointer to handler, if any, and call it */
        LOAD_REG_ADDR(r4, hcall_real_table)
        mr      r3,r9                   /* get vcpu pointer */
        ld      r4,VCPU_GPR(R4)(r9)
        beq     hcall_real_fallback
        ld      r4,HSTATE_KVM_VCPU(r13)
        std     r3,VCPU_GPR(R3)(r4)
        li      r10, BOOK3S_INTERRUPT_SYSCALL
        bl      kvmppc_msr_interrupt
        /* We've attempted a real mode hcall, but it's punted it back
         * to userspace.  We need to restore some clobbered volatiles
         * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
        li      r12,BOOK3S_INTERRUPT_SYSCALL
        ld      r9, HSTATE_KVM_VCPU(r13)
        .globl  hcall_real_table
hcall_real_table:
        .long   0               /* 0 - unused */
        .long   DOTSYM(kvmppc_h_remove) - hcall_real_table
        .long   DOTSYM(kvmppc_h_enter) - hcall_real_table
        .long   DOTSYM(kvmppc_h_read) - hcall_real_table
        .long   0               /* 0x10 - H_CLEAR_MOD */
        .long   0               /* 0x14 - H_CLEAR_REF */
        .long   DOTSYM(kvmppc_h_protect) - hcall_real_table
        .long   DOTSYM(kvmppc_h_get_tce) - hcall_real_table
        .long   DOTSYM(kvmppc_h_put_tce) - hcall_real_table
        .long   0               /* 0x24 - H_SET_SPRG0 */
        .long   DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
        .long   DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
        .long   0               /* 0x70 - H_IPOLL */
        .long   DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
        .long   0               /* 0x64 - H_EOI */
        .long   0               /* 0x68 - H_CPPR */
        .long   0               /* 0x6c - H_IPI */
        .long   0               /* 0x70 - H_IPOLL */
        .long   0               /* 0x74 - H_XIRR */
#endif
        .long   DOTSYM(kvmppc_h_cede) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
        .long   DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
        .long   DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
        .long   DOTSYM(kvmppc_h_random) - hcall_real_table
        .globl  hcall_real_table_end
hcall_real_table_end:
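/*
 * Dispatch sketch for the table above (illustrative C, not kernel code):
 * each entry is a 32-bit offset from hcall_real_table, and zero means
 * "no real-mode handler, fall back to the kernel/QEMU path".
 *
 *   s32 off = hcall_real_table[hcall / 4];
 *   if (off == 0)
 *       goto hcall_real_fallback;
 *   ret = ((long (*)(void *))((char *)hcall_real_table + off))(vcpu);
 *   // a handler that returns H_TOO_HARD also takes the fallback path
 */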
_GLOBAL(kvmppc_h_set_xdabr)
        andi.   r0, r5, DABRX_USER | DABRX_KERNEL
        li      r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:      li      r3, H_PARAMETER
_GLOBAL(kvmppc_h_set_dabr)
        li      r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        std     r4,VCPU_DABR(r3)
        stw     r5, VCPU_DABRX(r3)
        mtspr   SPRN_DABRX, r5
        /* Work around P7 bug where DABR can get corrupted on mtspr */
1:      mtspr   SPRN_DABR,r4
        /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:      rlwimi  r5, r4, 5, DAWRX_DR | DAWRX_DW
        rlwimi  r5, r4, 1, DAWRX_WT
        std     r4, VCPU_DAWR(r3)
        std     r5, VCPU_DAWRX(r3)
        mtspr   SPRN_DAWRX, r5
_GLOBAL(kvmppc_h_cede)          /* r3 = vcpu pointer, r11 = msr, r13 = paca */
        std     r11,VCPU_MSR(r3)
        stb     r0,VCPU_CEDED(r3)
        sync                    /* order setting ceded vs. testing prodded */
        lbz     r5,VCPU_PRODDED(r3)
        bne     kvm_cede_prodded
        li      r0,0            /* set trap to 0 to say hcall is handled */
        stw     r0,VCPU_TRAP(r3)
        std     r0,VCPU_GPR(R3)(r3)
        /*
         * Set our bit in the bitmask of napping threads unless all the
         * other threads are already napping, in which case we send this
         * up to the host.
         */
        ld      r5,HSTATE_KVM_VCORE(r13)
        lbz     r6,HSTATE_PTID(r13)
        lwz     r8,VCORE_ENTRY_EXIT(r5)
        addi    r6,r5,VCORE_NAPPING_THREADS
        /* order napping_threads update vs testing entry_exit_map */
        stb     r0,HSTATE_NAPPING(r13)
        lwz     r7,VCORE_ENTRY_EXIT(r5)
        bge     33f                     /* another thread already exiting */
        /*
         * Although not specifically required by the architecture, POWER7
         * preserves the following registers in nap mode, even if an SMT mode
         * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
         * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
         */
        /* Save non-volatile GPRs */
        std     r14, VCPU_GPR(R14)(r3)
        std     r15, VCPU_GPR(R15)(r3)
        std     r16, VCPU_GPR(R16)(r3)
        std     r17, VCPU_GPR(R17)(r3)
        std     r18, VCPU_GPR(R18)(r3)
        std     r19, VCPU_GPR(R19)(r3)
        std     r20, VCPU_GPR(R20)(r3)
        std     r21, VCPU_GPR(R21)(r3)
        std     r22, VCPU_GPR(R22)(r3)
        std     r23, VCPU_GPR(R23)(r3)
        std     r24, VCPU_GPR(R24)(r3)
        std     r25, VCPU_GPR(R25)(r3)
        std     r26, VCPU_GPR(R26)(r3)
        std     r27, VCPU_GPR(R27)(r3)
        std     r28, VCPU_GPR(R28)(r3)
        std     r29, VCPU_GPR(R29)(r3)
        std     r30, VCPU_GPR(R30)(r3)
        std     r31, VCPU_GPR(R31)(r3)
        /*
         * Set DEC to the smaller of DEC and HDEC, so that we wake
         * no later than the end of our timeslice (HDEC interrupts
         * don't wake us from nap).
         */

        /* save expiry time of guest decrementer */
        ld      r4, HSTATE_KVM_VCPU(r13)
        ld      r5, HSTATE_KVM_VCORE(r13)
        ld      r6, VCORE_TB_OFFSET(r5)
        subf    r3, r6, r3              /* convert to host TB value */
        std     r3, VCPU_DEC_EXPIRES(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        ld      r4, HSTATE_KVM_VCPU(r13)
        addi    r3, r4, VCPU_TB_CEDE
        bl      kvmhv_accumulate_time

        lis     r3, LPCR_PECEDP@h       /* Do wake on privileged doorbell */
        /*
         * Take a nap until a decrementer or external or doorbell interrupt
         * occurs, with PECE1 and PECE0 set in LPCR.
         * On POWER8, if we are ceding, also set PECEDP.
         * Also clear the runlatch bit before napping.
         */
        mfspr   r0, SPRN_CTRLF
        mtspr   SPRN_CTRLT, r0

        stb     r0,HSTATE_HWTHREAD_REQ(r13)
        ori     r5,r5,LPCR_PECE0 | LPCR_PECE1
        rlwimi  r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        std     r0, HSTATE_SCRATCH0(r13)
        ld      r0, HSTATE_SCRATCH0(r13)
        /* get vcpu pointer */
        ld      r4, HSTATE_KVM_VCPU(r13)

        /* Woken by external or decrementer interrupt */
        ld      r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r4, VCPU_TB_RMINTR
        bl      kvmhv_accumulate_time

        /* load up FP state */

        /* Restore guest decrementer */
        ld      r3, VCPU_DEC_EXPIRES(r4)
        ld      r5, HSTATE_KVM_VCORE(r13)
        ld      r6, VCORE_TB_OFFSET(r5)
        add     r3, r3, r6              /* convert host TB to guest TB value */
        ld      r14, VCPU_GPR(R14)(r4)
        ld      r15, VCPU_GPR(R15)(r4)
        ld      r16, VCPU_GPR(R16)(r4)
        ld      r17, VCPU_GPR(R17)(r4)
        ld      r18, VCPU_GPR(R18)(r4)
        ld      r19, VCPU_GPR(R19)(r4)
        ld      r20, VCPU_GPR(R20)(r4)
        ld      r21, VCPU_GPR(R21)(r4)
        ld      r22, VCPU_GPR(R22)(r4)
        ld      r23, VCPU_GPR(R23)(r4)
        ld      r24, VCPU_GPR(R24)(r4)
        ld      r25, VCPU_GPR(R25)(r4)
        ld      r26, VCPU_GPR(R26)(r4)
        ld      r27, VCPU_GPR(R27)(r4)
        ld      r28, VCPU_GPR(R28)(r4)
        ld      r29, VCPU_GPR(R29)(r4)
        ld      r30, VCPU_GPR(R30)(r4)
        ld      r31, VCPU_GPR(R31)(r4)
        /* Check the wake reason in SRR1 to see why we got here */
        bl      kvmppc_check_wake_reason

        /* clear our bit in vcore->napping_threads */
34:     ld      r5,HSTATE_KVM_VCORE(r13)
        lbz     r7,HSTATE_PTID(r13)
        addi    r6,r5,VCORE_NAPPING_THREADS
        stb     r0,HSTATE_NAPPING(r13)
        /* See if the wake reason means we need to exit */
        stw     r12, VCPU_TRAP(r4)

        /* see if any other thread is already exiting */
        lwz     r0,VCORE_ENTRY_EXIT(r5)
        b       kvmppc_cede_reentry     /* if not go back to guest */
        /* cede when already previously prodded case */
        stb     r0,VCPU_PRODDED(r3)
        sync                    /* order testing prodded vs. clearing ceded */
        stb     r0,VCPU_CEDED(r3)

        /* we've ceded but we want to give control to the host */
        b       hcall_real_fallback
        /* Try to handle a machine check in real mode */
machine_check_realmode:
        mr      r3, r9          /* get vcpu pointer */
        bl      kvmppc_realmode_machine_check
        cmpdi   r3, 0           /* Did we handle MCE ? */
        ld      r9, HSTATE_KVM_VCPU(r13)
        li      r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        /*
         * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
         * a machine check interrupt (set HSRR0 to 0x200).  For handled
         * (non-fatal) errors, just go back to guest execution with the
         * current HSRR0 instead of exiting the guest.  This new approach
         * injects a machine check into the guest for fatal errors, causing
         * the guest to crash.
         *
         * The old code used to return to the host for unhandled errors,
         * which caused the guest to hang with soft lockups inside the guest
         * and made it difficult to recover the guest instance.
         */
        ld      r11, VCPU_MSR(r9)
        bne     2f              /* Continue guest execution. */
        /* If not, deliver a machine check.  SRR0/1 are already set */
        li      r10, BOOK3S_INTERRUPT_MACHINE_CHECK
        ld      r11, VCPU_MSR(r9)
        bl      kvmppc_msr_interrupt
2:      b       fast_interrupt_c_return
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies r0, r6, r7, r8.
 */
kvmppc_check_wake_reason:
        rlwinm  r6, r6, 45-31, 0xf      /* extract wake reason field (P8) */
        rlwinm  r6, r6, 45-31, 0xe      /* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
        cmpwi   r6, 8                   /* was it an external interrupt? */
        li      r12, BOOK3S_INTERRUPT_EXTERNAL
        beq     kvmppc_read_intr        /* if so, see what it was */
        cmpwi   r6, 6                   /* was it the decrementer? */
        cmpwi   r6, 5                   /* privileged doorbell? */
        cmpwi   r6, 3                   /* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        li      r3, 1                   /* anything else, return 1 */

        /* hypervisor doorbell */
3:      li      r12, BOOK3S_INTERRUPT_H_DOORBELL
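        /*
         * Wake-reason dispatch sketch, using the values tested above
         * (illustrative C, not the kernel's):
         *
         *   switch (wake_reason) {   // 4-bit SRR1 field on P8, 3-bit on P7
         *   case 8:  return kvmppc_read_intr();   // external interrupt
         *   case 6:  break;                       // decrementer
         *   case 5:  break;                       // privileged doorbell (P8)
         *   case 3:  trap = BOOK3S_INTERRUPT_H_DOORBELL;  // hv doorbell (P8)
         *            break;
         *   default: return 1;      // something for the host to handle
         *   }
         */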
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 * Modifies r0, r6, r7, r8, returns value in r3.
 */
        /* see if a host IPI is pending */
        lbz     r0, HSTATE_HOST_IPI(r13)

        /* Now read the interrupt from the ICP */
        ld      r6, HSTATE_XICS_PHYS(r13)
        /*
         * Save XIRR for later.  Since we get it in reverse endian on LE
         * systems, save it byte reversed and fetch it back in host endian.
         */
        li      r3, HSTATE_SAVED_XIRR
#ifdef __LITTLE_ENDIAN__
        lwz     r3, HSTATE_SAVED_XIRR(r13)
        rlwinm. r3, r3, 0, 0xffffff
        beq     1f                      /* if nothing pending in the ICP */
        /* We found something in the ICP...
         *
         * If it's not an IPI, stash it in the PACA and return to
         * the host, we don't (yet) handle directing real external
         * interrupts directly to the guest
         */
        cmpwi   r3, XICS_IPI            /* if there is, is it an IPI? */

        /* It's an IPI, clear the MFRR and EOI it */
        stbcix  r3, r6, r8              /* clear the IPI */
        stwcix  r0, r6, r7              /* EOI it */
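        /*
         * XICS ICP sketch of the sequence above (illustrative C; helper
         * names are made up, and 0xff is the standard "least favored" MFRR
         * priority used to clear an IPI):
         *
         *   u32 xirr = icp_read32(XIRR);          // accept: raises CPPR
         *   if ((xirr & 0xffffff) == 0) return 0; // nothing pending
         *   if ((xirr & 0xffffff) != XICS_IPI)
         *       return 1;        // stash XIRR, let the host ICP handle it
         *   icp_write8(MFRR, 0xff);   // clear the IPI (stbcix above)
         *   icp_write32(XIRR, xirr);  // EOI, restores CPPR (stwcix above)
         */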
        /* We need to re-check host IPI now in case it got set in the
         * meantime.  If it's clear, we bounce the interrupt to the
         * guest
         */
        lbz     r0, HSTATE_HOST_IPI(r13)

        /* OK, it's an IPI for us */

42:     /* It's not an IPI and it's for the host.  We saved a copy of XIRR in
         * the PACA earlier, it will be picked up by the host ICP driver
         */

43:     /* We raced with the host, we need to resend that IPI, bummer */
        stbcix  r0, r6, r8              /* set the IPI */
/*
 * Save away FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
        oris    r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        oris    r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        addi    r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
        addi    r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        mfspr   r6,SPRN_VRSAVE
        stw     r6,VCPU_VRSAVE(r31)
/*
 * Load up FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
        oris    r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        oris    r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        addi    r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
        addi    r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        lwz     r7,VCPU_VRSAVE(r31)
        mtspr   SPRN_VRSAVE,r7
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
        rldicl  r0, r11, 64 - MSR_TS_S_LG, 62
        cmpwi   r0, 2                   /* Check if we are in transactional state..  */
        ld      r11, VCPU_INTR_MSR(r9)
        /* ... if transactional, change to suspended */
1:      rldimi  r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
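/*
 * TS[] handling sketch (illustrative C, not kernel code): an interrupt
 * may not leave the guest in Transactional state, so it is demoted to
 * Suspended before delivery:
 *
 *   unsigned int ts = (msr >> MSR_TS_S_LG) & 3;  // 2 == transactional
 *   msr = vcpu->arch.intr_msr;                   // VCPU_INTR_MSR base value
 *   if (ts == 2)
 *       msr |= MSR_TS_S;                         // deliver in suspended state
 */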
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
        mtspr   SPRN_MMCR2, r3
        lis     r3, (MMCR0_PMXE | MMCR0_FCECE)@h
        ori     r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
        mtspr   SPRN_MMCR0, r3
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r6, VCORE_IN_GUEST(r5)
        beq     5f                      /* if in guest, need to */
        ld      r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
        std     r3, VCPU_CUR_ACTIVITY(r4)
        std     r5, VCPU_ACTIVITY_START(r4)
/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r8, VCORE_IN_GUEST(r5)
        beq     4f                      /* if in guest, need to */
        ld      r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
4:      ld      r5, VCPU_CUR_ACTIVITY(r4)
        ld      r6, VCPU_ACTIVITY_START(r4)
        std     r3, VCPU_CUR_ACTIVITY(r4)
        std     r7, VCPU_ACTIVITY_START(r4)
        ld      r8, TAS_SEQCOUNT(r5)
        std     r8, TAS_SEQCOUNT(r5)
        ld      r7, TAS_TOTAL(r5)
        std     r7, TAS_TOTAL(r5)
3:      std     r3, TAS_MIN(r5)
        std     r8, TAS_SEQCOUNT(r5)
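/*
 * The TAS_* updates above follow a writer-side sequence-count protocol so
 * a reader can snapshot {total, min, max} consistently by retrying while
 * the count is odd or changes.  Illustrative C, field names taken from the
 * TAS_* offsets used above:
 *
 *   tas->seqcount++;                 // odd: update in progress
 *   tas->total += now - start;
 *   if (delta < tas->min) tas->min = delta;   // "3:" above stores TAS_MIN
 *   // ... max updated similarly ...
 *   tas->seqcount++;                 // even again: update visible
 */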