 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
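
/*
 * Illustrative note (not in the original source): VCPU_GPRS_TM(13)
 * expands to (13 * ULONG_SIZE) + VCPU_GPR_TM, i.e. the byte offset of
 * the checkpointed image of r13 within the vcpu struct.
 */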
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/*
	 * Reload DEC. HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200). The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f		/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	b	hmi_exception_after_realmode

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_count */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit		/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
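
	/*
	 * Illustrative C sketch (an assumption, not from this file) of
	 * the reservation loop used to set our bit in
	 * vcore->napping_threads (the lwarx/stwcx. lines are not shown
	 * here), followed by the re-check of entry_exit:
	 *
	 *	do {
	 *		old = load_reserve(&vc->napping_threads);  // lwarx
	 *	} while (!store_cond(&vc->napping_threads,
	 *			     old | (1 << ptid)));          // stwcx.
	 *	smp_mb();		// order vs entry_exit_count
	 *	if ((vc->entry_exit >> 8) != 0)
	 *		goto kvm_novcpu_exit;	// someone already exiting
	 *
	 * load_reserve()/store_cond() are hypothetical helpers standing
	 * in for the larx/stcx. instructions.
	 */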
	li	r3, 0		/* Don't wake on privileged (OS) doorbell */

	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* See if our timeslice has expired (HDEC is negative) */
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
/*
 * We come in here when woken from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest

	/* Set runlatch bit the minute you wake up from nap */

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI. (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	/* if we have no vcpu to run, go back to sleep */

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)

	/* Order load of vcore, ptid etc. after load of vcpu */

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	/*
	 * Once we clear HSTATE_KVM_VCPU(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory. This lwsync makes sure
	 */
	std	r0, HSTATE_KVM_VCPU(r13)

	/*
	 * At this point we have finished executing in the guest.
	 * We need to wait for hwthread_req to become zero, since
	 * we may not turn on the MMU while hwthread_req is non-zero.
	 * While waiting we also need to check if we get given a vcpu to run.
	 */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * We jump to power7_wakeup_loss, which will return to the caller
	 * of power7_nap in the powernv cpu offline loop. The value we
	 * put in r3 becomes the return value for power7_nap.
	 */
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1

	ld	r4, HSTATE_KVM_VCPU(r13)
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

/******************************************************************************
 *****************************************************************************/

.global kvmppc_hv_entry
	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRS = free
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
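
	/*
	 * Illustrative C sketch of the address arithmetic above (an
	 * assumption, not from this file): for this CPU's number "cpu",
	 *
	 *	bit   = cpu & 63;	// clrldi: low 6 bits
	 *	dword = cpu >> 6;	// srdi: doubleword index
	 *	p = (u64 *)((char *)kvm + KVM_NEED_FLUSH + (dword << 3));
	 *
	 * i.e. a hand-rolled test_and_clear_bit(cpu, need_tlb_flush),
	 * with the reservation-based clear done just below.
	 */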
23:	ldarx	r7,0,r6			/* if set, clear the bit */

	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
	li	r6,512			/* POWER8 has 512 sets */
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	li	r7,0x800		/* IS field = 0b10 */

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
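
	/*
	 * Sketch of the timebase update above (illustrative): mtspr
	 * TBU40 writes only the upper 40 bits of the timebase, so if
	 * adding the offset carried into bit 24, the low 24 bits of the
	 * new reading can appear to have gone backwards:
	 *
	 *	new_tb = mftb() + vc->tb_offset;
	 *	mtspr(TBU40, new_tb);
	 *	if ((mftb() & 0xffffff) < (new_tb & 0xffffff))
	 *		mtspr(TBU40, new_tb + 0x1000000); // addis ...,0x100
	 */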
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)

	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)

10:	ld	r8,VCORE_LPCR(r5)

	/* Check if HDEC expires soon */
	cmpwi	r3,512			/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE

	/* Increment yield count if they have a VPA */
	li	r6, LPPACA_YIELDCOUNT
	stb	r6, VCPU_VPA_DIRTY(r4)

	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TEXASR, r7

	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm			/* TM not active in guest */
	/*
	 * Make sure the failure summary is set, otherwise we'll program
	 * check when we trechkpt. It's possible that this might not have
	 * been set on a kvmppc_set_one_reg() call but we shouldn't let
	 * this crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 */
	addi	r3, r31, VCPU_FPRS_TM
	addi	r3, r31, VCPU_VRS_TM
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13)	/* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */

	/* Load GPRs r0-r28 */
	ld	reg, VCPU_GPRS_TM(reg)(r31)

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup. All GPRs are now volatile. */

	/* Now let's get back the state we need. */
	ld	r29, HSTATE_DSCR(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r5, VCPU_MMCR + 24(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */

	/* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG

	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r5, VCPU_TCSCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */

	/* Restore state of CTRL run bit; assume 1 on entry */

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
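
	/*
	 * Illustrative note (an assumption, not from this file): the two
	 * rotates above clear MSR_HV without needing a mask register.
	 * The first rotate puts the HV bit at the MSB, where rldicl's
	 * mask clears it; the second rotates everything back, i.e.
	 *
	 *	r11 = vcpu->arch.msr & ~MSR_HV;
	 */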
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi.	r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
12:	mtspr	SPRN_SRR0, r10
	bl	kvmppc_msr_interrupt

	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time

END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
11:	b	kvmhv_switch_to_host

	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

/******************************************************************************
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
	/*
	 * R12 = interrupt vector
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr

	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)

	/*
	 * Save HEIR (HV emulation assist reg) in emul_inst
	 * if this is an HEI (HV emulation interrupt, e40)
	 */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bge	fast_guest_return

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/*
	 * External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)

	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)

	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	std	r5,VCPU_DEC_EXPIRES(r9)

END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)

	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f			/* TM not active in guest. */

	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */

	/* All GPRs are volatile at this point. */

	/* Temporarily store r13 and r9 so we have some regs to play with */
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so don't run with user values. */
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)

	/* ... now save r13 */
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	addi	r3, r9, VCPU_FPRS_TM
	addi	r3, r9, VCPU_VRS_TM
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)

	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR. Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	li	r4, LPPACA_YIELDCOUNT
	stb	r3, VCPU_VPA_DIRTY(r9)

	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0. Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
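
	/*
	 * Illustrative C sketch of the workaround implemented below (an
	 * assumption, not from this file):
	 *
	 *	mtspr(MMCR2, ~0ull);        // freeze via condition bits
	 *	saved_mmcr0 = mfspr(MMCR0); // PMAO can no longer be lost
	 *	mtspr(MMCR0, MMCR0_FC);     // conventional freeze
	 *	// ... then read out the PMCs, MMCR1, SIAR, SDAR ...
	 */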
	li	r3, -1			/* set all freeze bits */
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

#ifndef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
kvmhv_do_exit:			/* r12 = trap, r13 = paca */
	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/*
	 * Increment the threads-exiting-guest count in the 0xff00
	 * bits of vcore->entry_exit_count
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
	isync			/* order stwcx. vs. reading napping_threads */

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt. To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
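
	/*
	 * Sketch of that logic (illustrative, an assumption):
	 *
	 *	if (first_thread_out &&
	 *	    trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
	 *		mtspr(HDEC, 0);	// fires on every thread in the core
	 */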
	cmpwi	r3,0x100	/* Are we the first here? */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */

	/* Order entry/exit update vs. IPIs */
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	stbcix	r0,r7,r8		/* trigger the IPI */
	addi	r6,r6,PACA_SIZE

#ifndef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current guest timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

17:	ld	r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */

16:	ld	r8,KVM_HOST_LPCR(r4)

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	andis.	r7,r5,SLB_ESID_V@h

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmhv_accumulate_time

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path. In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/*
	 * Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP.
	 */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI for the guest */
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table

	/* See if this hcall is enabled for in-kernel handling */
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
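
	/*
	 * Illustrative C equivalent of the bitmap test above (an
	 * assumption, not from this file). hcall numbers are multiples
	 * of 4, so bit (nr / 4) of kvm->arch.enabled_hcalls selects the
	 * hcall:
	 *
	 *	word = (nr / 4) >> 6;	// srdi r0,r3,8; sldi r0,r0,3
	 *	bit  = (nr / 4) & 0x3f;	// rlwinm r4,r3,32-2,0x3f
	 *	enabled = (enabled_hcalls[word] >> bit) & 1;
	 */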
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)

	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	/*
	 * We've attempted a real mode hcall, but it has been punted back
	 * to userspace. We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path.
	 */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	.globl	hcall_real_table
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 1, DAWRX_WT
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWRX, r5

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_count */
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */

	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
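
	/*
	 * Sketch (illustrative): an HDEC expiry cannot wake us from
	 * nap, so the sleep is bounded by the remaining timeslice:
	 *
	 *	mtspr(DEC, min(mfspr(DEC), mfspr(HDEC)));
	 */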
	/* save expiry time of guest decrementer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */
	/*
	 * Take a nap until a decrementer or external or doorbell
	 * interrupt occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
	mfspr	r0, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r0

	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time

	/* load up FP state */

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
	b	hcall_real_fallback

/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	cmpdi	r3, 0		/* Did we handle MCE ? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest
	 * execution with the current HSRR0 instead of exiting the
	 * guest. This new approach injects a machine check into the
	 * guest for fatal errors, causing the guest to crash.
	 *
	 * The old code used to return to the host for unhandled errors,
	 * which caused the guest to hang with soft lockups inside the
	 * guest and made it difficult to recover the guest instance.
	 */
	ld	r11, VCPU_MSR(r9)
	bne	2f		/* Continue guest execution. */
	/* If not, deliver a machine check. SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	ld	r11, VCPU_MSR(r9)
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies r0, r6, r7, r8.
 */
kvmppc_check_wake_reason:
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
	cmpwi	r6, 5			/* privileged doorbell? */
	cmpwi	r6, 3			/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 * Modifies r0, r6, r7, r8, returns value in r3.
 */

	/* see if a host IPI is pending */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	/*
	 * Save XIRR for later. Since we get it in reverse endian on LE
	 * systems, save it byte reversed and fetch it back in host endian.
	 */
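
	/*
	 * Illustrative C sketch of that endian dance (an assumption,
	 * not from this file): the ICP load returns big-endian data, so
	 * on an LE host a byte-reversed store plus a plain reload
	 * yields a host-endian XIRR:
	 *
	 *	store_byte_reversed(&paca->kvm_hstate.saved_xirr, xirr);
	 *	xirr = paca->kvm_hstate.saved_xirr;	// host endian
	 *
	 * store_byte_reversed() stands in for the stwbrx instruction.
	 */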
	li	r3, HSTATE_SAVED_XIRR
#ifdef __LITTLE_ENDIAN__
	lwz	r3, HSTATE_SAVED_XIRR(r13)
	rlwinm.	r3, r3, 0, 0xffffff
	beq	1f			/* if nothing pending in the ICP */

	/*
	 * We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */

	/* It's an IPI, clear the MFRR and EOI it */
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */

	/*
	 * We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* OK, it's an IPI for us */
42:	/*
	 * It's not an IPI and it's for the host. We saved a copy of
	 * XIRR in the PACA earlier; it will be picked up by the host
	 * ICP driver.
	 */
43:	/* We raced with the host, we need to resend that IPI, bummer */
	stbcix	r0, r6, r8		/* set the IPI */

/*
 * Save away FP, VMX and VSX registers.
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)

/*
 * Load up FP, VMX and VSX registers
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:

/*
 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2		/* Check if we are in transactional state..  */
	ld	r11, VCPU_INTR_MSR(r9)
	/* ... if transactional, change to suspended */
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
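
	/*
	 * Illustrative C equivalent (an assumption, not from this file):
	 *
	 *	ts = (old_msr >> MSR_TS_S_LG) & 3;
	 *	if (ts == 2)		// transactional ...
	 *		ts = 1;		// ... becomes suspended
	 *	new_msr = vcpu->arch.intr_msr;
	 *	new_msr |= ts << MSR_TS_S_LG;	// the rldimi above
	 */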
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt. Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	beq	5f				/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	beq	4f				/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r7, VCPU_ACTIVITY_START(r4)
	ld	r8, TAS_SEQCOUNT(r5)
	std	r8, TAS_SEQCOUNT(r5)
	ld	r7, TAS_TOTAL(r5)
	std	r7, TAS_TOTAL(r5)
3:	std	r3, TAS_MIN(r5)
	std	r8, TAS_SEQCOUNT(r5)
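
/*
 * The TAS_SEQCOUNT updates bracket the accumulation like a seqlock
 * writer; an illustrative C sketch (an assumption, not from this file):
 *
 *	acc->seqcount++;		// odd: update in progress
 *	smp_wmb();
 *	acc->total += now - start;
 *	// ... update acc->min / acc->max ...
 *	smp_wmb();
 *	acc->seqcount++;		// even again: consistent
 */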