* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* KVM/MIPS: Instruction/Exception emulation
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
#include "interrupt.h"
* Compute the return address, emulating the branch if required.
* This function should be called only when in an active branch delay slot.
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
unsigned int dspcontrol;
union mips_instruction insn;
struct kvm_vcpu_arch *arch = &vcpu->arch;
long nextpc = KVM_INVALID_INST;
/* Read the instruction */
insn.word = kvm_get_inst((u32 *) epc, vcpu);
if (insn.word == KVM_INVALID_INST)
return KVM_INVALID_INST;
switch (insn.i_format.opcode) {
/* jr and jalr are in r_format format. */
switch (insn.r_format.func) {
arch->gprs[insn.r_format.rd] = epc + 8;
nextpc = arch->gprs[insn.r_format.rs];
* This group contains:
* bltz_op, bgez_op, bltzl_op, bgezl_op,
* bltzal_op, bgezal_op, bltzall_op, bgezall_op.
switch (insn.i_format.rt) {
if ((long)arch->gprs[insn.i_format.rs] < 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
if ((long)arch->gprs[insn.i_format.rs] >= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
arch->gprs[31] = epc + 8;
if ((long)arch->gprs[insn.i_format.rs] < 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
arch->gprs[31] = epc + 8;
if ((long)arch->gprs[insn.i_format.rs] >= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
dspcontrol = rddsp(0x01);
if (dspcontrol >= 32)
epc = epc + 4 + (insn.i_format.simmediate << 2);
/* These are unconditional and in j_format. */
arch->gprs[31] = instpc + 8;
epc |= (insn.j_format.target << 2);
/* These are conditional and in i_format. */
if (arch->gprs[insn.i_format.rs] ==
arch->gprs[insn.i_format.rt])
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (arch->gprs[insn.i_format.rs] !=
arch->gprs[insn.i_format.rt])
epc = epc + 4 + (insn.i_format.simmediate << 2);
case blez_op: /* POP06 */
#ifndef CONFIG_CPU_MIPSR6
case blezl_op: /* removed in R6 */
if (insn.i_format.rt != 0)
if ((long)arch->gprs[insn.i_format.rs] <= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
case bgtz_op: /* POP07 */
#ifndef CONFIG_CPU_MIPSR6
case bgtzl_op: /* removed in R6 */
if (insn.i_format.rt != 0)
if ((long)arch->gprs[insn.i_format.rs] > 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
/* And now the FPA/cp1 branch instructions. */
kvm_err("%s: unsupported cop1_op\n", __func__);
#ifdef CONFIG_CPU_MIPSR6
/* R6 added the following compact branches with forbidden slots */
case blezl_op: /* POP26 */
case bgtzl_op: /* POP27 */
/* only rt == 0 isn't compact branch */
if (insn.i_format.rt != 0)
/* only rs == rt == 0 is reserved, rest are compact branches */
if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
/* only rs == 0 isn't compact branch */
if (insn.i_format.rs != 0)
* If we've hit an exception on the forbidden slot, then
* the branch must not have been taken.
/* Compact branches not supported before R6 */
kvm_err("%s: unaligned epc\n", __func__);
kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
unsigned long branch_pc;
enum emulation_result er = EMULATE_DONE;
if (cause & CAUSEF_BD) {
branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
if (branch_pc == KVM_INVALID_INST) {
vcpu->arch.pc = branch_pc;
kvm_debug("BD update_pc(): New PC: %#lx\n",
kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
* kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
* @vcpu: Virtual CPU.
* Returns: 1 if the CP0_Count timer is disabled by either the guest
* CP0_Cause.DC bit or the count_ctl.DC bit.
* 0 otherwise (in which case CP0_Count timer is running).
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
* kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
* Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
now_ns = ktime_to_ns(now);
delta = now_ns + vcpu->arch.count_dyn_bias;
if (delta >= vcpu->arch.count_period) {
/* If delta is out of safe range the bias needs adjusting */
periods = div64_s64(now_ns, vcpu->arch.count_period);
vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
/* Recalculate delta with new bias */
delta = now_ns + vcpu->arch.count_dyn_bias;
* We've ensured that:
* delta < count_period
* Therefore the intermediate delta*count_hz will never overflow since
* at the boundary condition:
* delta = count_period
* delta = NSEC_PER_SEC * 2^32 / count_hz
* delta * count_hz = NSEC_PER_SEC * 2^32
return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
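/*
 * Worked example (illustration only): with the default count_hz of
 * 100 MHz, count_period = NSEC_PER_SEC * 2^32 / count_hz
 *                       = 10^9 * 2^32 / 10^8 ns, roughly 42.95 seconds.
 * At the boundary delta == count_period the intermediate product is
 * delta * count_hz = NSEC_PER_SEC * 2^32 (about 4.3e18), which still
 * fits comfortably in the 64-bit dividend passed to div_u64().
 */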
* kvm_mips_count_time() - Get effective current time.
* @vcpu: Virtual CPU.
* Get effective monotonic ktime. This is usually a straightforward ktime_get(),
* except when the master disable bit is set in count_ctl, in which case it is
* count_resume, i.e. the time that the count was disabled.
* Returns: Effective monotonic ktime for CP0_Count.
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
return vcpu->arch.count_resume;
* kvm_mips_read_count_running() - Read the current count value as if running.
* @vcpu: Virtual CPU.
* @now: Kernel time to read CP0_Count at.
* Returns the current guest CP0_Count register value at time @now, handling any
* timer interrupt which is pending and hasn't been handled yet.
* Returns: The current value of the guest CP0_Count register.
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
struct mips_coproc *cop0 = vcpu->arch.cop0;
ktime_t expires, threshold;
/* Calculate the biased and scaled guest CP0_Count */
count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
compare = kvm_read_c0_guest_compare(cop0);
* Find whether CP0_Count has reached the closest timer interrupt. If
* not, we shouldn't inject it.
if ((s32)(count - compare) < 0)
* The CP0_Count we're going to return has already reached the closest
* timer interrupt. Quickly check if it really is a new interrupt by
* looking at whether the interval until the hrtimer expiry time is
* less than 1/4 of the timer period.
expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
if (ktime_before(expires, threshold)) {
* Cancel it while we handle it so there's no chance of
* interference with the timeout handler.
running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
/* Nothing should be waiting on the timeout */
kvm_mips_callbacks->queue_timer_int(vcpu);
* Restart the timer if it was running based on the expiry time
* we read, so that we don't push it back 2 periods.
expires = ktime_add_ns(expires,
vcpu->arch.count_period);
hrtimer_start(&vcpu->arch.comparecount_timer, expires,
* kvm_mips_read_count() - Read the current count value.
* @vcpu: Virtual CPU.
* Read the current guest CP0_Count value, taking into account whether the timer is stopped.
* Returns: The current guest CP0_Count value.
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
/* If count disabled just read static copy of count */
if (kvm_mips_count_disabled(vcpu))
return kvm_read_c0_guest_count(cop0);
return kvm_mips_read_count_running(vcpu, ktime_get());
* kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
* @vcpu: Virtual CPU.
* @count: Output pointer for CP0_Count value at point of freeze.
* Freeze the hrtimer safely and return both the ktime and the CP0_Count value
* at the point it was frozen. It is guaranteed that any pending interrupts at
* the point it was frozen are handled, and none after that point.
* This is useful where the time/CP0_Count is needed in the calculation of the new parameters.
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
* Returns: The ktime at the point of freeze.
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
/* stop hrtimer before finding time */
hrtimer_cancel(&vcpu->arch.comparecount_timer);
/* find count at this point and handle pending hrtimer */
*count = kvm_mips_read_count_running(vcpu, now);
* kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
* @vcpu: Virtual CPU.
* @now: ktime at point of resume.
* @count: CP0_Count at point of resume.
* Resumes the timer and updates the timer expiry based on @now and @count.
* This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
* parameters need to be changed.
* It is guaranteed that a timer interrupt immediately after resume will be
* handled, but not if CP0_Compare is exactly at @count. That case is already
* handled by kvm_mips_freeze_hrtimer().
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
ktime_t now, u32 count)
struct mips_coproc *cop0 = vcpu->arch.cop0;
/* Calculate timeout (wrap 0 to 2^32) */
compare = kvm_read_c0_guest_compare(cop0);
delta = (u64)(u32)(compare - count - 1) + 1;
delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
expire = ktime_add_ns(now, delta);
/* Update hrtimer to use new timeout */
hrtimer_cancel(&vcpu->arch.comparecount_timer);
hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
* kvm_mips_write_count() - Modify the count and update timer.
* @vcpu: Virtual CPU.
* @count: Guest CP0_Count value to set.
* Sets the CP0_Count value and updates the timer accordingly.
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
struct mips_coproc *cop0 = vcpu->arch.cop0;
now = kvm_mips_count_time(vcpu);
vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
if (kvm_mips_count_disabled(vcpu))
/* The timer's disabled, adjust the static count */
kvm_write_c0_guest_count(cop0, count);
kvm_mips_resume_hrtimer(vcpu, now, count);
* kvm_mips_init_count() - Initialise timer.
* @vcpu: Virtual CPU.
* Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
* it going if it's enabled.
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
vcpu->arch.count_hz = 100*1000*1000;
vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
vcpu->arch.count_hz);
vcpu->arch.count_dyn_bias = 0;
kvm_mips_write_count(vcpu, 0);
* kvm_mips_set_count_hz() - Update the frequency of the timer.
* @vcpu: Virtual CPU.
* @count_hz: Frequency of CP0_Count timer in Hz.
* Change the frequency of the CP0_Count timer. This is done atomically so that
* CP0_Count is continuous and no timer interrupt is lost.
* Returns: -EINVAL if @count_hz is out of range.
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
struct mips_coproc *cop0 = vcpu->arch.cop0;
/* ensure the frequency is in a sensible range... */
if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
/* ... and has actually changed */
if (vcpu->arch.count_hz == count_hz)
/* Safely freeze timer so we can keep it continuous */
dc = kvm_mips_count_disabled(vcpu);
now = kvm_mips_count_time(vcpu);
count = kvm_read_c0_guest_count(cop0);
now = kvm_mips_freeze_hrtimer(vcpu, &count);
/* Update the frequency */
vcpu->arch.count_hz = count_hz;
vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
vcpu->arch.count_dyn_bias = 0;
/* Calculate adjusted bias so dynamic count is unchanged */
vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
/* Update and resume hrtimer */
kvm_mips_resume_hrtimer(vcpu, now, count);
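/*
 * Userland sketch (illustration, not part of this file): a VMM would
 * reach kvm_mips_set_count_hz() through the one-reg interface, e.g.:
 *
 *	__u64 hz = 200 * 1000 * 1000;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_MIPS_COUNT_HZ,
 *		.addr = (unsigned long)&hz,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * where vcpu_fd is the VCPU file descriptor obtained from KVM_CREATE_VCPU.
 */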
* kvm_mips_write_compare() - Modify compare and update timer.
* @vcpu: Virtual CPU.
* @compare: New CP0_Compare value.
* @ack: Whether to acknowledge timer interrupt.
* Update CP0_Compare to a new value and update the timeout.
* If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
* any pending timer interrupt is preserved.
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
struct mips_coproc *cop0 = vcpu->arch.cop0;
u32 old_compare = kvm_read_c0_guest_compare(cop0);
/* if unchanged, must just be an ack */
if (old_compare == compare) {
kvm_mips_callbacks->dequeue_timer_int(vcpu);
kvm_write_c0_guest_compare(cop0, compare);
/* freeze_hrtimer() takes care of timer interrupts <= count */
dc = kvm_mips_count_disabled(vcpu);
now = kvm_mips_freeze_hrtimer(vcpu, &count);
kvm_mips_callbacks->dequeue_timer_int(vcpu);
kvm_write_c0_guest_compare(cop0, compare);
/* resume_hrtimer() takes care of timer interrupts > count */
kvm_mips_resume_hrtimer(vcpu, now, count);
* kvm_mips_count_disable() - Disable count.
* @vcpu: Virtual CPU.
* Disable the CP0_Count timer. A timer interrupt on or before the final stop
* time will be handled but not after.
* Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
* count_ctl.DC has been set (count disabled).
* Returns: The time that the timer was stopped.
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
hrtimer_cancel(&vcpu->arch.comparecount_timer);
/* Set the static count from the dynamic count, handling pending TI */
count = kvm_mips_read_count_running(vcpu, now);
kvm_write_c0_guest_count(cop0, count);
* kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
* @vcpu: Virtual CPU.
* Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
* before the final stop time will be handled if the timer isn't disabled by
* count_ctl.DC, but not after.
* Assumes CP0_Cause.DC is clear (count enabled).
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
kvm_mips_count_disable(vcpu);
* kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
* @vcpu: Virtual CPU.
* Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
* the start time will be handled if the timer isn't disabled by count_ctl.DC,
* potentially before even returning, so the caller should be careful with
* ordering of CP0_Cause modifications so as not to lose it.
* Assumes CP0_Cause.DC is set (count disabled).
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
* Set the dynamic count to match the static count.
* This starts the hrtimer if count_ctl.DC allows it.
* Otherwise it conveniently updates the biases.
count = kvm_read_c0_guest_count(cop0);
kvm_mips_write_count(vcpu, count);
* kvm_mips_set_count_ctl() - Update the count control KVM register.
* @vcpu: Virtual CPU.
* @count_ctl: Count control register new value.
* Set the count control KVM register. The timer is updated accordingly.
* Returns: -EINVAL if reserved bits are set.
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
struct mips_coproc *cop0 = vcpu->arch.cop0;
s64 changed = count_ctl ^ vcpu->arch.count_ctl;
/* Only allow defined bits to be changed */
if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
/* Apply new value */
vcpu->arch.count_ctl = count_ctl;
/* Master CP0_Count disable */
if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
/* Is CP0_Cause.DC already disabling CP0_Count? */
if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
/* Just record the current time */
vcpu->arch.count_resume = ktime_get();
} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
/* disable timer and record current time */
vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
* Calculate timeout relative to static count at resume
* time (wrap 0 to 2^32).
count = kvm_read_c0_guest_count(cop0);
compare = kvm_read_c0_guest_compare(cop0);
delta = (u64)(u32)(compare - count - 1) + 1;
delta = div_u64(delta * NSEC_PER_SEC,
vcpu->arch.count_hz);
expire = ktime_add_ns(vcpu->arch.count_resume, delta);
/* Handle pending interrupt */
if (ktime_compare(now, expire) >= 0)
/* Nothing should be waiting on the timeout */
kvm_mips_callbacks->queue_timer_int(vcpu);
/* Resume hrtimer without changing bias */
count = kvm_mips_read_count_running(vcpu, now);
kvm_mips_resume_hrtimer(vcpu, now, count);
* kvm_mips_set_count_resume() - Update the count resume KVM register.
* @vcpu: Virtual CPU.
* @count_resume: Count resume register new value.
* Set the count resume KVM register.
* Returns: -EINVAL if out of valid range (0..now).
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
* It doesn't make sense for the resume time to be in the future, as it
* would be possible for the next interrupt to be more than a full
* period in the future.
if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
vcpu->arch.count_resume = ns_to_ktime(count_resume);
* kvm_mips_count_timeout() - Push timer forward on timeout.
* @vcpu: Virtual CPU.
* Handle an hrtimer event by pushing the hrtimer forward a period.
* Returns: The hrtimer_restart value to return to the hrtimer subsystem.
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
/* Add the Count period to the current expiry time */
hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
vcpu->arch.count_period);
return HRTIMER_RESTART;
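/*
 * Sketch (every name here except kvm_mips_count_timeout() is
 * hypothetical): the hrtimer backing the emulated timer would be wired
 * so that its callback queues the interrupt and then defers back here
 * to push the expiry forward one period:
 *
 *	static enum hrtimer_restart comparecount_cb(struct hrtimer *timer)
 *	{
 *		struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
 *						     arch.comparecount_timer);
 *		kvm_mips_callbacks->queue_timer_int(vcpu);
 *		return kvm_mips_count_timeout(vcpu);
 *	}
 */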
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
kvm_read_c0_guest_epc(cop0));
kvm_clear_c0_guest_status(cop0, ST0_EXL);
vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
kvm_clear_c0_guest_status(cop0, ST0_ERL);
vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
vcpu->arch.pending_exceptions);
++vcpu->stat.wait_exits;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
if (!vcpu->arch.pending_exceptions) {
kvm_vcpu_block(vcpu);
* If we are runnable, then definitely go off to user space to
* check if any I/O interrupts are pending.
if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
* we can catch this, if things ever change
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long pc = vcpu->arch.pc;
kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
* kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
* @vcpu: VCPU with changed mappings.
* @tlb: TLB entry being removed.
* This is called to indicate a single change in guest MMU mappings, so that we
* can arrange TLB flushes on this and other CPUs.
static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
struct kvm_mips_tlb *tlb)
/* No need to flush for entries which are already invalid */
if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
/* User address space doesn't need flushing for KSeg2/3 changes */
user = tlb->tlb_hi < KVM_GUEST_KSEG0;
* Probe the shadow host TLB for the entry being overwritten, if one
* matches, invalidate it
kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
/* Invalidate the whole ASID on other CPUs */
cpu = smp_processor_id();
for_each_possible_cpu(i) {
vcpu->arch.guest_user_asid[i] = 0;
vcpu->arch.guest_kernel_asid[i] = 0;
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
int index = kvm_read_c0_guest_index(cop0);
struct kvm_mips_tlb *tlb = NULL;
unsigned long pc = vcpu->arch.pc;
if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
kvm_debug("%s: illegal index: %d\n", __func__, index);
kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0),
kvm_read_c0_guest_pagemask(cop0));
index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
tlb = &vcpu->arch.guest_tlb[index];
kvm_mips_invalidate_guest_tlb(vcpu, tlb);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0),
kvm_read_c0_guest_pagemask(cop0));
/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_mips_tlb *tlb = NULL;
unsigned long pc = vcpu->arch.pc;
get_random_bytes(&index, sizeof(index));
index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
tlb = &vcpu->arch.guest_tlb[index];
kvm_mips_invalidate_guest_tlb(vcpu, tlb);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0));
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
long entryhi = kvm_read_c0_guest_entryhi(cop0);
unsigned long pc = vcpu->arch.pc;
index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
kvm_write_c0_guest_index(cop0, index);
kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
* kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
* @vcpu: Virtual CPU.
* Finds the mask of bits which are writable in the guest's Config1 CP0
* register, by userland (currently read-only to the guest).
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
unsigned int mask = 0;
/* Permit FPU to be present if FPU is supported */
if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
mask |= MIPS_CONF1_FP;
* kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
* @vcpu: Virtual CPU.
* Finds the mask of bits which are writable in the guest's Config3 CP0
* register, by userland (currently read-only to the guest).
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
/* Config4 and ULRI are optional */
unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
/* Permit MSA to be present if MSA is supported */
if (kvm_mips_guest_can_have_msa(&vcpu->arch))
mask |= MIPS_CONF3_MSA;
* kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
* @vcpu: Virtual CPU.
* Finds the mask of bits which are writable in the guest's Config4 CP0
* register, by userland (currently read-only to the guest).
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
/* Config5 is optional */
unsigned int mask = MIPS_CONF_M;
mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;
* kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
* @vcpu: Virtual CPU.
* Finds the mask of bits which are writable in the guest's Config5 CP0
* register, by the guest itself.
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
unsigned int mask = 0;
/* Permit MSAEn changes if MSA supported and enabled */
if (kvm_mips_guest_has_msa(&vcpu->arch))
mask |= MIPS_CONF5_MSAEN;
* Permit guest FPU mode changes if FPU is enabled and the relevant
* feature exists according to FIR register.
if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
mask |= MIPS_CONF5_FRE;
/* We don't support UFR or UFE */
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
u32 *opc, u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
unsigned long curr_pc;
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
if (inst.co_format.co) {
switch (inst.co_format.func) {
case tlbr_op: /* Read indexed TLB entry */
er = kvm_mips_emul_tlbr(vcpu);
case tlbwi_op: /* Write indexed */
er = kvm_mips_emul_tlbwi(vcpu);
case tlbwr_op: /* Write random */
er = kvm_mips_emul_tlbwr(vcpu);
case tlbp_op: /* TLB Probe */
er = kvm_mips_emul_tlbp(vcpu);
kvm_err("!!!COP0_RFE!!!\n");
er = kvm_mips_emul_eret(vcpu);
goto dont_update_pc;
er = kvm_mips_emul_wait(vcpu);
rt = inst.c0r_format.rt;
rd = inst.c0r_format.rd;
sel = inst.c0r_format.sel;
switch (inst.c0r_format.rs) {
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
vcpu->arch.gprs[rt] =
(s32)kvm_mips_read_count(vcpu);
} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mfc0(inst, opc, vcpu);
vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mfc0(inst, opc, vcpu);
trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
KVM_TRACE_COP0(rd, sel),
vcpu->arch.gprs[rt]);
vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
KVM_TRACE_COP0(rd, sel),
vcpu->arch.gprs[rt]);
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
KVM_TRACE_COP0(rd, sel),
vcpu->arch.gprs[rt]);
if ((rd == MIPS_CP0_TLB_INDEX)
&& (vcpu->arch.gprs[rt] >=
KVM_MIPS_GUEST_TLB_SIZE)) {
kvm_err("Invalid TLB Index: %ld",
vcpu->arch.gprs[rt]);
#define C0_EBASE_CORE_MASK 0xff
if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
/* Preserve CORE number */
kvm_change_c0_guest_ebase(cop0,
~(C0_EBASE_CORE_MASK),
vcpu->arch.gprs[rt]);
kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
kvm_read_c0_guest_ebase(cop0));
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
if (((kvm_read_c0_guest_entryhi(cop0) &
KVM_ENTRYHI_ASID) != nasid)) {
trace_kvm_asid_change(vcpu,
kvm_read_c0_guest_entryhi(cop0)
* Regenerate/invalidate kernel MMU
* The user MMU context will be
* regenerated lazily on re-entry to
* guest user if the guest ASID actually
cpu = smp_processor_id();
kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm,
vcpu->arch.guest_kernel_asid[cpu] =
vcpu->arch.guest_kernel_mm.context.asid[cpu];
for_each_possible_cpu(i)
vcpu->arch.guest_kernel_asid[i] = 0;
kvm_write_c0_guest_entryhi(cop0,
vcpu->arch.gprs[rt]);
/* Are we writing to COUNT */
else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
/* If we are writing to COMPARE */
/* Clear pending timer interrupt, if any */
kvm_mips_write_compare(vcpu,
vcpu->arch.gprs[rt],
} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
unsigned int old_val, val, change;
old_val = kvm_read_c0_guest_status(cop0);
val = vcpu->arch.gprs[rt];
change = val ^ old_val;
/* Make sure that the NMI bit is never set */
* Don't allow CU1 or FR to be set unless FPU
* capability enabled and exists in guest
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
val &= ~(ST0_CU1 | ST0_FR);
* Also don't allow FR to be set if host doesn't
if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
/* Handle changes in FPU mode */
* FPU and Vector register state is made
* UNPREDICTABLE by a change of FR, so don't
* even bother saving it.
if (change & ST0_FR)
* If MSA state is already live, it is undefined
* how it interacts with FR=0 FPU state, and we
* don't want to hit reserved instruction
* exceptions trying to save the MSA state later
* when CU=1 && FR=1, so play it safe and save
if (change & ST0_CU1 && !(val & ST0_FR) &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
* Propagate CU1 (FPU enable) changes
* immediately if the FPU context is already
* loaded. When disabling we leave the context
* loaded so it can be quickly enabled again in
if (change & ST0_CU1 &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_status(ST0_CU1, val);
kvm_write_c0_guest_status(cop0, val);
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
* If FPU present, we need CU1/FR bits to take
* effect fairly soon.
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
kvm_mips_trans_mtc0(inst, opc, vcpu);
} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
unsigned int old_val, val, change, wrmask;
old_val = kvm_read_c0_guest_config5(cop0);
val = vcpu->arch.gprs[rt];
/* Only a few bits are writable in Config5 */
wrmask = kvm_mips_config5_wrmask(vcpu);
change = (val ^ old_val) & wrmask;
val = old_val ^ change;
/* Handle changes in FPU/MSA modes */
* Propagate FRE changes immediately if the FPU
* context is already loaded.
if (change & MIPS_CONF5_FRE &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_config5(MIPS_CONF5_FRE, val);
* Propagate MSAEn changes immediately if the
* MSA context is already loaded. When disabling
* we leave the context loaded so it can be
* quickly enabled again in the near future.
if (change & MIPS_CONF5_MSAEN &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
change_c0_config5(MIPS_CONF5_MSAEN,
kvm_write_c0_guest_config5(cop0, val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
u32 old_cause, new_cause;
old_cause = kvm_read_c0_guest_cause(cop0);
new_cause = vcpu->arch.gprs[rt];
/* Update R/W bits */
kvm_change_c0_guest_cause(cop0, 0x08800300,
/* DC bit enabling/disabling timer? */
if ((old_cause ^ new_cause) & CAUSEF_DC) {
if (new_cause & CAUSEF_DC)
kvm_mips_count_disable_cause(vcpu);
kvm_mips_count_enable_cause(vcpu);
} else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
u32 mask = MIPS_HWRENA_CPUNUM |
MIPS_HWRENA_SYNCISTEP |
if (kvm_read_c0_guest_config3(cop0) &
mask |= MIPS_HWRENA_ULR;
cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mtc0(inst, opc, vcpu);
kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
vcpu->arch.pc, rt, rd, sel);
trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
KVM_TRACE_COP0(rd, sel),
vcpu->arch.gprs[rt]);
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[MIPS_CP0_STATUS][0]++;
vcpu->arch.gprs[rt] =
kvm_read_c0_guest_status(cop0);
if (inst.mfmc0_format.sc) {
kvm_debug("[%#lx] mfmc0_op: EI\n",
kvm_set_c0_guest_status(cop0, ST0_IE);
kvm_debug("[%#lx] mfmc0_op: DI\n",
kvm_clear_c0_guest_status(cop0, ST0_IE);
u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
* We don't support any shadow register sets, so
* SRSCtl[PSS] == SRSCtl[CSS] = 0
kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
vcpu->arch.gprs[rt]);
vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
vcpu->arch.pc, inst.c0r_format.rs);
/* Rollback PC only if emulation was unsuccessful */
if (er == EMULATE_FAIL)
vcpu->arch.pc = curr_pc;
* This is for special instructions whose emulation
* updates the PC, so do not overwrite the PC under
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
enum emulation_result er = EMULATE_DO_MMIO;
void *data = run->mmio.data;
unsigned long curr_pc;
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
rt = inst.i_format.rt;
switch (inst.i_format.opcode) {
if (bytes > sizeof(run->mmio.data)) {
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
run->mmio.len = bytes;
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
*(u8 *) data = vcpu->arch.gprs[rt];
kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
if (bytes > sizeof(run->mmio.data)) {
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
run->mmio.len = bytes;
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
*(u32 *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u32 *) data);
if (bytes > sizeof(run->mmio.data)) {
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
run->mmio.len = bytes;
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
*(u16 *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(u32 *) data);
kvm_err("Store not yet supported (inst=0x%08x)\n",
/* Rollback PC if emulation was unsuccessful */
if (er == EMULATE_FAIL)
vcpu->arch.pc = curr_pc;
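/*
 * Illustration: for a single "sb" to an emulated device register, the
 * state handed back to userland by the code above would look like:
 *
 *	run->mmio.phys_addr == gva_to_gpa(host_cp0_badvaddr)
 *	run->mmio.len       == 1
 *	run->mmio.is_write  == 1
 *	run->mmio.data[0]   == low byte of gprs[rt]
 *
 * and the VMM performs the device write before re-entering the guest.
 */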
1527 enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1528 u32 cause, struct kvm_run *run,
1529 struct kvm_vcpu *vcpu)
1531 enum emulation_result er = EMULATE_DO_MMIO;
1535 rt = inst.i_format.rt;
1536 op = inst.i_format.opcode;
1538 vcpu->arch.pending_load_cause = cause;
1539 vcpu->arch.io_gpr = rt;
1544 if (bytes > sizeof(run->mmio.data)) {
1545 kvm_err("%s: bad MMIO length: %d\n", __func__,
1550 run->mmio.phys_addr =
1551 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1553 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1558 run->mmio.len = bytes;
1559 run->mmio.is_write = 0;
1560 vcpu->mmio_needed = 1;
1561 vcpu->mmio_is_write = 0;
1567 if (bytes > sizeof(run->mmio.data)) {
1568 kvm_err("%s: bad MMIO length: %d\n", __func__,
1573 run->mmio.phys_addr =
1574 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1576 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1581 run->mmio.len = bytes;
1582 run->mmio.is_write = 0;
1583 vcpu->mmio_needed = 1;
1584 vcpu->mmio_is_write = 0;
1587 vcpu->mmio_needed = 2;
1589 vcpu->mmio_needed = 1;
1596 if (bytes > sizeof(run->mmio.data)) {
1597 kvm_err("%s: bad MMIO length: %d\n", __func__,
1602 run->mmio.phys_addr =
1603 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1605 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1610 run->mmio.len = bytes;
1611 run->mmio.is_write = 0;
1612 vcpu->mmio_is_write = 0;
1615 vcpu->mmio_needed = 2;
1617 vcpu->mmio_needed = 1;
1622 kvm_err("Load not yet supported (inst=0x%08x)\n",
1631 enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1632 u32 *opc, u32 cause,
1633 struct kvm_run *run,
1634 struct kvm_vcpu *vcpu)
1636 struct mips_coproc *cop0 = vcpu->arch.cop0;
1637 enum emulation_result er = EMULATE_DONE;
1638 u32 cache, op_inst, op, base;
1640 struct kvm_vcpu_arch *arch = &vcpu->arch;
1642 unsigned long curr_pc;
1645 * Update PC and hold onto current PC in case there is
1646 * an error and we want to rollback the PC
1648 curr_pc = vcpu->arch.pc;
1649 er = update_pc(vcpu, cause);
1650 if (er == EMULATE_FAIL)
1653 base = inst.i_format.rs;
1654 op_inst = inst.i_format.rt;
1655 if (cpu_has_mips_r6)
1656 offset = inst.spec3_format.simmediate;
1658 offset = inst.i_format.simmediate;
1659 cache = op_inst & CacheOp_Cache;
1660 op = op_inst & CacheOp_Op;
1662 va = arch->gprs[base] + offset;
1664 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1665 cache, op, base, arch->gprs[base], offset);
1668 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
1669 * invalidate the caches entirely by stepping through all the
1672 if (op == Index_Writeback_Inv) {
1673 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1674 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1675 arch->gprs[base], offset);
1677 if (cache == Cache_D)
1679 else if (cache == Cache_I)
1682 kvm_err("%s: unsupported CACHE INDEX operation\n",
1684 return EMULATE_FAIL;
1687 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1688 kvm_mips_trans_cache_index(inst, opc, vcpu);
1694 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1695 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
1696 kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
1697 kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
1698 __func__, va, vcpu, read_c0_entryhi());
1703 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1704 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1707 /* If an entry already exists then skip */
1708 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
1712 * If address not in the guest TLB, then give the guest a fault,
1713 * the resulting handler will do the right thing
1715 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
1716 (kvm_read_c0_guest_entryhi
1717 (cop0) & KVM_ENTRYHI_ASID));
1720 vcpu->arch.host_cp0_badvaddr = va;
1721 vcpu->arch.pc = curr_pc;
1722 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1725 goto dont_update_pc;
1727 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1729 * Check if the entry is valid, if not then setup a TLB
1730 * invalid exception to the guest
1732 if (!TLB_IS_VALID(*tlb, va)) {
1733 vcpu->arch.host_cp0_badvaddr = va;
1734 vcpu->arch.pc = curr_pc;
1735 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1738 goto dont_update_pc;
1741 * We fault an entry from the guest tlb to the
1744 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
1745 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
1746 __func__, va, index, vcpu,
1754 kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1755 cache, op, base, arch->gprs[base], offset);
1763 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1764 if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
1765 flush_dcache_line(va);
1767 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1769 * Replace the CACHE instruction, with a SYNCI, not the same,
1772 kvm_mips_trans_cache_va(inst, opc, vcpu);
1774 } else if (op_inst == Hit_Invalidate_I) {
1775 flush_dcache_line(va);
1776 flush_icache_line(va);
1778 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1779 /* Replace the CACHE instruction, with a SYNCI */
1780 kvm_mips_trans_cache_va(inst, opc, vcpu);
1783 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1784 cache, op, base, arch->gprs[base], offset);
1790 /* Rollback PC only if emulation was unsuccessful */
1791 if (er == EMULATE_FAIL)
1792 vcpu->arch.pc = curr_pc;
1796 * This is for exceptions whose emulation updates the PC, so do not
1797 * overwrite the PC under any circumstances
1803 enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
1804 struct kvm_run *run,
1805 struct kvm_vcpu *vcpu)
1807 union mips_instruction inst;
1808 enum emulation_result er = EMULATE_DONE;
1810 /* Fetch the instruction. */
1811 if (cause & CAUSEF_BD)
1814 inst.word = kvm_get_inst(opc, vcpu);
1816 switch (inst.r_format.opcode) {
1818 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1823 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1830 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1833 #ifndef CONFIG_CPU_MIPSR6
1835 ++vcpu->stat.cache_exits;
1836 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1837 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1841 switch (inst.spec3_format.func) {
1843 ++vcpu->stat.cache_exits;
1844 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1845 er = kvm_mips_emulate_cache(inst, opc, cause, run,
1856 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1858 kvm_arch_vcpu_dump_regs(vcpu);
1866 enum emulation_result kvm_mips_emulate_syscall(u32 cause,
1868 struct kvm_run *run,
1869 struct kvm_vcpu *vcpu)
1871 struct mips_coproc *cop0 = vcpu->arch.cop0;
1872 struct kvm_vcpu_arch *arch = &vcpu->arch;
1873 enum emulation_result er = EMULATE_DONE;
1875 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1877 kvm_write_c0_guest_epc(cop0, arch->pc);
1878 kvm_set_c0_guest_status(cop0, ST0_EXL);
1880 if (cause & CAUSEF_BD)
1881 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1883 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1885 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1887 kvm_change_c0_guest_cause(cop0, (0xff),
1888 (EXCCODE_SYS << CAUSEB_EXCCODE));
1890 /* Set PC to the exception entry point */
1891 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1894 kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
1901 enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
1903 struct kvm_run *run,
1904 struct kvm_vcpu *vcpu)
1906 struct mips_coproc *cop0 = vcpu->arch.cop0;
1907 struct kvm_vcpu_arch *arch = &vcpu->arch;
1908 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
1909 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1911 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1913 kvm_write_c0_guest_epc(cop0, arch->pc);
1914 kvm_set_c0_guest_status(cop0, ST0_EXL);
1916 if (cause & CAUSEF_BD)
1917 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1919 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1921 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1924 /* set pc to the exception entry point */
1925 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1928 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1931 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1934 kvm_change_c0_guest_cause(cop0, (0xff),
1935 (EXCCODE_TLBL << CAUSEB_EXCCODE));
1937 /* setup badvaddr, context and entryhi registers for the guest */
1938 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1939 /* XXXKYMA: is the context register used by linux??? */
1940 kvm_write_c0_guest_entryhi(cop0, entryhi);
1941 /* Blow away the shadow host TLBs */
1942 kvm_mips_flush_host_tlb(1);
1944 return EMULATE_DONE;
1947 enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
1949 struct kvm_run *run,
1950 struct kvm_vcpu *vcpu)
1952 struct mips_coproc *cop0 = vcpu->arch.cop0;
1953 struct kvm_vcpu_arch *arch = &vcpu->arch;
1954 unsigned long entryhi =
1955 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1956 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1958 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1960 kvm_write_c0_guest_epc(cop0, arch->pc);
1961 kvm_set_c0_guest_status(cop0, ST0_EXL);
1963 if (cause & CAUSEF_BD)
1964 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1966 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1968 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1971 /* set pc to the exception entry point */
1972 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1975 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1977 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1980 kvm_change_c0_guest_cause(cop0, (0xff),
1981 (EXCCODE_TLBL << CAUSEB_EXCCODE));
1983 /* setup badvaddr, context and entryhi registers for the guest */
1984 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1985 /* XXXKYMA: is the context register used by linux??? */
1986 kvm_write_c0_guest_entryhi(cop0, entryhi);
1987 /* Blow away the shadow host TLBs */
1988 kvm_mips_flush_host_tlb(1);
1990 return EMULATE_DONE;
1993 enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
1995 struct kvm_run *run,
1996 struct kvm_vcpu *vcpu)
1998 struct mips_coproc *cop0 = vcpu->arch.cop0;
1999 struct kvm_vcpu_arch *arch = &vcpu->arch;
2000 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2001 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2003 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2005 kvm_write_c0_guest_epc(cop0, arch->pc);
2006 kvm_set_c0_guest_status(cop0, ST0_EXL);
2008 if (cause & CAUSEF_BD)
2009 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2011 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2013 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
2016 /* Set PC to the exception entry point */
2017 arch->pc = KVM_GUEST_KSEG0 + 0x0;
2019 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2021 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2024 kvm_change_c0_guest_cause(cop0, (0xff),
2025 (EXCCODE_TLBS << CAUSEB_EXCCODE));
2027 /* setup badvaddr, context and entryhi registers for the guest */
2028 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2029 /* XXXKYMA: is the context register used by linux??? */
2030 kvm_write_c0_guest_entryhi(cop0, entryhi);
2031 /* Blow away the shadow host TLBs */
2032 kvm_mips_flush_host_tlb(1);
2034 return EMULATE_DONE;
2037 enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
2039 struct kvm_run *run,
2040 struct kvm_vcpu *vcpu)
2042 struct mips_coproc *cop0 = vcpu->arch.cop0;
2043 struct kvm_vcpu_arch *arch = &vcpu->arch;
2044 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2045 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2047 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2049 kvm_write_c0_guest_epc(cop0, arch->pc);
2050 kvm_set_c0_guest_status(cop0, ST0_EXL);
2052 if (cause & CAUSEF_BD)
2053 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2055 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2057 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
2060 /* Set PC to the exception entry point */
2061 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2063 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2065 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2068 kvm_change_c0_guest_cause(cop0, (0xff),
2069 (EXCCODE_TLBS << CAUSEB_EXCCODE));
2071 /* setup badvaddr, context and entryhi registers for the guest */
2072 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2073 /* XXXKYMA: is the context register used by linux??? */
2074 kvm_write_c0_guest_entryhi(cop0, entryhi);
2075 /* Blow away the shadow host TLBs */
2076 kvm_mips_flush_host_tlb(1);
2078 return EMULATE_DONE;
2081 /* TLBMOD: store into address matching TLB with Dirty bit off */
2082 enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
2083 struct kvm_run *run,
2084 struct kvm_vcpu *vcpu)
2086 enum emulation_result er = EMULATE_DONE;
2088 struct mips_coproc *cop0 = vcpu->arch.cop0;
2089 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2090 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2093 /* If address not in the guest TLB, then we are in trouble */
2094 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
2096 /* XXXKYMA Invalidate and retry */
2097 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
2098 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
2100 kvm_mips_dump_guest_tlbs(vcpu);
2101 kvm_mips_dump_host_tlbs();
2102 return EMULATE_FAIL;
2106 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
2110 enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
2112 struct kvm_run *run,
2113 struct kvm_vcpu *vcpu)
2115 struct mips_coproc *cop0 = vcpu->arch.cop0;
2116 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2117 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2118 struct kvm_vcpu_arch *arch = &vcpu->arch;
2120 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2122 kvm_write_c0_guest_epc(cop0, arch->pc);
2123 kvm_set_c0_guest_status(cop0, ST0_EXL);
2125 if (cause & CAUSEF_BD)
2126 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2128 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2130 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2133 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2135 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2137 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2140 kvm_change_c0_guest_cause(cop0, (0xff),
2141 (EXCCODE_MOD << CAUSEB_EXCCODE));
2143 /* setup badvaddr, context and entryhi registers for the guest */
2144 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2145 /* XXXKYMA: is the context register used by linux??? */
2146 kvm_write_c0_guest_entryhi(cop0, entryhi);
2147 /* Blow away the shadow host TLBs */
2148 kvm_mips_flush_host_tlb(1);
2150 return EMULATE_DONE;
2153 enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
2155 struct kvm_run *run,
2156 struct kvm_vcpu *vcpu)
2158 struct mips_coproc *cop0 = vcpu->arch.cop0;
2159 struct kvm_vcpu_arch *arch = &vcpu->arch;
2161 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2163 kvm_write_c0_guest_epc(cop0, arch->pc);
2164 kvm_set_c0_guest_status(cop0, ST0_EXL);
2166 if (cause & CAUSEF_BD)
2167 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2169 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2173 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2175 kvm_change_c0_guest_cause(cop0, (0xff),
2176 (EXCCODE_CPU << CAUSEB_EXCCODE));
2177 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2179 return EMULATE_DONE;
2182 enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
2184 struct kvm_run *run,
2185 struct kvm_vcpu *vcpu)
2187 struct mips_coproc *cop0 = vcpu->arch.cop0;
2188 struct kvm_vcpu_arch *arch = &vcpu->arch;
2189 enum emulation_result er = EMULATE_DONE;
2191 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2193 kvm_write_c0_guest_epc(cop0, arch->pc);
2194 kvm_set_c0_guest_status(cop0, ST0_EXL);
2196 if (cause & CAUSEF_BD)
2197 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2199 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2201 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2203 kvm_change_c0_guest_cause(cop0, (0xff),
2204 (EXCCODE_RI << CAUSEB_EXCCODE));
2206 /* Set PC to the exception entry point */
2207 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2210 kvm_err("Trying to deliver RI when EXL is already set\n");
2217 enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
2219 struct kvm_run *run,
2220 struct kvm_vcpu *vcpu)
2222 struct mips_coproc *cop0 = vcpu->arch.cop0;
2223 struct kvm_vcpu_arch *arch = &vcpu->arch;
2224 enum emulation_result er = EMULATE_DONE;
2226 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2228 kvm_write_c0_guest_epc(cop0, arch->pc);
2229 kvm_set_c0_guest_status(cop0, ST0_EXL);
2231 if (cause & CAUSEF_BD)
2232 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2234 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2236 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2238 kvm_change_c0_guest_cause(cop0, (0xff),
2239 (EXCCODE_BP << CAUSEB_EXCCODE));
2241 /* Set PC to the exception entry point */
2242 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2245 kvm_err("Trying to deliver BP when EXL is already set\n");
2252 enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
2254 struct kvm_run *run,
2255 struct kvm_vcpu *vcpu)
2257 struct mips_coproc *cop0 = vcpu->arch.cop0;
2258 struct kvm_vcpu_arch *arch = &vcpu->arch;
2259 enum emulation_result er = EMULATE_DONE;
2261 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2263 kvm_write_c0_guest_epc(cop0, arch->pc);
2264 kvm_set_c0_guest_status(cop0, ST0_EXL);
2266 if (cause & CAUSEF_BD)
2267 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2269 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2271 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2273 kvm_change_c0_guest_cause(cop0, (0xff),
2274 (EXCCODE_TR << CAUSEB_EXCCODE));
2276 /* Set PC to the exception entry point */
2277 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2280 kvm_err("Trying to deliver TRAP when EXL is already set\n");
2287 enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
2289 struct kvm_run *run,
2290 struct kvm_vcpu *vcpu)
2292 struct mips_coproc *cop0 = vcpu->arch.cop0;
2293 struct kvm_vcpu_arch *arch = &vcpu->arch;
2294 enum emulation_result er = EMULATE_DONE;
2296 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2298 kvm_write_c0_guest_epc(cop0, arch->pc);
2299 kvm_set_c0_guest_status(cop0, ST0_EXL);
2301 if (cause & CAUSEF_BD)
2302 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2304 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2306 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2308 kvm_change_c0_guest_cause(cop0, (0xff),
2309 (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
2311 /* Set PC to the exception entry point */
2312 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2315 kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2322 enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
2324 struct kvm_run *run,
2325 struct kvm_vcpu *vcpu)
2327 struct mips_coproc *cop0 = vcpu->arch.cop0;
2328 struct kvm_vcpu_arch *arch = &vcpu->arch;
2329 enum emulation_result er = EMULATE_DONE;
2331 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2333 kvm_write_c0_guest_epc(cop0, arch->pc);
2334 kvm_set_c0_guest_status(cop0, ST0_EXL);
2336 if (cause & CAUSEF_BD)
2337 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2339 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2341 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2343 kvm_change_c0_guest_cause(cop0, (0xff),
2344 (EXCCODE_FPE << CAUSEB_EXCCODE));
2346 /* Set PC to the exception entry point */
2347 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2350 kvm_err("Trying to deliver FPE when EXL is already set\n");
enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSADIS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
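/*
 * Handle a Reserved Instruction exception taken by the guest: RDHWR reads of
 * the hardware registers (CPU number, SYNCI step, cycle counter, counter
 * resolution, UserLocal) are emulated here; any other instruction is
 * reflected back to the guest as an RI exception.
 */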
enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
					 struct kvm_run *run,
					 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	union mips_instruction inst;

	/*
	 * Update the PC, but hold onto the current PC in case there is
	 * an error and we need to roll it back.
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst.word = kvm_get_inst(opc, vcpu);

	if (inst.word == KVM_INVALID_INST) {
		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if (inst.r_format.opcode == spec3_op &&
	    inst.r_format.func == rdhwr_op &&
	    inst.r_format.rs == 0 &&
	    (inst.r_format.re >> 3) == 0) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = inst.r_format.rd;
		int rt = inst.r_format.rt;
		int sel = inst.r_format.re & 0x7;

		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case MIPS_HWR_CPUNUM:		/* CPU number */
			arch->gprs[rt] = vcpu->vcpu_id;
			break;
		case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case MIPS_HWR_CC:		/* Read count register */
			arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
			break;
		case MIPS_HWR_CCRES:		/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case MIPS_HWR_ULR:		/* Read UserLocal register */
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;
		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}

		trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
			      vcpu->arch.gprs[rt]);
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n",
			  opc, inst.word);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Roll back the PC (if in a branch delay slot the PC already points
	 * to the branch target) and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}
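/*
 * Complete a pending MMIO load after userland has filled in run->mmio.data:
 * sign- or zero-extend the data into the saved destination GPR (io_gpr) and
 * advance the PC past the load, honouring a branch delay slot if needed.
 */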
enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
						  struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(s32 *) run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *) run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
			  vcpu->mmio_needed);

done:
	return er;
}
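/*
 * Deliver a generic guest exception with the given Cause.ExcCode, also
 * updating the guest BadVAddr; used below to reflect privilege-check
 * failures back into the guest.
 */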
static enum emulation_result kvm_mips_emulate_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		kvm_err("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
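/*
 * Check whether an exception taken while the guest was in user mode is one
 * the guest may legitimately receive; TLB misses and address errors on guest
 * kernel addresses have their cause rewritten before delivery.
 */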
enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case EXCCODE_INT:
		case EXCCODE_SYS:
		case EXCCODE_BP:
		case EXCCODE_RI:
		case EXCCODE_TR:
		case EXCCODE_MSAFPE:
		case EXCCODE_FPE:
		case EXCCODE_MSADIS:
			break;
		case EXCCODE_CPU:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;
		case EXCCODE_MOD:
			break;
		case EXCCODE_TLBL:
			/*
			 * If we are accessing guest kernel space, send an
			 * address error exception to the guest.
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;
		case EXCCODE_TLBS:
			/*
			 * If we are accessing guest kernel space, send an
			 * address error exception to the guest.
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;
		case EXCCODE_ADES:
			kvm_debug("%s: address error ST @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		case EXCCODE_ADEL:
			kvm_debug("%s: address error LD @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL)
		kvm_mips_emulate_exc(cause, opc, run, vcpu);

	return er;
}
/*
 * User Address (UA) fault. This can happen if:
 * (1) the TLB entry is not present/valid in both the guest and the shadow
 *     host TLBs; in this case we pass the fault on to the guest kernel and
 *     let it handle it.
 * (2) the TLB entry is present in the guest TLB but not in the shadow host
 *     TLB; in this case we inject the entry from the guest TLB into the
 *     shadow host TLB.
 */
enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the guest TLB; if the entry is not there,
	 * send the guest an exception. The guest exception handler should
	 * then inject an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
		      (va & VPN2_MASK) |
		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
		       KVM_ENTRYHI_ASID));
	if (index < 0) {
		if (exccode == EXCCODE_TLBL) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == EXCCODE_TLBS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			kvm_err("%s: invalid exc code: %d\n", __func__,
				exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * If the entry is not valid, set up a TLB invalid exception
		 * for the guest.
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == EXCCODE_TLBL) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == EXCCODE_TLBS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				kvm_err("%s: invalid exc code: %d\n", __func__,
					exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
				  tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
			/*
			 * OK, we have a guest TLB entry; now inject it into
			 * the shadow host TLB.
			 */
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, va, index, vcpu,