/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

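/*
 * Temporarily mask out CONFIG_MIPS_MT (restored below) so that r4kcache.h
 * provides the plain, non-MT variants of the cache ops.
 */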
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "commpage.h"

#include "trace.h"

/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should be called only when the CPU is in a branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
        unsigned long instpc)
{
        unsigned int dspcontrol;
        union mips_instruction insn;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        long epc = instpc;
        long nextpc = KVM_INVALID_INST;

        if (epc & 3)
                goto unaligned;

        /* Read the instruction */
        insn.word = kvm_get_inst((u32 *) epc, vcpu);

        if (insn.word == KVM_INVALID_INST)
                return KVM_INVALID_INST;

        switch (insn.i_format.opcode) {
                /* jr and jalr are in r_format format. */
        case spec_op:
                switch (insn.r_format.func) {
                case jalr_op:
                        arch->gprs[insn.r_format.rd] = epc + 8;
                        /* Fall through */
                case jr_op:
                        nextpc = arch->gprs[insn.r_format.rs];
                        break;
                }
                break;

                /*
                 * This group contains:
                 * bltz_op, bgez_op, bltzl_op, bgezl_op,
                 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
                 */
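                /*
                 * For this group a taken branch targets
                 * epc + 4 + (sign-extended offset << 2); a not-taken branch
                 * falls through past the delay slot to epc + 8.
                 */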
        case bcond_op:
                switch (insn.i_format.rt) {
                case bltz_op:
                case bltzl_op:
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgez_op:
                case bgezl_op:
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bltzal_op:
                case bltzall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgezal_op:
                case bgezall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                case bposge32_op:
                        if (!cpu_has_dsp)
                                goto sigill;

                        dspcontrol = rddsp(0x01);

                        if (dspcontrol >= 32)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                }
                break;

                /* These are unconditional and in j_format. */
        case jal_op:
                arch->gprs[31] = instpc + 8;
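                /* Fall through */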
        case j_op:
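                /*
                 * Jump target = top 4 bits of the delay-slot PC with the
                 * 26-bit instruction index shifted left 2 in the low bits.
                 */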
                epc += 4;
                epc >>= 28;
                epc <<= 28;
                epc |= (insn.j_format.target << 2);
                nextpc = epc;
                break;

                /* These are conditional and in i_format. */
        case beq_op:
        case beql_op:
                if (arch->gprs[insn.i_format.rs] ==
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bne_op:
        case bnel_op:
                if (arch->gprs[insn.i_format.rs] !=
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case blez_op:   /* POP06 */
#ifndef CONFIG_CPU_MIPSR6
        case blezl_op:  /* removed in R6 */
#endif
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                if ((long)arch->gprs[insn.i_format.rs] <= 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bgtz_op:   /* POP07 */
#ifndef CONFIG_CPU_MIPSR6
        case bgtzl_op:  /* removed in R6 */
#endif
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                if ((long)arch->gprs[insn.i_format.rs] > 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

                /* And now the FPA/cp1 branch instructions. */
        case cop1_op:
                kvm_err("%s: unsupported cop1_op\n", __func__);
                break;

#ifdef CONFIG_CPU_MIPSR6
        /* R6 added the following compact branches with forbidden slots */
        case blezl_op:  /* POP26 */
        case bgtzl_op:  /* POP27 */
                /* only rt == 0 isn't compact branch */
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                break;
        case pop10_op:
        case pop30_op:
                /* only rs == rt == 0 is reserved, rest are compact branches */
                if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
                        goto compact_branch;
                break;
        case pop66_op:
        case pop76_op:
                /* only rs == 0 isn't compact branch */
                if (insn.i_format.rs != 0)
                        goto compact_branch;
                break;
compact_branch:
                /*
                 * If we've hit an exception on the forbidden slot, then
                 * the branch must not have been taken.
                 */
                epc += 8;
                nextpc = epc;
                break;
#else
compact_branch:
                /* Compact branches not supported before R6 */
                break;
#endif
        }

        return nextpc;

unaligned:
        kvm_err("%s: unaligned epc\n", __func__);
        return nextpc;

sigill:
        kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
        return nextpc;
}

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
        unsigned long branch_pc;
        enum emulation_result er = EMULATE_DONE;

        if (cause & CAUSEF_BD) {
                branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
                if (branch_pc == KVM_INVALID_INST) {
                        er = EMULATE_FAIL;
                } else {
                        vcpu->arch.pc = branch_pc;
                        kvm_debug("BD update_pc(): New PC: %#lx\n",
                                  vcpu->arch.pc);
                }
        } else
                vcpu->arch.pc += 4;

        kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

        return er;
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:       Virtual CPU.
 *
 * Returns:     1 if the CP0_Count timer is disabled by either the guest
 *              CP0_Cause.DC bit or the count_ctl.DC bit.
 *              0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        return  (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
                (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
        s64 now_ns, periods;
        u64 delta;

        now_ns = ktime_to_ns(now);
        delta = now_ns + vcpu->arch.count_dyn_bias;

        if (delta >= vcpu->arch.count_period) {
                /* If delta is out of safe range the bias needs adjusting */
                periods = div64_s64(now_ns, vcpu->arch.count_period);
                vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
                /* Recalculate delta with new bias */
                delta = now_ns + vcpu->arch.count_dyn_bias;
        }

        /*
         * We've ensured that:
         *   delta < count_period
         *
         * Therefore the intermediate delta*count_hz will never overflow since
         * at the boundary condition:
         *   delta = count_period
         *   delta = NSEC_PER_SEC * 2^32 / count_hz
         *   delta * count_hz = NSEC_PER_SEC * 2^32
         */
        return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:       Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:     Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
        if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                return vcpu->arch.count_resume;

        return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:       Virtual CPU.
 * @now:        Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now, and handles the
 * case where the timer interrupt is pending but hasn't been handled yet.
 *
 * Returns:     The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        ktime_t expires, threshold;
        u32 count, compare;
        int running;

        /* Calculate the biased and scaled guest CP0_Count */
        count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
        compare = kvm_read_c0_guest_compare(cop0);

        /*
         * Find whether CP0_Count has reached the closest timer interrupt. If
         * not, we shouldn't inject it.
         */
        if ((s32)(count - compare) < 0)
                return count;

        /*
         * The CP0_Count we're going to return has already reached the closest
         * timer interrupt. Quickly check if it really is a new interrupt by
         * looking at whether the interval until the hrtimer expiry time is
         * less than 1/4 of the timer period.
         */
        expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
        threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
        if (ktime_before(expires, threshold)) {
                /*
                 * Cancel it while we handle it so there's no chance of
                 * interference with the timeout handler.
                 */
                running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

                /* Nothing should be waiting on the timeout */
                kvm_mips_callbacks->queue_timer_int(vcpu);

                /*
                 * Restart the timer if it was running based on the expiry time
                 * we read, so that we don't push it back 2 periods.
                 */
                if (running) {
                        expires = ktime_add_ns(expires,
                                               vcpu->arch.count_period);
                        hrtimer_start(&vcpu->arch.comparecount_timer, expires,
                                      HRTIMER_MODE_ABS);
                }
        }

        return count;
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:       Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:     The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        /* If count disabled just read static copy of count */
        if (kvm_mips_count_disabled(vcpu))
                return kvm_read_c0_guest_count(cop0);

        return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:       Virtual CPU.
 * @count:      Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:     The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
        ktime_t now;

        /* stop hrtimer before finding time */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        now = ktime_get();

        /* find count at this point and handle pending hrtimer */
        *count = kvm_mips_read_count_running(vcpu, now);

        return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:       Virtual CPU.
 * @now:        ktime at point of resume.
 * @count:      CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case should
 * already have been handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
                                    ktime_t now, u32 count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 compare;
        u64 delta;
        ktime_t expire;

        /* Calculate timeout (wrap 0 to 2^32) */
        compare = kvm_read_c0_guest_compare(cop0);
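        /* compare == count yields a full 2^32-count period rather than zero */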
        delta = (u64)(u32)(compare - count - 1) + 1;
        delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
        expire = ktime_add_ns(now, delta);

        /* Update hrtimer to use new timeout */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:       Virtual CPU.
 * @count:      Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        ktime_t now;

        /* Calculate bias */
        now = kvm_mips_count_time(vcpu);
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        if (kvm_mips_count_disabled(vcpu))
                /* The timer's disabled, adjust the static count */
                kvm_write_c0_guest_count(cop0, count);
        else
                /* Update timeout */
                kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:       Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
        /* 100 MHz */
        vcpu->arch.count_hz = 100*1000*1000;
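        /* Period in ns for the 32-bit Count to wrap: ~42.95 s at 100 MHz */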
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
                                          vcpu->arch.count_hz);
        vcpu->arch.count_dyn_bias = 0;

        /* Starting at 0 */
        kvm_mips_write_count(vcpu, 0);
}

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:       Virtual CPU.
 * @count_hz:   Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:     -EINVAL if @count_hz is out of range.
 *              0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int dc;
        ktime_t now;
        u32 count;

        /* ensure the frequency is in a sensible range... */
        if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
                return -EINVAL;
        /* ... and has actually changed */
        if (vcpu->arch.count_hz == count_hz)
                return 0;

        /* Safely freeze timer so we can keep it continuous */
        dc = kvm_mips_count_disabled(vcpu);
        if (dc) {
                now = kvm_mips_count_time(vcpu);
                count = kvm_read_c0_guest_count(cop0);
        } else {
                now = kvm_mips_freeze_hrtimer(vcpu, &count);
        }

        /* Update the frequency */
        vcpu->arch.count_hz = count_hz;
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
        vcpu->arch.count_dyn_bias = 0;

        /* Calculate adjusted bias so dynamic count is unchanged */
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        /* Update and resume hrtimer */
        if (!dc)
                kvm_mips_resume_hrtimer(vcpu, now, count);
        return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:       Virtual CPU.
 * @compare:    New CP0_Compare value.
 * @ack:        Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int dc;
        u32 old_compare = kvm_read_c0_guest_compare(cop0);
        ktime_t now;
        u32 count;

        /* if unchanged, must just be an ack */
        if (old_compare == compare) {
                if (!ack)
                        return;
                kvm_mips_callbacks->dequeue_timer_int(vcpu);
                kvm_write_c0_guest_compare(cop0, compare);
                return;
        }

        /* freeze_hrtimer() takes care of timer interrupts <= count */
        dc = kvm_mips_count_disabled(vcpu);
        if (!dc)
                now = kvm_mips_freeze_hrtimer(vcpu, &count);

        if (ack)
                kvm_mips_callbacks->dequeue_timer_int(vcpu);

        kvm_write_c0_guest_compare(cop0, compare);

        /* resume_hrtimer() takes care of timer interrupts > count */
        if (!dc)
                kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:       Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:     The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 count;
        ktime_t now;

        /* Stop hrtimer */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);

        /* Set the static count from the dynamic count, handling pending TI */
        now = ktime_get();
        count = kvm_mips_read_count_running(vcpu, now);
        kvm_write_c0_guest_count(cop0, count);

        return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:       Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
        if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:       Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 count;

        kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

        /*
         * Set the dynamic count to match the static count.
         * This starts the hrtimer if count_ctl.DC allows it.
         * Otherwise it conveniently updates the biases.
         */
        count = kvm_read_c0_guest_count(cop0);
        kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:       Virtual CPU.
 * @count_ctl:  Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:     -EINVAL if reserved bits are set.
 *              0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        s64 changed = count_ctl ^ vcpu->arch.count_ctl;
        s64 delta;
        ktime_t expire, now;
        u32 count, compare;

        /* Only allow defined bits to be changed */
        if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
                return -EINVAL;

        /* Apply new value */
        vcpu->arch.count_ctl = count_ctl;

        /* Master CP0_Count disable */
        if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
                /* Is CP0_Cause.DC already disabling CP0_Count? */
                if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
                        if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
                                /* Just record the current time */
                                vcpu->arch.count_resume = ktime_get();
                } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
                        /* disable timer and record current time */
                        vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
                } else {
                        /*
                         * Calculate timeout relative to static count at resume
                         * time (wrap 0 to 2^32).
                         */
                        count = kvm_read_c0_guest_count(cop0);
                        compare = kvm_read_c0_guest_compare(cop0);
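                        /* compare == count yields a full period, not zero */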
                        delta = (u64)(u32)(compare - count - 1) + 1;
                        delta = div_u64(delta * NSEC_PER_SEC,
                                        vcpu->arch.count_hz);
                        expire = ktime_add_ns(vcpu->arch.count_resume, delta);

                        /* Handle pending interrupt */
                        now = ktime_get();
                        if (ktime_compare(now, expire) >= 0)
                                /* Nothing should be waiting on the timeout */
                                kvm_mips_callbacks->queue_timer_int(vcpu);

                        /* Resume hrtimer without changing bias */
                        count = kvm_mips_read_count_running(vcpu, now);
                        kvm_mips_resume_hrtimer(vcpu, now, count);
                }
        }

        return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:               Virtual CPU.
 * @count_resume:       Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:     -EINVAL if out of valid range (0..now).
 *              0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
        /*
         * It doesn't make sense for the resume time to be in the future, as it
         * would be possible for the next interrupt to be more than a full
         * period in the future.
         */
        if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
                return -EINVAL;

        vcpu->arch.count_resume = ns_to_ktime(count_resume);
        return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:       Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward one period.
 *
 * Returns:     The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
        /* Add the Count period to the current expiry time */
        hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
                               vcpu->arch.count_period);
        return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;

        if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
                kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
                          kvm_read_c0_guest_epc(cop0));
                kvm_clear_c0_guest_status(cop0, ST0_EXL);
                vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

        } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
                kvm_clear_c0_guest_status(cop0, ST0_ERL);
                vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
        } else {
                kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
                        vcpu->arch.pc);
                er = EMULATE_FAIL;
        }

        return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
        kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
                  vcpu->arch.pending_exceptions);

        ++vcpu->stat.wait_exits;
        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
        if (!vcpu->arch.pending_exceptions) {
                vcpu->arch.wait = 1;
                kvm_vcpu_block(vcpu);

                /*
                 * If we are runnable, then definitely go off to user space to
                 * check if any I/O interrupts are pending.
                 */
                if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                }
        }

        return EMULATE_DONE;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so
 * that we can catch this if things ever change.
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long pc = vcpu->arch.pc;

        kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
        return EMULATE_FAIL;
}

/**
 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
 * @vcpu:       VCPU with changed mappings.
 * @tlb:        TLB entry being removed.
 *
 * This is called to indicate a single change in guest MMU mappings, so that we
 * can arrange TLB flushes on this and other CPUs.
 */
static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
                                          struct kvm_mips_tlb *tlb)
{
        int cpu, i;
        bool user;

        /* No need to flush for entries which are already invalid */
        if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
                return;
        /* User address space doesn't need flushing for KSeg2/3 changes */
        user = tlb->tlb_hi < KVM_GUEST_KSEG0;

        preempt_disable();

        /*
         * Probe the shadow host TLB for the entry being overwritten; if one
         * matches, invalidate it.
         */
        kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

        /* Invalidate the whole ASID on other CPUs */
        cpu = smp_processor_id();
        for_each_possible_cpu(i) {
                if (i == cpu)
                        continue;
                if (user)
                        vcpu->arch.guest_user_asid[i] = 0;
                vcpu->arch.guest_kernel_asid[i] = 0;
        }

        preempt_enable();
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int index = kvm_read_c0_guest_index(cop0);
        struct kvm_mips_tlb *tlb = NULL;
        unsigned long pc = vcpu->arch.pc;

        if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
                kvm_debug("%s: illegal index: %d\n", __func__, index);
                kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                          pc, index, kvm_read_c0_guest_entryhi(cop0),
                          kvm_read_c0_guest_entrylo0(cop0),
                          kvm_read_c0_guest_entrylo1(cop0),
                          kvm_read_c0_guest_pagemask(cop0));
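                /* Clamp the bogus index into guest TLB range rather than fail */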
                index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
        }

        tlb = &vcpu->arch.guest_tlb[index];

        kvm_mips_invalidate_guest_tlb(vcpu, tlb);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0),
                  kvm_read_c0_guest_pagemask(cop0));

        return EMULATE_DONE;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb *tlb = NULL;
        unsigned long pc = vcpu->arch.pc;
        int index;

        get_random_bytes(&index, sizeof(index));
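        /* Mask to a valid index; assumes the TLB size is a power of two */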
        index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

        tlb = &vcpu->arch.guest_tlb[index];

        kvm_mips_invalidate_guest_tlb(vcpu, tlb);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0));

        return EMULATE_DONE;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        long entryhi = kvm_read_c0_guest_entryhi(cop0);
        unsigned long pc = vcpu->arch.pc;
        int index = -1;

        index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

        kvm_write_c0_guest_index(cop0, index);

        kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
                  index);

        return EMULATE_DONE;
}

/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = 0;

        /* Permit FPU to be present if FPU is supported */
        if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
                mask |= MIPS_CONF1_FP;

        return mask;
}

/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
        /* Config4 and ULRI are optional */
        unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;

        /* Permit MSA to be present if MSA is supported */
        if (kvm_mips_guest_can_have_msa(&vcpu->arch))
                mask |= MIPS_CONF3_MSA;

        return mask;
}

/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
        /* Config5 is optional */
        unsigned int mask = MIPS_CONF_M;

        /* KScrExist: Config4 bits 23:16, one bit per implemented KScratch */
        mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;

        return mask;
}

/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = 0;

        /* Permit MSAEn changes if MSA supported and enabled */
        if (kvm_mips_guest_has_msa(&vcpu->arch))
                mask |= MIPS_CONF5_MSAEN;

        /*
         * Permit guest FPU mode changes if FPU is enabled and the relevant
         * feature exists according to FIR register.
         */
        if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
                if (cpu_has_fre)
                        mask |= MIPS_CONF5_FRE;
                /* We don't support UFR or UFE */
        }

        return mask;
}

enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                           u32 *opc, u32 cause,
                                           struct kvm_run *run,
                                           struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        u32 rt, rd, sel;
        unsigned long curr_pc;
        int cpu, i;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        if (inst.co_format.co) {
                switch (inst.co_format.func) {
                case tlbr_op:   /*  Read indexed TLB entry  */
                        er = kvm_mips_emul_tlbr(vcpu);
                        break;
                case tlbwi_op:  /*  Write indexed  */
                        er = kvm_mips_emul_tlbwi(vcpu);
                        break;
                case tlbwr_op:  /*  Write random  */
                        er = kvm_mips_emul_tlbwr(vcpu);
                        break;
                case tlbp_op:   /* TLB Probe */
                        er = kvm_mips_emul_tlbp(vcpu);
                        break;
                case rfe_op:
                        kvm_err("!!!COP0_RFE!!!\n");
                        break;
                case eret_op:
                        er = kvm_mips_emul_eret(vcpu);
                        goto dont_update_pc;
                case wait_op:
                        er = kvm_mips_emul_wait(vcpu);
                        break;
                }
        } else {
                rt = inst.c0r_format.rt;
                rd = inst.c0r_format.rd;
                sel = inst.c0r_format.sel;

                switch (inst.c0r_format.rs) {
                case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        /* Get reg */
                        if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                vcpu->arch.gprs[rt] =
                                    (s32)kvm_mips_read_count(vcpu);
                        } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
                                vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        } else {
                                vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        }

                        trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        break;

                case dmfc_op:
                        vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

                        trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        break;

                case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);

                        if ((rd == MIPS_CP0_TLB_INDEX)
                            && (vcpu->arch.gprs[rt] >=
                                KVM_MIPS_GUEST_TLB_SIZE)) {
                                kvm_err("Invalid TLB Index: %ld\n",
                                        vcpu->arch.gprs[rt]);
                                er = EMULATE_FAIL;
                                break;
                        }
#define C0_EBASE_CORE_MASK 0xff
                        if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
                                /* Preserve CORE number */
                                kvm_change_c0_guest_ebase(cop0,
                                                          ~(C0_EBASE_CORE_MASK),
                                                          vcpu->arch.gprs[rt]);
                                kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
                                        kvm_read_c0_guest_ebase(cop0));
                        } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
                                u32 nasid =
                                        vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
                                if (((kvm_read_c0_guest_entryhi(cop0) &
                                      KVM_ENTRYHI_ASID) != nasid)) {
                                        trace_kvm_asid_change(vcpu,
                                                kvm_read_c0_guest_entryhi(cop0)
                                                        & KVM_ENTRYHI_ASID,
                                                nasid);

                                        /*
                                         * Regenerate/invalidate kernel MMU
                                         * context.
                                         * The user MMU context will be
                                         * regenerated lazily on re-entry to
                                         * guest user if the guest ASID actually
                                         * changes.
                                         */
                                        preempt_disable();
                                        cpu = smp_processor_id();
                                        kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm,
                                                                cpu, vcpu);
                                        vcpu->arch.guest_kernel_asid[cpu] =
                                                vcpu->arch.guest_kernel_mm.context.asid[cpu];
                                        for_each_possible_cpu(i)
                                                if (i != cpu)
                                                        vcpu->arch.guest_kernel_asid[i] = 0;
                                        preempt_enable();
                                }
                                kvm_write_c0_guest_entryhi(cop0,
                                                           vcpu->arch.gprs[rt]);
                        }
                        /* Are we writing to COUNT? */
                        else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
                                goto done;
                        } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
                                /*
                                 * Writing to COMPARE: clear any pending timer
                                 * interrupt as part of the write.
                                 */
                                kvm_mips_write_compare(vcpu,
                                                       vcpu->arch.gprs[rt],
                                                       true);
                        } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
                                unsigned int old_val, val, change;

                                old_val = kvm_read_c0_guest_status(cop0);
                                val = vcpu->arch.gprs[rt];
                                change = val ^ old_val;

                                /* Make sure that the NMI bit is never set */
                                val &= ~ST0_NMI;

                                /*
                                 * Don't allow CU1 or FR to be set unless FPU
                                 * capability enabled and exists in guest
                                 * configuration.
                                 */
                                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                                        val &= ~(ST0_CU1 | ST0_FR);

                                /*
                                 * Also don't allow FR to be set if host doesn't
                                 * support it.
                                 */
                                if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
                                        val &= ~ST0_FR;

                                /* Handle changes in FPU mode */
                                preempt_disable();

                                /*
                                 * FPU and Vector register state is made
                                 * UNPREDICTABLE by a change of FR, so don't
                                 * even bother saving it.
                                 */
                                if (change & ST0_FR)
                                        kvm_drop_fpu(vcpu);

                                /*
                                 * If MSA state is already live, it is undefined
                                 * how it interacts with FR=0 FPU state, and we
                                 * don't want to hit reserved instruction
                                 * exceptions trying to save the MSA state later
                                 * when CU=1 && FR=1, so play it safe and save
                                 * it first.
                                 */
                                if (change & ST0_CU1 && !(val & ST0_FR) &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
                                        kvm_lose_fpu(vcpu);

                                /*
                                 * Propagate CU1 (FPU enable) changes
                                 * immediately if the FPU context is already
                                 * loaded. When disabling we leave the context
                                 * loaded so it can be quickly enabled again in
                                 * the near future.
                                 */
                                if (change & ST0_CU1 &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
                                        change_c0_status(ST0_CU1, val);

                                preempt_enable();

                                kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                /*
                                 * If FPU present, we need CU1/FR bits to take
                                 * effect fairly soon.
                                 */
                                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                                        kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
                                unsigned int old_val, val, change, wrmask;

                                old_val = kvm_read_c0_guest_config5(cop0);
                                val = vcpu->arch.gprs[rt];

                                /* Only a few bits are writable in Config5 */
                                wrmask = kvm_mips_config5_wrmask(vcpu);
                                change = (val ^ old_val) & wrmask;
                                val = old_val ^ change;

                                /* Handle changes in FPU/MSA modes */
                                preempt_disable();

                                /*
                                 * Propagate FRE changes immediately if the FPU
                                 * context is already loaded.
                                 */
                                if (change & MIPS_CONF5_FRE &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
                                        change_c0_config5(MIPS_CONF5_FRE, val);

                                /*
                                 * Propagate MSAEn changes immediately if the
                                 * MSA context is already loaded. When disabling
                                 * we leave the context loaded so it can be
                                 * quickly enabled again in the near future.
                                 */
                                if (change & MIPS_CONF5_MSAEN &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
                                        change_c0_config5(MIPS_CONF5_MSAEN,
                                                          val);

                                preempt_enable();

                                kvm_write_c0_guest_config5(cop0, val);
                        } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
                                u32 old_cause, new_cause;

                                old_cause = kvm_read_c0_guest_cause(cop0);
                                new_cause = vcpu->arch.gprs[rt];
                                /* Update R/W bits */
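                                /*
                                 * 0x08800300 covers DC (bit 27), IV (bit 23)
                                 * and the software interrupts IP1:IP0 (9:8).
                                 */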
                                kvm_change_c0_guest_cause(cop0, 0x08800300,
                                                          new_cause);
                                /* DC bit enabling/disabling timer? */
                                if ((old_cause ^ new_cause) & CAUSEF_DC) {
                                        if (new_cause & CAUSEF_DC)
                                                kvm_mips_count_disable_cause(vcpu);
                                        else
                                                kvm_mips_count_enable_cause(vcpu);
                                }
                        } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
                                u32 mask = MIPS_HWRENA_CPUNUM |
                                           MIPS_HWRENA_SYNCISTEP |
                                           MIPS_HWRENA_CC |
                                           MIPS_HWRENA_CCRES;

                                if (kvm_read_c0_guest_config3(cop0) &
                                    MIPS_CONF3_ULRI)
                                        mask |= MIPS_HWRENA_ULR;
                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
                        } else {
                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        }
                        break;

                case dmtc_op:
                        kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
                                vcpu->arch.pc, rt, rd, sel);
                        trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        er = EMULATE_FAIL;
                        break;

                case mfmc0_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
                        if (rt != 0)
                                vcpu->arch.gprs[rt] =
                                    kvm_read_c0_guest_status(cop0);
                        /* The sc bit selects EI (set) vs DI (clear) */
                        if (inst.mfmc0_format.sc) {
                                kvm_debug("[%#lx] mfmc0_op: EI\n",
                                          vcpu->arch.pc);
                                kvm_set_c0_guest_status(cop0, ST0_IE);
                        } else {
                                kvm_debug("[%#lx] mfmc0_op: DI\n",
                                          vcpu->arch.pc);
                                kvm_clear_c0_guest_status(cop0, ST0_IE);
                        }
1371
1372                         break;
1373
1374                 case wrpgpr_op:
1375                         {
1376                                 u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1377                                 u32 pss =
1378                                     (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
1379                                 /*
1380                                  * We don't support any shadow register sets, so
1381                                  * SRSCtl[PSS] == SRSCtl[CSS] == 0
1382                                  */
1383                                 if (css || pss) {
1384                                         er = EMULATE_FAIL;
1385                                         break;
1386                                 }
1387                                 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1388                                           vcpu->arch.gprs[rt]);
1389                                 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1390                         }
1391                         break;
1392                 default:
1393                         kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1394                                 vcpu->arch.pc, inst.c0r_format.rs);
1395                         er = EMULATE_FAIL;
1396                         break;
1397                 }
1398         }
1399
1400 done:
1401         /* Rollback PC only if emulation was unsuccessful */
1402         if (er == EMULATE_FAIL)
1403                 vcpu->arch.pc = curr_pc;
1404
1405 dont_update_pc:
1406         /*
1407          * This is for special instructions whose emulation
1408          * updates the PC, so do not overwrite the PC under
1409          * any circumstances
1410          */
1411
1412         return er;
1413 }
1414
1415 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
1416                                              u32 cause,
1417                                              struct kvm_run *run,
1418                                              struct kvm_vcpu *vcpu)
1419 {
1420         enum emulation_result er = EMULATE_DO_MMIO;
1421         u32 rt;
1422         u32 bytes;
1423         void *data = run->mmio.data;
1424         unsigned long curr_pc;
1425
1426         /*
1427          * Update PC and hold onto current PC in case there is
1428          * an error and we want to rollback the PC
1429          */
1430         curr_pc = vcpu->arch.pc;
1431         er = update_pc(vcpu, cause);
1432         if (er == EMULATE_FAIL)
1433                 return er;
1434
1435         rt = inst.i_format.rt;
1436
1437         switch (inst.i_format.opcode) {
1438         case sb_op:
1439                 bytes = 1;
1440                 if (bytes > sizeof(run->mmio.data)) {
1441                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1442                                bytes);
                        er = EMULATE_FAIL;
                        break;
1443                 }
1444                 run->mmio.phys_addr =
1445                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1446                                                    host_cp0_badvaddr);
1447                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1448                         er = EMULATE_FAIL;
1449                         break;
1450                 }
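                /*
                 * From here we just describe the access in run->mmio; KVM
                 * then exits to userspace (e.g. QEMU), which performs the
                 * actual device write using these fields before resuming
                 * the guest.
                 */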
1451                 run->mmio.len = bytes;
1452                 run->mmio.is_write = 1;
1453                 vcpu->mmio_needed = 1;
1454                 vcpu->mmio_is_write = 1;
1455                 *(u8 *) data = vcpu->arch.gprs[rt];
1456                 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1457                           vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1458                           *(u8 *) data);
1459
1460                 break;
1461
1462         case sw_op:
1463                 bytes = 4;
1464                 if (bytes > sizeof(run->mmio.data)) {
1465                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1466                                bytes);
                        er = EMULATE_FAIL;
                        break;
1467                 }
1468                 run->mmio.phys_addr =
1469                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1470                                                    host_cp0_badvaddr);
1471                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1472                         er = EMULATE_FAIL;
1473                         break;
1474                 }
1475
1476                 run->mmio.len = bytes;
1477                 run->mmio.is_write = 1;
1478                 vcpu->mmio_needed = 1;
1479                 vcpu->mmio_is_write = 1;
1480                 *(u32 *) data = vcpu->arch.gprs[rt];
1481
1482                 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1483                           vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1484                           vcpu->arch.gprs[rt], *(u32 *) data);
1485                 break;
1486
1487         case sh_op:
1488                 bytes = 2;
1489                 if (bytes > sizeof(run->mmio.data)) {
1490                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1491                                bytes);
                        er = EMULATE_FAIL;
                        break;
1492                 }
1493                 run->mmio.phys_addr =
1494                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1495                                                    host_cp0_badvaddr);
1496                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1497                         er = EMULATE_FAIL;
1498                         break;
1499                 }
1500
1501                 run->mmio.len = bytes;
1502                 run->mmio.is_write = 1;
1503                 vcpu->mmio_needed = 1;
1504                 vcpu->mmio_is_write = 1;
1505                 *(u16 *) data = vcpu->arch.gprs[rt];
1506
1507                 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1508                           vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1509                           vcpu->arch.gprs[rt], *(u16 *) data);
1510                 break;
1511
1512         default:
1513                 kvm_err("Store not yet supported (inst=0x%08x)\n",
1514                         inst.word);
1515                 er = EMULATE_FAIL;
1516                 break;
1517         }
1518
1519         /* Rollback PC if emulation was unsuccessful */
1520         if (er == EMULATE_FAIL)
1521                 vcpu->arch.pc = curr_pc;
1522
1523         return er;
1524 }
1525
1526 enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1527                                             u32 cause, struct kvm_run *run,
1528                                             struct kvm_vcpu *vcpu)
1529 {
1530         enum emulation_result er = EMULATE_DO_MMIO;
1531         u32 op, rt;
1532         u32 bytes;
1533
1534         rt = inst.i_format.rt;
1535         op = inst.i_format.opcode;
1536
1537         vcpu->arch.pending_load_cause = cause;
1538         vcpu->arch.io_gpr = rt;
1539
1540         switch (op) {
1541         case lw_op:
1542                 bytes = 4;
1543                 if (bytes > sizeof(run->mmio.data)) {
1544                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1545                                bytes);
1546                         er = EMULATE_FAIL;
1547                         break;
1548                 }
1549                 run->mmio.phys_addr =
1550                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1551                                                    host_cp0_badvaddr);
1552                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1553                         er = EMULATE_FAIL;
1554                         break;
1555                 }
1556
1557                 run->mmio.len = bytes;
1558                 run->mmio.is_write = 0;
1559                 vcpu->mmio_needed = 1;
1560                 vcpu->mmio_is_write = 0;
1561                 break;
1562
1563         case lh_op:
1564         case lhu_op:
1565                 bytes = 2;
1566                 if (bytes > sizeof(run->mmio.data)) {
1567                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1568                                bytes);
1569                         er = EMULATE_FAIL;
1570                         break;
1571                 }
1572                 run->mmio.phys_addr =
1573                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1574                                                    host_cp0_badvaddr);
1575                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1576                         er = EMULATE_FAIL;
1577                         break;
1578                 }
1579
1580                 run->mmio.len = bytes;
1581                 run->mmio.is_write = 0;
1582                 vcpu->mmio_needed = 1;
1583                 vcpu->mmio_is_write = 0;
1584
1585                 if (op == lh_op)
1586                         vcpu->mmio_needed = 2;
1587                 else
1588                         vcpu->mmio_needed = 1;
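                /*
                 * mmio_needed doubles as a signedness flag: 2 makes
                 * kvm_mips_complete_mmio_load() sign-extend the data
                 * (lh/lb), while 1 means zero-extend (lhu/lbu).
                 */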
1589
1590                 break;
1591
1592         case lbu_op:
1593         case lb_op:
1594                 bytes = 1;
1595                 if (bytes > sizeof(run->mmio.data)) {
1596                         kvm_err("%s: bad MMIO length: %d\n", __func__,
1597                                bytes);
1598                         er = EMULATE_FAIL;
1599                         break;
1600                 }
1601                 run->mmio.phys_addr =
1602                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1603                                                    host_cp0_badvaddr);
1604                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1605                         er = EMULATE_FAIL;
1606                         break;
1607                 }
1608
1609                 run->mmio.len = bytes;
1610                 run->mmio.is_write = 0;
1611                 vcpu->mmio_is_write = 0;
1612
1613                 if (op == lb_op)
1614                         vcpu->mmio_needed = 2;
1615                 else
1616                         vcpu->mmio_needed = 1;
1617
1618                 break;
1619
1620         default:
1621                 kvm_err("Load not yet supported (inst=0x%08x)\n",
1622                         inst.word);
1623                 er = EMULATE_FAIL;
1624                 break;
1625         }
1626
1627         return er;
1628 }
1629
1630 enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1631                                              u32 *opc, u32 cause,
1632                                              struct kvm_run *run,
1633                                              struct kvm_vcpu *vcpu)
1634 {
1635         struct mips_coproc *cop0 = vcpu->arch.cop0;
1636         enum emulation_result er = EMULATE_DONE;
1637         u32 cache, op_inst, op, base;
1638         s16 offset;
1639         struct kvm_vcpu_arch *arch = &vcpu->arch;
1640         unsigned long va;
1641         unsigned long curr_pc;
1642
1643         /*
1644          * Update PC and hold onto current PC in case there is
1645          * an error and we want to rollback the PC
1646          */
1647         curr_pc = vcpu->arch.pc;
1648         er = update_pc(vcpu, cause);
1649         if (er == EMULATE_FAIL)
1650                 return er;
1651
1652         base = inst.i_format.rs;
1653         op_inst = inst.i_format.rt;
1654         if (cpu_has_mips_r6)
1655                 offset = inst.spec3_format.simmediate;
1656         else
1657                 offset = inst.i_format.simmediate;
1658         cache = op_inst & CacheOp_Cache;
1659         op = op_inst & CacheOp_Op;
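        /*
         * In the CACHE instruction encoding the rt field packs two
         * subfields: CacheOp_Cache selects which cache to target (e.g.
         * Cache_I, Cache_D) and CacheOp_Op selects the operation (e.g.
         * Index_Writeback_Inv, Hit_Invalidate_D).
         */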
1660
1661         va = arch->gprs[base] + offset;
1662
1663         kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1664                   cache, op, base, arch->gprs[base], offset);
1665
1666         /*
1667          * Index_Writeback_Inv is issued by Linux on startup to invalidate
1668          * the caches entirely by stepping through all the ways/indexes, so
1669          * instead of emulating each index we blast the whole host cache once
1670          */
1671         if (op == Index_Writeback_Inv) {
1672                 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1673                           vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1674                           arch->gprs[base], offset);
1675
1676                 if (cache == Cache_D)
1677                         r4k_blast_dcache();
1678                 else if (cache == Cache_I)
1679                         r4k_blast_icache();
1680                 else {
1681                         kvm_err("%s: unsupported CACHE INDEX operation\n",
1682                                 __func__);
1683                         return EMULATE_FAIL;
1684                 }
1685
1686 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1687                 kvm_mips_trans_cache_index(inst, opc, vcpu);
1688 #endif
1689                 goto done;
1690         }
1691
1692         preempt_disable();
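        /*
         * Preemption is disabled around the host TLB updates and cache
         * line flushes below so we cannot migrate to another CPU (with a
         * different TLB and cache state) between faulting the mapping in
         * and operating on it.
         */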
1693         if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1694                 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
1695                     kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
1696                         kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
1697                                 __func__, va, vcpu, read_c0_entryhi());
1698                         er = EMULATE_FAIL;
1699                         preempt_enable();
1700                         goto done;
1701                 }
1702         } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1703                    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1704                 int index;
1705
1706                 /* If an entry already exists then skip */
1707                 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
1708                         goto skip_fault;
1709
1710                 /*
1711                  * If address not in the guest TLB, then give the guest a fault,
1712                  * the resulting handler will do the right thing
1713                  */
1714                 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
1715                                                   (kvm_read_c0_guest_entryhi
1716                                                    (cop0) & KVM_ENTRYHI_ASID));
1717
1718                 if (index < 0) {
1719                         vcpu->arch.host_cp0_badvaddr = va;
1720                         vcpu->arch.pc = curr_pc;
1721                         er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1722                                                          vcpu);
1723                         preempt_enable();
1724                         goto dont_update_pc;
1725                 } else {
1726                         struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1727                         /*
1728                          * Check if the entry is valid, if not then setup a TLB
1729                          * invalid exception to the guest
1730                          */
1731                         if (!TLB_IS_VALID(*tlb, va)) {
1732                                 vcpu->arch.host_cp0_badvaddr = va;
1733                                 vcpu->arch.pc = curr_pc;
1734                                 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1735                                                                 run, vcpu);
1736                                 preempt_enable();
1737                                 goto dont_update_pc;
1738                         }
1739                         /*
1740                          * We fault an entry from the guest tlb to the
1741                          * shadow host TLB
1742                          */
1743                         if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
1744                                 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
1745                                         __func__, va, index, vcpu,
1746                                         read_c0_entryhi());
1747                                 er = EMULATE_FAIL;
1748                                 preempt_enable();
1749                                 goto done;
1750                         }
1751                 }
1752         } else {
1753                 kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1754                         cache, op, base, arch->gprs[base], offset);
1755                 er = EMULATE_FAIL;
1756                 preempt_enable();
1757                 goto done;
1758
1759         }
1760
1761 skip_fault:
1762         /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1763         if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
1764                 flush_dcache_line(va);
1765
1766 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1767                 /*
1768                  * Replace the CACHE instruction, with a SYNCI, not the same,
1769                  * but avoids a trap
1770                  */
1771                 kvm_mips_trans_cache_va(inst, opc, vcpu);
1772 #endif
1773         } else if (op_inst == Hit_Invalidate_I) {
1774                 flush_dcache_line(va);
1775                 flush_icache_line(va);
1776
1777 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1778                 /* Replace the CACHE instruction, with a SYNCI */
1779                 kvm_mips_trans_cache_va(inst, opc, vcpu);
1780 #endif
1781         } else {
1782                 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1783                         cache, op, base, arch->gprs[base], offset);
1784                 er = EMULATE_FAIL;
1785         }
1786
1787         preempt_enable();
1788 done:
1789         /* Rollback PC only if emulation was unsuccessful */
1790         if (er == EMULATE_FAIL)
1791                 vcpu->arch.pc = curr_pc;
1792
1793 dont_update_pc:
1794         /*
1795          * This is for exceptions whose emulation updates the PC, so do not
1796          * overwrite the PC under any circumstances
1797          */
1798
1799         return er;
1800 }
1801
1802 enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
1803                                             struct kvm_run *run,
1804                                             struct kvm_vcpu *vcpu)
1805 {
1806         union mips_instruction inst;
1807         enum emulation_result er = EMULATE_DONE;
1808
1809         /* Fetch the instruction. */
1810         if (cause & CAUSEF_BD)
1811                 opc += 1;
1812
1813         inst.word = kvm_get_inst(opc, vcpu);
1814
1815         switch (inst.r_format.opcode) {
1816         case cop0_op:
1817                 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1818                 break;
1819         case sb_op:
1820         case sh_op:
1821         case sw_op:
1822                 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1823                 break;
1824         case lb_op:
1825         case lbu_op:
1826         case lhu_op:
1827         case lh_op:
1828         case lw_op:
1829                 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1830                 break;
1831
1832 #ifndef CONFIG_CPU_MIPSR6
1833         case cache_op:
1834                 ++vcpu->stat.cache_exits;
1835                 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1836                 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1837                 break;
1838 #else
1839         case spec3_op:
1840                 switch (inst.spec3_format.func) {
1841                 case cache6_op:
1842                         ++vcpu->stat.cache_exits;
1843                         trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1844                         er = kvm_mips_emulate_cache(inst, opc, cause, run,
1845                                                     vcpu);
1846                         break;
1847                 default:
1848                         goto unknown;
1849                 }
1850                 break;
1851 unknown:
1852 #endif
1853
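        /*
         * On R6 builds the spec3 decoder above jumps here (unknown:) so
         * unrecognised function codes share this default error path.
         */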
1854         default:
1855                 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1856                         inst.word);
1857                 kvm_arch_vcpu_dump_regs(vcpu);
1858                 er = EMULATE_FAIL;
1859                 break;
1860         }
1861
1862         return er;
1863 }
1864
1865 enum emulation_result kvm_mips_emulate_syscall(u32 cause,
1866                                                u32 *opc,
1867                                                struct kvm_run *run,
1868                                                struct kvm_vcpu *vcpu)
1869 {
1870         struct mips_coproc *cop0 = vcpu->arch.cop0;
1871         struct kvm_vcpu_arch *arch = &vcpu->arch;
1872         enum emulation_result er = EMULATE_DONE;
1873
1874         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1875                 /* save old pc */
1876                 kvm_write_c0_guest_epc(cop0, arch->pc);
1877                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1878
1879                 if (cause & CAUSEF_BD)
1880                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1881                 else
1882                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1883
1884                 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1885
1886                 kvm_change_c0_guest_cause(cop0, (0xff),
1887                                           (EXCCODE_SYS << CAUSEB_EXCCODE));
1888
1889                 /* Set PC to the exception entry point */
1890                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1891
1892         } else {
1893                 kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
1894                 er = EMULATE_FAIL;
1895         }
1896
1897         return er;
1898 }
1899
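/*
 * The exception-delivery helpers below all mirror the architected sequence:
 * save the old PC in guest EPC, set Status.EXL, mirror the branch-delay
 * state into Cause.BD, write the ExcCode field and point the PC at the
 * guest's exception vector.  The TLB variants additionally load BadVAddr
 * and EntryHi for the guest and flush the shadow host TLB so no stale
 * translations survive.
 */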
1900 enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
1901                                                   u32 *opc,
1902                                                   struct kvm_run *run,
1903                                                   struct kvm_vcpu *vcpu)
1904 {
1905         struct mips_coproc *cop0 = vcpu->arch.cop0;
1906         struct kvm_vcpu_arch *arch = &vcpu->arch;
1907         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1908                         (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1909
1910         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1911                 /* save old pc */
1912                 kvm_write_c0_guest_epc(cop0, arch->pc);
1913                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1914
1915                 if (cause & CAUSEF_BD)
1916                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1917                 else
1918                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1919
1920                 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1921                           arch->pc);
1922
1923                 /* set pc to the exception entry point */
1924                 arch->pc = KVM_GUEST_KSEG0 + 0x0;
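                /*
                 * Offset 0x0 is the dedicated TLB refill vector, used only
                 * while EXL is clear; once EXL is set (the branch below)
                 * the general exception vector at 0x180 is used instead.
                 */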
1925
1926         } else {
1927                 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1928                           arch->pc);
1929
1930                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1931         }
1932
1933         kvm_change_c0_guest_cause(cop0, (0xff),
1934                                   (EXCCODE_TLBL << CAUSEB_EXCCODE));
1935
1936         /* setup badvaddr, context and entryhi registers for the guest */
1937         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1938         /* XXXKYMA: is the context register used by linux??? */
1939         kvm_write_c0_guest_entryhi(cop0, entryhi);
1940         /* Blow away the shadow host TLBs */
1941         kvm_mips_flush_host_tlb(1);
1942
1943         return EMULATE_DONE;
1944 }
1945
1946 enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
1947                                                  u32 *opc,
1948                                                  struct kvm_run *run,
1949                                                  struct kvm_vcpu *vcpu)
1950 {
1951         struct mips_coproc *cop0 = vcpu->arch.cop0;
1952         struct kvm_vcpu_arch *arch = &vcpu->arch;
1953         unsigned long entryhi =
1954                 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1955                 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1956
1957         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1958                 /* save old pc */
1959                 kvm_write_c0_guest_epc(cop0, arch->pc);
1960                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1961
1962                 if (cause & CAUSEF_BD)
1963                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1964                 else
1965                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1966
1967                 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1968                           arch->pc);
1969
1970                 /* set pc to the exception entry point */
1971                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1972
1973         } else {
1974                 kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
1975                           arch->pc);
1976                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1977         }
1978
1979         kvm_change_c0_guest_cause(cop0, (0xff),
1980                                   (EXCCODE_TLBL << CAUSEB_EXCCODE));
1981
1982         /* setup badvaddr, context and entryhi registers for the guest */
1983         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1984         /* XXXKYMA: is the context register used by linux??? */
1985         kvm_write_c0_guest_entryhi(cop0, entryhi);
1986         /* Blow away the shadow host TLBs */
1987         kvm_mips_flush_host_tlb(1);
1988
1989         return EMULATE_DONE;
1990 }
1991
1992 enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
1993                                                   u32 *opc,
1994                                                   struct kvm_run *run,
1995                                                   struct kvm_vcpu *vcpu)
1996 {
1997         struct mips_coproc *cop0 = vcpu->arch.cop0;
1998         struct kvm_vcpu_arch *arch = &vcpu->arch;
1999         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2000                         (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2001
2002         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2003                 /* save old pc */
2004                 kvm_write_c0_guest_epc(cop0, arch->pc);
2005                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2006
2007                 if (cause & CAUSEF_BD)
2008                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2009                 else
2010                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2011
2012                 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
2013                           arch->pc);
2014
2015                 /* Set PC to the exception entry point */
2016                 arch->pc = KVM_GUEST_KSEG0 + 0x0;
2017         } else {
2018                 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2019                           arch->pc);
2020                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2021         }
2022
2023         kvm_change_c0_guest_cause(cop0, (0xff),
2024                                   (EXCCODE_TLBS << CAUSEB_EXCCODE));
2025
2026         /* setup badvaddr, context and entryhi registers for the guest */
2027         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2028         /* XXXKYMA: is the context register used by linux??? */
2029         kvm_write_c0_guest_entryhi(cop0, entryhi);
2030         /* Blow away the shadow host TLBs */
2031         kvm_mips_flush_host_tlb(1);
2032
2033         return EMULATE_DONE;
2034 }
2035
2036 enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
2037                                                  u32 *opc,
2038                                                  struct kvm_run *run,
2039                                                  struct kvm_vcpu *vcpu)
2040 {
2041         struct mips_coproc *cop0 = vcpu->arch.cop0;
2042         struct kvm_vcpu_arch *arch = &vcpu->arch;
2043         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2044                 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2045
2046         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2047                 /* save old pc */
2048                 kvm_write_c0_guest_epc(cop0, arch->pc);
2049                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2050
2051                 if (cause & CAUSEF_BD)
2052                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2053                 else
2054                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2055
2056                 kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
2057                           arch->pc);
2058
2059                 /* Set PC to the exception entry point */
2060                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2061         } else {
2062                 kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
2063                           arch->pc);
2064                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2065         }
2066
2067         kvm_change_c0_guest_cause(cop0, (0xff),
2068                                   (EXCCODE_TLBS << CAUSEB_EXCCODE));
2069
2070         /* setup badvaddr, context and entryhi registers for the guest */
2071         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2072         /* XXXKYMA: is the context register used by linux??? */
2073         kvm_write_c0_guest_entryhi(cop0, entryhi);
2074         /* Blow away the shadow host TLBs */
2075         kvm_mips_flush_host_tlb(1);
2076
2077         return EMULATE_DONE;
2078 }
2079
2080 /* TLBMOD: store into address matching TLB with Dirty bit off */
2081 enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
2082                                              struct kvm_run *run,
2083                                              struct kvm_vcpu *vcpu)
2084 {
2085         enum emulation_result er = EMULATE_DONE;
2086 #ifdef DEBUG
2087         struct mips_coproc *cop0 = vcpu->arch.cop0;
2088         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2089                         (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2090         int index;
2091
2092         /* If address not in the guest TLB, then we are in trouble */
2093         index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
2094         if (index < 0) {
2095                 /* XXXKYMA Invalidate and retry */
2096                 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
2097                 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
2098                      __func__, entryhi);
2099                 kvm_mips_dump_guest_tlbs(vcpu);
2100                 kvm_mips_dump_host_tlbs();
2101                 return EMULATE_FAIL;
2102         }
2103 #endif
2104
2105         er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
2106         return er;
2107 }
2108
2109 enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
2110                                               u32 *opc,
2111                                               struct kvm_run *run,
2112                                               struct kvm_vcpu *vcpu)
2113 {
2114         struct mips_coproc *cop0 = vcpu->arch.cop0;
2115         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2116                         (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2117         struct kvm_vcpu_arch *arch = &vcpu->arch;
2118
2119         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2120                 /* save old pc */
2121                 kvm_write_c0_guest_epc(cop0, arch->pc);
2122                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2123
2124                 if (cause & CAUSEF_BD)
2125                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2126                 else
2127                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2128
2129                 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2130                           arch->pc);
2131
2132                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2133         } else {
2134                 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2135                           arch->pc);
2136                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2137         }
2138
2139         kvm_change_c0_guest_cause(cop0, (0xff),
2140                                   (EXCCODE_MOD << CAUSEB_EXCCODE));
2141
2142         /* setup badvaddr, context and entryhi registers for the guest */
2143         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2144         /* XXXKYMA: is the context register used by linux??? */
2145         kvm_write_c0_guest_entryhi(cop0, entryhi);
2146         /* Blow away the shadow host TLBs */
2147         kvm_mips_flush_host_tlb(1);
2148
2149         return EMULATE_DONE;
2150 }
2151
2152 enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
2153                                                u32 *opc,
2154                                                struct kvm_run *run,
2155                                                struct kvm_vcpu *vcpu)
2156 {
2157         struct mips_coproc *cop0 = vcpu->arch.cop0;
2158         struct kvm_vcpu_arch *arch = &vcpu->arch;
2159
2160         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2161                 /* save old pc */
2162                 kvm_write_c0_guest_epc(cop0, arch->pc);
2163                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2164
2165                 if (cause & CAUSEF_BD)
2166                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2167                 else
2168                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2169
2170         }
2171
2172         arch->pc = KVM_GUEST_KSEG0 + 0x180;
2173
2174         kvm_change_c0_guest_cause(cop0, (0xff),
2175                                   (EXCCODE_CPU << CAUSEB_EXCCODE));
2176         kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
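        /*
         * Cause.CE records which coprocessor was unusable; writing 1 here
         * points the guest at CP1 (the FPU), so its handler can choose to
         * enable, restore or emulate the FPU.
         */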
2177
2178         return EMULATE_DONE;
2179 }
2180
2181 enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
2182                                               u32 *opc,
2183                                               struct kvm_run *run,
2184                                               struct kvm_vcpu *vcpu)
2185 {
2186         struct mips_coproc *cop0 = vcpu->arch.cop0;
2187         struct kvm_vcpu_arch *arch = &vcpu->arch;
2188         enum emulation_result er = EMULATE_DONE;
2189
2190         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2191                 /* save old pc */
2192                 kvm_write_c0_guest_epc(cop0, arch->pc);
2193                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2194
2195                 if (cause & CAUSEF_BD)
2196                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2197                 else
2198                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2199
2200                 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2201
2202                 kvm_change_c0_guest_cause(cop0, (0xff),
2203                                           (EXCCODE_RI << CAUSEB_EXCCODE));
2204
2205                 /* Set PC to the exception entry point */
2206                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2207
2208         } else {
2209                 kvm_err("Trying to deliver RI when EXL is already set\n");
2210                 er = EMULATE_FAIL;
2211         }
2212
2213         return er;
2214 }
2215
2216 enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
2217                                               u32 *opc,
2218                                               struct kvm_run *run,
2219                                               struct kvm_vcpu *vcpu)
2220 {
2221         struct mips_coproc *cop0 = vcpu->arch.cop0;
2222         struct kvm_vcpu_arch *arch = &vcpu->arch;
2223         enum emulation_result er = EMULATE_DONE;
2224
2225         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2226                 /* save old pc */
2227                 kvm_write_c0_guest_epc(cop0, arch->pc);
2228                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2229
2230                 if (cause & CAUSEF_BD)
2231                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2232                 else
2233                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2234
2235                 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2236
2237                 kvm_change_c0_guest_cause(cop0, (0xff),
2238                                           (EXCCODE_BP << CAUSEB_EXCCODE));
2239
2240                 /* Set PC to the exception entry point */
2241                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2242
2243         } else {
2244                 kvm_err("Trying to deliver BP when EXL is already set\n");
2245                 er = EMULATE_FAIL;
2246         }
2247
2248         return er;
2249 }
2250
2251 enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
2252                                                 u32 *opc,
2253                                                 struct kvm_run *run,
2254                                                 struct kvm_vcpu *vcpu)
2255 {
2256         struct mips_coproc *cop0 = vcpu->arch.cop0;
2257         struct kvm_vcpu_arch *arch = &vcpu->arch;
2258         enum emulation_result er = EMULATE_DONE;
2259
2260         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2261                 /* save old pc */
2262                 kvm_write_c0_guest_epc(cop0, arch->pc);
2263                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2264
2265                 if (cause & CAUSEF_BD)
2266                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2267                 else
2268                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2269
2270                 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2271
2272                 kvm_change_c0_guest_cause(cop0, (0xff),
2273                                           (EXCCODE_TR << CAUSEB_EXCCODE));
2274
2275                 /* Set PC to the exception entry point */
2276                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2277
2278         } else {
2279                 kvm_err("Trying to deliver TRAP when EXL is already set\n");
2280                 er = EMULATE_FAIL;
2281         }
2282
2283         return er;
2284 }
2285
2286 enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
2287                                                   u32 *opc,
2288                                                   struct kvm_run *run,
2289                                                   struct kvm_vcpu *vcpu)
2290 {
2291         struct mips_coproc *cop0 = vcpu->arch.cop0;
2292         struct kvm_vcpu_arch *arch = &vcpu->arch;
2293         enum emulation_result er = EMULATE_DONE;
2294
2295         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2296                 /* save old pc */
2297                 kvm_write_c0_guest_epc(cop0, arch->pc);
2298                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2299
2300                 if (cause & CAUSEF_BD)
2301                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2302                 else
2303                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2304
2305                 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2306
2307                 kvm_change_c0_guest_cause(cop0, (0xff),
2308                                           (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
2309
2310                 /* Set PC to the exception entry point */
2311                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2312
2313         } else {
2314                 kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2315                 er = EMULATE_FAIL;
2316         }
2317
2318         return er;
2319 }
2320
2321 enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
2322                                                u32 *opc,
2323                                                struct kvm_run *run,
2324                                                struct kvm_vcpu *vcpu)
2325 {
2326         struct mips_coproc *cop0 = vcpu->arch.cop0;
2327         struct kvm_vcpu_arch *arch = &vcpu->arch;
2328         enum emulation_result er = EMULATE_DONE;
2329
2330         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2331                 /* save old pc */
2332                 kvm_write_c0_guest_epc(cop0, arch->pc);
2333                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2334
2335                 if (cause & CAUSEF_BD)
2336                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2337                 else
2338                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2339
2340                 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2341
2342                 kvm_change_c0_guest_cause(cop0, (0xff),
2343                                           (EXCCODE_FPE << CAUSEB_EXCCODE));
2344
2345                 /* Set PC to the exception entry point */
2346                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2347
2348         } else {
2349                 kvm_err("Trying to deliver FPE when EXL is already set\n");
2350                 er = EMULATE_FAIL;
2351         }
2352
2353         return er;
2354 }
2355
2356 enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
2357                                                   u32 *opc,
2358                                                   struct kvm_run *run,
2359                                                   struct kvm_vcpu *vcpu)
2360 {
2361         struct mips_coproc *cop0 = vcpu->arch.cop0;
2362         struct kvm_vcpu_arch *arch = &vcpu->arch;
2363         enum emulation_result er = EMULATE_DONE;
2364
2365         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2366                 /* save old pc */
2367                 kvm_write_c0_guest_epc(cop0, arch->pc);
2368                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2369
2370                 if (cause & CAUSEF_BD)
2371                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2372                 else
2373                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2374
2375                 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2376
2377                 kvm_change_c0_guest_cause(cop0, (0xff),
2378                                           (EXCCODE_MSADIS << CAUSEB_EXCCODE));
2379
2380                 /* Set PC to the exception entry point */
2381                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2382
2383         } else {
2384                 kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2385                 er = EMULATE_FAIL;
2386         }
2387
2388         return er;
2389 }
2390
2391 enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
2392                                          struct kvm_run *run,
2393                                          struct kvm_vcpu *vcpu)
2394 {
2395         struct mips_coproc *cop0 = vcpu->arch.cop0;
2396         struct kvm_vcpu_arch *arch = &vcpu->arch;
2397         enum emulation_result er = EMULATE_DONE;
2398         unsigned long curr_pc;
2399         union mips_instruction inst;
2400
2401         /*
2402          * Update PC and hold onto current PC in case there is
2403          * an error and we want to rollback the PC
2404          */
2405         curr_pc = vcpu->arch.pc;
2406         er = update_pc(vcpu, cause);
2407         if (er == EMULATE_FAIL)
2408                 return er;
2409
2410         /* Fetch the instruction. */
2411         if (cause & CAUSEF_BD)
2412                 opc += 1;
2413
2414         inst.word = kvm_get_inst(opc, vcpu);
2415
2416         if (inst.word == KVM_INVALID_INST) {
2417                 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
2418                 return EMULATE_FAIL;
2419         }
2420
2421         if (inst.r_format.opcode == spec3_op &&
2422             inst.r_format.func == rdhwr_op &&
2423             inst.r_format.rs == 0 &&
2424             (inst.r_format.re >> 3) == 0) {
2425                 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2426                 int rd = inst.r_format.rd;
2427                 int rt = inst.r_format.rt;
2428                 int sel = inst.r_format.re & 0x7;
2429
2430                 /* If usermode, check RDHWR rd is allowed by guest HWREna */
2431                 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2432                         kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2433                                   rd, opc);
2434                         goto emulate_ri;
2435                 }
2436                 switch (rd) {
2437                 case MIPS_HWR_CPUNUM:           /* CPU number */
2438                         arch->gprs[rt] = vcpu->vcpu_id;
2439                         break;
2440                 case MIPS_HWR_SYNCISTEP:        /* SYNCI length */
2441                         arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2442                                              current_cpu_data.icache.linesz);
2443                         break;
2444                 case MIPS_HWR_CC:               /* Read count register */
2445                         arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
2446                         break;
2447                 case MIPS_HWR_CCRES:            /* Count register resolution */
2448                         switch (current_cpu_data.cputype) {
2449                         case CPU_20KC:
2450                         case CPU_25KF:
2451                                 arch->gprs[rt] = 1;
2452                                 break;
2453                         default:
2454                                 arch->gprs[rt] = 2;
2455                         }
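                        /*
                         * The values above are the Count register
                         * resolution: the 20Kc/25Kf bump Count every CPU
                         * cycle, while the other cores handled here bump
                         * it every second cycle.
                         */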
2456                         break;
2457                 case MIPS_HWR_ULR:              /* Read UserLocal register */
2458                         arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2459                         break;
2460
2461                 default:
2462                         kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2463                         goto emulate_ri;
2464                 }
2465
2466                 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
2467                               vcpu->arch.gprs[rt]);
2468         } else {
2469                 kvm_debug("Emulate RI not supported @ %p: %#x\n",
2470                           opc, inst.word);
2471                 goto emulate_ri;
2472         }
2473
2474         return EMULATE_DONE;
2475
2476 emulate_ri:
2477         /*
2478          * Rollback PC (if in branch delay slot then the PC already points to
2479          * branch target), and pass the RI exception to the guest OS.
2480          */
2481         vcpu->arch.pc = curr_pc;
2482         return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2483 }
2484
2485 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2486                                                   struct kvm_run *run)
2487 {
2488         unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2489         enum emulation_result er = EMULATE_DONE;
2490
2491         if (run->mmio.len > sizeof(*gpr)) {
2492                 kvm_err("Bad MMIO length: %d\n", run->mmio.len);
2493                 er = EMULATE_FAIL;
2494                 goto done;
2495         }
2496
2497         er = update_pc(vcpu, vcpu->arch.pending_load_cause);
2498         if (er == EMULATE_FAIL)
2499                 return er;
2500
2501         switch (run->mmio.len) {
2502         case 4:
2503                 *gpr = *(s32 *) run->mmio.data;
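                /*
                 * On 64-bit hosts the s32 cast sign-extends the value into
                 * the GPR, as a real MIPS64 lw would; on 32-bit hosts it
                 * is a plain copy.
                 */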
2504                 break;
2505
2506         case 2:
2507                 if (vcpu->mmio_needed == 2)
2508                         *gpr = *(s16 *) run->mmio.data;
2509                 else
2510                         *gpr = *(u16 *)run->mmio.data;
2511
2512                 break;
2513         case 1:
2514                 if (vcpu->mmio_needed == 2)
2515                         *gpr = *(s8 *) run->mmio.data;
2516                 else
2517                         *gpr = *(u8 *) run->mmio.data;
2518                 break;
2519         }
2520
2521         if (vcpu->arch.pending_load_cause & CAUSEF_BD)
2522                 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2523                           vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
2524                           vcpu->mmio_needed);
2525
2526 done:
2527         return er;
2528 }
2529
2530 static enum emulation_result kvm_mips_emulate_exc(u32 cause,
2531                                                   u32 *opc,
2532                                                   struct kvm_run *run,
2533                                                   struct kvm_vcpu *vcpu)
2534 {
2535         u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2536         struct mips_coproc *cop0 = vcpu->arch.cop0;
2537         struct kvm_vcpu_arch *arch = &vcpu->arch;
2538         enum emulation_result er = EMULATE_DONE;
2539
2540         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2541                 /* save old pc */
2542                 kvm_write_c0_guest_epc(cop0, arch->pc);
2543                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2544
2545                 if (cause & CAUSEF_BD)
2546                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2547                 else
2548                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2549
2550                 kvm_change_c0_guest_cause(cop0, (0xff),
2551                                           (exccode << CAUSEB_EXCCODE));
2552
2553                 /* Set PC to the exception entry point */
2554                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2555                 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2556
2557                 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2558                           exccode, kvm_read_c0_guest_epc(cop0),
2559                           kvm_read_c0_guest_badvaddr(cop0));
2560         } else {
2561                 kvm_err("Trying to deliver EXC when EXL is already set\n");
2562                 er = EMULATE_FAIL;
2563         }
2564
2565         return er;
2566 }
2567
2568 enum emulation_result kvm_mips_check_privilege(u32 cause,
2569                                                u32 *opc,
2570                                                struct kvm_run *run,
2571                                                struct kvm_vcpu *vcpu)
2572 {
2573         enum emulation_result er = EMULATE_DONE;
2574         u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2575         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2576
2577         int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
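        /*
         * Only faults taken from guest user mode need vetting here: the
         * cases below whitelist what user code may legitimately trigger,
         * and anything else becomes EMULATE_PRIV_FAIL so the guest sees a
         * (possibly reclassified) exception instead of host-side handling.
         */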
2578
2579         if (usermode) {
2580                 switch (exccode) {
2581                 case EXCCODE_INT:
2582                 case EXCCODE_SYS:
2583                 case EXCCODE_BP:
2584                 case EXCCODE_RI:
2585                 case EXCCODE_TR:
2586                 case EXCCODE_MSAFPE:
2587                 case EXCCODE_FPE:
2588                 case EXCCODE_MSADIS:
2589                         break;
2590
2591                 case EXCCODE_CPU:
2592                         if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2593                                 er = EMULATE_PRIV_FAIL;
2594                         break;
2595
2596                 case EXCCODE_MOD:
2597                         break;
2598
2599                 case EXCCODE_TLBL:
2600                         /*
2601                          * If we are accessing Guest kernel space, then send an
2602                          * address error exception to the guest
2603                          */
2604                         if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2605                                 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2606                                           badvaddr);
2607                                 cause &= ~0xff;
2608                                 cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
2609                                 er = EMULATE_PRIV_FAIL;
2610                         }
2611                         break;
2612
2613                 case EXCCODE_TLBS:
2614                         /*
2615                          * If we are accessing Guest kernel space, then send an
2616                          * address error exception to the guest
2617                          */
2618                         if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2619                                 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2620                                           badvaddr);
2621                                 cause &= ~0xff;
2622                                 cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
2623                                 er = EMULATE_PRIV_FAIL;
2624                         }
2625                         break;
2626
2627                 case EXCCODE_ADES:
2628                         kvm_debug("%s: address error ST @ %#lx\n", __func__,
2629                                   badvaddr);
2630                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2631                                 cause &= ~0xff;
2632                                 cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
2633                         }
2634                         er = EMULATE_PRIV_FAIL;
2635                         break;
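                /*
                 * An address error on the commpage (here and in the ADEL
                 * case below) is reclassified as a TLB fault, presumably so
                 * the guest's normal TLB handling path deals with it; the
                 * rewritten cause is what gets delivered at the end of this
                 * function.
                 */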
2636                 case EXCCODE_ADEL:
2637                         kvm_debug("%s: address error LD @ %#lx\n", __func__,
2638                                   badvaddr);
2639                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2640                                 cause &= ~0xff;
2641                                 cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
2642                         }
2643                         er = EMULATE_PRIV_FAIL;
2644                         break;
2645                 default:
2646                         er = EMULATE_PRIV_FAIL;
2647                         break;
2648                 }
2649         }
2650
2651         if (er == EMULATE_PRIV_FAIL)
2652                 kvm_mips_emulate_exc(cause, opc, run, vcpu);
2653
2654         return er;
2655 }
2656
2657 /*
2658  * A User Address (UA) fault could happen if
2659  * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
2660  *     case we pass on the fault to the guest kernel and let it handle it.
2661  * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
2662  *     case we inject the TLB from the Guest TLB into the shadow host TLB
2663  */
2664 enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
2665                                               u32 *opc,
2666                                               struct kvm_run *run,
2667                                               struct kvm_vcpu *vcpu)
2668 {
2669         enum emulation_result er = EMULATE_DONE;
2670         u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2671         unsigned long va = vcpu->arch.host_cp0_badvaddr;
2672         int index;
2673
2674         kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
2675                   vcpu->arch.host_cp0_badvaddr);
2676
2677         /*
2678          * KVM would not have got the exception if this entry was valid in the
2679          * shadow host TLB. Check the Guest TLB, if the entry is not there then
2680          * send the guest an exception. The guest exc handler should then inject
2681          * an entry into the guest TLB.
2682          */
2683         index = kvm_mips_guest_tlb_lookup(vcpu,
2684                       (va & VPN2_MASK) |
2685                       (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
2686                        KVM_ENTRYHI_ASID));
2687         if (index < 0) {
2688                 if (exccode == EXCCODE_TLBL) {
2689                         er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2690                 } else if (exccode == EXCCODE_TLBS) {
2691                         er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2692                 } else {
2693                         kvm_err("%s: invalid exc code: %d\n", __func__,
2694                                 exccode);
2695                         er = EMULATE_FAIL;
2696                 }
2697         } else {
2698                 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2699
2700                 /*
2701                  * Check if the entry is valid, if not then setup a TLB invalid
2702                  * exception to the guest
2703                  */
2704                 if (!TLB_IS_VALID(*tlb, va)) {
2705                         if (exccode == EXCCODE_TLBL) {
2706                                 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2707                                                                 vcpu);
2708                         } else if (exccode == EXCCODE_TLBS) {
2709                                 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2710                                                                 vcpu);
2711                         } else {
2712                                 kvm_err("%s: invalid exc code: %d\n", __func__,
2713                                         exccode);
2714                                 er = EMULATE_FAIL;
2715                         }
2716                 } else {
2717                         kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2718                                   tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
2719                         /*
2720                          * OK we have a Guest TLB entry, now inject it into the
2721                          * shadow host TLB
2722                          */
2723                         if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
2724                                 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
2725                                         __func__, va, index, vcpu,
2726                                         read_c0_entryhi());
2727                                 er = EMULATE_FAIL;
2728                         }
2729                 }
2730         }
2731
2732         return er;
2733 }