arch/mips/kvm/trap_emul.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vmalloc.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

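/*
 * Guest virtual to guest physical translation for trap-and-emulate mode:
 * guest CKSEG0/CKSEG1 are direct-mapped windows onto guest physical memory,
 * so the GPA is simply the GVA with the segment bits stripped (CPHYSADDR).
 * For example, a CKSEG0 GVA of 0x80001000 yields GPA 0x00001000. Mapped
 * segments go through the guest TLB instead and are rejected here.
 */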
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	gva_t kseg = KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}

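/*
 * A Coprocessor Unusable exception with Cause.CE == 1 means the guest
 * touched the FPU (COP1). If the guest has no FPU, or has left Status.CU1
 * clear, the exception is re-delivered to the guest; otherwise the vcpu
 * takes ownership of the host FPU and the guest's FPU state is restored.
 * CE == 0 (COP0) accesses are trapped and emulated, which is how this
 * implementation virtualizes privileged state; an emulated WAIT is
 * reported to userspace as KVM_EXIT_INTR.
 */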
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

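/*
 * TLB Modified faults are classified by the *guest* segment of the faulting
 * address: useg/kseg2,3 addresses go to the common TLB-mod handler (which
 * either reflects the fault to the guest or fixes up the shadow host TLB),
 * guest kseg0 addresses should never fault this way and abort to userspace,
 * and anything else is reported as an internal error.
 */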
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * XXXKYMA: The guest kernel does not expect to get this fault
		 * when we are not using HIGHMEM. Need to address this in a
		 * HIGHMEM kernel
		 */
		kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	} else {
		kvm_err("Illegal TLB Mod fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

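/*
 * Note on the first case below: the commpage is a KVM-provided page mapped
 * at KVM_GUEST_COMMPAGE_ADDR that backs emulated CP0 state, so a
 * kernel-mode guest fault on it is satisfied by KVM directly rather than
 * being reflected to the guest.
 */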
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault. This can happen if:
		 * (1) the TLB entry is not present/valid in both the guest
		 *     and the shadow host TLBs; in this case we pass the
		 *     fault on to the guest kernel and let it handle it.
		 * (2) the TLB entry is present in the guest TLB but not in
		 *     the shadow host TLB; in this case we inject the entry
		 *     from the guest TLB into the shadow host TLB.
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		kvm_debug("Emulate %s MMIO space\n",
			  store ? "Store to" : "Load from");
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate %s MMIO space failed\n",
				store ? "Store to" : "Load from");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

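/* Thin wrappers so store and load TLB misses share one handler. */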
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

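/*
 * Address Error exceptions: a guest access to (host) CKSEG0/1 that traps
 * here (for stores, additionally in guest kernel mode) is treated as MMIO
 * and emulated, completing in userspace via KVM_EXIT_MMIO; anything else
 * is unexpected and aborts to userspace.
 */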
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		kvm_debug("Emulate Store to MMIO space\n");
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Store to MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Load from MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		er = EMULATE_FAIL;
	}
	return ret;
}

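/*
 * The handlers below all follow the same pattern: emulate delivery of the
 * exception to the guest; if emulation fails, report an internal error to
 * userspace.
 */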
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in the guest, or the FPU is enabled without FR=1
		 * (MSA requires the 64-bit FPU register model): deliver a
		 * guest Reserved Instruction exception.
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest: deliver a guest MSA Disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}

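/*
 * 0xfc marks CP0_KScratch registers 2..7 as usable by the guest; the mask
 * uses the same bit layout as the Config4.KScrExist field.
 */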
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.kscratch_enabled = 0xfc;

	return 0;
}

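/*
 * PRId 0x00019300 identifies a MIPS Technologies 24Kc (company ID 0x01,
 * processor ID 0x93, revision 0), a familiar pre-R6 CPU for guests to
 * recognise. On R6 a generic QEMU-style PRId is advertised instead, since
 * the 24Kc predates R6.
 */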
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Architecture-specific setup: program the guest's config registers
	 * so that it comes up as expected.
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
	/*
	 * Config1 exists (M bit). Caches are cacheable, noncoherent,
	 * write-back, write allocate. Endianness, architecture revision and
	 * the virtually tagged icache flag must match the host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* Clear the bits for features we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/*
	 * Config2: only the M bit, so Config3 exists but no tertiary or
	 * secondary caches are implemented.
	 */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Config3: Config4 exists (M), UserLocal is implemented (ULRI) */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Config4: only advertises that Config5 exists */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* Config5: all zero, M clear, so no Config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Set up IntCtl defaults: compatibility mode for timer interrupts
	 * (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/*
	 * Use the vcpu id as CPUNum in the EBase register so SMP guests can
	 * tell their CPUs apart.
	 */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	return 0;
}

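/*
 * Trap-and-emulate exposes no implementation-specific ONE_REG registers
 * beyond the common set, so there is nothing extra to count or copy here.
 */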
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					  u64 __user *indices)
{
	return 0;
}

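/*
 * The KVM_REG_MIPS_COUNT_* ids expose the emulated CP0_Count timer: its
 * control bits (including the master disable), the monotonic nanosecond
 * time at which it resumes when re-enabled, and its emulated frequency
 * in Hz.
 */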
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

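/*
 * For the writable Config registers below, change = (cur ^ v) & wrmask
 * isolates the bits the caller is allowed to flip, and cur ^ change then
 * applies exactly those flips, silently preserving the read-only bits.
 */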
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

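/*
 * kvm_lose_fpu() writes any live FPU/MSA context back into the vcpu
 * before its register state is read, so userspace sees current values.
 */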
static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
	kvm_lose_fpu(vcpu);

	return 0;
}

static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}

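/*
 * The trap-and-emulate implementation of the kvm_mips_callbacks interface;
 * the generic MIPS KVM code dispatches guest exits and housekeeping
 * through this table.
 */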
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.num_regs = kvm_trap_emul_num_regs,
	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
	.vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};

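/*
 * Entry point used by the generic MIPS KVM initialisation code to install
 * this implementation's callback table.
 */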
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}