/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
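
/*
 * Per-vcpu statistics exported via debugfs: each entry below maps a file
 * name to a counter embedded in struct kvm_vcpu (located via VCPU_STAT).
 */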
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
	/* every s390 is virtualization enabled ;-) */

void kvm_arch_hardware_disable(void *garbage)

int kvm_arch_hardware_setup(void)

void kvm_arch_hardware_unsetup(void)

void kvm_arch_check_processor_compat(void *rtn)

int kvm_arch_init(void *opaque)

void kvm_arch_exit(void)

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();

int kvm_dev_ioctl_check_extension(long ext)
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_S390_COW:
		r = sclp_get_fac85() & 0x2;

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))

		r = kvm_s390_inject_vm(kvm, &s390int);

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))

	rc = s390_enable_sie();

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");
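
	/*
	 * User-controlled (ucontrol) VMs manage the guest address space from
	 * userspace, so no kernel gmap is created for them; all other VMs get
	 * one gmap that every vcpu of the guest shares.
	 */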
	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);

	debug_unregister(kvm->arch.dbf);

	free_page((unsigned long)(kvm->arch.sca));

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);

static void kvm_free_vcpus(struct kvm *kvm)
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);

void kvm_arch_sync_events(struct kvm *kvm)

void kvm_arch_destroy_vm(struct kvm *kvm)
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
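
/*
 * vcpu_load/vcpu_put run when a vcpu is scheduled in or out: they swap the
 * host and guest floating-point and access registers and enable or disable
 * the guest address space (gmap) for the current task.
 */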
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |

	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
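	/*
	 * The hrtimer backs the guest's clock comparator: when it fires,
	 * kvm_s390_idle_wakeup schedules the tasklet that wakes the vcpu
	 * from its enabled wait.
	 */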
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
	struct kvm_vcpu *vcpu;

	if (id >= KVM_MAX_VCPUS)

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
				       get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block)

	vcpu->arch.sie_block->icpua = id;
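	/*
	 * For non-ucontrol VMs, publish this vcpu's SIE block in the system
	 * control area (SCA) shared by all vcpus of the guest.
	 */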
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {

		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	free_page((unsigned long)(vcpu->arch.sie_block));

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
	/* kvm common code refers to this, but never calls it */

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
	/* kvm common code refers to this, but never calls it */

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
	kvm_s390_vcpu_initial_reset(vcpu);

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))

	vcpu->run->psw_mask = psw.mask;
	vcpu->run->psw_addr = psw.addr;

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
	return -EINVAL; /* not implemented yet */

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
	return -EINVAL; /* not implemented yet */

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
	return -EINVAL; /* not implemented yet */

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
	return -EINVAL; /* not implemented yet */

static int __vcpu_run(struct kvm_vcpu *vcpu)
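	/*
	 * General purpose registers 14 and 15 are shadowed in the SIE block
	 * (gg14/gg15); copy them in before entering SIE and back out after
	 * the exit.
	 */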
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (test_thread_flag(TIF_MCCK_PENDING))

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;

	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);

	if (kvm_is_ucontrol(vcpu->kvm)) {
		rc = SIE_INTERCEPT_UCONTROL;

		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

		rc = __vcpu_run(vcpu);

		if (kvm_is_ucontrol(vcpu->kvm))

		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;

#ifdef CONFIG_KVM_S390_UCONTROL
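	/*
	 * For user-controlled VMs the kernel does not resolve guest faults
	 * itself; the faulting address is reported to userspace as a
	 * segment-translation exception (program code 0x10).
	 */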
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
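
/*
 * The layout written below follows struct save_area; location 163 receives
 * the architectural-mode byte when one of the two special addresses above
 * is used.
 */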
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
	unsigned char archmode = 1;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))

		addr = SAVE_AREA_BASE;

	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))

		addr = SAVE_AREA_BASE;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))

		r = kvm_s390_inject_vcpu(vcpu, &s390int);

	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);

	case KVM_S390_SET_INITIAL_PSW: {

		if (copy_from_user(&psw, argp, sizeof(psw)))

		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);

	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);

#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {

		if (!kvm_is_ucontrol(vcpu->kvm)) {

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);

	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {

		if (!kvm_is_ucontrol(vcpu->kvm)) {

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
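
	/*
	 * KVM_S390_VCPU_FAULT lets userspace fault a guest address into the
	 * gmap, e.g. to resolve faults reported to user-controlled VMs.
	 */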
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
	    && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);

	return VM_FAULT_SIGBUS;

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->guest_phys_addr)

	if (mem->userspace_addr & 0xffffful)

	if (mem->memory_size & 0xffffful)

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");

void kvm_arch_flush_shadow(struct kvm *kvm)

static int __init kvm_s390_init(void)
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	/*
	 * guests can ask for up to 255+1 double words, so we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL | GFP_DMA);

	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;

static void __exit kvm_s390_exit(void)
	free_page((unsigned long) facilities);

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);