/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
#include <asm/cpacf.h>
#include <asm/timex.h>

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};
/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};
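/*
 * Illustrative helper (an assumption, not in the original source):
 * facility bits are numbered MSB-first, so facility nr lives in
 * doubleword nr / 64 at bit position 63 - (nr % 64). A sketch of how
 * the mask above would be tested:
 */
static inline unsigned long kvm_s390_fac_mask_test(unsigned long nr)
{
	return (kvm_s390_fac_list_mask[nr >> 6] >> (63 - (nr & 63))) & 1;
}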
unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc;
}
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);
	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will cause
	 * pages to be detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
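/*
 * Illustrative helper (an assumption, mirroring the PLO loop in
 * kvm_s390_cpu_feat_init() above): the query bitmaps are MSB-first, so
 * subfunction i sits in byte i / 8 at bit position 7 - (i % 8).
 */
static inline int plo_subfunc_available(unsigned char i)
{
	return (kvm_s390_available_subfunc.plo[i >> 3] >> (7 - (i & 7))) & 1;
}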
int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
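/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * VM capabilities are enabled with KVM_ENABLE_CAP on the VM fd, and the
 * model-changing ones must run before any VCPU is created:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_VECTOR_REGISTERS,
 *	};
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */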
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
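/*
 * Illustrative userspace sketch (an assumption): the crypto controls
 * above are reached through the VM device-attribute interface:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CRYPTO,
 *		.attr  = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */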
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}
static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}
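/*
 * Illustrative userspace sketch (an assumption): reading storage keys
 * through the ioctl that lands in kvm_s390_get_skeys():
 *
 *	uint8_t keybuf[NPAGES];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn     = 0,
 *		.count         = NPAGES,
 *		.skeydata_addr = (__u64)(unsigned long)keybuf,
 *	};
 *	r = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *	(r == KVM_S390_GET_SKEYS_NONE means the guest does not use keys)
 */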
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}
static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}
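/*
 * Illustrative note (an assumption about the SCA formats): a basic SCA
 * holds KVM_S390_BSCA_CPU_SLOTS (64) entries, an extended SCA holds
 * KVM_S390_ESCA_CPU_SLOTS (248); sca_can_add_vcpu() below performs the
 * switch lazily, on the first VCPU id that no longer fits the basic SCA.
 */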
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}
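/*
 * Illustrative note (an assumption): raw_read_seqcount() may return an
 * odd value while an update is in flight; retrying against "seq & ~1"
 * simply loops until a stable, even snapshot has been read, and the
 * WARN_ON_ONCE above catches the one case that could deadlock: the
 * VCPU thread reading while it is itself mid-update.
 */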
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= 0x02;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= 0x04;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= 0x08;
	vcpu->arch.sie_block->eca = 0x1002000U;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= 0x80000000U;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= 0x40000000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
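/*
 * Illustrative note: the prefix area of a CPU redirects the first two
 * 4K pages of guest absolute storage (0x0-0x1fff), which is why the
 * overlap test above matches against prefix .. prefix + 2*PAGE_SIZE - 1.
 */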
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
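/*
 * Illustrative userspace sketch (an assumption): the registers handled
 * above are reached through the generic ONE_REG API on the VCPU fd:
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */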
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure the new values will be lazily loaded */
	save_fpu_regs();
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	current->thread.fpu.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
	else
		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
	else
		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
	fpu->fpc = current->thread.fpu.fpc;
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;
	if (!sclp.has_gpere)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

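/*
 * IBS pays off only while a single vcpu is runnable: vcpu start/stop
 * further below queue KVM_REQ_ENABLE_IBS/KVM_REQ_DISABLE_IBS accordingly,
 * and kvm_s390_handle_requests() applies the requests before the next
 * SIE entry.
 */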
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
				  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
				      &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

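/*
 * The SIE computes the guest TOD as host TOD + epoch, so setting the guest
 * clock to "tod" means storing "tod - get_tod_clock()" as the epoch. All
 * vcpus are blocked while the epoch is updated so that they pick up the
 * new value on their next SIE entry.
 */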
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

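/*
 * Pfault token protocol: a PFAULT_INIT external interrupt tells the guest
 * that a host page fault could not be resolved immediately; the matching
 * PFAULT_DONE interrupt is injected as a floating interrupt at VM scope
 * once the page is available. Both carry the guest's token so the guest
 * can pair init and completion.
 */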
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

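/*
 * Prepare for a SIE entry: finish the housekeeping for completed pfaults,
 * deliver pending interrupts (not for user controlled VMs), process vcpu
 * requests and arm PER patching when guest debugging is active. A non-zero
 * return value aborts the entry.
 */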
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

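/*
 * The inner run loop: kvm->srcu is held for the pre/post handling but is
 * dropped around sie64a() itself. The loop terminates on a pending signal,
 * a pending guest-debug exit, or a non-zero result from vcpu_pre_run() or
 * vcpu_post_run().
 */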
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are
	 * protected.
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * PF_VCPU is used in the fault handler, so there must be
		 * no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

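/*
 * Copy the register state that userspace marked dirty in
 * kvm_run->kvm_dirty_regs into the vcpu before entering SIE;
 * store_regs() performs the inverse copy on the way back out.
 */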
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

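/*
 * The KVM_RUN backend: sync the register state from kvm_run, run the vcpu
 * until userspace attention is required, then write the state back.
 * -EREMOTE from the run loop means kvm_run has already been prepared for
 * userspace and is translated into a return value of 0.
 */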
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}

int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRS due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save them into the save area.
	 */
	save_fpu_regs();

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

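/*
 * Illustrative userspace sketch (not part of this file): driving the
 * KVM_S390_MEM_OP ioctl on a vcpu fd to read guest memory. Field names
 * follow the uapi struct kvm_s390_mem_op; vcpu_fd, guest_addr, buf and
 * len are placeholders, and error handling is elided:
 *
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = guest_addr,
 *		.size  = len,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *		.ar    = 0,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop) < 0)
 *		perror("KVM_S390_MEM_OP");
 */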
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

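/*
 * For user controlled virtual machines, userspace mmap()s the SIE control
 * block at KVM_S390_SIE_PAGE_OFFSET on the vcpu fd; the fault handler
 * below backs that mapping with the vcpu's sie_block page.
 */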
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end on a
	 * segment boundary (1 MB). The memory in userland may be fragmented
	 * into various different vmas. It is okay to mmap() and munmap()
	 * stuff in this slot after doing this call at any time.
	 */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/*
	 * If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

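/*
 * sclp.hmfai appears to encode, per facility double-word, how many of its
 * bits may be passed through to guests: nonhyp_mask() extracts the i-th
 * 2-bit field from the top of hmfai and widens it into a mask of up to 48
 * facility bits that is applied to the host facility list below.
 */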
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_list_mask[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");