KVM: s390: Add sthyi emulation
arch/s390/kvm/kvm-s390.c
1 /*
2  * hosting zSeries kernel virtual machines
3  *
4  * Copyright IBM Corp. 2008, 2009
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
14  *               Jason J. Herne <jjherne@us.ibm.com>
15  */
16
17 #include <linux/compiler.h>
18 #include <linux/err.h>
19 #include <linux/fs.h>
20 #include <linux/hrtimer.h>
21 #include <linux/init.h>
22 #include <linux/kvm.h>
23 #include <linux/kvm_host.h>
24 #include <linux/module.h>
25 #include <linux/random.h>
26 #include <linux/slab.h>
27 #include <linux/timer.h>
28 #include <linux/vmalloc.h>
29 #include <asm/asm-offsets.h>
30 #include <asm/lowcore.h>
31 #include <asm/etr.h>
32 #include <asm/pgtable.h>
33 #include <asm/gmap.h>
34 #include <asm/nmi.h>
35 #include <asm/switch_to.h>
36 #include <asm/isc.h>
37 #include <asm/sclp.h>
38 #include "kvm-s390.h"
39 #include "gaccess.h"
40
41 #define KMSG_COMPONENT "kvm-s390"
42 #undef pr_fmt
43 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
44
45 #define CREATE_TRACE_POINTS
46 #include "trace.h"
47 #include "trace-s390.h"
48
49 #define MEM_OP_MAX_SIZE 65536   /* Maximum transfer size for KVM_S390_MEM_OP */
50 #define LOCAL_IRQS 32
51 #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
52                            (KVM_MAX_VCPUS + LOCAL_IRQS))
53
54 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
55
56 struct kvm_stats_debugfs_item debugfs_entries[] = {
57         { "userspace_handled", VCPU_STAT(exit_userspace) },
58         { "exit_null", VCPU_STAT(exit_null) },
59         { "exit_validity", VCPU_STAT(exit_validity) },
60         { "exit_stop_request", VCPU_STAT(exit_stop_request) },
61         { "exit_external_request", VCPU_STAT(exit_external_request) },
62         { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
63         { "exit_instruction", VCPU_STAT(exit_instruction) },
64         { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
65         { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
66         { "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
67         { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
68         { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
69         { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
70         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
71         { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
72         { "instruction_lctl", VCPU_STAT(instruction_lctl) },
73         { "instruction_stctl", VCPU_STAT(instruction_stctl) },
74         { "instruction_stctg", VCPU_STAT(instruction_stctg) },
75         { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
76         { "deliver_external_call", VCPU_STAT(deliver_external_call) },
77         { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
78         { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
79         { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
80         { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
81         { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
82         { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
83         { "exit_wait_state", VCPU_STAT(exit_wait_state) },
84         { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
85         { "instruction_stidp", VCPU_STAT(instruction_stidp) },
86         { "instruction_spx", VCPU_STAT(instruction_spx) },
87         { "instruction_stpx", VCPU_STAT(instruction_stpx) },
88         { "instruction_stap", VCPU_STAT(instruction_stap) },
89         { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
90         { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
91         { "instruction_stsch", VCPU_STAT(instruction_stsch) },
92         { "instruction_chsc", VCPU_STAT(instruction_chsc) },
93         { "instruction_essa", VCPU_STAT(instruction_essa) },
94         { "instruction_stsi", VCPU_STAT(instruction_stsi) },
95         { "instruction_stfl", VCPU_STAT(instruction_stfl) },
96         { "instruction_tprot", VCPU_STAT(instruction_tprot) },
97         { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
98         { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
99         { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
100         { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
101         { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
102         { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
103         { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
104         { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
105         { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
106         { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
107         { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
108         { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
109         { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
110         { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
111         { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
112         { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
113         { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
114         { "diagnose_10", VCPU_STAT(diagnose_10) },
115         { "diagnose_44", VCPU_STAT(diagnose_44) },
116         { "diagnose_9c", VCPU_STAT(diagnose_9c) },
117         { "diagnose_258", VCPU_STAT(diagnose_258) },
118         { "diagnose_308", VCPU_STAT(diagnose_308) },
119         { "diagnose_500", VCPU_STAT(diagnose_500) },
120         { NULL }
121 };
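/*
 * Each entry above is exported as a counter file in the kvm debugfs
 * directory (typically /sys/kernel/debug/kvm/), e.g. "instruction_sthyi"
 * for the STHYI handler this patch adds.
 */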
122
123 /* upper facilities limit for kvm */
124 unsigned long kvm_s390_fac_list_mask[16] = {
125         0xffe6000000000000UL,
126         0x005e000000000000UL,
127 };
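/*
 * kvm_s390_fac_list_mask uses the same bit numbering as the STFLE facility
 * list: facility 0 is the most significant bit of the first doubleword.
 * Facilities cleared here are masked out of the default facility mask and
 * list that a new VM starts with in kvm_arch_init_vm().
 */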
128
129 unsigned long kvm_s390_fac_list_mask_size(void)
130 {
131         BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
132         return ARRAY_SIZE(kvm_s390_fac_list_mask);
133 }
134
135 static struct gmap_notifier gmap_notifier;
136 debug_info_t *kvm_s390_dbf;
137
138 /* Section: not file related */
139 int kvm_arch_hardware_enable(void)
140 {
141         /* every s390 is virtualization enabled ;-) */
142         return 0;
143 }
144
145 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
146
147 /*
148  * This callback is executed during stop_machine(). All CPUs are therefore
149  * temporarily stopped. In order not to change guest behavior, we have to
150  * disable preemption whenever we touch the epoch of kvm and the VCPUs,
151  * so a CPU won't be stopped while calculating with the epoch.
152  */
153 static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
154                           void *v)
155 {
156         struct kvm *kvm;
157         struct kvm_vcpu *vcpu;
158         int i;
159         unsigned long long *delta = v;
160
161         list_for_each_entry(kvm, &vm_list, vm_list) {
162                 kvm->arch.epoch -= *delta;
163                 kvm_for_each_vcpu(i, vcpu, kvm) {
164                         vcpu->arch.sie_block->epoch -= *delta;
165                         if (vcpu->arch.cputm_enabled)
166                                 vcpu->arch.cputm_start += *delta;
167                 }
168         }
169         return NOTIFY_OK;
170 }
171
172 static struct notifier_block kvm_clock_notifier = {
173         .notifier_call = kvm_clock_sync,
174 };
175
176 int kvm_arch_hardware_setup(void)
177 {
178         gmap_notifier.notifier_call = kvm_gmap_notifier;
179         gmap_register_ipte_notifier(&gmap_notifier);
180         atomic_notifier_chain_register(&s390_epoch_delta_notifier,
181                                        &kvm_clock_notifier);
182         return 0;
183 }
184
185 void kvm_arch_hardware_unsetup(void)
186 {
187         gmap_unregister_ipte_notifier(&gmap_notifier);
188         atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
189                                          &kvm_clock_notifier);
190 }
191
192 int kvm_arch_init(void *opaque)
193 {
194         kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
195         if (!kvm_s390_dbf)
196                 return -ENOMEM;
197
198         if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
199                 debug_unregister(kvm_s390_dbf);
200                 return -ENOMEM;
201         }
202
203         /* Register floating interrupt controller interface. */
204         return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
205 }
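/*
 * "kvm-trace" is an s390 debug feature (s390dbf) area; with debugfs mounted
 * it normally shows up under /sys/kernel/debug/s390dbf/kvm-trace/.  The
 * sprintf view registered above formats the KVM_EVENT() records; per-VM
 * VM_EVENT()/VCPU_EVENT() messages go to the "kvm-%u" area that
 * kvm_arch_init_vm() registers further down.
 */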
206
207 void kvm_arch_exit(void)
208 {
209         debug_unregister(kvm_s390_dbf);
210 }
211
212 /* Section: device related */
213 long kvm_arch_dev_ioctl(struct file *filp,
214                         unsigned int ioctl, unsigned long arg)
215 {
216         if (ioctl == KVM_S390_ENABLE_SIE)
217                 return s390_enable_sie();
218         return -EINVAL;
219 }
220
221 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
222 {
223         int r;
224
225         switch (ext) {
226         case KVM_CAP_S390_PSW:
227         case KVM_CAP_S390_GMAP:
228         case KVM_CAP_SYNC_MMU:
229 #ifdef CONFIG_KVM_S390_UCONTROL
230         case KVM_CAP_S390_UCONTROL:
231 #endif
232         case KVM_CAP_ASYNC_PF:
233         case KVM_CAP_SYNC_REGS:
234         case KVM_CAP_ONE_REG:
235         case KVM_CAP_ENABLE_CAP:
236         case KVM_CAP_S390_CSS_SUPPORT:
237         case KVM_CAP_IOEVENTFD:
238         case KVM_CAP_DEVICE_CTRL:
239         case KVM_CAP_ENABLE_CAP_VM:
240         case KVM_CAP_S390_IRQCHIP:
241         case KVM_CAP_VM_ATTRIBUTES:
242         case KVM_CAP_MP_STATE:
243         case KVM_CAP_S390_INJECT_IRQ:
244         case KVM_CAP_S390_USER_SIGP:
245         case KVM_CAP_S390_USER_STSI:
246         case KVM_CAP_S390_SKEYS:
247         case KVM_CAP_S390_IRQ_STATE:
248                 r = 1;
249                 break;
250         case KVM_CAP_S390_MEM_OP:
251                 r = MEM_OP_MAX_SIZE;
252                 break;
253         case KVM_CAP_NR_VCPUS:
254         case KVM_CAP_MAX_VCPUS:
255                 r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
256                                   : KVM_S390_BSCA_CPU_SLOTS;
257                 break;
258         case KVM_CAP_NR_MEMSLOTS:
259                 r = KVM_USER_MEM_SLOTS;
260                 break;
261         case KVM_CAP_S390_COW:
262                 r = MACHINE_HAS_ESOP;
263                 break;
264         case KVM_CAP_S390_VECTOR_REGISTERS:
265                 r = MACHINE_HAS_VX;
266                 break;
267         case KVM_CAP_S390_RI:
268                 r = test_facility(64);
269                 break;
270         default:
271                 r = 0;
272         }
273         return r;
274 }
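/*
 * Userspace probes these capabilities with the KVM_CHECK_EXTENSION ioctl.
 * A minimal sketch, assuming "fd" is an open /dev/kvm or VM file descriptor:
 *
 *	int ret = ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *
 * Here a positive ret is the maximum transfer size for KVM_S390_MEM_OP
 * (MEM_OP_MAX_SIZE above); 0 means the capability is not available.
 */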
275
276 static void kvm_s390_sync_dirty_log(struct kvm *kvm,
277                                         struct kvm_memory_slot *memslot)
278 {
279         gfn_t cur_gfn, last_gfn;
280         unsigned long address;
281         struct gmap *gmap = kvm->arch.gmap;
282
283         /* Loop over all guest pages */
284         last_gfn = memslot->base_gfn + memslot->npages;
285         for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
286                 address = gfn_to_hva_memslot(memslot, cur_gfn);
287
288                 if (test_and_clear_guest_dirty(gmap->mm, address))
289                         mark_page_dirty(kvm, cur_gfn);
290                 if (fatal_signal_pending(current))
291                         return;
292                 cond_resched();
293         }
294 }
295
296 /* Section: vm related */
297 static void sca_del_vcpu(struct kvm_vcpu *vcpu);
298
299 /*
300  * Get (and clear) the dirty memory log for a memory slot.
301  */
302 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
303                                struct kvm_dirty_log *log)
304 {
305         int r;
306         unsigned long n;
307         struct kvm_memslots *slots;
308         struct kvm_memory_slot *memslot;
309         int is_dirty = 0;
310
311         mutex_lock(&kvm->slots_lock);
312
313         r = -EINVAL;
314         if (log->slot >= KVM_USER_MEM_SLOTS)
315                 goto out;
316
317         slots = kvm_memslots(kvm);
318         memslot = id_to_memslot(slots, log->slot);
319         r = -ENOENT;
320         if (!memslot->dirty_bitmap)
321                 goto out;
322
323         kvm_s390_sync_dirty_log(kvm, memslot);
324         r = kvm_get_dirty_log(kvm, log, &is_dirty);
325         if (r)
326                 goto out;
327
328         /* Clear the dirty log */
329         if (is_dirty) {
330                 n = kvm_dirty_bitmap_bytes(memslot);
331                 memset(memslot->dirty_bitmap, 0, n);
332         }
333         r = 0;
334 out:
335         mutex_unlock(&kvm->slots_lock);
336         return r;
337 }
338
339 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
340 {
341         int r;
342
343         if (cap->flags)
344                 return -EINVAL;
345
346         switch (cap->cap) {
347         case KVM_CAP_S390_IRQCHIP:
348                 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
349                 kvm->arch.use_irqchip = 1;
350                 r = 0;
351                 break;
352         case KVM_CAP_S390_USER_SIGP:
353                 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
354                 kvm->arch.user_sigp = 1;
355                 r = 0;
356                 break;
357         case KVM_CAP_S390_VECTOR_REGISTERS:
358                 mutex_lock(&kvm->lock);
359                 if (atomic_read(&kvm->online_vcpus)) {
360                         r = -EBUSY;
361                 } else if (MACHINE_HAS_VX) {
362                         set_kvm_facility(kvm->arch.model.fac_mask, 129);
363                         set_kvm_facility(kvm->arch.model.fac_list, 129);
364                         r = 0;
365                 } else
366                         r = -EINVAL;
367                 mutex_unlock(&kvm->lock);
368                 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
369                          r ? "(not available)" : "(success)");
370                 break;
371         case KVM_CAP_S390_RI:
372                 r = -EINVAL;
373                 mutex_lock(&kvm->lock);
374                 if (atomic_read(&kvm->online_vcpus)) {
375                         r = -EBUSY;
376                 } else if (test_facility(64)) {
377                         set_kvm_facility(kvm->arch.model.fac_mask, 64);
378                         set_kvm_facility(kvm->arch.model.fac_list, 64);
379                         r = 0;
380                 }
381                 mutex_unlock(&kvm->lock);
382                 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
383                          r ? "(not available)" : "(success)");
384                 break;
385         case KVM_CAP_S390_USER_STSI:
386                 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
387                 kvm->arch.user_stsi = 1;
388                 r = 0;
389                 break;
390         default:
391                 r = -EINVAL;
392                 break;
393         }
394         return r;
395 }
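/*
 * These per-VM capabilities are switched on with KVM_ENABLE_CAP on the VM
 * file descriptor (advertised via KVM_CAP_ENABLE_CAP_VM).  A minimal sketch,
 * assuming "vm_fd" came from KVM_CREATE_VM:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * Note that KVM_CAP_S390_VECTOR_REGISTERS and KVM_CAP_S390_RI must be
 * enabled before any VCPU is created, as enforced above.
 */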
396
397 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
398 {
399         int ret;
400
401         switch (attr->attr) {
402         case KVM_S390_VM_MEM_LIMIT_SIZE:
403                 ret = 0;
404                 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
405                          kvm->arch.mem_limit);
406                 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
407                         ret = -EFAULT;
408                 break;
409         default:
410                 ret = -ENXIO;
411                 break;
412         }
413         return ret;
414 }
415
416 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
417 {
418         int ret;
419         unsigned int idx;
420         switch (attr->attr) {
421         case KVM_S390_VM_MEM_ENABLE_CMMA:
422                 /* enable CMMA only for z10 and later (EDAT_1) */
423                 ret = -EINVAL;
424                 if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
425                         break;
426
427                 ret = -EBUSY;
428                 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
429                 mutex_lock(&kvm->lock);
430                 if (atomic_read(&kvm->online_vcpus) == 0) {
431                         kvm->arch.use_cmma = 1;
432                         ret = 0;
433                 }
434                 mutex_unlock(&kvm->lock);
435                 break;
436         case KVM_S390_VM_MEM_CLR_CMMA:
437                 ret = -EINVAL;
438                 if (!kvm->arch.use_cmma)
439                         break;
440
441                 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
442                 mutex_lock(&kvm->lock);
443                 idx = srcu_read_lock(&kvm->srcu);
444                 s390_reset_cmma(kvm->arch.gmap->mm);
445                 srcu_read_unlock(&kvm->srcu, idx);
446                 mutex_unlock(&kvm->lock);
447                 ret = 0;
448                 break;
449         case KVM_S390_VM_MEM_LIMIT_SIZE: {
450                 unsigned long new_limit;
451
452                 if (kvm_is_ucontrol(kvm))
453                         return -EINVAL;
454
455                 if (get_user(new_limit, (u64 __user *)attr->addr))
456                         return -EFAULT;
457
458                 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
459                     new_limit > kvm->arch.mem_limit)
460                         return -E2BIG;
461
462                 if (!new_limit)
463                         return -EINVAL;
464
465                 /* gmap_alloc takes last usable address */
466                 if (new_limit != KVM_S390_NO_MEM_LIMIT)
467                         new_limit -= 1;
468
469                 ret = -EBUSY;
470                 mutex_lock(&kvm->lock);
471                 if (atomic_read(&kvm->online_vcpus) == 0) {
472                         /* gmap_alloc will round the limit up */
473                         struct gmap *new = gmap_alloc(current->mm, new_limit);
474
475                         if (!new) {
476                                 ret = -ENOMEM;
477                         } else {
478                                 gmap_free(kvm->arch.gmap);
479                                 new->private = kvm;
480                                 kvm->arch.gmap = new;
481                                 ret = 0;
482                         }
483                 }
484                 mutex_unlock(&kvm->lock);
485                 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
486                 VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
487                          (void *) kvm->arch.gmap->asce);
488                 break;
489         }
490         default:
491                 ret = -ENXIO;
492                 break;
493         }
494         return ret;
495 }
496
497 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
498
499 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
500 {
501         struct kvm_vcpu *vcpu;
502         int i;
503
504         if (!test_kvm_facility(kvm, 76))
505                 return -EINVAL;
506
507         mutex_lock(&kvm->lock);
508         switch (attr->attr) {
509         case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
510                 get_random_bytes(
511                         kvm->arch.crypto.crycb->aes_wrapping_key_mask,
512                         sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
513                 kvm->arch.crypto.aes_kw = 1;
514                 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
515                 break;
516         case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
517                 get_random_bytes(
518                         kvm->arch.crypto.crycb->dea_wrapping_key_mask,
519                         sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
520                 kvm->arch.crypto.dea_kw = 1;
521                 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
522                 break;
523         case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
524                 kvm->arch.crypto.aes_kw = 0;
525                 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
526                         sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
527                 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
528                 break;
529         case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
530                 kvm->arch.crypto.dea_kw = 0;
531                 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
532                         sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
533                 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
534                 break;
535         default:
536                 mutex_unlock(&kvm->lock);
537                 return -ENXIO;
538         }
539
540         kvm_for_each_vcpu(i, vcpu, kvm) {
541                 kvm_s390_vcpu_crypto_setup(vcpu);
542                 exit_sie(vcpu);
543         }
544         mutex_unlock(&kvm->lock);
545         return 0;
546 }
547
548 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
549 {
550         u8 gtod_high;
551
552         if (copy_from_user(&gtod_high, (void __user *)attr->addr,
553                                            sizeof(gtod_high)))
554                 return -EFAULT;
555
556         if (gtod_high != 0)
557                 return -EINVAL;
558         VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
559
560         return 0;
561 }
562
563 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
564 {
565         u64 gtod;
566
567         if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
568                 return -EFAULT;
569
570         kvm_s390_set_tod_clock(kvm, gtod);
571         VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
572         return 0;
573 }
574
575 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
576 {
577         int ret;
578
579         if (attr->flags)
580                 return -EINVAL;
581
582         switch (attr->attr) {
583         case KVM_S390_VM_TOD_HIGH:
584                 ret = kvm_s390_set_tod_high(kvm, attr);
585                 break;
586         case KVM_S390_VM_TOD_LOW:
587                 ret = kvm_s390_set_tod_low(kvm, attr);
588                 break;
589         default:
590                 ret = -ENXIO;
591                 break;
592         }
593         return ret;
594 }
595
596 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
597 {
598         u8 gtod_high = 0;
599
600         if (copy_to_user((void __user *)attr->addr, &gtod_high,
601                                          sizeof(gtod_high)))
602                 return -EFAULT;
603         VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
604
605         return 0;
606 }
607
608 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
609 {
610         u64 gtod;
611
612         gtod = kvm_s390_get_tod_clock_fast(kvm);
613         if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
614                 return -EFAULT;
615         VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
616
617         return 0;
618 }
619
620 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
621 {
622         int ret;
623
624         if (attr->flags)
625                 return -EINVAL;
626
627         switch (attr->attr) {
628         case KVM_S390_VM_TOD_HIGH:
629                 ret = kvm_s390_get_tod_high(kvm, attr);
630                 break;
631         case KVM_S390_VM_TOD_LOW:
632                 ret = kvm_s390_get_tod_low(kvm, attr);
633                 break;
634         default:
635                 ret = -ENXIO;
636                 break;
637         }
638         return ret;
639 }
640
641 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
642 {
643         struct kvm_s390_vm_cpu_processor *proc;
644         u16 lowest_ibc, unblocked_ibc;
645         int ret = 0;
646
647         mutex_lock(&kvm->lock);
648         if (atomic_read(&kvm->online_vcpus)) {
649                 ret = -EBUSY;
650                 goto out;
651         }
652         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
653         if (!proc) {
654                 ret = -ENOMEM;
655                 goto out;
656         }
657         if (!copy_from_user(proc, (void __user *)attr->addr,
658                             sizeof(*proc))) {
659                 kvm->arch.model.cpuid = proc->cpuid;
660                 lowest_ibc = sclp.ibc >> 16 & 0xfff;
661                 unblocked_ibc = sclp.ibc & 0xfff;
662                 if (lowest_ibc) {
663                         if (proc->ibc > unblocked_ibc)
664                                 kvm->arch.model.ibc = unblocked_ibc;
665                         else if (proc->ibc < lowest_ibc)
666                                 kvm->arch.model.ibc = lowest_ibc;
667                         else
668                                 kvm->arch.model.ibc = proc->ibc;
669                 }
670                 memcpy(kvm->arch.model.fac_list, proc->fac_list,
671                        S390_ARCH_FAC_LIST_SIZE_BYTE);
672         } else
673                 ret = -EFAULT;
674         kfree(proc);
675 out:
676         mutex_unlock(&kvm->lock);
677         return ret;
678 }
679
680 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
681 {
682         int ret = -ENXIO;
683
684         switch (attr->attr) {
685         case KVM_S390_VM_CPU_PROCESSOR:
686                 ret = kvm_s390_set_processor(kvm, attr);
687                 break;
688         }
689         return ret;
690 }
691
692 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
693 {
694         struct kvm_s390_vm_cpu_processor *proc;
695         int ret = 0;
696
697         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
698         if (!proc) {
699                 ret = -ENOMEM;
700                 goto out;
701         }
702         proc->cpuid = kvm->arch.model.cpuid;
703         proc->ibc = kvm->arch.model.ibc;
704         memcpy(&proc->fac_list, kvm->arch.model.fac_list,
705                S390_ARCH_FAC_LIST_SIZE_BYTE);
706         if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
707                 ret = -EFAULT;
708         kfree(proc);
709 out:
710         return ret;
711 }
712
713 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
714 {
715         struct kvm_s390_vm_cpu_machine *mach;
716         int ret = 0;
717
718         mach = kzalloc(sizeof(*mach), GFP_KERNEL);
719         if (!mach) {
720                 ret = -ENOMEM;
721                 goto out;
722         }
723         get_cpu_id((struct cpuid *) &mach->cpuid);
724         mach->ibc = sclp.ibc;
725         memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
726                S390_ARCH_FAC_LIST_SIZE_BYTE);
727         memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
728                S390_ARCH_FAC_LIST_SIZE_BYTE);
729         if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
730                 ret = -EFAULT;
731         kfree(mach);
732 out:
733         return ret;
734 }
735
736 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
737 {
738         int ret = -ENXIO;
739
740         switch (attr->attr) {
741         case KVM_S390_VM_CPU_PROCESSOR:
742                 ret = kvm_s390_get_processor(kvm, attr);
743                 break;
744         case KVM_S390_VM_CPU_MACHINE:
745                 ret = kvm_s390_get_machine(kvm, attr);
746                 break;
747         }
748         return ret;
749 }
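/*
 * KVM_S390_VM_CPU_MACHINE reports what the host machine offers (the KVM
 * facility mask and the raw host STFLE list), while KVM_S390_VM_CPU_PROCESSOR
 * reports and, before the first VCPU is created, sets what the guest will
 * actually see (cpuid, IBC and guest facility list).
 */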
750
751 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
752 {
753         int ret;
754
755         switch (attr->group) {
756         case KVM_S390_VM_MEM_CTRL:
757                 ret = kvm_s390_set_mem_control(kvm, attr);
758                 break;
759         case KVM_S390_VM_TOD:
760                 ret = kvm_s390_set_tod(kvm, attr);
761                 break;
762         case KVM_S390_VM_CPU_MODEL:
763                 ret = kvm_s390_set_cpu_model(kvm, attr);
764                 break;
765         case KVM_S390_VM_CRYPTO:
766                 ret = kvm_s390_vm_set_crypto(kvm, attr);
767                 break;
768         default:
769                 ret = -ENXIO;
770                 break;
771         }
772
773         return ret;
774 }
775
776 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
777 {
778         int ret;
779
780         switch (attr->group) {
781         case KVM_S390_VM_MEM_CTRL:
782                 ret = kvm_s390_get_mem_control(kvm, attr);
783                 break;
784         case KVM_S390_VM_TOD:
785                 ret = kvm_s390_get_tod(kvm, attr);
786                 break;
787         case KVM_S390_VM_CPU_MODEL:
788                 ret = kvm_s390_get_cpu_model(kvm, attr);
789                 break;
790         default:
791                 ret = -ENXIO;
792                 break;
793         }
794
795         return ret;
796 }
797
798 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
799 {
800         int ret;
801
802         switch (attr->group) {
803         case KVM_S390_VM_MEM_CTRL:
804                 switch (attr->attr) {
805                 case KVM_S390_VM_MEM_ENABLE_CMMA:
806                 case KVM_S390_VM_MEM_CLR_CMMA:
807                 case KVM_S390_VM_MEM_LIMIT_SIZE:
808                         ret = 0;
809                         break;
810                 default:
811                         ret = -ENXIO;
812                         break;
813                 }
814                 break;
815         case KVM_S390_VM_TOD:
816                 switch (attr->attr) {
817                 case KVM_S390_VM_TOD_LOW:
818                 case KVM_S390_VM_TOD_HIGH:
819                         ret = 0;
820                         break;
821                 default:
822                         ret = -ENXIO;
823                         break;
824                 }
825                 break;
826         case KVM_S390_VM_CPU_MODEL:
827                 switch (attr->attr) {
828                 case KVM_S390_VM_CPU_PROCESSOR:
829                 case KVM_S390_VM_CPU_MACHINE:
830                         ret = 0;
831                         break;
832                 default:
833                         ret = -ENXIO;
834                         break;
835                 }
836                 break;
837         case KVM_S390_VM_CRYPTO:
838                 switch (attr->attr) {
839                 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
840                 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
841                 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
842                 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
843                         ret = 0;
844                         break;
845                 default:
846                         ret = -ENXIO;
847                         break;
848                 }
849                 break;
850         default:
851                 ret = -ENXIO;
852                 break;
853         }
854
855         return ret;
856 }
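/*
 * All of the VM attribute groups above are reached through the
 * KVM_SET/GET/HAS_DEVICE_ATTR ioctls on the VM file descriptor.  A minimal
 * sketch, assuming "vm_fd" came from KVM_CREATE_VM, that reads the guest
 * TOD base:
 *
 *	__u64 tod;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64)&tod,
 *	};
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 */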
857
858 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
859 {
860         uint8_t *keys;
861         uint64_t hva;
862         unsigned long curkey;
863         int i, r = 0;
864
865         if (args->flags != 0)
866                 return -EINVAL;
867
868         /* Is this guest using storage keys? */
869         if (!mm_use_skey(current->mm))
870                 return KVM_S390_GET_SKEYS_NONE;
871
872         /* Enforce sane limit on memory allocation */
873         if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
874                 return -EINVAL;
875
876         keys = kmalloc_array(args->count, sizeof(uint8_t),
877                              GFP_KERNEL | __GFP_NOWARN);
878         if (!keys)
879                 keys = vmalloc(sizeof(uint8_t) * args->count);
880         if (!keys)
881                 return -ENOMEM;
882
883         for (i = 0; i < args->count; i++) {
884                 hva = gfn_to_hva(kvm, args->start_gfn + i);
885                 if (kvm_is_error_hva(hva)) {
886                         r = -EFAULT;
887                         goto out;
888                 }
889
890                 curkey = get_guest_storage_key(current->mm, hva);
891                 if (IS_ERR_VALUE(curkey)) {
892                         r = curkey;
893                         goto out;
894                 }
895                 keys[i] = curkey;
896         }
897
898         r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
899                          sizeof(uint8_t) * args->count);
900         if (r)
901                 r = -EFAULT;
902 out:
903         kvfree(keys);
904         return r;
905 }
906
907 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
908 {
909         uint8_t *keys;
910         uint64_t hva;
911         int i, r = 0;
912
913         if (args->flags != 0)
914                 return -EINVAL;
915
916         /* Enforce sane limit on memory allocation */
917         if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
918                 return -EINVAL;
919
920         keys = kmalloc_array(args->count, sizeof(uint8_t),
921                              GFP_KERNEL | __GFP_NOWARN);
922         if (!keys)
923                 keys = vmalloc(sizeof(uint8_t) * args->count);
924         if (!keys)
925                 return -ENOMEM;
926
927         r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
928                            sizeof(uint8_t) * args->count);
929         if (r) {
930                 r = -EFAULT;
931                 goto out;
932         }
933
934         /* Enable storage key handling for the guest */
935         r = s390_enable_skey();
936         if (r)
937                 goto out;
938
939         for (i = 0; i < args->count; i++) {
940                 hva = gfn_to_hva(kvm, args->start_gfn + i);
941                 if (kvm_is_error_hva(hva)) {
942                         r = -EFAULT;
943                         goto out;
944                 }
945
946                 /* Lowest order bit is reserved */
947                 if (keys[i] & 0x01) {
948                         r = -EINVAL;
949                         goto out;
950                 }
951
952                 r = set_guest_storage_key(current->mm, hva,
953                                           (unsigned long)keys[i], 0);
954                 if (r)
955                         goto out;
956         }
957 out:
958         kvfree(keys);
959         return r;
960 }
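/*
 * Storage keys are migrated with the KVM_S390_GET_SKEYS/KVM_S390_SET_SKEYS
 * VM ioctls handled above.  A minimal sketch, assuming "vm_fd" and a buffer
 * "keys" with one byte per guest page:
 *
 *	struct kvm_s390_skeys args = {
 *		.start_gfn     = 0,
 *		.count         = npages,
 *		.skeydata_addr = (__u64)keys,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *
 * A return value of KVM_S390_GET_SKEYS_NONE means the guest has never used
 * storage keys and nothing needs to be transferred.
 */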
961
962 long kvm_arch_vm_ioctl(struct file *filp,
963                        unsigned int ioctl, unsigned long arg)
964 {
965         struct kvm *kvm = filp->private_data;
966         void __user *argp = (void __user *)arg;
967         struct kvm_device_attr attr;
968         int r;
969
970         switch (ioctl) {
971         case KVM_S390_INTERRUPT: {
972                 struct kvm_s390_interrupt s390int;
973
974                 r = -EFAULT;
975                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
976                         break;
977                 r = kvm_s390_inject_vm(kvm, &s390int);
978                 break;
979         }
980         case KVM_ENABLE_CAP: {
981                 struct kvm_enable_cap cap;
982                 r = -EFAULT;
983                 if (copy_from_user(&cap, argp, sizeof(cap)))
984                         break;
985                 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
986                 break;
987         }
988         case KVM_CREATE_IRQCHIP: {
989                 struct kvm_irq_routing_entry routing;
990
991                 r = -EINVAL;
992                 if (kvm->arch.use_irqchip) {
993                         /* Set up dummy routing. */
994                         memset(&routing, 0, sizeof(routing));
995                         r = kvm_set_irq_routing(kvm, &routing, 0, 0);
996                 }
997                 break;
998         }
999         case KVM_SET_DEVICE_ATTR: {
1000                 r = -EFAULT;
1001                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1002                         break;
1003                 r = kvm_s390_vm_set_attr(kvm, &attr);
1004                 break;
1005         }
1006         case KVM_GET_DEVICE_ATTR: {
1007                 r = -EFAULT;
1008                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1009                         break;
1010                 r = kvm_s390_vm_get_attr(kvm, &attr);
1011                 break;
1012         }
1013         case KVM_HAS_DEVICE_ATTR: {
1014                 r = -EFAULT;
1015                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1016                         break;
1017                 r = kvm_s390_vm_has_attr(kvm, &attr);
1018                 break;
1019         }
1020         case KVM_S390_GET_SKEYS: {
1021                 struct kvm_s390_skeys args;
1022
1023                 r = -EFAULT;
1024                 if (copy_from_user(&args, argp,
1025                                    sizeof(struct kvm_s390_skeys)))
1026                         break;
1027                 r = kvm_s390_get_skeys(kvm, &args);
1028                 break;
1029         }
1030         case KVM_S390_SET_SKEYS: {
1031                 struct kvm_s390_skeys args;
1032
1033                 r = -EFAULT;
1034                 if (copy_from_user(&args, argp,
1035                                    sizeof(struct kvm_s390_skeys)))
1036                         break;
1037                 r = kvm_s390_set_skeys(kvm, &args);
1038                 break;
1039         }
1040         default:
1041                 r = -ENOTTY;
1042         }
1043
1044         return r;
1045 }
1046
1047 static int kvm_s390_query_ap_config(u8 *config)
1048 {
1049         u32 fcn_code = 0x04000000UL;
1050         u32 cc = 0;
1051
1052         memset(config, 0, 128);
1053         asm volatile(
1054                 "lgr 0,%1\n"
1055                 "lgr 2,%2\n"
1056                 ".long 0xb2af0000\n"            /* PQAP(QCI) */
1057                 "0: ipm %0\n"
1058                 "srl %0,28\n"
1059                 "1:\n"
1060                 EX_TABLE(0b, 1b)
1061                 : "+r" (cc)
1062                 : "r" (fcn_code), "r" (config)
1063                 : "cc", "0", "2", "memory"
1064         );
1065
1066         return cc;
1067 }
1068
1069 static int kvm_s390_apxa_installed(void)
1070 {
1071         u8 config[128];
1072         int cc;
1073
1074         if (test_facility(12)) {
1075                 cc = kvm_s390_query_ap_config(config);
1076
1077                 if (cc)
1078                         pr_err("PQAP(QCI) failed with cc=%d", cc);
1079                 else
1080                         return config[0] & 0x40;
1081         }
1082
1083         return 0;
1084 }
1085
1086 static void kvm_s390_set_crycb_format(struct kvm *kvm)
1087 {
1088         kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1089
1090         if (kvm_s390_apxa_installed())
1091                 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1092         else
1093                 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1094 }
1095
1096 static u64 kvm_s390_get_initial_cpuid(void)
1097 {
1098         struct cpuid cpuid;
1099
1100         get_cpu_id(&cpuid);
1101         cpuid.version = 0xff;
1102         return *((u64 *) &cpuid);
1103 }
1104
1105 static void kvm_s390_crypto_init(struct kvm *kvm)
1106 {
1107         if (!test_kvm_facility(kvm, 76))
1108                 return;
1109
1110         kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
1111         kvm_s390_set_crycb_format(kvm);
1112
1113         /* Enable AES/DEA protected key functions by default */
1114         kvm->arch.crypto.aes_kw = 1;
1115         kvm->arch.crypto.dea_kw = 1;
1116         get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1117                          sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1118         get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1119                          sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1120 }
1121
1122 static void sca_dispose(struct kvm *kvm)
1123 {
1124         if (kvm->arch.use_esca)
1125                 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
1126         else
1127                 free_page((unsigned long)(kvm->arch.sca));
1128         kvm->arch.sca = NULL;
1129 }
1130
1131 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1132 {
1133         int i, rc;
1134         char debug_name[16];
1135         static unsigned long sca_offset;
1136
1137         rc = -EINVAL;
1138 #ifdef CONFIG_KVM_S390_UCONTROL
1139         if (type & ~KVM_VM_S390_UCONTROL)
1140                 goto out_err;
1141         if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1142                 goto out_err;
1143 #else
1144         if (type)
1145                 goto out_err;
1146 #endif
1147
1148         rc = s390_enable_sie();
1149         if (rc)
1150                 goto out_err;
1151
1152         rc = -ENOMEM;
1153
1154         kvm->arch.use_esca = 0; /* start with basic SCA */
1155         rwlock_init(&kvm->arch.sca_lock);
1156         kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
1157         if (!kvm->arch.sca)
1158                 goto out_err;
1159         spin_lock(&kvm_lock);
1160         sca_offset += 16;
1161         if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
1162                 sca_offset = 0;
1163         kvm->arch.sca = (struct bsca_block *)
1164                         ((char *) kvm->arch.sca + sca_offset);
1165         spin_unlock(&kvm_lock);
1166
1167         sprintf(debug_name, "kvm-%u", current->pid);
1168
1169         kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1170         if (!kvm->arch.dbf)
1171                 goto out_err;
1172
1173         kvm->arch.sie_page2 =
1174              (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1175         if (!kvm->arch.sie_page2)
1176                 goto out_err;
1177
1178         /* Populate the facility mask initially. */
1179         memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
1180                S390_ARCH_FAC_LIST_SIZE_BYTE);
1181         for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1182                 if (i < kvm_s390_fac_list_mask_size())
1183                         kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
1184                 else
1185                         kvm->arch.model.fac_mask[i] = 0UL;
1186         }
1187
1188         /* Populate the facility list initially. */
1189         kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
1190         memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
1191                S390_ARCH_FAC_LIST_SIZE_BYTE);
1192
1193         set_kvm_facility(kvm->arch.model.fac_mask, 74);
1194         set_kvm_facility(kvm->arch.model.fac_list, 74);
1195
1196         kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
1197         kvm->arch.model.ibc = sclp.ibc & 0x0fff;
1198
1199         kvm_s390_crypto_init(kvm);
1200
1201         spin_lock_init(&kvm->arch.float_int.lock);
1202         for (i = 0; i < FIRQ_LIST_COUNT; i++)
1203                 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
1204         init_waitqueue_head(&kvm->arch.ipte_wq);
1205         mutex_init(&kvm->arch.ipte_mutex);
1206
1207         debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
1208         VM_EVENT(kvm, 3, "vm created with type %lu", type);
1209
1210         if (type & KVM_VM_S390_UCONTROL) {
1211                 kvm->arch.gmap = NULL;
1212                 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
1213         } else {
1214                 if (sclp.hamax == U64_MAX)
1215                         kvm->arch.mem_limit = TASK_MAX_SIZE;
1216                 else
1217                         kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
1218                                                     sclp.hamax + 1);
1219                 kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
1220                 if (!kvm->arch.gmap)
1221                         goto out_err;
1222                 kvm->arch.gmap->private = kvm;
1223                 kvm->arch.gmap->pfault_enabled = 0;
1224         }
1225
1226         kvm->arch.css_support = 0;
1227         kvm->arch.use_irqchip = 0;
1228         kvm->arch.epoch = 0;
1229
1230         spin_lock_init(&kvm->arch.start_stop_lock);
1231         KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
1232
1233         return 0;
1234 out_err:
1235         free_page((unsigned long)kvm->arch.sie_page2);
1236         debug_unregister(kvm->arch.dbf);
1237         sca_dispose(kvm);
1238         KVM_EVENT(3, "creation of vm failed: %d", rc);
1239         return rc;
1240 }
1241
1242 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1243 {
1244         VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1245         trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
1246         kvm_s390_clear_local_irqs(vcpu);
1247         kvm_clear_async_pf_completion_queue(vcpu);
1248         if (!kvm_is_ucontrol(vcpu->kvm))
1249                 sca_del_vcpu(vcpu);
1250
1251         if (kvm_is_ucontrol(vcpu->kvm))
1252                 gmap_free(vcpu->arch.gmap);
1253
1254         if (vcpu->kvm->arch.use_cmma)
1255                 kvm_s390_vcpu_unsetup_cmma(vcpu);
1256         free_page((unsigned long)(vcpu->arch.sie_block));
1257
1258         kvm_vcpu_uninit(vcpu);
1259         kmem_cache_free(kvm_vcpu_cache, vcpu);
1260 }
1261
1262 static void kvm_free_vcpus(struct kvm *kvm)
1263 {
1264         unsigned int i;
1265         struct kvm_vcpu *vcpu;
1266
1267         kvm_for_each_vcpu(i, vcpu, kvm)
1268                 kvm_arch_vcpu_destroy(vcpu);
1269
1270         mutex_lock(&kvm->lock);
1271         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1272                 kvm->vcpus[i] = NULL;
1273
1274         atomic_set(&kvm->online_vcpus, 0);
1275         mutex_unlock(&kvm->lock);
1276 }
1277
1278 void kvm_arch_destroy_vm(struct kvm *kvm)
1279 {
1280         kvm_free_vcpus(kvm);
1281         sca_dispose(kvm);
1282         debug_unregister(kvm->arch.dbf);
1283         free_page((unsigned long)kvm->arch.sie_page2);
1284         if (!kvm_is_ucontrol(kvm))
1285                 gmap_free(kvm->arch.gmap);
1286         kvm_s390_destroy_adapters(kvm);
1287         kvm_s390_clear_float_irqs(kvm);
1288         KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
1289 }
1290
1291 /* Section: vcpu related */
1292 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1293 {
1294         vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1295         if (!vcpu->arch.gmap)
1296                 return -ENOMEM;
1297         vcpu->arch.gmap->private = vcpu->kvm;
1298
1299         return 0;
1300 }
1301
1302 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1303 {
1304         read_lock(&vcpu->kvm->arch.sca_lock);
1305         if (vcpu->kvm->arch.use_esca) {
1306                 struct esca_block *sca = vcpu->kvm->arch.sca;
1307
1308                 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
1309                 sca->cpu[vcpu->vcpu_id].sda = 0;
1310         } else {
1311                 struct bsca_block *sca = vcpu->kvm->arch.sca;
1312
1313                 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1314                 sca->cpu[vcpu->vcpu_id].sda = 0;
1315         }
1316         read_unlock(&vcpu->kvm->arch.sca_lock);
1317 }
1318
1319 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
1320 {
1321         read_lock(&vcpu->kvm->arch.sca_lock);
1322         if (vcpu->kvm->arch.use_esca) {
1323                 struct esca_block *sca = vcpu->kvm->arch.sca;
1324
1325                 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
1326                 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1327                 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
1328                 vcpu->arch.sie_block->ecb2 |= 0x04U;
1329                 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
1330         } else {
1331                 struct bsca_block *sca = vcpu->kvm->arch.sca;
1332
1333                 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
1334                 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1335                 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
1336                 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1337         }
1338         read_unlock(&vcpu->kvm->arch.sca_lock);
1339 }
1340
1341 /* Basic SCA to Extended SCA data copy routines */
1342 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
1343 {
1344         d->sda = s->sda;
1345         d->sigp_ctrl.c = s->sigp_ctrl.c;
1346         d->sigp_ctrl.scn = s->sigp_ctrl.scn;
1347 }
1348
1349 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
1350 {
1351         int i;
1352
1353         d->ipte_control = s->ipte_control;
1354         d->mcn[0] = s->mcn;
1355         for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
1356                 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
1357 }
1358
1359 static int sca_switch_to_extended(struct kvm *kvm)
1360 {
1361         struct bsca_block *old_sca = kvm->arch.sca;
1362         struct esca_block *new_sca;
1363         struct kvm_vcpu *vcpu;
1364         unsigned int vcpu_idx;
1365         u32 scaol, scaoh;
1366
1367         new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
1368         if (!new_sca)
1369                 return -ENOMEM;
1370
1371         scaoh = (u32)((u64)(new_sca) >> 32);
1372         scaol = (u32)(u64)(new_sca) & ~0x3fU;
1373
1374         kvm_s390_vcpu_block_all(kvm);
1375         write_lock(&kvm->arch.sca_lock);
1376
1377         sca_copy_b_to_e(new_sca, old_sca);
1378
1379         kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
1380                 vcpu->arch.sie_block->scaoh = scaoh;
1381                 vcpu->arch.sie_block->scaol = scaol;
1382                 vcpu->arch.sie_block->ecb2 |= 0x04U;
1383         }
1384         kvm->arch.sca = new_sca;
1385         kvm->arch.use_esca = 1;
1386
1387         write_unlock(&kvm->arch.sca_lock);
1388         kvm_s390_vcpu_unblock_all(kvm);
1389
1390         free_page((unsigned long)old_sca);
1391
1392         VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
1393                  old_sca, kvm->arch.sca);
1394         return 0;
1395 }
1396
1397 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1398 {
1399         int rc;
1400
1401         if (id < KVM_S390_BSCA_CPU_SLOTS)
1402                 return true;
1403         if (!sclp.has_esca)
1404                 return false;
1405
1406         mutex_lock(&kvm->lock);
1407         rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
1408         mutex_unlock(&kvm->lock);
1409
1410         return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
1411 }
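/*
 * A VM starts out with a basic SCA (bsca_block), which only has
 * KVM_S390_BSCA_CPU_SLOTS entries.  Creating a VCPU with a higher id
 * triggers the one-time switch to the extended SCA above, provided the
 * machine supports it (sclp.has_esca); the switch is done with all VCPUs
 * blocked so their SIE blocks can be repointed safely.
 */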
1412
1413 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1414 {
1415         vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1416         kvm_clear_async_pf_completion_queue(vcpu);
1417         vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1418                                     KVM_SYNC_GPRS |
1419                                     KVM_SYNC_ACRS |
1420                                     KVM_SYNC_CRS |
1421                                     KVM_SYNC_ARCH0 |
1422                                     KVM_SYNC_PFAULT;
1423         if (test_kvm_facility(vcpu->kvm, 64))
1424                 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
1425         /* fprs can be synchronized via vrs, even if the guest has no vx. With
1426          * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
1427          */
1428         if (MACHINE_HAS_VX)
1429                 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
1430         else
1431                 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
1432
1433         if (kvm_is_ucontrol(vcpu->kvm))
1434                 return __kvm_ucontrol_vcpu_init(vcpu);
1435
1436         return 0;
1437 }
1438
1439 /* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
1440 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1441 {
1442         WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
1443         raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1444         vcpu->arch.cputm_start = get_tod_clock_fast();
1445         raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1446 }
1447
1448 /* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
1449 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1450 {
1451         WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
1452         raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1453         vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1454         vcpu->arch.cputm_start = 0;
1455         raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1456 }
1457
1458 /* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
1459 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1460 {
1461         WARN_ON_ONCE(vcpu->arch.cputm_enabled);
1462         vcpu->arch.cputm_enabled = true;
1463         __start_cpu_timer_accounting(vcpu);
1464 }
1465
1466 /* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
1467 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1468 {
1469         WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
1470         __stop_cpu_timer_accounting(vcpu);
1471         vcpu->arch.cputm_enabled = false;
1472 }
1473
1474 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1475 {
1476         preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1477         __enable_cpu_timer_accounting(vcpu);
1478         preempt_enable();
1479 }
1480
1481 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1482 {
1483         preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1484         __disable_cpu_timer_accounting(vcpu);
1485         preempt_enable();
1486 }
1487
1488 /* set the cpu timer - may only be called from the VCPU thread itself */
1489 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
1490 {
1491         preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1492         raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1493         if (vcpu->arch.cputm_enabled)
1494                 vcpu->arch.cputm_start = get_tod_clock_fast();
1495         vcpu->arch.sie_block->cputm = cputm;
1496         raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1497         preempt_enable();
1498 }
1499
1500 /* update and get the cpu timer - can also be called from other VCPU threads */
1501 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
1502 {
1503         unsigned int seq;
1504         __u64 value;
1505
1506         if (unlikely(!vcpu->arch.cputm_enabled))
1507                 return vcpu->arch.sie_block->cputm;
1508
1509         preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1510         do {
1511                 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
1512                 /*
1513                  * If the writer would ever execute a read in the critical
1514                  * section, e.g. in irq context, we have a deadlock.
1515                  */
1516                 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
1517                 value = vcpu->arch.sie_block->cputm;
1518                 /* if cputm_start is 0, accounting is being started/stopped */
1519                 if (likely(vcpu->arch.cputm_start))
1520                         value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1521         } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
1522         preempt_enable();
1523         return value;
1524 }
1525
1526 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1527 {
1528         /* Save host register state */
1529         save_fpu_regs();
1530         vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
1531         vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
1532
1533         if (MACHINE_HAS_VX)
1534                 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
1535         else
1536                 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
1537         current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
1538         if (test_fp_ctl(current->thread.fpu.fpc))
1539                 /* User space provided an invalid FPC, let's clear it */
1540                 current->thread.fpu.fpc = 0;
1541
1542         save_access_regs(vcpu->arch.host_acrs);
1543         restore_access_regs(vcpu->run->s.regs.acrs);
1544         gmap_enable(vcpu->arch.gmap);
1545         atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1546         if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
1547                 __start_cpu_timer_accounting(vcpu);
1548         vcpu->cpu = cpu;
1549 }
1550
1551 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1552 {
1553         vcpu->cpu = -1;
1554         if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
1555                 __stop_cpu_timer_accounting(vcpu);
1556         atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1557         gmap_disable(vcpu->arch.gmap);
1558
1559         /* Save guest register state */
1560         save_fpu_regs();
1561         vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
1562
1563         /* Restore host register state */
1564         current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
1565         current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
1566
1567         save_access_regs(vcpu->run->s.regs.acrs);
1568         restore_access_regs(vcpu->arch.host_acrs);
1569 }
1570
1571 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1572 {
1573         /* this equals initial cpu reset in pop, but we don't switch to ESA */
1574         vcpu->arch.sie_block->gpsw.mask = 0UL;
1575         vcpu->arch.sie_block->gpsw.addr = 0UL;
1576         kvm_s390_set_prefix(vcpu, 0);
1577         kvm_s390_set_cpu_timer(vcpu, 0);
1578         vcpu->arch.sie_block->ckc       = 0UL;
1579         vcpu->arch.sie_block->todpr     = 0;
1580         memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1581         vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1582         vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1583         /* make sure the new fpc will be lazily loaded */
1584         save_fpu_regs();
1585         current->thread.fpu.fpc = 0;
1586         vcpu->arch.sie_block->gbea = 1;
1587         vcpu->arch.sie_block->pp = 0;
1588         vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1589         kvm_clear_async_pf_completion_queue(vcpu);
1590         if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1591                 kvm_s390_vcpu_stop(vcpu);
1592         kvm_s390_clear_local_irqs(vcpu);
1593 }
1594
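/*
 * Finish VCPU setup after creation: inherit the VM's TOD epoch and, for
 * non-ucontrol guests, share the VM's gmap and register the VCPU in the SCA.
 */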
1595 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1596 {
1597         mutex_lock(&vcpu->kvm->lock);
1598         preempt_disable();
1599         vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
1600         preempt_enable();
1601         mutex_unlock(&vcpu->kvm->lock);
1602         if (!kvm_is_ucontrol(vcpu->kvm)) {
1603                 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
1604                 sca_add_vcpu(vcpu);
1605         }
1606
1607 }
1608
1609 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1610 {
1611         if (!test_kvm_facility(vcpu->kvm, 76))
1612                 return;
1613
1614         vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1615
1616         if (vcpu->kvm->arch.crypto.aes_kw)
1617                 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1618         if (vcpu->kvm->arch.crypto.dea_kw)
1619                 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1620
1621         vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1622 }
1623
1624 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1625 {
1626         free_page(vcpu->arch.sie_block->cbrlo);
1627         vcpu->arch.sie_block->cbrlo = 0;
1628 }
1629
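/*
 * Enable CMMA handling for this VCPU: allocate a zeroed page for cbrlo and
 * adjust the ECB2 controls accordingly.
 */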
1630 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1631 {
1632         vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1633         if (!vcpu->arch.sie_block->cbrlo)
1634                 return -ENOMEM;
1635
1636         vcpu->arch.sie_block->ecb2 |= 0x80;
1637         vcpu->arch.sie_block->ecb2 &= ~0x08;
1638         return 0;
1639 }
1640
1641 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1642 {
1643         struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1644
1645         vcpu->arch.sie_block->ibc = model->ibc;
1646         if (test_kvm_facility(vcpu->kvm, 7))
1647                 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
1648 }
1649
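/*
 * Set up the SIE control block of a freshly created VCPU: initial CPU state
 * flags, facility-dependent execution controls, interception controls, and
 * the optional CMMA and crypto setup.
 */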
1650 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1651 {
1652         int rc = 0;
1653
1654         atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1655                                                     CPUSTAT_SM |
1656                                                     CPUSTAT_STOPPED);
1657
1658         if (test_kvm_facility(vcpu->kvm, 78))
1659                 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
1660         else if (test_kvm_facility(vcpu->kvm, 8))
1661                 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1662
1663         kvm_s390_vcpu_setup_model(vcpu);
1664
1665         vcpu->arch.sie_block->ecb = 0x02;
1666         if (test_kvm_facility(vcpu->kvm, 9))
1667                 vcpu->arch.sie_block->ecb |= 0x04;
1668         if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
1669                 vcpu->arch.sie_block->ecb |= 0x10;
1670
1671         if (test_kvm_facility(vcpu->kvm, 8))
1672                 vcpu->arch.sie_block->ecb2 |= 0x08;
1673         vcpu->arch.sie_block->eca   = 0xC1002000U;
1674         if (sclp.has_siif)
1675                 vcpu->arch.sie_block->eca |= 1;
1676         if (sclp.has_sigpif)
1677                 vcpu->arch.sie_block->eca |= 0x10000000U;
1678         if (test_kvm_facility(vcpu->kvm, 64))
1679                 vcpu->arch.sie_block->ecb3 |= 0x01;
1680         if (test_kvm_facility(vcpu->kvm, 129)) {
1681                 vcpu->arch.sie_block->eca |= 0x00020000;
1682                 vcpu->arch.sie_block->ecd |= 0x20000000;
1683         }
1684         vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
1685         vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
1686         if (test_kvm_facility(vcpu->kvm, 74))
1687                 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
1688
1689         if (vcpu->kvm->arch.use_cmma) {
1690                 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1691                 if (rc)
1692                         return rc;
1693         }
1694         hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1695         vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
1696
1697         kvm_s390_vcpu_crypto_setup(vcpu);
1698
1699         return rc;
1700 }
1701
1702 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1703                                       unsigned int id)
1704 {
1705         struct kvm_vcpu *vcpu;
1706         struct sie_page *sie_page;
1707         int rc = -EINVAL;
1708
1709         if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
1710                 goto out;
1711
1712         rc = -ENOMEM;
1713
1714         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1715         if (!vcpu)
1716                 goto out;
1717
1718         sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1719         if (!sie_page)
1720                 goto out_free_cpu;
1721
1722         vcpu->arch.sie_block = &sie_page->sie_block;
1723         vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1724
1725         vcpu->arch.sie_block->icpua = id;
1726         spin_lock_init(&vcpu->arch.local_int.lock);
1727         vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1728         vcpu->arch.local_int.wq = &vcpu->wq;
1729         vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
1730         seqcount_init(&vcpu->arch.cputm_seqcount);
1731
1732         rc = kvm_vcpu_init(vcpu, kvm, id);
1733         if (rc)
1734                 goto out_free_sie_block;
1735         VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
1736                  vcpu->arch.sie_block);
1737         trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1738
1739         return vcpu;
1740 out_free_sie_block:
1741         free_page((unsigned long)(vcpu->arch.sie_block));
1742 out_free_cpu:
1743         kmem_cache_free(kvm_vcpu_cache, vcpu);
1744 out:
1745         return ERR_PTR(rc);
1746 }
1747
1748 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1749 {
1750         return kvm_s390_vcpu_has_irq(vcpu, 0);
1751 }
1752
1753 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
1754 {
1755         atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1756         exit_sie(vcpu);
1757 }
1758
1759 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
1760 {
1761         atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1762 }
1763
1764 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1765 {
1766         atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1767         exit_sie(vcpu);
1768 }
1769
1770 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1771 {
1772         atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1773 }
1774
1775 /*
1776  * Kick a guest cpu out of SIE and wait until SIE is not running.
1777  * If the CPU is not running (e.g. waiting as idle) the function will
1778  * return immediately. */
1779 void exit_sie(struct kvm_vcpu *vcpu)
1780 {
1781         atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
1782         while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1783                 cpu_relax();
1784 }
1785
1786 /* Kick a guest cpu out of SIE to process a request synchronously */
1787 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
1788 {
1789         kvm_make_request(req, vcpu);
1790         kvm_s390_vcpu_request(vcpu);
1791 }
1792
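/*
 * gmap notifier callback: request a MMU reload on every VCPU whose prefix
 * pages contain the invalidated guest address.
 */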
1793 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1794 {
1795         int i;
1796         struct kvm *kvm = gmap->private;
1797         struct kvm_vcpu *vcpu;
1798
1799         kvm_for_each_vcpu(i, vcpu, kvm) {
1800                 /* match against both prefix pages */
1801                 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
1802                         VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
1803                         kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
1804                 }
1805         }
1806 }
1807
1808 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1809 {
1810         /* kvm common code refers to this, but never calls it */
1811         BUG();
1812         return 0;
1813 }
1814
1815 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1816                                            struct kvm_one_reg *reg)
1817 {
1818         int r = -EINVAL;
1819
1820         switch (reg->id) {
1821         case KVM_REG_S390_TODPR:
1822                 r = put_user(vcpu->arch.sie_block->todpr,
1823                              (u32 __user *)reg->addr);
1824                 break;
1825         case KVM_REG_S390_EPOCHDIFF:
1826                 r = put_user(vcpu->arch.sie_block->epoch,
1827                              (u64 __user *)reg->addr);
1828                 break;
1829         case KVM_REG_S390_CPU_TIMER:
1830                 r = put_user(kvm_s390_get_cpu_timer(vcpu),
1831                              (u64 __user *)reg->addr);
1832                 break;
1833         case KVM_REG_S390_CLOCK_COMP:
1834                 r = put_user(vcpu->arch.sie_block->ckc,
1835                              (u64 __user *)reg->addr);
1836                 break;
1837         case KVM_REG_S390_PFTOKEN:
1838                 r = put_user(vcpu->arch.pfault_token,
1839                              (u64 __user *)reg->addr);
1840                 break;
1841         case KVM_REG_S390_PFCOMPARE:
1842                 r = put_user(vcpu->arch.pfault_compare,
1843                              (u64 __user *)reg->addr);
1844                 break;
1845         case KVM_REG_S390_PFSELECT:
1846                 r = put_user(vcpu->arch.pfault_select,
1847                              (u64 __user *)reg->addr);
1848                 break;
1849         case KVM_REG_S390_PP:
1850                 r = put_user(vcpu->arch.sie_block->pp,
1851                              (u64 __user *)reg->addr);
1852                 break;
1853         case KVM_REG_S390_GBEA:
1854                 r = put_user(vcpu->arch.sie_block->gbea,
1855                              (u64 __user *)reg->addr);
1856                 break;
1857         default:
1858                 break;
1859         }
1860
1861         return r;
1862 }
1863
1864 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1865                                            struct kvm_one_reg *reg)
1866 {
1867         int r = -EINVAL;
1868         __u64 val;
1869
1870         switch (reg->id) {
1871         case KVM_REG_S390_TODPR:
1872                 r = get_user(vcpu->arch.sie_block->todpr,
1873                              (u32 __user *)reg->addr);
1874                 break;
1875         case KVM_REG_S390_EPOCHDIFF:
1876                 r = get_user(vcpu->arch.sie_block->epoch,
1877                              (u64 __user *)reg->addr);
1878                 break;
1879         case KVM_REG_S390_CPU_TIMER:
1880                 r = get_user(val, (u64 __user *)reg->addr);
1881                 if (!r)
1882                         kvm_s390_set_cpu_timer(vcpu, val);
1883                 break;
1884         case KVM_REG_S390_CLOCK_COMP:
1885                 r = get_user(vcpu->arch.sie_block->ckc,
1886                              (u64 __user *)reg->addr);
1887                 break;
1888         case KVM_REG_S390_PFTOKEN:
1889                 r = get_user(vcpu->arch.pfault_token,
1890                              (u64 __user *)reg->addr);
1891                 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1892                         kvm_clear_async_pf_completion_queue(vcpu);
1893                 break;
1894         case KVM_REG_S390_PFCOMPARE:
1895                 r = get_user(vcpu->arch.pfault_compare,
1896                              (u64 __user *)reg->addr);
1897                 break;
1898         case KVM_REG_S390_PFSELECT:
1899                 r = get_user(vcpu->arch.pfault_select,
1900                              (u64 __user *)reg->addr);
1901                 break;
1902         case KVM_REG_S390_PP:
1903                 r = get_user(vcpu->arch.sie_block->pp,
1904                              (u64 __user *)reg->addr);
1905                 break;
1906         case KVM_REG_S390_GBEA:
1907                 r = get_user(vcpu->arch.sie_block->gbea,
1908                              (u64 __user *)reg->addr);
1909                 break;
1910         default:
1911                 break;
1912         }
1913
1914         return r;
1915 }
1916
1917 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1918 {
1919         kvm_s390_vcpu_initial_reset(vcpu);
1920         return 0;
1921 }
1922
1923 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1924 {
1925         memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1926         return 0;
1927 }
1928
1929 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1930 {
1931         memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1932         return 0;
1933 }
1934
1935 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1936                                   struct kvm_sregs *sregs)
1937 {
1938         memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1939         memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
1940         restore_access_regs(vcpu->run->s.regs.acrs);
1941         return 0;
1942 }
1943
1944 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1945                                   struct kvm_sregs *sregs)
1946 {
1947         memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1948         memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1949         return 0;
1950 }
1951
1952 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1953 {
1954         /* make sure the new values will be lazily loaded */
1955         save_fpu_regs();
1956         if (test_fp_ctl(fpu->fpc))
1957                 return -EINVAL;
1958         current->thread.fpu.fpc = fpu->fpc;
1959         if (MACHINE_HAS_VX)
1960                 convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
1961         else
1962                 memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
1963         return 0;
1964 }
1965
1966 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1967 {
1968         /* make sure we have the latest values */
1969         save_fpu_regs();
1970         if (MACHINE_HAS_VX)
1971                 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
1972         else
1973                 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
1974         fpu->fpc = current->thread.fpu.fpc;
1975         return 0;
1976 }
1977
1978 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1979 {
1980         int rc = 0;
1981
1982         if (!is_vcpu_stopped(vcpu))
1983                 rc = -EBUSY;
1984         else {
1985                 vcpu->run->psw_mask = psw.mask;
1986                 vcpu->run->psw_addr = psw.addr;
1987         }
1988         return rc;
1989 }
1990
1991 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1992                                   struct kvm_translation *tr)
1993 {
1994         return -EINVAL; /* not implemented yet */
1995 }
1996
1997 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1998                               KVM_GUESTDBG_USE_HW_BP | \
1999                               KVM_GUESTDBG_ENABLE)
2000
2001 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2002                                         struct kvm_guest_debug *dbg)
2003 {
2004         int rc = 0;
2005
2006         vcpu->guest_debug = 0;
2007         kvm_s390_clear_bp_data(vcpu);
2008
2009         if (dbg->control & ~VALID_GUESTDBG_FLAGS)
2010                 return -EINVAL;
2011
2012         if (dbg->control & KVM_GUESTDBG_ENABLE) {
2013                 vcpu->guest_debug = dbg->control;
2014                 /* enforce guest PER */
2015                 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
2016
2017                 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2018                         rc = kvm_s390_import_bp_data(vcpu, dbg);
2019         } else {
2020                 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
2021                 vcpu->arch.guestdbg.last_bp = 0;
2022         }
2023
2024         if (rc) {
2025                 vcpu->guest_debug = 0;
2026                 kvm_s390_clear_bp_data(vcpu);
2027                 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
2028         }
2029
2030         return rc;
2031 }
2032
2033 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2034                                     struct kvm_mp_state *mp_state)
2035 {
2036         /* CHECK_STOP and LOAD are not supported yet */
2037         return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2038                                        KVM_MP_STATE_OPERATING;
2039 }
2040
2041 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2042                                     struct kvm_mp_state *mp_state)
2043 {
2044         int rc = 0;
2045
2046         /* user space knows about this interface - let it control the state */
2047         vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2048
2049         switch (mp_state->mp_state) {
2050         case KVM_MP_STATE_STOPPED:
2051                 kvm_s390_vcpu_stop(vcpu);
2052                 break;
2053         case KVM_MP_STATE_OPERATING:
2054                 kvm_s390_vcpu_start(vcpu);
2055                 break;
2056         case KVM_MP_STATE_LOAD:
2057         case KVM_MP_STATE_CHECK_STOP:
2058                 /* fall through - CHECK_STOP and LOAD are not supported yet */
2059         default:
2060                 rc = -ENXIO;
2061         }
2062
2063         return rc;
2064 }
2065
2066 static bool ibs_enabled(struct kvm_vcpu *vcpu)
2067 {
2068         return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2069 }
2070
2071 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2072 {
2073 retry:
2074         kvm_s390_vcpu_request_handled(vcpu);
2075         if (!vcpu->requests)
2076                 return 0;
2077         /*
2078          * We use MMU_RELOAD just to re-arm the ipte notifier for the
2079          * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
2080          * This ensures that the ipte instruction for this request has
2081          * already finished. We might race against a second unmapper that
2082          * wants to set the blocking bit. Let's just retry the request loop.
2083          */
2084         if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2085                 int rc;
2086                 rc = gmap_ipte_notify(vcpu->arch.gmap,
2087                                       kvm_s390_get_prefix(vcpu),
2088                                       PAGE_SIZE * 2);
2089                 if (rc)
2090                         return rc;
2091                 goto retry;
2092         }
2093
2094         if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2095                 vcpu->arch.sie_block->ihcpu = 0xffff;
2096                 goto retry;
2097         }
2098
2099         if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2100                 if (!ibs_enabled(vcpu)) {
2101                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
2102                         atomic_or(CPUSTAT_IBS,
2103                                         &vcpu->arch.sie_block->cpuflags);
2104                 }
2105                 goto retry;
2106         }
2107
2108         if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2109                 if (ibs_enabled(vcpu)) {
2110                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
2111                         atomic_andnot(CPUSTAT_IBS,
2112                                           &vcpu->arch.sie_block->cpuflags);
2113                 }
2114                 goto retry;
2115         }
2116
2117         /* nothing to do, just clear the request */
2118         clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2119
2120         return 0;
2121 }
2122
2123 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2124 {
2125         struct kvm_vcpu *vcpu;
2126         int i;
2127
2128         mutex_lock(&kvm->lock);
2129         preempt_disable();
2130         kvm->arch.epoch = tod - get_tod_clock();
2131         kvm_s390_vcpu_block_all(kvm);
2132         kvm_for_each_vcpu(i, vcpu, kvm)
2133                 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2134         kvm_s390_vcpu_unblock_all(kvm);
2135         preempt_enable();
2136         mutex_unlock(&kvm->lock);
2137 }
2138
2139 /**
2140  * kvm_arch_fault_in_page - fault-in guest page if necessary
2141  * @vcpu: The corresponding virtual cpu
2142  * @gpa: Guest physical address
2143  * @writable: Whether the page should be writable or not
2144  *
2145  * Make sure that a guest page has been faulted-in on the host.
2146  *
2147  * Return: Zero on success, negative error code otherwise.
2148  */
2149 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
2150 {
2151         return gmap_fault(vcpu->arch.gmap, gpa,
2152                           writable ? FAULT_FLAG_WRITE : 0);
2153 }
2154
2155 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2156                                       unsigned long token)
2157 {
2158         struct kvm_s390_interrupt inti;
2159         struct kvm_s390_irq irq;
2160
2161         if (start_token) {
2162                 irq.u.ext.ext_params2 = token;
2163                 irq.type = KVM_S390_INT_PFAULT_INIT;
2164                 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
2165         } else {
2166                 inti.type = KVM_S390_INT_PFAULT_DONE;
2167                 inti.parm64 = token;
2168                 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2169         }
2170 }
2171
2172 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2173                                      struct kvm_async_pf *work)
2174 {
2175         trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2176         __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2177 }
2178
2179 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2180                                  struct kvm_async_pf *work)
2181 {
2182         trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2183         __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2184 }
2185
2186 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2187                                struct kvm_async_pf *work)
2188 {
2189         /* s390 will always inject the page directly */
2190 }
2191
2192 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2193 {
2194         /*
2195          * s390 will always inject the page directly,
2196          * but we still want check_async_completion to clean up
2197          */
2198         return true;
2199 }
2200
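/*
 * Set up an async pfault for the current host fault, but only if pfault
 * handshaking is configured and the guest can currently take the
 * notification interrupt. Returning 0 makes the caller fall back to a
 * synchronous fault-in.
 */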
2201 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2202 {
2203         hva_t hva;
2204         struct kvm_arch_async_pf arch;
2205         int rc;
2206
2207         if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2208                 return 0;
2209         if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2210             vcpu->arch.pfault_compare)
2211                 return 0;
2212         if (psw_extint_disabled(vcpu))
2213                 return 0;
2214         if (kvm_s390_vcpu_has_irq(vcpu, 0))
2215                 return 0;
2216         if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2217                 return 0;
2218         if (!vcpu->arch.gmap->pfault_enabled)
2219                 return 0;
2220
2221         hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2222         hva += current->thread.gmap_addr & ~PAGE_MASK;
2223         if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
2224                 return 0;
2225
2226         rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2227         return rc;
2228 }
2229
2230 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
2231 {
2232         int rc, cpuflags;
2233
2234         /*
2235          * On s390 notifications for arriving pages will be delivered directly
2236          * to the guest, but the housekeeping for completed pfaults is
2237          * handled outside the worker.
2238          */
2239         kvm_check_async_pf_completion(vcpu);
2240
2241         vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2242         vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
2243
2244         if (need_resched())
2245                 schedule();
2246
2247         if (test_cpu_flag(CIF_MCCK_PENDING))
2248                 s390_handle_mcck();
2249
2250         if (!kvm_is_ucontrol(vcpu->kvm)) {
2251                 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2252                 if (rc)
2253                         return rc;
2254         }
2255
2256         rc = kvm_s390_handle_requests(vcpu);
2257         if (rc)
2258                 return rc;
2259
2260         if (guestdbg_enabled(vcpu)) {
2261                 kvm_s390_backup_guest_per_regs(vcpu);
2262                 kvm_s390_patch_guest_per_regs(vcpu);
2263         }
2264
2265         vcpu->arch.sie_block->icptcode = 0;
2266         cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2267         VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2268         trace_kvm_s390_sie_enter(vcpu, cpuflags);
2269
2270         return 0;
2271 }
2272
2273 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2274 {
2275         struct kvm_s390_pgm_info pgm_info = {
2276                 .code = PGM_ADDRESSING,
2277         };
2278         u8 opcode, ilen;
2279         int rc;
2280
2281         VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2282         trace_kvm_s390_sie_fault(vcpu);
2283
2284         /*
2285          * We want to inject an addressing exception, which is defined as a
2286          * suppressing or terminating exception. However, since we came here
2287          * by a DAT access exception, the PSW still points to the faulting
2288          * instruction since DAT exceptions are nullifying. So we've got
2289          * to look up the current opcode to get the length of the instruction
2290          * to be able to forward the PSW.
2291          */
2292         rc = read_guest_instr(vcpu, &opcode, 1);
2293         ilen = insn_length(opcode);
2294         if (rc < 0) {
2295                 return rc;
2296         } else if (rc) {
2297                 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2298                  * Forward by arbitrary ilc, injection will take care of
2299                  * nullification if necessary.
2300                  */
2301                 pgm_info = vcpu->arch.pgm;
2302                 ilen = 4;
2303         }
2304         pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2305         kvm_s390_forward_psw(vcpu, ilen);
2306         return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
2307 }
2308
2309 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2310 {
2311         VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2312                    vcpu->arch.sie_block->icptcode);
2313         trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2314
2315         if (guestdbg_enabled(vcpu))
2316                 kvm_s390_restore_guest_per_regs(vcpu);
2317
2318         vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2319         vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
2320
2321         if (vcpu->arch.sie_block->icptcode > 0) {
2322                 int rc = kvm_handle_sie_intercept(vcpu);
2323
2324                 if (rc != -EOPNOTSUPP)
2325                         return rc;
2326                 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2327                 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2328                 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2329                 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2330                 return -EREMOTE;
2331         } else if (exit_reason != -EFAULT) {
2332                 vcpu->stat.exit_null++;
2333                 return 0;
2334         } else if (kvm_is_ucontrol(vcpu->kvm)) {
2335                 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2336                 vcpu->run->s390_ucontrol.trans_exc_code =
2337                                                 current->thread.gmap_addr;
2338                 vcpu->run->s390_ucontrol.pgm_code = 0x10;
2339                 return -EREMOTE;
2340         } else if (current->thread.gmap_pfault) {
2341                 trace_kvm_s390_major_guest_pfault(vcpu);
2342                 current->thread.gmap_pfault = 0;
2343                 if (kvm_arch_setup_async_pf(vcpu))
2344                         return 0;
2345                 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
2346         }
2347         return vcpu_post_run_fault_in_sie(vcpu);
2348 }
2349
2350 static int __vcpu_run(struct kvm_vcpu *vcpu)
2351 {
2352         int rc, exit_reason;
2353
2354         /*
2355          * We try to hold kvm->srcu during most of vcpu_run (except when
2356          * running the guest), so that memslots (and other stuff) are protected.
2357          */
2358         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2359
2360         do {
2361                 rc = vcpu_pre_run(vcpu);
2362                 if (rc)
2363                         break;
2364
2365                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2366                 /*
2367                  * As PF_VCPU will be used in the fault handler, there must
2368                  * be no uaccess between guest_enter and guest_exit.
2369                  */
2370                 local_irq_disable();
2371                 __kvm_guest_enter();
2372                 __disable_cpu_timer_accounting(vcpu);
2373                 local_irq_enable();
2374                 exit_reason = sie64a(vcpu->arch.sie_block,
2375                                      vcpu->run->s.regs.gprs);
2376                 local_irq_disable();
2377                 __enable_cpu_timer_accounting(vcpu);
2378                 __kvm_guest_exit();
2379                 local_irq_enable();
2380                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2381
2382                 rc = vcpu_post_run(vcpu, exit_reason);
2383         } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
2384
2385         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2386         return rc;
2387 }
2388
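/* copy register state marked dirty by userspace from kvm_run into the VCPU */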
2389 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2390 {
2391         vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2392         vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2393         if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2394                 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2395         if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2396                 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2397                 /* some control register changes require a tlb flush */
2398                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2399         }
2400         if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2401                 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
2402                 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2403                 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2404                 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2405                 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2406         }
2407         if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2408                 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2409                 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2410                 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
2411                 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2412                         kvm_clear_async_pf_completion_queue(vcpu);
2413         }
2414         kvm_run->kvm_dirty_regs = 0;
2415 }
2416
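/* copy the current VCPU register state back into kvm_run for userspace */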
2417 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2418 {
2419         kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2420         kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2421         kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2422         memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2423         kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
2424         kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2425         kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2426         kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2427         kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2428         kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2429         kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2430         kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2431 }
2432
2433 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2434 {
2435         int rc;
2436         sigset_t sigsaved;
2437
2438         if (guestdbg_exit_pending(vcpu)) {
2439                 kvm_s390_prepare_debug_exit(vcpu);
2440                 return 0;
2441         }
2442
2443         if (vcpu->sigset_active)
2444                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2445
2446         if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2447                 kvm_s390_vcpu_start(vcpu);
2448         } else if (is_vcpu_stopped(vcpu)) {
2449                 pr_err_ratelimited("can't run stopped vcpu %d\n",
2450                                    vcpu->vcpu_id);
2451                 return -EINVAL;
2452         }
2453
2454         sync_regs(vcpu, kvm_run);
2455         enable_cpu_timer_accounting(vcpu);
2456
2457         might_fault();
2458         rc = __vcpu_run(vcpu);
2459
2460         if (signal_pending(current) && !rc) {
2461                 kvm_run->exit_reason = KVM_EXIT_INTR;
2462                 rc = -EINTR;
2463         }
2464
2465         if (guestdbg_exit_pending(vcpu) && !rc)  {
2466                 kvm_s390_prepare_debug_exit(vcpu);
2467                 rc = 0;
2468         }
2469
2470         if (rc == -EREMOTE) {
2471                 /* userspace support is needed, kvm_run has been prepared */
2472                 rc = 0;
2473         }
2474
2475         disable_cpu_timer_accounting(vcpu);
2476         store_regs(vcpu, kvm_run);
2477
2478         if (vcpu->sigset_active)
2479                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2480
2481         vcpu->stat.exit_userspace++;
2482         return rc;
2483 }
2484
2485 /*
2486  * store status at address
2487  * we have two special cases:
2488  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2489  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2490  */
2491 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2492 {
2493         unsigned char archmode = 1;
2494         freg_t fprs[NUM_FPRS];
2495         unsigned int px;
2496         u64 clkcomp, cputm;
2497         int rc;
2498
2499         px = kvm_s390_get_prefix(vcpu);
2500         if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2501                 if (write_guest_abs(vcpu, 163, &archmode, 1))
2502                         return -EFAULT;
2503                 gpa = 0;
2504         } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2505                 if (write_guest_real(vcpu, 163, &archmode, 1))
2506                         return -EFAULT;
2507                 gpa = px;
2508         } else
2509                 gpa -= __LC_FPREGS_SAVE_AREA;
2510
2511         /* manually convert vector registers if necessary */
2512         if (MACHINE_HAS_VX) {
2513                 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
2514                 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2515                                      fprs, 128);
2516         } else {
2517                 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2518                                      vcpu->run->s.regs.fprs, 128);
2519         }
2520         rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
2521                               vcpu->run->s.regs.gprs, 128);
2522         rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
2523                               &vcpu->arch.sie_block->gpsw, 16);
2524         rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
2525                               &px, 4);
2526         rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
2527                               &vcpu->run->s.regs.fpc, 4);
2528         rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
2529                               &vcpu->arch.sie_block->todpr, 4);
2530         cputm = kvm_s390_get_cpu_timer(vcpu);
2531         rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
2532                               &cputm, 8);
2533         clkcomp = vcpu->arch.sie_block->ckc >> 8;
2534         rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
2535                               &clkcomp, 8);
2536         rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
2537                               &vcpu->run->s.regs.acrs, 64);
2538         rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
2539                               &vcpu->arch.sie_block->gcr, 128);
2540         return rc ? -EFAULT : 0;
2541 }
2542
2543 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2544 {
2545         /*
2546          * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2547          * copying in vcpu load/put. Let's update our copies before we save
2548          * them into the save area.
2549          */
2550         save_fpu_regs();
2551         vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
2552         save_access_regs(vcpu->run->s.regs.acrs);
2553
2554         return kvm_s390_store_status_unloaded(vcpu, addr);
2555 }
2556
2557 /*
2558  * store additional status at address
2559  */
2560 int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2561                                         unsigned long gpa)
2562 {
2563         /* Only bits 0-53 are used for address formation */
2564         if (!(gpa & ~0x3ff))
2565                 return 0;
2566
2567         return write_guest_abs(vcpu, gpa & ~0x3ff,
2568                                (void *)&vcpu->run->s.regs.vrs, 512);
2569 }
2570
2571 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2572 {
2573         if (!test_kvm_facility(vcpu->kvm, 129))
2574                 return 0;
2575
2576         /*
2577          * The guest VXRS are in the host VXRS due to the lazy
2578          * copying in vcpu load/put. We can simply call save_fpu_regs()
2579          * to save the current register state because we are in the
2580          * middle of a load/put cycle.
2581          *
2582          * Let's update our copies before we save them into the save area.
2583          */
2584         save_fpu_regs();
2585
2586         return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2587 }
2588
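/* cancel a still pending ENABLE_IBS request before synchronously disabling IBS */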
2589 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2590 {
2591         kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
2592         kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
2593 }
2594
2595 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2596 {
2597         unsigned int i;
2598         struct kvm_vcpu *vcpu;
2599
2600         kvm_for_each_vcpu(i, vcpu, kvm) {
2601                 __disable_ibs_on_vcpu(vcpu);
2602         }
2603 }
2604
2605 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2606 {
2607         kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2608         kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
2609 }
2610
2611 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2612 {
2613         int i, online_vcpus, started_vcpus = 0;
2614
2615         if (!is_vcpu_stopped(vcpu))
2616                 return;
2617
2618         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
2619         /* Only one cpu at a time may enter/leave the STOPPED state. */
2620         spin_lock(&vcpu->kvm->arch.start_stop_lock);
2621         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2622
2623         for (i = 0; i < online_vcpus; i++) {
2624                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2625                         started_vcpus++;
2626         }
2627
2628         if (started_vcpus == 0) {
2629                 /* we're the only active VCPU -> speed it up */
2630                 __enable_ibs_on_vcpu(vcpu);
2631         } else if (started_vcpus == 1) {
2632                 /*
2633                  * As we are starting a second VCPU, we have to disable
2634                  * the IBS facility on all VCPUs to remove potentially
2635                  * outstanding ENABLE requests.
2636                  */
2637                 __disable_ibs_on_all_vcpus(vcpu->kvm);
2638         }
2639
2640         atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2641         /*
2642          * Another VCPU might have used IBS while we were offline.
2643          * Let's play safe and flush the VCPU at startup.
2644          */
2645         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2646         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2647         return;
2648 }
2649
2650 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2651 {
2652         int i, online_vcpus, started_vcpus = 0;
2653         struct kvm_vcpu *started_vcpu = NULL;
2654
2655         if (is_vcpu_stopped(vcpu))
2656                 return;
2657
2658         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
2659         /* Only one cpu at a time may enter/leave the STOPPED state. */
2660         spin_lock(&vcpu->kvm->arch.start_stop_lock);
2661         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2662
2663         /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
2664         kvm_s390_clear_stop_irq(vcpu);
2665
2666         atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2667         __disable_ibs_on_vcpu(vcpu);
2668
2669         for (i = 0; i < online_vcpus; i++) {
2670                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2671                         started_vcpus++;
2672                         started_vcpu = vcpu->kvm->vcpus[i];
2673                 }
2674         }
2675
2676         if (started_vcpus == 1) {
2677                 /*
2678                  * As we only have one VCPU left, we want to enable the
2679                  * IBS facility for that VCPU to speed it up.
2680                  */
2681                 __enable_ibs_on_vcpu(started_vcpu);
2682         }
2683
2684         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2685         return;
2686 }
2687
2688 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2689                                      struct kvm_enable_cap *cap)
2690 {
2691         int r;
2692
2693         if (cap->flags)
2694                 return -EINVAL;
2695
2696         switch (cap->cap) {
2697         case KVM_CAP_S390_CSS_SUPPORT:
2698                 if (!vcpu->kvm->arch.css_support) {
2699                         vcpu->kvm->arch.css_support = 1;
2700                         VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2701                         trace_kvm_s390_enable_css(vcpu->kvm);
2702                 }
2703                 r = 0;
2704                 break;
2705         default:
2706                 r = -EINVAL;
2707                 break;
2708         }
2709         return r;
2710 }
2711
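/*
 * Handle the KVM_S390_MEM_OP ioctl: read or write guest logical memory
 * through a temporary buffer, or only check accessibility when the
 * CHECK_ONLY flag is set.
 */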
2712 static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2713                                   struct kvm_s390_mem_op *mop)
2714 {
2715         void __user *uaddr = (void __user *)mop->buf;
2716         void *tmpbuf = NULL;
2717         int r, srcu_idx;
2718         const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2719                                     | KVM_S390_MEMOP_F_CHECK_ONLY;
2720
2721         if (mop->flags & ~supported_flags)
2722                 return -EINVAL;
2723
2724         if (mop->size > MEM_OP_MAX_SIZE)
2725                 return -E2BIG;
2726
2727         if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2728                 tmpbuf = vmalloc(mop->size);
2729                 if (!tmpbuf)
2730                         return -ENOMEM;
2731         }
2732
2733         srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2734
2735         switch (mop->op) {
2736         case KVM_S390_MEMOP_LOGICAL_READ:
2737                 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2738                         r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2739                                             mop->size, GACC_FETCH);
2740                         break;
2741                 }
2742                 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2743                 if (r == 0) {
2744                         if (copy_to_user(uaddr, tmpbuf, mop->size))
2745                                 r = -EFAULT;
2746                 }
2747                 break;
2748         case KVM_S390_MEMOP_LOGICAL_WRITE:
2749                 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2750                         r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2751                                             mop->size, GACC_STORE);
2752                         break;
2753                 }
2754                 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2755                         r = -EFAULT;
2756                         break;
2757                 }
2758                 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2759                 break;
2760         default:
2761                 r = -EINVAL;
2762         }
2763
2764         srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2765
2766         if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2767                 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2768
2769         vfree(tmpbuf);
2770         return r;
2771 }
2772
2773 long kvm_arch_vcpu_ioctl(struct file *filp,
2774                          unsigned int ioctl, unsigned long arg)
2775 {
2776         struct kvm_vcpu *vcpu = filp->private_data;
2777         void __user *argp = (void __user *)arg;
2778         int idx;
2779         long r;
2780
2781         switch (ioctl) {
2782         case KVM_S390_IRQ: {
2783                 struct kvm_s390_irq s390irq;
2784
2785                 r = -EFAULT;
2786                 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2787                         break;
2788                 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2789                 break;
2790         }
2791         case KVM_S390_INTERRUPT: {
2792                 struct kvm_s390_interrupt s390int;
2793                 struct kvm_s390_irq s390irq;
2794
2795                 r = -EFAULT;
2796                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2797                         break;
2798                 if (s390int_to_s390irq(&s390int, &s390irq))
2799                         return -EINVAL;
2800                 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2801                 break;
2802         }
2803         case KVM_S390_STORE_STATUS:
2804                 idx = srcu_read_lock(&vcpu->kvm->srcu);
2805                 r = kvm_s390_vcpu_store_status(vcpu, arg);
2806                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2807                 break;
2808         case KVM_S390_SET_INITIAL_PSW: {
2809                 psw_t psw;
2810
2811                 r = -EFAULT;
2812                 if (copy_from_user(&psw, argp, sizeof(psw)))
2813                         break;
2814                 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2815                 break;
2816         }
2817         case KVM_S390_INITIAL_RESET:
2818                 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2819                 break;
2820         case KVM_SET_ONE_REG:
2821         case KVM_GET_ONE_REG: {
2822                 struct kvm_one_reg reg;
2823                 r = -EFAULT;
2824                 if (copy_from_user(&reg, argp, sizeof(reg)))
2825                         break;
2826                 if (ioctl == KVM_SET_ONE_REG)
2827                         r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2828                 else
2829                         r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2830                 break;
2831         }
2832 #ifdef CONFIG_KVM_S390_UCONTROL
2833         case KVM_S390_UCAS_MAP: {
2834                 struct kvm_s390_ucas_mapping ucasmap;
2835
2836                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2837                         r = -EFAULT;
2838                         break;
2839                 }
2840
2841                 if (!kvm_is_ucontrol(vcpu->kvm)) {
2842                         r = -EINVAL;
2843                         break;
2844                 }
2845
2846                 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2847                                      ucasmap.vcpu_addr, ucasmap.length);
2848                 break;
2849         }
2850         case KVM_S390_UCAS_UNMAP: {
2851                 struct kvm_s390_ucas_mapping ucasmap;
2852
2853                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2854                         r = -EFAULT;
2855                         break;
2856                 }
2857
2858                 if (!kvm_is_ucontrol(vcpu->kvm)) {
2859                         r = -EINVAL;
2860                         break;
2861                 }
2862
2863                 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2864                         ucasmap.length);
2865                 break;
2866         }
2867 #endif
2868         case KVM_S390_VCPU_FAULT: {
2869                 r = gmap_fault(vcpu->arch.gmap, arg, 0);
2870                 break;
2871         }
2872         case KVM_ENABLE_CAP:
2873         {
2874                 struct kvm_enable_cap cap;
2875                 r = -EFAULT;
2876                 if (copy_from_user(&cap, argp, sizeof(cap)))
2877                         break;
2878                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2879                 break;
2880         }
2881         case KVM_S390_MEM_OP: {
2882                 struct kvm_s390_mem_op mem_op;
2883
2884                 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2885                         r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2886                 else
2887                         r = -EFAULT;
2888                 break;
2889         }
2890         case KVM_S390_SET_IRQ_STATE: {
2891                 struct kvm_s390_irq_state irq_state;
2892
2893                 r = -EFAULT;
2894                 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2895                         break;
2896                 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2897                     irq_state.len == 0 ||
2898                     irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2899                         r = -EINVAL;
2900                         break;
2901                 }
2902                 r = kvm_s390_set_irq_state(vcpu,
2903                                            (void __user *) irq_state.buf,
2904                                            irq_state.len);
2905                 break;
2906         }
2907         case KVM_S390_GET_IRQ_STATE: {
2908                 struct kvm_s390_irq_state irq_state;
2909
2910                 r = -EFAULT;
2911                 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2912                         break;
2913                 if (irq_state.len == 0) {
2914                         r = -EINVAL;
2915                         break;
2916                 }
2917                 r = kvm_s390_get_irq_state(vcpu,
2918                                            (__u8 __user *)  irq_state.buf,
2919                                            irq_state.len);
2920                 break;
2921         }
2922         default:
2923                 r = -ENOTTY;
2924         }
2925         return r;
2926 }
2927
2928 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2929 {
2930 #ifdef CONFIG_KVM_S390_UCONTROL
2931         if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2932                  && (kvm_is_ucontrol(vcpu->kvm))) {
2933                 vmf->page = virt_to_page(vcpu->arch.sie_block);
2934                 get_page(vmf->page);
2935                 return 0;
2936         }
2937 #endif
2938         return VM_FAULT_SIGBUS;
2939 }
2940
2941 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2942                             unsigned long npages)
2943 {
2944         return 0;
2945 }
2946
2947 /* Section: memory related */
2948 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2949                                    struct kvm_memory_slot *memslot,
2950                                    const struct kvm_userspace_memory_region *mem,
2951                                    enum kvm_mr_change change)
2952 {
2953         /* A few sanity checks. Memory slots have to start and end at a
2954            segment boundary (1MB). The memory in userland may be fragmented
2955            into various different vmas. It is okay to mmap() and munmap()
2956            ranges in this slot at any time after this call. */
2957
2958         if (mem->userspace_addr & 0xffffful)
2959                 return -EINVAL;
2960
2961         if (mem->memory_size & 0xffffful)
2962                 return -EINVAL;
2963
2964         if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
2965                 return -EINVAL;
2966
2967         return 0;
2968 }
2969
2970 void kvm_arch_commit_memory_region(struct kvm *kvm,
2971                                 const struct kvm_userspace_memory_region *mem,
2972                                 const struct kvm_memory_slot *old,
2973                                 const struct kvm_memory_slot *new,
2974                                 enum kvm_mr_change change)
2975 {
2976         int rc;
2977
2978         /* If the basics of the memslot do not change, we do not want
2979          * to update the gmap. Every update causes several unnecessary
2980          * segment translation exceptions. This is usually handled just
2981          * fine by the normal fault handler + gmap, but it will also
2982          * cause faults on the prefix page of running guest CPUs.
2983          */
2984         if (old->userspace_addr == mem->userspace_addr &&
2985             old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2986             old->npages * PAGE_SIZE == mem->memory_size)
2987                 return;
2988
2989         rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2990                 mem->guest_phys_addr, mem->memory_size);
2991         if (rc)
2992                 pr_warn("failed to commit memory region\n");
2993         return;
2994 }
2995
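/*
 * Build the mask of facility bits in word i that may be offered to guests,
 * based on the corresponding two-bit field of sclp.hmfai.
 */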
2996 static inline unsigned long nonhyp_mask(int i)
2997 {
2998         unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
2999
3000         return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3001 }
3002
3003 void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3004 {
3005         vcpu->valid_wakeup = false;
3006 }
3007
3008 static int __init kvm_s390_init(void)
3009 {
3010         int i;
3011
3012         if (!sclp.has_sief2) {
3013                 pr_info("SIE not available\n");
3014                 return -ENODEV;
3015         }
3016
3017         for (i = 0; i < 16; i++)
3018                 kvm_s390_fac_list_mask[i] |=
3019                         S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3020
3021         return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
3022 }
3023
3024 static void __exit kvm_s390_exit(void)
3025 {
3026         kvm_exit();
3027 }
3028
3029 module_init(kvm_s390_init);
3030 module_exit(kvm_s390_exit);
3031
3032 /*
3033  * Enable autoloading of the kvm module.
3034  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3035  * since x86 takes a different approach.
3036  */
3037 #include <linux/miscdevice.h>
3038 MODULE_ALIAS_MISCDEV(KVM_MINOR);
3039 MODULE_ALIAS("devname:kvm");