/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
        int c, scn;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
                return 0;

        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (src_id)
                *src_id = scn;

        return c;
}

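/*
 * Record an external call from src_id in the target VCPU's SCA entry.
 * The cmpxchg only succeeds if no other external call is currently
 * marked pending there (old_val.c is forced to 0), so concurrent
 * senders race safely under the shared read lock.
 */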
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
        int expect, rc;

        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (rc != expect) {
                /* another external call is pending */
                return -EBUSY;
        }
        atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
        return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc, expect;

        atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);
        WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        return psw_extint_disabled(vcpu) &&
               psw_ioint_disabled(vcpu) &&
               psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
                return 0;
        return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        return !psw_extint_disabled(vcpu) &&
               (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
        if (!cpu_timer_interrupts_enabled(vcpu))
                return 0;
        return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static inline int is_ioirq(unsigned long irq_type)
{
        return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
                (irq_type <= IRQ_PEND_IO_ISC_7));
}

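/*
 * Convert an interruption subclass (0-7) to its mask bit in the CR6 /
 * I/O-interruption layout used below, e.g. isc_to_isc_bits(0) ==
 * 0x80000000 and isc_to_isc_bits(7) == 0x01000000.
 */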
static uint64_t isc_to_isc_bits(int isc)
{
        return (0x80 >> isc) << 24;
}

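/*
 * Extract the interruption subclass from an I/O interruption word:
 * bits 2-4 (mask 0x38000000) hold the ISC, so for example
 * int_word_to_isc(0x38000000) == 7.
 */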
static inline u8 int_word_to_isc(u32 int_word)
{
        return (int_word & 0x38000000) >> 27;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.float_int.pending_irqs |
               vcpu->arch.local_int.pending_irqs;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
                                   unsigned long active_mask)
{
        int i;

        for (i = 0; i <= MAX_ISC; i++)
                if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
                        active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

        return active_mask;
}

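/*
 * Reduce the pending interrupts to those that are deliverable right now,
 * honoring the PSW interrupt masks and the subclass masks in the control
 * registers (in CR0: 0x4000 emergency signal, 0x2000 external call,
 * 0x800 clock comparator, 0x400 cpu timer, 0x200 service signal).
 */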
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask;

        active_mask = pending_irqs(vcpu);
        if (!active_mask)
                return 0;

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (psw_ioint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_IO_MASK;
        else
                active_mask = disable_iscs(vcpu, active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;
        if (!(vcpu->arch.sie_block->gcr[14] &
              vcpu->kvm->arch.float_int.mchk.cr14))
                __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

        /*
         * STOP irqs will never be actively delivered. They are triggered via
         * intercept requests and cleared when the stop intercept is performed.
         */
        __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

        return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                    &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

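/*
 * For interrupts that are pending but not currently deliverable (see
 * deliverable_irqs()): if the class is masked in the PSW, request an
 * intercept via the matching CPUSTAT_* flag so we regain control once
 * the guest becomes enabled; otherwise intercept changes to the
 * relevant control register via lctl.
 */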
static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
                return;
        else if (psw_ioint_disabled(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_IO_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
        if (kvm_s390_is_stop_irq_pending(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_io(vcpu);
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
        set_intercept_indicators_stop(vcpu);
}

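/*
 * The __deliver_* functions below implement the architected interrupt
 * action: store the interruption parameters and the current PSW (the
 * "old" PSW) into the guest lowcore, then load the matching "new" PSW
 * from the lowcore into the SIE block, so the guest resumes in its
 * interrupt handler.
 */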
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                           (u16 __user *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
                   ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk = {};
        unsigned long adtl_status_addr;
        int deliver = 0;
        int rc = 0;

        spin_lock(&fi->lock);
        spin_lock(&li->lock);
        if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
            test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
                /*
                 * If there was an exigent machine check pending, then any
                 * repressible machine checks that might have been pending
                 * are indicated along with it, so always clear bits for
                 * repressible and exigent interrupts
                 */
                mchk = li->irq.mchk;
                clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
                clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
                memset(&li->irq.mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        /*
         * We indicate floating repressible conditions along with
         * other pending conditions. Channel Report Pending and Channel
         * Subsystem damage are the only two and are indicated by
         * bits in mcic and masked in cr14.
         */
        if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
                mchk.mcic |= fi->mchk.mcic;
                mchk.cr14 |= fi->mchk.cr14;
                memset(&fi->mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        spin_unlock(&li->lock);
        spin_unlock(&fi->lock);

        if (deliver) {
                VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
                           mchk.mcic);
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_MCHK,
                                                 mchk.cr14, mchk.mcic);

                rc  = kvm_s390_vcpu_store_status(vcpu,
                                                 KVM_S390_STORE_STATUS_PREFIXED);
                rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
                                    &adtl_status_addr,
                                    sizeof(unsigned long));
                rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
                                                      adtl_status_addr);
                rc |= put_guest_lc(vcpu, mchk.mcic,
                                   (u64 __user *) __LC_MCCK_CODE);
                rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
                                   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
                rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
                                     &mchk.fixed_logout,
                                     sizeof(mchk.fixed_logout));
                rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                                     &vcpu->arch.sie_block->gpsw,
                                     sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                                    &vcpu->arch.sie_block->gpsw,
                                    sizeof(psw_t));
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        rc  = write_guest_lc(vcpu,
                             offsetof(struct lowcore, restart_old_psw),
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0, nullifying = false;
        u16 ilen;

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
        VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
                   pgm_info.code, ilen);
        vcpu->stat.deliver_program_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
                nullifying = true;
                /* fall through */
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_VECTOR_PROCESSING:
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_STACK_FULL:
        case PGM_STACK_EMPTY:
        case PGM_STACK_SPECIFICATION:
        case PGM_STACK_TYPE:
        case PGM_STACK_OPERATION:
        case PGM_TRACE_TABEL:
        case PGM_CRYPTO_OPERATION:
                nullifying = true;
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
                kvm_s390_rewind_psw(vcpu, ilen);

        /* bit 1+2 of the target are the ilc, so we can directly use ilen */
        rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
                                 (u64 *) __LC_LAST_BREAK);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_ext_info ext;
        int rc = 0;

        spin_lock(&fi->lock);
        if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
                spin_unlock(&fi->lock);
                return 0;
        }
        ext = fi->srv_signal;
        memset(&fi->srv_signal, 0, sizeof(ext));
        clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
                   ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
                                         ext.ext_params, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);

        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_PFAULT] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
                clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_INT_PFAULT_DONE, 0,
                                                 inti->ext.ext_params2);
                VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
                           inti->ext.ext_params2);

                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, PFAULT_DONE,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                VCPU_EVENT(vcpu, 4,
                           "deliver: virtio parm: 0x%x,parm64: 0x%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                inti->ext.ext_params,
                                inti->ext.ext_params2);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
                clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                                (u32 *)__LC_EXT_PARAMS);
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     unsigned long irq_type)
{
        struct list_head *isc_list;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti = NULL;
        int rc = 0;

        fi = &vcpu->kvm->arch.float_int;

        spin_lock(&fi->lock);
        isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
        inti = list_first_entry_or_null(isc_list,
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
                vcpu->stat.deliver_io_int++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                ((__u32)inti->io.subchannel_id << 16) |
                                inti->io.subchannel_nr,
                                ((__u64)inti->io.io_int_parm << 32) |
                                inti->io.io_int_word);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_IO] -= 1;
        }
        if (list_empty(isc_list))
                clear_bit(irq_type, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
                                (u16 *)__LC_SUBCHANNEL_ID);
                rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
                                (u16 *)__LC_SUBCHANNEL_NR);
                rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
                                (u32 *)__LC_IO_INT_PARM);
                rc |= put_guest_lc(vcpu, inti->io.io_int_word,
                                (u32 *)__LC_IO_INT_WORD);
                rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                kfree(inti);
        }

        return rc ? -EFAULT : 0;
}

typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
        [IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
        [IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
        [IRQ_PEND_PROG]           = __deliver_prog,
        [IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
        [IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
        [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
        [IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
        [IRQ_PEND_RESTART]        = __deliver_restart,
        [IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
        [IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
        [IRQ_PEND_EXT_SERVICE]    = __deliver_service,
        [IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
        [IRQ_PEND_VIRTIO]         = __deliver_virtio,
};
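
/*
 * The I/O interrupt types (IRQ_PEND_IO_ISC_0..7) intentionally have no
 * entry here; kvm_s390_deliver_pending_interrupts() dispatches them to
 * __deliver_io() instead.
 */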

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        if (!sclp.has_sigpif)
                return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

        return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
        if (deliverable_irqs(vcpu))
                return 1;

        if (kvm_cpu_has_pending_timer(vcpu))
                return 1;

        /* external call pending and deliverable */
        if (kvm_s390_ext_call_pending(vcpu) &&
            !psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                return 1;

        if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
                return 1;
        return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

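/*
 * Compute how long (in ns) a vcpu may sleep before the next enabled
 * timer source fires: the smaller of the time until the clock comparator
 * and the remaining cpu timer. Returns 0 if an enabled timer condition
 * has already expired.
 */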
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
        u64 now, cputm, sltime = 0;

        if (ckc_interrupts_enabled(vcpu)) {
                now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
                sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
                /* already expired or overflow? */
                if (!sltime || vcpu->arch.sie_block->ckc <= now)
                        return 0;
                if (cpu_timer_interrupts_enabled(vcpu)) {
                        cputm = kvm_s390_get_cpu_timer(vcpu);
                        /* already expired? */
                        if (cputm >> 63)
                                return 0;
                        return min(sltime, tod_to_ns(cputm));
                }
        } else if (cpu_timer_interrupts_enabled(vcpu)) {
                sltime = kvm_s390_get_cpu_timer(vcpu);
                /* already expired? */
                if (sltime >> 63)
                        return 0;
        }
        return sltime;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (!ckc_interrupts_enabled(vcpu) &&
            !cpu_timer_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }

        sltime = __calculate_sltime(vcpu);
        if (!sltime)
                return 0;

        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        if (swait_active(&vcpu->wq)) {
                /*
                 * The vcpu gave up the cpu voluntarily, mark it as a good
                 * yield-candidate.
                 */
                vcpu->preempted = true;
                swake_up(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;
        u64 sltime;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        sltime = __calculate_sltime(vcpu);

        /*
         * If the monotonic clock runs faster than the tod clock we might be
         * woken up too early and have to go back to sleep to avoid deadlocks.
         */
        if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
                return HRTIMER_RESTART;
        kvm_s390_vcpu_wakeup(vcpu);
        return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        li->pending_irqs = 0;
        bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        memset(&li->irq, 0, sizeof(li->irq));
        spin_unlock(&li->lock);

        sca_clear_ext_call(vcpu);
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        deliver_irq_t func;
        int rc = 0;
        unsigned long irq_type;
        unsigned long irqs;

        __reset_intercept_indicators(vcpu);

        /* pending ckc conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        if (ckc_irq_pending(vcpu))
                set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

        /* pending cpu timer conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        if (cpu_timer_irq_pending(vcpu))
                set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

        while ((irqs = deliverable_irqs(vcpu)) && !rc) {
                /* bits are in the order of interrupt priority */
                irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
                if (is_ioirq(irq_type)) {
                        rc = __deliver_io(vcpu, irq_type);
                } else {
                        func = deliver_irq_funcs[irq_type];
                        if (!func) {
                                WARN_ON_ONCE(func == NULL);
                                clear_bit(irq_type, &li->pending_irqs);
                                continue;
                        }
                        rc = func(vcpu);
                }
        }

        set_intercept_indicators(vcpu);

        return rc;
}

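/*
 * Program interrupt injection accumulates state rather than replacing
 * it: a PER event may be merged into a concurrently pending non-PER
 * program interrupt and vice versa, so only the fields belonging to the
 * newly injected condition are updated.
 */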
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                   irq->u.pgm.code, 0);

        if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
                /* auto detection if no valid ILC was given */
                irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
                irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
                irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
        }

        if (irq->u.pgm.code == PGM_PER) {
                li->irq.pgm.code |= PGM_PER;
                li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify PER related information */
                li->irq.pgm.per_address = irq->u.pgm.per_address;
                li->irq.pgm.per_code = irq->u.pgm.per_code;
                li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
                li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
        } else if (!(irq->u.pgm.code & PGM_PER)) {
                li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
                                   irq->u.pgm.code;
                li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify non-PER information */
                li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
                li->irq.pgm.mon_code = irq->u.pgm.mon_code;
                li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
                li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
                li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
                li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
        } else {
                li->irq.pgm = irq->u.pgm;
        }
        set_bit(IRQ_PEND_PROG, &li->pending_irqs);
        return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
                   irq->u.ext.ext_params2);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
                                   irq->u.ext.ext_params,
                                   irq->u.ext.ext_params2);

        li->irq.ext = irq->u.ext;
        set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

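/*
 * With the SIGP interpretation facility (sclp.has_sigpif), an external
 * call is recorded directly in the SCA so that SIE can present it to
 * the guest itself; without it, the external call is queued as an
 * ordinary local interrupt.
 */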
static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
        uint16_t src_id = irq->u.extcall.code;

        VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
                   src_id);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
                                   src_id, 0);

        /* sending vcpu invalid */
        if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
                return -EINVAL;

        if (sclp.has_sigpif)
                return sca_inject_ext_call(vcpu, src_id);

        if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
                return -EBUSY;
        *extcall = irq->u.extcall;
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

        VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
                   irq->u.prefix.address);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
                                   irq->u.prefix.address, 0);

        if (!is_vcpu_stopped(vcpu))
                return -EBUSY;

        *prefix = irq->u.prefix;
        set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_stop_info *stop = &li->irq.stop;
        int rc = 0;

        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

        if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
                return -EINVAL;

        if (is_vcpu_stopped(vcpu)) {
                if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
                        rc = kvm_s390_store_status_unloaded(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                return rc;
        }

        if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
                return -EBUSY;
        stop->flags = irq->u.stop.flags;
        __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
        return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
                   irq->u.emerg.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                   irq->u.emerg.code, 0);

        /* sending vcpu invalid */
        if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
                return -EINVAL;

        set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
        set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}
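
/*
 * Illustrative only, not part of this file: userspace reaches these
 * local injection paths through the KVM_S390_IRQ vcpu ioctl. A minimal
 * sketch, assuming vcpu_fd is an open vcpu file descriptor and cpu
 * address 1 names a valid sender:
 *
 *        struct kvm_s390_irq irq = {
 *                .type = KVM_S390_INT_EMERGENCY,
 *                .u.emerg.code = 1,
 *        };
 *        ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
 */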

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

        VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
                   irq->u.mchk.mcic);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
                                   irq->u.mchk.mcic);

        /*
         * Because repressible machine checks can be indicated along with
         * exigent machine checks (PoP, Chapter 11, Interruption action)
         * we need to combine cr14, mcic and external damage code.
         * Failing storage address and the logout area should not be or'ed
         * together, we just indicate the last occurrence of the corresponding
         * machine check
         */
        mchk->cr14 |= irq->u.mchk.cr14;
        mchk->mcic |= irq->u.mchk.mcic;
        mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
        mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
        memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
               sizeof(mchk->fixed_logout));
        if (mchk->mcic & MCHK_EX_MASK)
                set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        else if (mchk->mcic & MCHK_REP_MASK)
                set_bit(IRQ_PEND_MCHK_REP,  &li->pending_irqs);
        return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                   0, 0);

        set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                   0, 0);

        set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
                                                  int isc, u32 schid)
{
        struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
        struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
        struct kvm_s390_interrupt_info *iter;
        u16 id = (schid & 0xffff0000U) >> 16;
        u16 nr = schid & 0x0000ffffU;

        spin_lock(&fi->lock);
        list_for_each_entry(iter, isc_list, list) {
                if (schid && (id != iter->io.subchannel_id ||
                              nr != iter->io.subchannel_nr))
                        continue;
                /* found an appropriate entry */
                list_del_init(&iter->list);
                fi->counters[FIRQ_CNTR_IO] -= 1;
                if (list_empty(isc_list))
                        clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
                spin_unlock(&fi->lock);
                return iter;
        }
        spin_unlock(&fi->lock);
        return NULL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 isc_mask, u32 schid)
{
        struct kvm_s390_interrupt_info *inti = NULL;
        int isc;

        for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
                if (isc_mask & isc_to_isc_bits(isc))
                        inti = get_io_int(kvm, isc, schid);
        }
        return inti;
}

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
                             struct kvm_s390_interrupt_info *inti)
{
        struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

        spin_lock(&fi->lock);
        fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
        /*
         * Early versions of the QEMU s390 bios will inject several
         * service interrupts one after another without handling a
         * condition code indicating busy.
         * We will silently ignore those superfluous sccb values.
         * A future version of QEMU will take care of serialization
         * of servc requests.
         */
1339         if (fi->srv_signal.ext_params & SCCB_MASK)
1340                 goto out;
1341         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1342         set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1343 out:
1344         spin_unlock(&fi->lock);
1345         kfree(inti);
1346         return 0;
1347 }
1348
1349 static int __inject_virtio(struct kvm *kvm,
1350                             struct kvm_s390_interrupt_info *inti)
1351 {
1352         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1353
1354         spin_lock(&fi->lock);
1355         if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1356                 spin_unlock(&fi->lock);
1357                 return -EBUSY;
1358         }
1359         fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1360         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1361         set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1362         spin_unlock(&fi->lock);
1363         return 0;
1364 }
1365
1366 static int __inject_pfault_done(struct kvm *kvm,
1367                                  struct kvm_s390_interrupt_info *inti)
1368 {
1369         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1370
1371         spin_lock(&fi->lock);
1372         if (fi->counters[FIRQ_CNTR_PFAULT] >=
1373                 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1374                 spin_unlock(&fi->lock);
1375                 return -EBUSY;
1376         }
1377         fi->counters[FIRQ_CNTR_PFAULT] += 1;
1378         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1379         set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1380         spin_unlock(&fi->lock);
1381         return 0;
1382 }
1383
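/*
 * Combine a new floating machine check with any machine check that is
 * already pending: the interruption codes (mcic) are OR-ed together,
 * and from CR14 only the subclass-mask bit for pending channel reports
 * (CR_PENDING_SUBCLASS) is carried over.
 */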
1384 #define CR_PENDING_SUBCLASS 28
1385 static int __inject_float_mchk(struct kvm *kvm,
1386                                 struct kvm_s390_interrupt_info *inti)
1387 {
1388         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1389
1390         spin_lock(&fi->lock);
1391         fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1392         fi->mchk.mcic |= inti->mchk.mcic;
1393         set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1394         spin_unlock(&fi->lock);
1395         kfree(inti);
1396         return 0;
1397 }
1398
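/*
 * Queue a floating I/O interrupt on the list of its interruption
 * subclass, which is derived from the interruption word. Fails with
 * -EBUSY once the global limit of queued I/O interrupts is reached.
 */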
1399 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1400 {
1401         struct kvm_s390_float_interrupt *fi;
1402         struct list_head *list;
1403         int isc;
1404
1405         fi = &kvm->arch.float_int;
1406         spin_lock(&fi->lock);
1407         if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1408                 spin_unlock(&fi->lock);
1409                 return -EBUSY;
1410         }
1411         fi->counters[FIRQ_CNTR_IO] += 1;
1412
1413         isc = int_word_to_isc(inti->io.io_int_word);
1414         list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1415         list_add_tail(&inti->list, list);
1416         set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
1417         spin_unlock(&fi->lock);
1418         return 0;
1419 }
1420
1421 /*
1422  * Find a destination VCPU for a floating irq and kick it.
1423  */
1424 static void __floating_irq_kick(struct kvm *kvm, u64 type)
1425 {
1426         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1427         struct kvm_s390_local_interrupt *li;
1428         struct kvm_vcpu *dst_vcpu;
1429         int sigcpu, online_vcpus, nr_tries = 0;
1430
1431         online_vcpus = atomic_read(&kvm->online_vcpus);
1432         if (!online_vcpus)
1433                 return;
1434
1435         /* find idle VCPUs first, then round robin */
1436         sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
1437         if (sigcpu == online_vcpus) {
1438                 do {
1439                         sigcpu = fi->next_rr_cpu;
1440                         fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
1441                         /* avoid endless loops if all vcpus are stopped */
1442                         if (nr_tries++ >= online_vcpus)
1443                                 return;
1444                 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1445         }
1446         dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1447
1448         /* make the VCPU drop out of the SIE, or wake it up if sleeping */
1449         li = &dst_vcpu->arch.local_int;
1450         spin_lock(&li->lock);
1451         switch (type) {
1452         case KVM_S390_MCHK:
1453                 atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
1454                 break;
1455         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1456                 atomic_or(CPUSTAT_IO_INT, li->cpuflags);
1457                 break;
1458         default:
1459                 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
1460                 break;
1461         }
1462         spin_unlock(&li->lock);
1463         kvm_s390_vcpu_wakeup(dst_vcpu);
1464 }
1465
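/*
 * Dispatch a floating interrupt to the matching inject helper and, on
 * success, kick a suitable VCPU. The type is read once up front because
 * some helpers (service, machine check) free @inti before we get here.
 */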
1466 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1467 {
1468         u64 type = READ_ONCE(inti->type);
1469         int rc;
1470
1471         switch (type) {
1472         case KVM_S390_MCHK:
1473                 rc = __inject_float_mchk(kvm, inti);
1474                 break;
1475         case KVM_S390_INT_VIRTIO:
1476                 rc = __inject_virtio(kvm, inti);
1477                 break;
1478         case KVM_S390_INT_SERVICE:
1479                 rc = __inject_service(kvm, inti);
1480                 break;
1481         case KVM_S390_INT_PFAULT_DONE:
1482                 rc = __inject_pfault_done(kvm, inti);
1483                 break;
1484         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1485                 rc = __inject_io(kvm, inti);
1486                 break;
1487         default:
1488                 rc = -EINVAL;
1489         }
1490         if (rc)
1491                 return rc;
1492
1493         __floating_irq_kick(kvm, type);
1494         return 0;
1495 }
1496
1497 int kvm_s390_inject_vm(struct kvm *kvm,
1498                        struct kvm_s390_interrupt *s390int)
1499 {
1500         struct kvm_s390_interrupt_info *inti;
1501         int rc;
1502
1503         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1504         if (!inti)
1505                 return -ENOMEM;
1506
1507         inti->type = s390int->type;
1508         switch (inti->type) {
1509         case KVM_S390_INT_VIRTIO:
1510                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1511                          s390int->parm, s390int->parm64);
1512                 inti->ext.ext_params = s390int->parm;
1513                 inti->ext.ext_params2 = s390int->parm64;
1514                 break;
1515         case KVM_S390_INT_SERVICE:
1516                 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
1517                 inti->ext.ext_params = s390int->parm;
1518                 break;
1519         case KVM_S390_INT_PFAULT_DONE:
1520                 inti->ext.ext_params2 = s390int->parm64;
1521                 break;
1522         case KVM_S390_MCHK:
1523                 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
1524                          s390int->parm64);
1525                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1526                 inti->mchk.mcic = s390int->parm64;
1527                 break;
1528         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1529                 if (inti->type & KVM_S390_INT_IO_AI_MASK)
1530                         VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
1531                 else
1532                         VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
1533                                  s390int->type & IOINT_CSSID_MASK,
1534                                  s390int->type & IOINT_SSID_MASK,
1535                                  s390int->type & IOINT_SCHID_MASK);
1536                 inti->io.subchannel_id = s390int->parm >> 16;
1537                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1538                 inti->io.io_int_parm = s390int->parm64 >> 32;
1539                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1540                 break;
1541         default:
1542                 kfree(inti);
1543                 return -EINVAL;
1544         }
1545         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1546                                  2);
1547
1548         rc = __inject_vm(kvm, inti);
1549         if (rc)
1550                 kfree(inti);
1551         return rc;
1552 }
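
/*
 * A minimal usage sketch (illustration only, modelled on the adapter
 * interrupt path in set_adapter_int() below): inject a floating
 * adapter I/O interrupt on ISC 3:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_IO(1, 0, 0, 0),
 *		.parm64 = (3UL << 27) | 0x80000000,
 *	};
 *
 *	rc = kvm_s390_inject_vm(kvm, &s390int);
 */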
1553
1554 int kvm_s390_reinject_io_int(struct kvm *kvm,
1555                               struct kvm_s390_interrupt_info *inti)
1556 {
1557         return __inject_vm(kvm, inti);
1558 }
1559
1560 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1561                        struct kvm_s390_irq *irq)
1562 {
1563         irq->type = s390int->type;
1564         switch (irq->type) {
1565         case KVM_S390_PROGRAM_INT:
1566                 if (s390int->parm & 0xffff0000)
1567                         return -EINVAL;
1568                 irq->u.pgm.code = s390int->parm;
1569                 break;
1570         case KVM_S390_SIGP_SET_PREFIX:
1571                 irq->u.prefix.address = s390int->parm;
1572                 break;
1573         case KVM_S390_SIGP_STOP:
1574                 irq->u.stop.flags = s390int->parm;
1575                 break;
1576         case KVM_S390_INT_EXTERNAL_CALL:
1577                 if (s390int->parm & 0xffff0000)
1578                         return -EINVAL;
1579                 irq->u.extcall.code = s390int->parm;
1580                 break;
1581         case KVM_S390_INT_EMERGENCY:
1582                 if (s390int->parm & 0xffff0000)
1583                         return -EINVAL;
1584                 irq->u.emerg.code = s390int->parm;
1585                 break;
1586         case KVM_S390_MCHK:
1587                 irq->u.mchk.mcic = s390int->parm64;
1588                 break;
1589         }
1590         return 0;
1591 }
1592
1593 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1594 {
1595         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1596
1597         return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1598 }
1599
1600 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1601 {
1602         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1603
1604         spin_lock(&li->lock);
1605         li->irq.stop.flags = 0;
1606         clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1607         spin_unlock(&li->lock);
1608 }
1609
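/*
 * Inject a local interrupt into @vcpu. Must be called with the local
 * interrupt lock held; floating-only types (virtio, service, I/O) are
 * rejected with -EINVAL.
 */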
1610 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1611 {
1612         int rc;
1613
1614         switch (irq->type) {
1615         case KVM_S390_PROGRAM_INT:
1616                 rc = __inject_prog(vcpu, irq);
1617                 break;
1618         case KVM_S390_SIGP_SET_PREFIX:
1619                 rc = __inject_set_prefix(vcpu, irq);
1620                 break;
1621         case KVM_S390_SIGP_STOP:
1622                 rc = __inject_sigp_stop(vcpu, irq);
1623                 break;
1624         case KVM_S390_RESTART:
1625                 rc = __inject_sigp_restart(vcpu, irq);
1626                 break;
1627         case KVM_S390_INT_CLOCK_COMP:
1628                 rc = __inject_ckc(vcpu);
1629                 break;
1630         case KVM_S390_INT_CPU_TIMER:
1631                 rc = __inject_cpu_timer(vcpu);
1632                 break;
1633         case KVM_S390_INT_EXTERNAL_CALL:
1634                 rc = __inject_extcall(vcpu, irq);
1635                 break;
1636         case KVM_S390_INT_EMERGENCY:
1637                 rc = __inject_sigp_emergency(vcpu, irq);
1638                 break;
1639         case KVM_S390_MCHK:
1640                 rc = __inject_mchk(vcpu, irq);
1641                 break;
1642         case KVM_S390_INT_PFAULT_INIT:
1643                 rc = __inject_pfault_init(vcpu, irq);
1644                 break;
1645         case KVM_S390_INT_VIRTIO:
1646         case KVM_S390_INT_SERVICE:
1647         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1648         default:
1649                 rc = -EINVAL;
1650         }
1651
1652         return rc;
1653 }
1654
1655 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1656 {
1657         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1658         int rc;
1659
1660         spin_lock(&li->lock);
1661         rc = do_inject_vcpu(vcpu, irq);
1662         spin_unlock(&li->lock);
1663         if (!rc)
1664                 kvm_s390_vcpu_wakeup(vcpu);
1665         return rc;
1666 }
1667
1668 static inline void clear_irq_list(struct list_head *_list)
1669 {
1670         struct kvm_s390_interrupt_info *inti, *n;
1671
1672         list_for_each_entry_safe(inti, n, _list, list) {
1673                 list_del(&inti->list);
1674                 kfree(inti);
1675         }
1676 }
1677
1678 static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
1679                        struct kvm_s390_irq *irq)
1680 {
1681         irq->type = inti->type;
1682         switch (inti->type) {
1683         case KVM_S390_INT_PFAULT_INIT:
1684         case KVM_S390_INT_PFAULT_DONE:
1685         case KVM_S390_INT_VIRTIO:
1686                 irq->u.ext = inti->ext;
1687                 break;
1688         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1689                 irq->u.io = inti->io;
1690                 break;
1691         }
1692 }
1693
1694 void kvm_s390_clear_float_irqs(struct kvm *kvm)
1695 {
1696         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1697         int i;
1698
1699         spin_lock(&fi->lock);
1700         fi->pending_irqs = 0;
1701         memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
1702         memset(&fi->mchk, 0, sizeof(fi->mchk));
1703         for (i = 0; i < FIRQ_LIST_COUNT; i++)
1704                 clear_irq_list(&fi->lists[i]);
1705         for (i = 0; i < FIRQ_MAX_COUNT; i++)
1706                 fi->counters[i] = 0;
1707         spin_unlock(&fi->lock);
1708 }
1709
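/*
 * Copy the state of all currently pending floating interrupts into a
 * userspace buffer of @len bytes. Returns the number of interrupts
 * stored, -ENOMEM if the buffer is too small (userspace may retry with
 * a bigger one), or another negative error code on failure.
 */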
1710 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
1711 {
1712         struct kvm_s390_interrupt_info *inti;
1713         struct kvm_s390_float_interrupt *fi;
1714         struct kvm_s390_irq *buf;
1715         struct kvm_s390_irq *irq;
1716         int max_irqs;
1717         int ret = 0;
1718         int n = 0;
1719         int i;
1720
1721         if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
1722                 return -EINVAL;
1723
1724         /*
1725          * We are already using -ENOMEM to signal userspace that it
1726          * may retry with a bigger buffer, so we need to return a
1727          * different error code for an allocation failure here.
1728          */
1729         buf = vzalloc(len);
1730         if (!buf)
1731                 return -ENOBUFS;
1732
1733         max_irqs = len / sizeof(struct kvm_s390_irq);
1734
1735         fi = &kvm->arch.float_int;
1736         spin_lock(&fi->lock);
1737         for (i = 0; i < FIRQ_LIST_COUNT; i++) {
1738                 list_for_each_entry(inti, &fi->lists[i], list) {
1739                         if (n == max_irqs) {
1740                                 /* signal userspace to try again */
1741                                 ret = -ENOMEM;
1742                                 goto out;
1743                         }
1744                         inti_to_irq(inti, &buf[n]);
1745                         n++;
1746                 }
1747         }
1748         if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
1749                 if (n == max_irqs) {
1750                         /* signal userspace to try again */
1751                         ret = -ENOMEM;
1752                         goto out;
1753                 }
1754                 irq = (struct kvm_s390_irq *) &buf[n];
1755                 irq->type = KVM_S390_INT_SERVICE;
1756                 irq->u.ext = fi->srv_signal;
1757                 n++;
1758         }
1759         if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
1760                 if (n == max_irqs) {
1761                         /* signal userspace to try again */
1762                         ret = -ENOMEM;
1763                         goto out;
1764                 }
1765                 irq = (struct kvm_s390_irq *) &buf[n];
1766                 irq->type = KVM_S390_MCHK;
1767                 irq->u.mchk = fi->mchk;
1768                 n++;
1769         }
1770
1771 out:
1772         spin_unlock(&fi->lock);
1773         if (!ret && n > 0) {
1774                 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
1775                         ret = -EFAULT;
1776         }
1777         vfree(buf);
1778
1779         return ret < 0 ? ret : n;
1780 }
1781
1782 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1783 {
1784         int r;
1785
1786         switch (attr->group) {
1787         case KVM_DEV_FLIC_GET_ALL_IRQS:
1788                 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
1789                                           attr->attr);
1790                 break;
1791         default:
1792                 r = -EINVAL;
1793         }
1794
1795         return r;
1796 }
1797
1798 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1799                                      u64 addr)
1800 {
1801         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1802         void *target = NULL;
1803         void __user *source;
1804         u64 size;
1805
1806         if (get_user(inti->type, (u64 __user *)addr))
1807                 return -EFAULT;
1808
1809         switch (inti->type) {
1810         case KVM_S390_INT_PFAULT_INIT:
1811         case KVM_S390_INT_PFAULT_DONE:
1812         case KVM_S390_INT_VIRTIO:
1813         case KVM_S390_INT_SERVICE:
1814                 target = (void *) &inti->ext;
1815                 source = &uptr->u.ext;
1816                 size = sizeof(inti->ext);
1817                 break;
1818         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1819                 target = (void *) &inti->io;
1820                 source = &uptr->u.io;
1821                 size = sizeof(inti->io);
1822                 break;
1823         case KVM_S390_MCHK:
1824                 target = (void *) &inti->mchk;
1825                 source = &uptr->u.mchk;
1826                 size = sizeof(inti->mchk);
1827                 break;
1828         default:
1829                 return -EINVAL;
1830         }
1831
1832         if (copy_from_user(target, source, size))
1833                 return -EFAULT;
1834
1835         return 0;
1836 }
1837
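/*
 * Inject the array of kvm_s390_irq structures at attr->addr, with
 * attr->attr holding the byte length of the array. Stops at the first
 * interrupt that fails to inject; interrupts injected so far stay queued.
 */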
1838 static int enqueue_floating_irq(struct kvm_device *dev,
1839                                 struct kvm_device_attr *attr)
1840 {
1841         struct kvm_s390_interrupt_info *inti = NULL;
1842         int r = 0;
1843         int len = attr->attr;
1844
1845         if (len % sizeof(struct kvm_s390_irq) != 0 ||
1846             len > KVM_S390_FLIC_MAX_BUFFER)
1847                 return -EINVAL;
1849
1850         while (len >= sizeof(struct kvm_s390_irq)) {
1851                 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1852                 if (!inti)
1853                         return -ENOMEM;
1854
1855                 r = copy_irq_from_user(inti, attr->addr);
1856                 if (r) {
1857                         kfree(inti);
1858                         return r;
1859                 }
1860                 r = __inject_vm(dev->kvm, inti);
1861                 if (r) {
1862                         kfree(inti);
1863                         return r;
1864                 }
1865                 len -= sizeof(struct kvm_s390_irq);
1866                 attr->addr += sizeof(struct kvm_s390_irq);
1867         }
1868
1869         return r;
1870 }
1871
1872 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1873 {
1874         if (id >= MAX_S390_IO_ADAPTERS)
1875                 return NULL;
1876         return kvm->arch.adapters[id];
1877 }
1878
1879 static int register_io_adapter(struct kvm_device *dev,
1880                                struct kvm_device_attr *attr)
1881 {
1882         struct s390_io_adapter *adapter;
1883         struct kvm_s390_io_adapter adapter_info;
1884
1885         if (copy_from_user(&adapter_info,
1886                            (void __user *)attr->addr, sizeof(adapter_info)))
1887                 return -EFAULT;
1888
1889         if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1890             (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1891                 return -EINVAL;
1892
1893         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1894         if (!adapter)
1895                 return -ENOMEM;
1896
1897         INIT_LIST_HEAD(&adapter->maps);
1898         init_rwsem(&adapter->maps_lock);
1899         atomic_set(&adapter->nr_maps, 0);
1900         adapter->id = adapter_info.id;
1901         adapter->isc = adapter_info.isc;
1902         adapter->maskable = adapter_info.maskable;
1903         adapter->masked = false;
1904         adapter->swap = adapter_info.swap;
1905         dev->kvm->arch.adapters[adapter->id] = adapter;
1906
1907         return 0;
1908 }
1909
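/*
 * Mask or unmask the adapter @id. Returns the previous mask state
 * (0 or 1) on success, or -EINVAL if the adapter does not exist or is
 * not maskable.
 */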
1910 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1911 {
1912         int ret;
1913         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1914
1915         if (!adapter || !adapter->maskable)
1916                 return -EINVAL;
1917         ret = adapter->masked;
1918         adapter->masked = masked;
1919         return ret;
1920 }
1921
1922 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
1923 {
1924         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1925         struct s390_map_info *map;
1926         int ret;
1927
1928         if (!adapter || !addr)
1929                 return -EINVAL;
1930
1931         map = kzalloc(sizeof(*map), GFP_KERNEL);
1932         if (!map) {
1933                 ret = -ENOMEM;
1934                 goto out;
1935         }
1936         INIT_LIST_HEAD(&map->list);
1937         map->guest_addr = addr;
1938         map->addr = gmap_translate(kvm->arch.gmap, addr);
1939         if (map->addr == -EFAULT) {
1940                 ret = -EFAULT;
1941                 goto out;
1942         }
1943         ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
1944         if (ret < 0)
1945                 goto out;
1946         BUG_ON(ret != 1);
1947         down_write(&adapter->maps_lock);
1948         if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
1949                 list_add_tail(&map->list, &adapter->maps);
1950                 ret = 0;
1951         } else {
1952                 put_page(map->page);
1953                 ret = -EINVAL;
1954         }
1955         up_write(&adapter->maps_lock);
1956 out:
1957         if (ret)
1958                 kfree(map);
1959         return ret;
1960 }
1961
1962 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1963 {
1964         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1965         struct s390_map_info *map, *tmp;
1966         int found = 0;
1967
1968         if (!adapter || !addr)
1969                 return -EINVAL;
1970
1971         down_write(&adapter->maps_lock);
1972         list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1973                 if (map->guest_addr == addr) {
1974                         found = 1;
1975                         atomic_dec(&adapter->nr_maps);
1976                         list_del(&map->list);
1977                         put_page(map->page);
1978                         kfree(map);
1979                         break;
1980                 }
1981         }
1982         up_write(&adapter->maps_lock);
1983
1984         return found ? 0 : -EINVAL;
1985 }
1986
1987 void kvm_s390_destroy_adapters(struct kvm *kvm)
1988 {
1989         int i;
1990         struct s390_map_info *map, *tmp;
1991
1992         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1993                 if (!kvm->arch.adapters[i])
1994                         continue;
1995                 list_for_each_entry_safe(map, tmp,
1996                                          &kvm->arch.adapters[i]->maps, list) {
1997                         list_del(&map->list);
1998                         put_page(map->page);
1999                         kfree(map);
2000                 }
2001                 kfree(kvm->arch.adapters[i]);
2002         }
2003 }
2004
2005 static int modify_io_adapter(struct kvm_device *dev,
2006                              struct kvm_device_attr *attr)
2007 {
2008         struct kvm_s390_io_adapter_req req;
2009         struct s390_io_adapter *adapter;
2010         int ret;
2011
2012         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2013                 return -EFAULT;
2014
2015         adapter = get_io_adapter(dev->kvm, req.id);
2016         if (!adapter)
2017                 return -EINVAL;
2018         switch (req.type) {
2019         case KVM_S390_IO_ADAPTER_MASK:
2020                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2021                 if (ret > 0)
2022                         ret = 0;
2023                 break;
2024         case KVM_S390_IO_ADAPTER_MAP:
2025                 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
2026                 break;
2027         case KVM_S390_IO_ADAPTER_UNMAP:
2028                 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
2029                 break;
2030         default:
2031                 ret = -EINVAL;
2032         }
2033
2034         return ret;
2035 }
2036
2037 static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2039 {
2040         const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2041         u32 schid;
2042
2043         if (attr->flags)
2044                 return -EINVAL;
2045         if (attr->attr != sizeof(schid))
2046                 return -EINVAL;
2047         if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2048                 return -EFAULT;
2049         kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2050         /*
2051          * If userspace is conforming to the architecture, we can have at most
2052          * one pending I/O interrupt per subchannel, so this is effectively a
2053          * clear all.
2054          */
2055         return 0;
2056 }
2057
2058 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2059 {
2060         int r = 0;
2061         unsigned int i;
2062         struct kvm_vcpu *vcpu;
2063
2064         switch (attr->group) {
2065         case KVM_DEV_FLIC_ENQUEUE:
2066                 r = enqueue_floating_irq(dev, attr);
2067                 break;
2068         case KVM_DEV_FLIC_CLEAR_IRQS:
2069                 kvm_s390_clear_float_irqs(dev->kvm);
2070                 break;
2071         case KVM_DEV_FLIC_APF_ENABLE:
2072                 dev->kvm->arch.gmap->pfault_enabled = 1;
2073                 break;
2074         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2075                 dev->kvm->arch.gmap->pfault_enabled = 0;
2076                 /*
2077                  * Make sure no async faults are in transition when
2078                  * clearing the queues, so that we do not need to
2079                  * worry about late-arriving workers.
2080                  */
2081                 synchronize_srcu(&dev->kvm->srcu);
2082                 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2083                         kvm_clear_async_pf_completion_queue(vcpu);
2084                 break;
2085         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2086                 r = register_io_adapter(dev, attr);
2087                 break;
2088         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2089                 r = modify_io_adapter(dev, attr);
2090                 break;
2091         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2092                 r = clear_io_irq(dev->kvm, attr);
2093                 break;
2094         default:
2095                 r = -EINVAL;
2096         }
2097
2098         return r;
2099 }
2100
2101 static int flic_has_attr(struct kvm_device *dev,
2102                              struct kvm_device_attr *attr)
2103 {
2104         switch (attr->group) {
2105         case KVM_DEV_FLIC_GET_ALL_IRQS:
2106         case KVM_DEV_FLIC_ENQUEUE:
2107         case KVM_DEV_FLIC_CLEAR_IRQS:
2108         case KVM_DEV_FLIC_APF_ENABLE:
2109         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2110         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2111         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2112         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2113                 return 0;
2114         }
2115         return -ENXIO;
2116 }
2117
2118 static int flic_create(struct kvm_device *dev, u32 type)
2119 {
2120         if (!dev)
2121                 return -EINVAL;
2122         if (dev->kvm->arch.flic)
2123                 return -EINVAL;
2124         dev->kvm->arch.flic = dev;
2125         return 0;
2126 }
2127
2128 static void flic_destroy(struct kvm_device *dev)
2129 {
2130         dev->kvm->arch.flic = NULL;
2131         kfree(dev);
2132 }
2133
2134 /* s390 floating irq controller (flic) */
2135 struct kvm_device_ops kvm_flic_ops = {
2136         .name = "kvm-flic",
2137         .get_attr = flic_get_attr,
2138         .set_attr = flic_set_attr,
2139         .has_attr = flic_has_attr,
2140         .create = flic_create,
2141         .destroy = flic_destroy,
2142 };
2143
2144 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2145 {
2146         unsigned long bit;
2147
2148         bit = bit_nr + (addr % PAGE_SIZE) * 8;
2149
2150         return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2151 }
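
/*
 * Worked example (illustration only): for an indicator at page offset 1
 * with bit_nr 0, the linear bit number is 1 * 8 + 0 = 8. With @swap set
 * on a 64-bit build this becomes 8 ^ (BITS_PER_LONG - 1) = 55, mapping
 * the guest's MSB-first bit numbering within each word onto the
 * LSB-first numbering used by the kernel's bitops.
 */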
2152
2153 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2154                                           u64 addr)
2155 {
2156         struct s390_map_info *map;
2157
2158         if (!adapter)
2159                 return NULL;
2160
2161         list_for_each_entry(map, &adapter->maps, list) {
2162                 if (map->guest_addr == addr)
2163                         return map;
2164         }
2165         return NULL;
2166 }
2167
2168 static int adapter_indicators_set(struct kvm *kvm,
2169                                   struct s390_io_adapter *adapter,
2170                                   struct kvm_s390_adapter_int *adapter_int)
2171 {
2172         unsigned long bit;
2173         int summary_set, idx;
2174         struct s390_map_info *info;
2175         void *map;
2176
2177         info = get_map_info(adapter, adapter_int->ind_addr);
2178         if (!info)
2179                 return -1;
2180         map = page_address(info->page);
2181         bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
2182         set_bit(bit, map);
2183         idx = srcu_read_lock(&kvm->srcu);
2184         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2185         set_page_dirty_lock(info->page);
2186         info = get_map_info(adapter, adapter_int->summary_addr);
2187         if (!info) {
2188                 srcu_read_unlock(&kvm->srcu, idx);
2189                 return -1;
2190         }
2191         map = page_address(info->page);
2192         bit = get_ind_bit(info->addr, adapter_int->summary_offset,
2193                           adapter->swap);
2194         summary_set = test_and_set_bit(bit, map);
2195         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2196         set_page_dirty_lock(info->page);
2197         srcu_read_unlock(&kvm->srcu, idx);
2198         return summary_set ? 0 : 1;
2199 }
2200
2201 /*
2202  * < 0 - not injected due to error
2203  * = 0 - coalesced, summary indicator already active
2204  * > 0 - injected interrupt
2205  */
2206 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2207                            struct kvm *kvm, int irq_source_id, int level,
2208                            bool line_status)
2209 {
2210         int ret;
2211         struct s390_io_adapter *adapter;
2212
2213         /* We're only interested in the 0->1 transition. */
2214         if (!level)
2215                 return 0;
2216         adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2217         if (!adapter)
2218                 return -1;
2219         down_read(&adapter->maps_lock);
2220         ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2221         up_read(&adapter->maps_lock);
2222         if ((ret > 0) && !adapter->masked) {
2223                 struct kvm_s390_interrupt s390int = {
2224                         .type = KVM_S390_INT_IO(1, 0, 0, 0),
2225                         .parm = 0,
2226                         .parm64 = (adapter->isc << 27) | 0x80000000,
2227                 };
2228                 ret = kvm_s390_inject_vm(kvm, &s390int);
2229                 if (ret == 0)
2230                         ret = 1;
2231         }
2232         return ret;
2233 }
2234
2235 int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
2236                           const struct kvm_irq_routing_entry *ue)
2237 {
2238         int ret;
2239
2240         switch (ue->type) {
2241         case KVM_IRQ_ROUTING_S390_ADAPTER:
2242                 e->set = set_adapter_int;
2243                 e->adapter.summary_addr = ue->u.adapter.summary_addr;
2244                 e->adapter.ind_addr = ue->u.adapter.ind_addr;
2245                 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2246                 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2247                 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2248                 ret = 0;
2249                 break;
2250         default:
2251                 ret = -EINVAL;
2252         }
2253
2254         return ret;
2255 }
2256
2257 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2258                 int irq_source_id, int level, bool line_status)
2259 {
2260         return -EINVAL;
2261 }
2262
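/*
 * Restore a previously saved local interrupt state from the userspace
 * buffer @irqstate of @len bytes. Refuses with -EBUSY if the VCPU
 * already has local interrupts pending.
 */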
2263 int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2264 {
2265         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2266         struct kvm_s390_irq *buf;
2267         int r = 0;
2268         int n;
2269
2270         buf = vmalloc(len);
2271         if (!buf)
2272                 return -ENOMEM;
2273
2274         if (copy_from_user((void *) buf, irqstate, len)) {
2275                 r = -EFAULT;
2276                 goto out_free;
2277         }
2278
2279         /*
2280          * Don't allow setting the interrupt state while there
2281          * are already interrupts pending.
2282          */
2283         spin_lock(&li->lock);
2284         if (li->pending_irqs) {
2285                 r = -EBUSY;
2286                 goto out_unlock;
2287         }
2288
2289         for (n = 0; n < len / sizeof(*buf); n++) {
2290                 r = do_inject_vcpu(vcpu, &buf[n]);
2291                 if (r)
2292                         break;
2293         }
2294
2295 out_unlock:
2296         spin_unlock(&li->lock);
2297 out_free:
2298         vfree(buf);
2299
2300         return r;
2301 }
2302
2303 static void store_local_irq(struct kvm_s390_local_interrupt *li,
2304                             struct kvm_s390_irq *irq,
2305                             unsigned long irq_type)
2306 {
2307         switch (irq_type) {
2308         case IRQ_PEND_MCHK_EX:
2309         case IRQ_PEND_MCHK_REP:
2310                 irq->type = KVM_S390_MCHK;
2311                 irq->u.mchk = li->irq.mchk;
2312                 break;
2313         case IRQ_PEND_PROG:
2314                 irq->type = KVM_S390_PROGRAM_INT;
2315                 irq->u.pgm = li->irq.pgm;
2316                 break;
2317         case IRQ_PEND_PFAULT_INIT:
2318                 irq->type = KVM_S390_INT_PFAULT_INIT;
2319                 irq->u.ext = li->irq.ext;
2320                 break;
2321         case IRQ_PEND_EXT_EXTERNAL:
2322                 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2323                 irq->u.extcall = li->irq.extcall;
2324                 break;
2325         case IRQ_PEND_EXT_CLOCK_COMP:
2326                 irq->type = KVM_S390_INT_CLOCK_COMP;
2327                 break;
2328         case IRQ_PEND_EXT_CPU_TIMER:
2329                 irq->type = KVM_S390_INT_CPU_TIMER;
2330                 break;
2331         case IRQ_PEND_SIGP_STOP:
2332                 irq->type = KVM_S390_SIGP_STOP;
2333                 irq->u.stop = li->irq.stop;
2334                 break;
2335         case IRQ_PEND_RESTART:
2336                 irq->type = KVM_S390_RESTART;
2337                 break;
2338         case IRQ_PEND_SET_PREFIX:
2339                 irq->type = KVM_S390_SIGP_SET_PREFIX;
2340                 irq->u.prefix = li->irq.prefix;
2341                 break;
2342         }
2343 }
2344
2345 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2346 {
2347         int scn;
2348         unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
2349         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2350         unsigned long pending_irqs;
2351         struct kvm_s390_irq irq;
2352         unsigned long irq_type;
2353         int cpuaddr;
2354         int n = 0;
2355
2356         spin_lock(&li->lock);
2357         pending_irqs = li->pending_irqs;
2358         memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2359                sizeof(sigp_emerg_pending));
2360         spin_unlock(&li->lock);
2361
2362         for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2363                 memset(&irq, 0, sizeof(irq));
2364                 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2365                         continue;
2366                 if (n + sizeof(irq) > len)
2367                         return -ENOBUFS;
2368                 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2369                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2370                         return -EFAULT;
2371                 n += sizeof(irq);
2372         }
2373
2374         if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
2375                 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
2376                         memset(&irq, 0, sizeof(irq));
2377                         if (n + sizeof(irq) > len)
2378                                 return -ENOBUFS;
2379                         irq.type = KVM_S390_INT_EMERGENCY;
2380                         irq.u.emerg.code = cpuaddr;
2381                         if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2382                                 return -EFAULT;
2383                         n += sizeof(irq);
2384                 }
2385         }
2386
2387         if (sca_ext_call_pending(vcpu, &scn)) {
2388                 if (n + sizeof(irq) > len)
2389                         return -ENOBUFS;
2390                 memset(&irq, 0, sizeof(irq));
2391                 irq.type = KVM_S390_INT_EXTERNAL_CALL;
2392                 irq.u.extcall.code = scn;
2393                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2394                         return -EFAULT;
2395                 n += sizeof(irq);
2396         }
2397
2398         return n;
2399 }