KVM: s390: convert handle_store_prefix()
arch/s390/kvm/priv.c
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu *cpup;
        s64 hostclk, val;
        int i, rc;
        u64 op2;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        op2 = kvm_s390_get_base_disp_s(vcpu);
        if (op2 & 7)    /* Operand must be on a doubleword boundary */
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, op2, &val, sizeof(val));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        if (store_tod_clock(&hostclk)) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
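        /*
         * The guest TOD is the host TOD plus the per-vcpu epoch, so the
         * requested clock value becomes the new epoch by subtracting the
         * current host TOD. The low six bits are masked off.
         */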
        val = (val - hostclk) & ~0x3fUL;

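        /* Apply the new epoch to all vcpus so the guest TOD stays uniform. */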
        mutex_lock(&vcpu->kvm->lock);
        kvm_for_each_vcpu(i, cpup, vcpu->kvm)
                cpup->arch.sie_block->epoch = val;
        mutex_unlock(&vcpu->kvm->lock);

        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;

        vcpu->stat.instruction_spx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* get the value */
        rc = read_guest(vcpu, operand2, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

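        /* Only the 8k-aligned part of a 31-bit address is used as the prefix. */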
        address &= 0x7fffe000u;

        /*
         * Make sure the new value is valid memory. We only need to check the
         * first page, since address is 8k aligned and memory pieces are always
         * at least 1MB aligned and have at least a size of 1MB.
         */
        if (kvm_is_error_gpa(vcpu->kvm, address))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        kvm_s390_set_prefix(vcpu, address);

        VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
        return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;

        vcpu->stat.instruction_stpx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

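        /* STPX stores the current, 8k-aligned prefix at the operand address. */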
        address = vcpu->arch.sie_block->prefix;
        address = address & 0x7fffe000u;

        /* store the value */
        rc = write_guest(vcpu, operand2, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
        return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u64 useraddr;

        vcpu->stat.instruction_stap++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        useraddr = kvm_s390_get_base_disp_s(vcpu);

        if (useraddr & 1)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
        trace_kvm_s390_handle_stap(vcpu, useraddr);
        return 0;
}

static void __skey_check_enable(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
                return;

        s390_enable_skey();
        trace_kvm_s390_skey_related_inst(vcpu);
        vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
        __skey_check_enable(vcpu);

        vcpu->stat.instruction_storage_key++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        vcpu->arch.sie_block->gpsw.addr =
                __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
        psw_t *psw = &vcpu->arch.sie_block->gpsw;

        vcpu->stat.instruction_ipte_interlock++;
        if (psw_bits(*psw).p)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
        psw->addr = __rewind_psw(*psw, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
        return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
        unsigned long hva;
        gpa_t addr;
        int reg2;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_real_to_abs(vcpu, addr);

        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
        if (kvm_is_error_hva(hva))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        /*
         * We don't expect errors on modern systems, and do not care
         * about storage keys (yet), so let's just clear the page.
         */
        if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
                return -EFAULT;
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;
        u64 addr;
        int cc;

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        cc = 0;
        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
        if (!inti)
                goto no_interrupt;
        cc = 1;
        if (addr) {
                /*
                 * Store the two-word I/O interruption code into the
                 * provided area.
                 */
                if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
                    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
                    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        } else {
                /*
                 * Store the three-word I/O interruption code into
                 * the appropriate lowcore area.
                 */
                put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
                put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
                put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
                put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
        }
        kfree(inti);
no_interrupt:
        /* Set condition code and we're done. */
        kvm_s390_set_psw_cc(vcpu, cc);
        return 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;

        inti = kvm_s390_get_io_int(vcpu->kvm, 0,
                                   vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing channel
                 * I/O instructions.
                 */
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
        int rc;

        vcpu->stat.instruction_stfl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

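        /*
         * write_guest_lc targets the guest lowcore, so the store honors
         * the guest's current prefix register.
         */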
        rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                            vfacilities, 4);
        if (rc)
                return rc;
        VCPU_EVENT(vcpu, 5, "store facility list value %x",
                   *(unsigned int *) vfacilities);
        trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
        return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
        /* Check whether the new psw is enabled for machine checks. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
                kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

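/*
 * A PSW is valid if no unassigned mask bits are set and the instruction
 * address fits the selected addressing mode: 24-bit (EA=0, BA=0),
 * 31-bit (EA=0, BA=1) or 64-bit (EA=1, BA=1). EA without BA is invalid.
 */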
static int is_valid_psw(psw_t *psw)
{
        if (psw->mask & PSW_MASK_UNASSIGNED)
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
                if (psw->addr & ~PSW_ADDR_31)
                        return 0;
        }
        if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
                return 0;
        return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
        psw_compat_t new_psw;
        u64 addr;

        if (gpsw->mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        if (!(new_psw.mask & PSW32_MASK_BASE))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
        gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
        if (!is_valid_psw(gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        handle_new_psw(vcpu);
        return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        psw_t new_psw;
        u64 addr;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        vcpu->arch.sie_block->gpsw = new_psw;
        if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        handle_new_psw(vcpu);
        return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 operand2;

        vcpu->stat.instruction_stidp++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
        return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        int cpus = 0;
        int n;

        cpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
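        /* Shift the existing VM entries down to free slot 0 for this guest. */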
        for (n = mem->count - 1; n > 0; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        unsigned long mem = 0;
        u64 operand2;
        int rc = 0;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (fc > 3) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }

        if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
            || vcpu->run->s.regs.gprs[1] & 0xffff0000)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (fc == 0) {
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 0xfff)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_no_data;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_no_data;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        }

        if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out_exception;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
out_no_data:
        kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
        free_page(mem);
        return rc;
}

static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x04] = handle_set_clock,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x21] = handle_ipte_interlock,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x2c] = handle_test_block,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x50] = handle_ipte_interlock,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. Here we check for
         * the privileged ones that we can handle in the kernel;
         * anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
        }
        return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;
        unsigned long start, end;

        vcpu->stat.instruction_pfmf++;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        if (!MACHINE_HAS_PFMF)
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide non-quiescing support if the host supports it */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* No support for conditional-SSKE */
        if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
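        /* The frame-size code selects the unit: 0 = 4K page, 1 = 1M segment. */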
        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        case 0x00000000:
                end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
                break;
        case 0x00001000:
                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
                break;
        /* We don't support EDAT2
        case 0x00002000:
                end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
                break; */
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        }
        while (start < end) {
                unsigned long useraddr;

                useraddr = gmap_translate(start, vcpu->arch.gmap);
                if (IS_ERR((void *)useraddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                        if (clear_user((void __user *)useraddr, PAGE_SIZE))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
                        __skey_check_enable(vcpu);
                        if (set_guest_storage_key(current->mm, useraddr,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
                vcpu->run->s.regs.gprs[reg2] = end;
        return 0;
}

static int handle_essa(struct kvm_vcpu *vcpu)
{
        /* entries expected to be 0x1ff */
        int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
        unsigned long *cbrlo, cbrle;
        struct gmap *gmap;
        int i;

        VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
        gmap = vcpu->arch.gmap;
        vcpu->stat.instruction_essa++;
        if (!kvm_s390_cmma_enabled(vcpu->kvm))
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Rewind PSW to repeat the ESSA instruction */
        vcpu->arch.sie_block->gpsw.addr =
                __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;       /* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
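        /*
         * Each CBRLO entry is the guest absolute address of a page whose
         * backing can be released; stop at the first malformed entry.
         */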
        down_read(&gmap->mm->mmap_sem);
        for (i = 0; i < entries; ++i) {
                cbrle = cbrlo[i];
                if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
                        /* invalid entry */
                        break;
                /* try to free backing */
                __gmap_zap(cbrle, gmap);
        }
        up_read(&gmap->mm->mmap_sem);
        if (i < entries)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

static const intercept_handler_t b9_handlers[256] = {
        [0x8a] = handle_ipte_interlock,
        [0x8d] = handle_epsw,
        [0x8e] = handle_ipte_interlock,
        [0x8f] = handle_ipte_interlock,
        [0xab] = handle_essa,
        [0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        u64 useraddr;
        u32 val = 0;
        int reg, rc;

        vcpu->stat.instruction_lctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        useraddr = kvm_s390_get_base_disp_rs(vcpu);

        if (useraddr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
                   useraddr);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

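        /*
         * Load the low word of each control register from reg1 through
         * reg3, wrapping around from cr15 to cr0.
         */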
        reg = reg1;
        do {
                rc = get_guest(vcpu, val, (u32 __user *) useraddr);
                if (rc)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                vcpu->arch.sie_block->gcr[reg] |= val;
                useraddr += 4;
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);

        return 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        u64 useraddr;
        int reg, rc;

        vcpu->stat.instruction_lctlg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        useraddr = kvm_s390_get_base_disp_rsy(vcpu);

        if (useraddr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

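        /* Same loop as LCTL, but full 64-bit control registers, 8 bytes each. */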
        reg = reg1;

        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
                   useraddr);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

        do {
                rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
                               (u64 __user *) useraddr);
                if (rc)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                useraddr += 8;
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);

        return 0;
}

static const intercept_handler_t eb_handlers[256] = {
        [0x2f] = handle_lctlg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        struct vm_area_struct *vma;
        unsigned long user_address;

        vcpu->stat.instruction_tprot++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

        /*
         * We only handle the Linux memory detection case:
         * access key == 0
         * guest DAT == off
         * Everything else goes to userspace.
         */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                return -EOPNOTSUPP;

        down_read(&current->mm->mmap_sem);
        user_address = __gmap_translate(address1, vcpu->arch.gmap);
        if (IS_ERR_VALUE(user_address))
                goto out_inject;
        vma = find_vma(current->mm, user_address);
        if (!vma)
                goto out_inject;
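        /*
         * Set the condition code in the guest PSW (mask bits 44-45):
         * 0 = fetch and store allowed, 1 = fetch only, 2 = no access.
         */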
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

        up_read(&current->mm->mmap_sem);
        return 0;

out_inject:
        up_read(&current->mm->mmap_sem);
        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

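        /* Bits 48-63 of gr0 become the TOD programmable field. */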
        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}