arch/s390/kvm/sigp.c
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

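/*
 * SIGP SENSE: report whether the addressed CPU is operational; if it is
 * stopped or has an external call pending, store that status in the
 * caller's register and return condition code "status stored".
 */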
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = SIGP_CC_NOT_OPERATIONAL;
	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
		   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

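/*
 * SIGP EMERGENCY SIGNAL: queue an emergency-signal external interrupt,
 * tagged with the sender's CPU id, on the target's local interrupt list
 * and wake the target if it is waiting.
 */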
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

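/*
 * SIGP EXTERNAL CALL: like the emergency signal, but delivered as an
 * external-call interrupt carrying the sender's CPU id.
 */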
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}

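/*
 * Queue a stop interrupt on the target's local interrupt list and record
 * the requested action bits (stop, optionally with status storage).
 * A target that is already stopped is left untouched.
 */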
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		/* already stopped: nothing to queue, don't leak the request */
		kfree(inti);
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
out:
	spin_unlock_bh(&li->lock);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}

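/* SIGP STOP / STOP AND STORE STATUS: look up the target and inject the stop. */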
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		goto unlock;
	}

	rc = __inject_sigp_stop(li, action);

unlock:
	spin_unlock(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
	return rc;
}

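/* Queue a stop request on this vcpu's own local interrupt list. */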
int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	return __inject_sigp_stop(li, action);
}

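/*
 * SIGP SET ARCHITECTURE: accept parameter codes 1 and 2, report code 0 as
 * not operational and anything else as unsupported.
 */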
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

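/*
 * SIGP SET PREFIX: verify that the new prefix area is backed by guest
 * memory, then queue a set-prefix interrupt for the (stopped) target CPU.
 */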
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock(&fi->lock);
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];

	if (li == NULL) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock(&fi->lock);
	return rc;
}

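/*
 * SIGP SENSE RUNNING: report whether the addressed CPU is currently
 * running; if not, store the "not running" status in the caller's register.
 */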
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
				u64 *reg)
{
	int rc;
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = SIGP_CC_NOT_OPERATIONAL;
	else {
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_RUNNING) {
			/* running */
			rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		} else {
			/* not running */
			*reg &= 0xffffffff00000000UL;
			*reg |= SIGP_STATUS_NOT_RUNNING;
			rc = SIGP_CC_STATUS_STORED;
		}
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

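/*
 * SIGP RESTART: the restart itself is handled by userspace; here we only
 * check that the target exists and has no stop pending, otherwise report busy.
 */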
static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		goto out;
	}

	spin_lock_bh(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
	else
		VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
			cpu_addr);
	spin_unlock_bh(&li->lock);
out:
	spin_unlock(&fi->lock);
	return rc;
}

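/*
 * Decode the intercepted SIGP instruction (order code, CPU address and
 * parameter register) and dispatch to the handler for that order; the
 * resulting condition code is written back into the guest PSW.
 */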
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	order_code = disp2;
	if (base2)
		order_code += vcpu->run->s.regs.gprs[base2];

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __sigp_restart(vcpu, cpu_addr);
		if (rc == SIGP_CC_BUSY)
			break;
		/* user space must know about restart */
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
	return 0;
}