arch/arm/kvm/arm.c
1 /*
2  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License, version 2, as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
17  */
18
19 #include <linux/cpu_pm.h>
20 #include <linux/errno.h>
21 #include <linux/err.h>
22 #include <linux/kvm_host.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/vmalloc.h>
26 #include <linux/fs.h>
27 #include <linux/mman.h>
28 #include <linux/sched.h>
29 #include <linux/kvm.h>
30 #include <trace/events/kvm.h>
31 #include <kvm/arm_pmu.h>
32
33 #define CREATE_TRACE_POINTS
34 #include "trace.h"
35
36 #include <asm/uaccess.h>
37 #include <asm/ptrace.h>
38 #include <asm/mman.h>
39 #include <asm/tlbflush.h>
40 #include <asm/cacheflush.h>
41 #include <asm/virt.h>
42 #include <asm/kvm_arm.h>
43 #include <asm/kvm_asm.h>
44 #include <asm/kvm_mmu.h>
45 #include <asm/kvm_emulate.h>
46 #include <asm/kvm_coproc.h>
47 #include <asm/kvm_psci.h>
48 #include <asm/sections.h>
49
50 #ifdef REQUIRES_VIRT
51 __asm__(".arch_extension        virt");
52 #endif
53
54 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
55 static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
56 static unsigned long hyp_default_vectors;
57
58 /* Per-CPU variable containing the currently running vcpu. */
59 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
60
61 /* The VMID used in the VTTBR */
62 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
63 static u32 kvm_next_vmid;
64 static unsigned int kvm_vmid_bits __read_mostly;
65 static DEFINE_SPINLOCK(kvm_vmid_lock);
66
67 static bool vgic_present;
68
69 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
70
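/*
 * Record which vcpu is currently loaded on this physical CPU. The per-CPU
 * pointer is read back through kvm_arm_get_running_vcpu() and
 * kvm_get_running_vcpus() below (used, for instance, by the vgic code).
 */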
71 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
72 {
73         BUG_ON(preemptible());
74         __this_cpu_write(kvm_arm_running_vcpu, vcpu);
75 }
76
77 /**
78  * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
79  * Must be called from non-preemptible context
80  */
81 struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
82 {
83         BUG_ON(preemptible());
84         return __this_cpu_read(kvm_arm_running_vcpu);
85 }
86
87 /**
88  * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
89  */
90 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
91 {
92         return &kvm_arm_running_vcpu;
93 }
94
95 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
96 {
97         return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
98 }
99
100 int kvm_arch_hardware_setup(void)
101 {
102         return 0;
103 }
104
105 void kvm_arch_check_processor_compat(void *rtn)
106 {
107         *(int *)rtn = 0;
108 }
109
110
111 /**
112  * kvm_arch_init_vm - initializes a VM data structure
113  * @kvm:        pointer to the KVM struct
114  */
115 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
116 {
117         int ret = 0;
118
119         if (type)
120                 return -EINVAL;
121
122         ret = kvm_alloc_stage2_pgd(kvm);
123         if (ret)
124                 goto out_fail_alloc;
125
126         ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
127         if (ret)
128                 goto out_free_stage2_pgd;
129
130         kvm_vgic_early_init(kvm);
131         kvm_timer_init(kvm);
132
133         /* Mark the initial VMID generation invalid */
134         kvm->arch.vmid_gen = 0;
135
136         /* The maximum number of VCPUs is limited by the host's GIC model */
137         kvm->arch.max_vcpus = vgic_present ?
138                                 kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
139
140         return ret;
141 out_free_stage2_pgd:
142         kvm_free_stage2_pgd(kvm);
143 out_fail_alloc:
144         return ret;
145 }
146
147 bool kvm_arch_has_vcpu_debugfs(void)
148 {
149         return false;
150 }
151
152 int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
153 {
154         return 0;
155 }
156
157 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
158 {
159         return VM_FAULT_SIGBUS;
160 }
161
162
163 /**
164  * kvm_arch_destroy_vm - destroy the VM data structure
165  * @kvm:        pointer to the KVM struct
166  */
167 void kvm_arch_destroy_vm(struct kvm *kvm)
168 {
169         int i;
170
171         kvm_free_stage2_pgd(kvm);
172
173         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
174                 if (kvm->vcpus[i]) {
175                         kvm_arch_vcpu_free(kvm->vcpus[i]);
176                         kvm->vcpus[i] = NULL;
177                 }
178         }
179
180         kvm_vgic_destroy(kvm);
181 }
182
183 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
184 {
185         int r;
186         switch (ext) {
187         case KVM_CAP_IRQCHIP:
188                 r = vgic_present;
189                 break;
190         case KVM_CAP_IOEVENTFD:
191         case KVM_CAP_DEVICE_CTRL:
192         case KVM_CAP_USER_MEMORY:
193         case KVM_CAP_SYNC_MMU:
194         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
195         case KVM_CAP_ONE_REG:
196         case KVM_CAP_ARM_PSCI:
197         case KVM_CAP_ARM_PSCI_0_2:
198         case KVM_CAP_READONLY_MEM:
199         case KVM_CAP_MP_STATE:
200                 r = 1;
201                 break;
202         case KVM_CAP_COALESCED_MMIO:
203                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
204                 break;
205         case KVM_CAP_ARM_SET_DEVICE_ADDR:
206                 r = 1;
207                 break;
208         case KVM_CAP_NR_VCPUS:
209                 r = num_online_cpus();
210                 break;
211         case KVM_CAP_MAX_VCPUS:
212                 r = KVM_MAX_VCPUS;
213                 break;
214         default:
215                 r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
216                 break;
217         }
218         return r;
219 }
220
221 long kvm_arch_dev_ioctl(struct file *filp,
222                         unsigned int ioctl, unsigned long arg)
223 {
224         return -EINVAL;
225 }
226
227
228 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
229 {
230         int err;
231         struct kvm_vcpu *vcpu;
232
233         if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
234                 err = -EBUSY;
235                 goto out;
236         }
237
238         if (id >= kvm->arch.max_vcpus) {
239                 err = -EINVAL;
240                 goto out;
241         }
242
243         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
244         if (!vcpu) {
245                 err = -ENOMEM;
246                 goto out;
247         }
248
249         err = kvm_vcpu_init(vcpu, kvm, id);
250         if (err)
251                 goto free_vcpu;
252
253         err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
254         if (err)
255                 goto vcpu_uninit;
256
257         return vcpu;
258 vcpu_uninit:
259         kvm_vcpu_uninit(vcpu);
260 free_vcpu:
261         kmem_cache_free(kvm_vcpu_cache, vcpu);
262 out:
263         return ERR_PTR(err);
264 }
265
266 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
267 {
268         kvm_vgic_vcpu_early_init(vcpu);
269 }
270
271 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
272 {
273         kvm_mmu_free_memory_caches(vcpu);
274         kvm_timer_vcpu_terminate(vcpu);
275         kvm_vgic_vcpu_destroy(vcpu);
276         kvm_pmu_vcpu_destroy(vcpu);
277         kvm_vcpu_uninit(vcpu);
278         kmem_cache_free(kvm_vcpu_cache, vcpu);
279 }
280
281 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
282 {
283         kvm_arch_vcpu_free(vcpu);
284 }
285
286 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
287 {
288         return kvm_timer_should_fire(vcpu);
289 }
290
291 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
292 {
293         kvm_timer_schedule(vcpu);
294 }
295
296 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
297 {
298         kvm_timer_unschedule(vcpu);
299 }
300
301 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
302 {
303         /* Force users to call KVM_ARM_VCPU_INIT */
304         vcpu->arch.target = -1;
305         bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
306
307         /* Set up the timer */
308         kvm_timer_vcpu_init(vcpu);
309
310         kvm_arm_reset_debug_ptr(vcpu);
311
312         return 0;
313 }
314
315 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
316 {
317         vcpu->cpu = cpu;
318         vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
319
320         kvm_arm_set_running_vcpu(vcpu);
321 }
322
323 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
324 {
325         /*
326          * The arch-generic KVM code expects the cpu field of a vcpu to be -1
327          * if the vcpu is no longer assigned to a cpu.  This is used for the
328          * optimized make_all_cpus_request path.
329          */
330         vcpu->cpu = -1;
331
332         kvm_arm_set_running_vcpu(NULL);
333         kvm_timer_vcpu_put(vcpu);
334 }
335
336 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
337                                     struct kvm_mp_state *mp_state)
338 {
339         if (vcpu->arch.power_off)
340                 mp_state->mp_state = KVM_MP_STATE_STOPPED;
341         else
342                 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
343
344         return 0;
345 }
346
347 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
348                                     struct kvm_mp_state *mp_state)
349 {
350         switch (mp_state->mp_state) {
351         case KVM_MP_STATE_RUNNABLE:
352                 vcpu->arch.power_off = false;
353                 break;
354         case KVM_MP_STATE_STOPPED:
355                 vcpu->arch.power_off = true;
356                 break;
357         default:
358                 return -EINVAL;
359         }
360
361         return 0;
362 }
363
364 /**
365  * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
366  * @v:          The VCPU pointer
367  *
368  * If the guest CPU is not waiting for interrupts or an interrupt line is
369  * asserted, the CPU is by definition runnable.
370  */
371 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
372 {
373         return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
374                 && !v->arch.power_off && !v->arch.pause);
375 }
376
377 /* Just ensure a guest exit from a particular CPU */
378 static void exit_vm_noop(void *info)
379 {
380 }
381
382 void force_vm_exit(const cpumask_t *mask)
383 {
384         preempt_disable();
385         smp_call_function_many(mask, exit_vm_noop, NULL, true);
386         preempt_enable();
387 }
388
389 /**
390  * need_new_vmid_gen - check that the VMID is still valid
391  * @kvm: The VM whose VMID to check
392  *
393  * Return true if a new generation of VMIDs is being used
394  *
395  * The hardware supports only 256 values with the value zero reserved for the
396  * host, so we check if an assigned value belongs to a previous generation,
397  * which requires us to assign a new value. If we're the first to use a
398  * VMID for the new generation, we must flush necessary caches and TLBs on all
399  * CPUs.
400  */
401 static bool need_new_vmid_gen(struct kvm *kvm)
402 {
403         return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
404 }
405
406 /**
407  * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
408  * @kvm: The guest that we are about to run
409  *
410  * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
411  * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
412  * caches and TLBs.
413  */
414 static void update_vttbr(struct kvm *kvm)
415 {
416         phys_addr_t pgd_phys;
417         u64 vmid;
418
419         if (!need_new_vmid_gen(kvm))
420                 return;
421
422         spin_lock(&kvm_vmid_lock);
423
424         /*
425          * We need to re-check the vmid_gen here to ensure that if another vcpu
426          * already allocated a valid vmid for this vm, then this vcpu should
427          * use the same vmid.
428          */
429         if (!need_new_vmid_gen(kvm)) {
430                 spin_unlock(&kvm_vmid_lock);
431                 return;
432         }
433
434         /* First user of a new VMID generation? */
435         if (unlikely(kvm_next_vmid == 0)) {
436                 atomic64_inc(&kvm_vmid_gen);
437                 kvm_next_vmid = 1;
438
439                 /*
440                  * On SMP we know no other CPUs can use this CPU's or each
441                  * other's VMID after force_vm_exit returns since the
442                  * kvm_vmid_lock blocks them from reentry to the guest.
443                  */
444                 force_vm_exit(cpu_all_mask);
445                 /*
446                  * Now broadcast TLB + ICACHE invalidation over the inner
447                  * shareable domain to make sure all data structures are
448                  * clean.
449                  */
450                 kvm_call_hyp(__kvm_flush_vm_context);
451         }
452
453         kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
454         kvm->arch.vmid = kvm_next_vmid;
455         kvm_next_vmid++;
456         kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
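                /*
                 * If kvm_next_vmid just wrapped to zero, the next caller will
                 * take the "first user of a new generation" path above, bump
                 * kvm_vmid_gen and flush the TLBs again.
                 */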
457
458         /* update vttbr to be used with the new vmid */
459         pgd_phys = virt_to_phys(kvm->arch.pgd);
460         BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
461         vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
462         kvm->arch.vttbr = pgd_phys | vmid;
463
464         spin_unlock(&kvm_vmid_lock);
465 }
466
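/*
 * One-time setup done on a vcpu's first KVM_RUN: map the VGIC hardware
 * resources for the VM and, once an in-kernel irqchip is initialized,
 * enable the architected timer for this vcpu.
 */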
467 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
468 {
469         struct kvm *kvm = vcpu->kvm;
470         int ret = 0;
471
472         if (likely(vcpu->arch.has_run_once))
473                 return 0;
474
475         vcpu->arch.has_run_once = true;
476
477         /*
478          * Map the VGIC hardware resources before running a vcpu the first
479          * time on this VM.
480          */
481         if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
482                 ret = kvm_vgic_map_resources(kvm);
483                 if (ret)
484                         return ret;
485         }
486
487         /*
488          * Enable the arch timers only if we have an in-kernel VGIC
489          * and it has been properly initialized, since we cannot handle
490          * interrupts from the virtual timer with a userspace gic.
491          */
492         if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
493                 ret = kvm_timer_enable(vcpu);
494
495         return ret;
496 }
497
498 bool kvm_arch_intc_initialized(struct kvm *kvm)
499 {
500         return vgic_initialized(kvm);
501 }
502
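/*
 * Note the distinction between the two "stop" flags: pause is a transient,
 * host-internal request to keep vcpus out of the guest (e.g. while the vgic
 * is being reconfigured), while power_off models the guest-visible PSCI
 * power state of the vcpu.
 */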
503 void kvm_arm_halt_guest(struct kvm *kvm)
504 {
505         int i;
506         struct kvm_vcpu *vcpu;
507
508         kvm_for_each_vcpu(i, vcpu, kvm)
509                 vcpu->arch.pause = true;
510         kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
511 }
512
513 void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
514 {
515         vcpu->arch.pause = true;
516         kvm_vcpu_kick(vcpu);
517 }
518
519 void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
520 {
521         struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
522
523         vcpu->arch.pause = false;
524         swake_up(wq);
525 }
526
527 void kvm_arm_resume_guest(struct kvm *kvm)
528 {
529         int i;
530         struct kvm_vcpu *vcpu;
531
532         kvm_for_each_vcpu(i, vcpu, kvm)
533                 kvm_arm_resume_vcpu(vcpu);
534 }
535
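/*
 * Put the vcpu to sleep on its wait queue until it is neither powered off
 * nor paused; any wakeup on the queue re-evaluates that condition.
 */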
536 static void vcpu_sleep(struct kvm_vcpu *vcpu)
537 {
538         struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
539
540         swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
541                                        (!vcpu->arch.pause)));
542 }
543
544 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
545 {
546         return vcpu->arch.target >= 0;
547 }
548
549 /**
550  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
551  * @vcpu:       The VCPU pointer
552  * @run:        The kvm_run structure pointer used for userspace state exchange
553  *
554  * This function is called through the KVM_RUN ioctl from user space. It
555  * executes guest code in a loop until the time slice for the process is
556  * used up or some emulation is needed from user space, in which case it
557  * returns with a return value of 0 and with the kvm_run structure filled
558  * in with the data required for the requested emulation.
559  */
560 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
561 {
562         int ret;
563         sigset_t sigsaved;
564
565         if (unlikely(!kvm_vcpu_initialized(vcpu)))
566                 return -ENOEXEC;
567
568         ret = kvm_vcpu_first_run_init(vcpu);
569         if (ret)
570                 return ret;
571
572         if (run->exit_reason == KVM_EXIT_MMIO) {
573                 ret = kvm_handle_mmio_return(vcpu, vcpu->run);
574                 if (ret)
575                         return ret;
576         }
577
578         if (vcpu->sigset_active)
579                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
580
581         ret = 1;
582         run->exit_reason = KVM_EXIT_UNKNOWN;
583         while (ret > 0) {
584                 /*
585                  * Check conditions before entering the guest
586                  */
587                 cond_resched();
588
589                 update_vttbr(vcpu->kvm);
590
591                 if (vcpu->arch.power_off || vcpu->arch.pause)
592                         vcpu_sleep(vcpu);
593
594                 /*
595                  * Preparing the interrupts to be injected also
596                  * involves poking the GIC, which must be done in a
597                  * non-preemptible context.
598                  */
599                 preempt_disable();
600                 kvm_pmu_flush_hwstate(vcpu);
601                 kvm_timer_flush_hwstate(vcpu);
602                 kvm_vgic_flush_hwstate(vcpu);
603
604                 local_irq_disable();
605
606                 /*
607                  * Re-check atomic conditions
608                  */
609                 if (signal_pending(current)) {
610                         ret = -EINTR;
611                         run->exit_reason = KVM_EXIT_INTR;
612                 }
613
614                 if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
615                         vcpu->arch.power_off || vcpu->arch.pause) {
616                         local_irq_enable();
617                         kvm_pmu_sync_hwstate(vcpu);
618                         kvm_timer_sync_hwstate(vcpu);
619                         kvm_vgic_sync_hwstate(vcpu);
620                         preempt_enable();
621                         continue;
622                 }
623
624                 kvm_arm_setup_debug(vcpu);
625
626                 /**************************************************************
627                  * Enter the guest
628                  */
629                 trace_kvm_entry(*vcpu_pc(vcpu));
630                 guest_enter_irqoff();
631                 vcpu->mode = IN_GUEST_MODE;
632
633                 ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
634
635                 vcpu->mode = OUTSIDE_GUEST_MODE;
636                 vcpu->stat.exits++;
637                 /*
638                  * Back from guest
639                  *************************************************************/
640
641                 kvm_arm_clear_debug(vcpu);
642
643                 /*
644                  * We may have taken a host interrupt in HYP mode (ie
645                  * while executing the guest). This interrupt is still
646                  * pending, as we haven't serviced it yet!
647                  *
648                  * We're now back in SVC mode, with interrupts
649                  * disabled.  Enabling the interrupts now will have
650                  * the effect of taking the interrupt again, in SVC
651                  * mode this time.
652                  */
653                 local_irq_enable();
654
655                 /*
656                  * We do local_irq_enable() before calling guest_exit() so
657                  * that if a timer interrupt hits while running the guest we
658                  * account that tick as being spent in the guest.  We enable
659                  * preemption after calling guest_exit() so that if we get
660                  * preempted we make sure ticks after that are not counted as
661                  * guest time.
662                  */
663                 guest_exit();
664                 trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
665
666                 /*
667                  * We must sync the PMU and timer state before the vgic state so
668                  * that the vgic can properly sample the updated state of the
669                  * interrupt line.
670                  */
671                 kvm_pmu_sync_hwstate(vcpu);
672                 kvm_timer_sync_hwstate(vcpu);
673
674                 kvm_vgic_sync_hwstate(vcpu);
675
676                 preempt_enable();
677
678                 ret = handle_exit(vcpu, run, ret);
679         }
680
681         if (vcpu->sigset_active)
682                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
683         return ret;
684 }
685
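/*
 * Assert or deassert the virtual IRQ/FIQ line for a vcpu by setting or
 * clearing the HCR_VI/HCR_VF bit shadowed in vcpu->arch.irq_lines, and kick
 * the vcpu if the line state actually changed.
 */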
686 static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
687 {
688         int bit_index;
689         bool set;
690         unsigned long *ptr;
691
692         if (number == KVM_ARM_IRQ_CPU_IRQ)
693                 bit_index = __ffs(HCR_VI);
694         else /* KVM_ARM_IRQ_CPU_FIQ */
695                 bit_index = __ffs(HCR_VF);
696
697         ptr = (unsigned long *)&vcpu->arch.irq_lines;
698         if (level)
699                 set = test_and_set_bit(bit_index, ptr);
700         else
701                 set = test_and_clear_bit(bit_index, ptr);
702
703         /*
704          * If we didn't change anything, no need to wake up or kick other CPUs
705          */
706         if (set == level)
707                 return 0;
708
709         /*
710          * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
711          * trigger a world-switch round on the running physical CPU to set the
712          * virtual IRQ/FIQ fields in the HCR appropriately.
713          */
714         kvm_vcpu_kick(vcpu);
715
716         return 0;
717 }
718
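/*
 * KVM_IRQ_LINE handler. The irq field packs three values, decoded with the
 * KVM_ARM_IRQ_*_SHIFT/MASK constants: the irq type (CPU, PPI or SPI), the
 * target vcpu index for per-vcpu interrupts, and the interrupt number.
 */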
719 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
720                           bool line_status)
721 {
722         u32 irq = irq_level->irq;
723         unsigned int irq_type, vcpu_idx, irq_num;
724         int nrcpus = atomic_read(&kvm->online_vcpus);
725         struct kvm_vcpu *vcpu = NULL;
726         bool level = irq_level->level;
727
728         irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
729         vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
730         irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
731
732         trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
733
734         switch (irq_type) {
735         case KVM_ARM_IRQ_TYPE_CPU:
736                 if (irqchip_in_kernel(kvm))
737                         return -ENXIO;
738
739                 if (vcpu_idx >= nrcpus)
740                         return -EINVAL;
741
742                 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
743                 if (!vcpu)
744                         return -EINVAL;
745
746                 if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
747                         return -EINVAL;
748
749                 return vcpu_interrupt_line(vcpu, irq_num, level);
750         case KVM_ARM_IRQ_TYPE_PPI:
751                 if (!irqchip_in_kernel(kvm))
752                         return -ENXIO;
753
754                 if (vcpu_idx >= nrcpus)
755                         return -EINVAL;
756
757                 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
758                 if (!vcpu)
759                         return -EINVAL;
760
761                 if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
762                         return -EINVAL;
763
764                 return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
765         case KVM_ARM_IRQ_TYPE_SPI:
766                 if (!irqchip_in_kernel(kvm))
767                         return -ENXIO;
768
769                 if (irq_num < VGIC_NR_PRIVATE_IRQS)
770                         return -EINVAL;
771
772                 return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
773         }
774
775         return -EINVAL;
776 }
777
778 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
779                                const struct kvm_vcpu_init *init)
780 {
781         unsigned int i;
782         int phys_target = kvm_target_cpu();
783
784         if (init->target != phys_target)
785                 return -EINVAL;
786
787         /*
788          * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
789          * use the same target.
790          */
791         if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
792                 return -EINVAL;
793
794         /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
795         for (i = 0; i < sizeof(init->features) * 8; i++) {
796                 bool set = (init->features[i / 32] & (1 << (i % 32)));
797
798                 if (set && i >= KVM_VCPU_MAX_FEATURES)
799                         return -ENOENT;
800
801                 /*
802                  * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
803                  * use the same feature set.
804                  */
805                 if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
806                     test_bit(i, vcpu->arch.features) != set)
807                         return -EINVAL;
808
809                 if (set)
810                         set_bit(i, vcpu->arch.features);
811         }
812
813         vcpu->arch.target = phys_target;
814
815         /* Now we know what it is, we can reset it. */
816         return kvm_reset_vcpu(vcpu);
817 }
818
819
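/*
 * KVM_ARM_VCPU_INIT handler. An illustrative userspace sequence (error
 * handling and fd setup omitted) is:
 *
 *      struct kvm_vcpu_init init;
 *      ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *      ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *
 * Calling this again on a vcpu that has already run acts as a reset; see
 * the stage2_unmap_vm() call below.
 */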
820 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
821                                          struct kvm_vcpu_init *init)
822 {
823         int ret;
824
825         ret = kvm_vcpu_set_target(vcpu, init);
826         if (ret)
827                 return ret;
828
829         /*
830          * Unmap stage-2 so a rebooted VM faults its RAM pages back in, letting
831          * us detect if the guest MMU is turned off and flush the caches as needed.
832          */
833         if (vcpu->arch.has_run_once)
834                 stage2_unmap_vm(vcpu->kvm);
835
836         vcpu_reset_hcr(vcpu);
837
838         /*
839          * Handle the "start in power-off" case.
840          */
841         if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
842                 vcpu->arch.power_off = true;
843         else
844                 vcpu->arch.power_off = false;
845
846         return 0;
847 }
848
849 static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
850                                  struct kvm_device_attr *attr)
851 {
852         int ret = -ENXIO;
853
854         switch (attr->group) {
855         default:
856                 ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
857                 break;
858         }
859
860         return ret;
861 }
862
863 static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
864                                  struct kvm_device_attr *attr)
865 {
866         int ret = -ENXIO;
867
868         switch (attr->group) {
869         default:
870                 ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
871                 break;
872         }
873
874         return ret;
875 }
876
877 static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
878                                  struct kvm_device_attr *attr)
879 {
880         int ret = -ENXIO;
881
882         switch (attr->group) {
883         default:
884                 ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
885                 break;
886         }
887
888         return ret;
889 }
890
891 long kvm_arch_vcpu_ioctl(struct file *filp,
892                          unsigned int ioctl, unsigned long arg)
893 {
894         struct kvm_vcpu *vcpu = filp->private_data;
895         void __user *argp = (void __user *)arg;
896         struct kvm_device_attr attr;
897
898         switch (ioctl) {
899         case KVM_ARM_VCPU_INIT: {
900                 struct kvm_vcpu_init init;
901
902                 if (copy_from_user(&init, argp, sizeof(init)))
903                         return -EFAULT;
904
905                 return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
906         }
907         case KVM_SET_ONE_REG:
908         case KVM_GET_ONE_REG: {
909                 struct kvm_one_reg reg;
910
911                 if (unlikely(!kvm_vcpu_initialized(vcpu)))
912                         return -ENOEXEC;
913
914                 if (copy_from_user(&reg, argp, sizeof(reg)))
915                         return -EFAULT;
916                 if (ioctl == KVM_SET_ONE_REG)
917                         return kvm_arm_set_reg(vcpu, &reg);
918                 else
919                         return kvm_arm_get_reg(vcpu, &reg);
920         }
921         case KVM_GET_REG_LIST: {
922                 struct kvm_reg_list __user *user_list = argp;
923                 struct kvm_reg_list reg_list;
924                 unsigned n;
925
926                 if (unlikely(!kvm_vcpu_initialized(vcpu)))
927                         return -ENOEXEC;
928
929                 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
930                         return -EFAULT;
931                 n = reg_list.n;
932                 reg_list.n = kvm_arm_num_regs(vcpu);
933                 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
934                         return -EFAULT;
935                 if (n < reg_list.n)
936                         return -E2BIG;
937                 return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
938         }
939         case KVM_SET_DEVICE_ATTR: {
940                 if (copy_from_user(&attr, argp, sizeof(attr)))
941                         return -EFAULT;
942                 return kvm_arm_vcpu_set_attr(vcpu, &attr);
943         }
944         case KVM_GET_DEVICE_ATTR: {
945                 if (copy_from_user(&attr, argp, sizeof(attr)))
946                         return -EFAULT;
947                 return kvm_arm_vcpu_get_attr(vcpu, &attr);
948         }
949         case KVM_HAS_DEVICE_ATTR: {
950                 if (copy_from_user(&attr, argp, sizeof(attr)))
951                         return -EFAULT;
952                 return kvm_arm_vcpu_has_attr(vcpu, &attr);
953         }
954         default:
955                 return -EINVAL;
956         }
957 }
958
959 /**
960  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
961  * @kvm: kvm instance
962  * @log: slot id and address to which we copy the log
963  *
964  * Steps 1-4 below provide a general overview of dirty page logging. See
965  * kvm_get_dirty_log_protect() for additional details.
966  *
967  * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
968  * always flush the TLB (step 4), even if a previous step failed and the
969  * dirty bitmap may be corrupt. Regardless of the outcome, the KVM logging
970  * API does not preclude a subsequent dirty log read from user space.
971  * Flushing the TLB ensures writes will be marked dirty for the next read.
972  *
973  *   1. Take a snapshot of the bit and clear it if needed.
974  *   2. Write protect the corresponding page.
975  *   3. Copy the snapshot to the userspace.
976  *   4. Flush TLB's if needed.
977  */
978 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
979 {
980         bool is_dirty = false;
981         int r;
982
983         mutex_lock(&kvm->slots_lock);
984
985         r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
986
987         if (is_dirty)
988                 kvm_flush_remote_tlbs(kvm);
989
990         mutex_unlock(&kvm->slots_lock);
991         return r;
992 }
993
994 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
995                                         struct kvm_arm_device_addr *dev_addr)
996 {
997         unsigned long dev_id, type;
998
999         dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
1000                 KVM_ARM_DEVICE_ID_SHIFT;
1001         type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
1002                 KVM_ARM_DEVICE_TYPE_SHIFT;
1003
1004         switch (dev_id) {
1005         case KVM_ARM_DEVICE_VGIC_V2:
1006                 if (!vgic_present)
1007                         return -ENXIO;
1008                 return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
1009         default:
1010                 return -ENODEV;
1011         }
1012 }
1013
1014 long kvm_arch_vm_ioctl(struct file *filp,
1015                        unsigned int ioctl, unsigned long arg)
1016 {
1017         struct kvm *kvm = filp->private_data;
1018         void __user *argp = (void __user *)arg;
1019
1020         switch (ioctl) {
1021         case KVM_CREATE_IRQCHIP: {
1022                 int ret;
1023                 if (!vgic_present)
1024                         return -ENXIO;
1025                 mutex_lock(&kvm->lock);
1026                 ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
1027                 mutex_unlock(&kvm->lock);
1028                 return ret;
1029         }
1030         case KVM_ARM_SET_DEVICE_ADDR: {
1031                 struct kvm_arm_device_addr dev_addr;
1032
1033                 if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
1034                         return -EFAULT;
1035                 return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
1036         }
1037         case KVM_ARM_PREFERRED_TARGET: {
1038                 int err;
1039                 struct kvm_vcpu_init init;
1040
1041                 err = kvm_vcpu_preferred_target(&init);
1042                 if (err)
1043                         return err;
1044
1045                 if (copy_to_user(argp, &init, sizeof(init)))
1046                         return -EFAULT;
1047
1048                 return 0;
1049         }
1050         default:
1051                 return -EINVAL;
1052         }
1053 }
1054
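/*
 * Per-CPU HYP initialisation: install the KVM init vectors in place of the
 * HYP stub, then hand the HYP page table, per-CPU stack and vector base to
 * the __cpu_init_hyp_mode() trampoline and set up stage-2 translation.
 */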
1055 static void cpu_init_hyp_mode(void *dummy)
1056 {
1057         phys_addr_t pgd_ptr;
1058         unsigned long hyp_stack_ptr;
1059         unsigned long stack_page;
1060         unsigned long vector_ptr;
1061
1062         /* Switch from the HYP stub to our own HYP init vector */
1063         __hyp_set_vectors(kvm_get_idmap_vector());
1064
1065         pgd_ptr = kvm_mmu_get_httbr();
1066         stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
1067         hyp_stack_ptr = stack_page + PAGE_SIZE;
1068         vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
1069
1070         __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
1071         __cpu_init_stage2();
1072
1073         kvm_arm_init_debug();
1074 }
1075
1076 static void cpu_hyp_reinit(void)
1077 {
1078         if (is_kernel_in_hyp_mode()) {
1079                 /*
1080                  * __cpu_init_stage2() is safe to call even if the PM
1081                  * event was cancelled before the CPU was reset.
1082                  */
1083                 __cpu_init_stage2();
1084         } else {
1085                 if (__hyp_get_vectors() == hyp_default_vectors)
1086                         cpu_init_hyp_mode(NULL);
1087         }
1088 }
1089
1090 static void cpu_hyp_reset(void)
1091 {
1092         if (!is_kernel_in_hyp_mode())
1093                 __cpu_reset_hyp_mode(hyp_default_vectors,
1094                                      kvm_get_idmap_start());
1095 }
1096
1097 static void _kvm_arch_hardware_enable(void *discard)
1098 {
1099         if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
1100                 cpu_hyp_reinit();
1101                 __this_cpu_write(kvm_arm_hardware_enabled, 1);
1102         }
1103 }
1104
1105 int kvm_arch_hardware_enable(void)
1106 {
1107         _kvm_arch_hardware_enable(NULL);
1108         return 0;
1109 }
1110
1111 static void _kvm_arch_hardware_disable(void *discard)
1112 {
1113         if (__this_cpu_read(kvm_arm_hardware_enabled)) {
1114                 cpu_hyp_reset();
1115                 __this_cpu_write(kvm_arm_hardware_enabled, 0);
1116         }
1117 }
1118
1119 void kvm_arch_hardware_disable(void)
1120 {
1121         _kvm_arch_hardware_disable(NULL);
1122 }
1123
1124 #ifdef CONFIG_CPU_PM
1125 static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
1126                                     unsigned long cmd,
1127                                     void *v)
1128 {
1129         /*
1130          * kvm_arm_hardware_enabled is left with its old value over
1131          * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
1132          * re-enable hyp.
1133          */
1134         switch (cmd) {
1135         case CPU_PM_ENTER:
1136                 if (__this_cpu_read(kvm_arm_hardware_enabled))
1137                         /*
1138                          * don't update kvm_arm_hardware_enabled here
1139                          * so that the hardware will be re-enabled
1140                          * when we resume. See below.
1141                          */
1142                         cpu_hyp_reset();
1143
1144                 return NOTIFY_OK;
1145         case CPU_PM_EXIT:
1146                 if (__this_cpu_read(kvm_arm_hardware_enabled))
1147                         /* The hardware was enabled before suspend. */
1148                         cpu_hyp_reinit();
1149
1150                 return NOTIFY_OK;
1151
1152         default:
1153                 return NOTIFY_DONE;
1154         }
1155 }
1156
1157 static struct notifier_block hyp_init_cpu_pm_nb = {
1158         .notifier_call = hyp_init_cpu_pm_notifier,
1159 };
1160
1161 static void __init hyp_cpu_pm_init(void)
1162 {
1163         cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
1164 }
1165 static void __init hyp_cpu_pm_exit(void)
1166 {
1167         cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
1168 }
1169 #else
1170 static inline void hyp_cpu_pm_init(void)
1171 {
1172 }
1173 static inline void hyp_cpu_pm_exit(void)
1174 {
1175 }
1176 #endif
1177
1178 static void teardown_common_resources(void)
1179 {
1180         free_percpu(kvm_host_cpu_state);
1181 }
1182
1183 static int init_common_resources(void)
1184 {
1185         kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
1186         if (!kvm_host_cpu_state) {
1187                 kvm_err("Cannot allocate host CPU state\n");
1188                 return -ENOMEM;
1189         }
1190
1191         /* set size of VMID supported by CPU */
1192         kvm_vmid_bits = kvm_get_vmid_bits();
1193         kvm_info("%d-bit VMID\n", kvm_vmid_bits);
1194
1195         return 0;
1196 }
1197
1198 static int init_subsystems(void)
1199 {
1200         int err = 0;
1201
1202         /*
1203          * Enable hardware so that subsystem initialisation can access EL2.
1204          */
1205         on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
1206
1207         /*
1208          * Register the CPU low-power (CPU PM) notifier
1209          */
1210         hyp_cpu_pm_init();
1211
1212         /*
1213          * Init HYP view of VGIC
1214          */
1215         err = kvm_vgic_hyp_init();
1216         switch (err) {
1217         case 0:
1218                 vgic_present = true;
1219                 break;
1220         case -ENODEV:
1221         case -ENXIO:
1222                 vgic_present = false;
1223                 err = 0;
1224                 break;
1225         default:
1226                 goto out;
1227         }
1228
1229         /*
1230          * Init HYP architected timer support
1231          */
1232         err = kvm_timer_hyp_init();
1233         if (err)
1234                 goto out;
1235
1236         kvm_perf_init();
1237         kvm_coproc_table_init();
1238
1239 out:
1240         on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
1241
1242         return err;
1243 }
1244
1245 static void teardown_hyp_mode(void)
1246 {
1247         int cpu;
1248
1249         if (is_kernel_in_hyp_mode())
1250                 return;
1251
1252         free_hyp_pgds();
1253         for_each_possible_cpu(cpu)
1254                 free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
1255         hyp_cpu_pm_exit();
1256 }
1257
1258 static int init_vhe_mode(void)
1259 {
1260         kvm_info("VHE mode initialized successfully\n");
1261         return 0;
1262 }
1263
1264 /**
1265  * Initialize Hyp-mode on all online CPUs
1266  */
1267 static int init_hyp_mode(void)
1268 {
1269         int cpu;
1270         int err = 0;
1271
1272         /*
1273          * Allocate Hyp PGD and setup Hyp identity mapping
1274          */
1275         err = kvm_mmu_init();
1276         if (err)
1277                 goto out_err;
1278
1279         /*
1280          * It is probably enough to obtain the default on one
1281          * CPU. It's unlikely to be different on the others.
1282          */
1283         hyp_default_vectors = __hyp_get_vectors();
1284
1285         /*
1286          * Allocate stack pages for Hypervisor-mode
1287          */
1288         for_each_possible_cpu(cpu) {
1289                 unsigned long stack_page;
1290
1291                 stack_page = __get_free_page(GFP_KERNEL);
1292                 if (!stack_page) {
1293                         err = -ENOMEM;
1294                         goto out_err;
1295                 }
1296
1297                 per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
1298         }
1299
1300         /*
1301          * Map the Hyp-code called directly from the host
1302          */
1303         err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
1304                                   kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
1305         if (err) {
1306                 kvm_err("Cannot map world-switch code\n");
1307                 goto out_err;
1308         }
1309
1310         err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
1311                                   kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
1312         if (err) {
1313                 kvm_err("Cannot map rodata section\n");
1314                 goto out_err;
1315         }
1316
1317         /*
1318          * Map the Hyp stack pages
1319          */
1320         for_each_possible_cpu(cpu) {
1321                 char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
1322                 err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
1323                                           PAGE_HYP);
1324
1325                 if (err) {
1326                         kvm_err("Cannot map hyp stack\n");
1327                         goto out_err;
1328                 }
1329         }
1330
1331         for_each_possible_cpu(cpu) {
1332                 kvm_cpu_context_t *cpu_ctxt;
1333
1334                 cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
1335                 err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
1336
1337                 if (err) {
1338                         kvm_err("Cannot map host CPU state: %d\n", err);
1339                         goto out_err;
1340                 }
1341         }
1342
1343         kvm_info("Hyp mode initialized successfully\n");
1344
1345         return 0;
1346
1347 out_err:
1348         teardown_hyp_mode();
1349         kvm_err("error initializing Hyp mode: %d\n", err);
1350         return err;
1351 }
1352
1353 static void check_kvm_target_cpu(void *ret)
1354 {
1355         *(int *)ret = kvm_target_cpu();
1356 }
1357
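/*
 * Look up the vcpu whose MPIDR affinity matches the given mpidr value,
 * e.g. to resolve the target of a PSCI CPU_ON call.
 */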
1358 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
1359 {
1360         struct kvm_vcpu *vcpu;
1361         int i;
1362
1363         mpidr &= MPIDR_HWID_BITMASK;
1364         kvm_for_each_vcpu(i, vcpu, kvm) {
1365                 if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
1366                         return vcpu;
1367         }
1368         return NULL;
1369 }
1370
1371 /**
1372  * Initialize Hyp-mode and memory mappings on all CPUs.
1373  */
1374 int kvm_arch_init(void *opaque)
1375 {
1376         int err;
1377         int ret, cpu;
1378
1379         if (!is_hyp_mode_available()) {
1380                 kvm_err("HYP mode not available\n");
1381                 return -ENODEV;
1382         }
1383
1384         for_each_online_cpu(cpu) {
1385                 smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
1386                 if (ret < 0) {
1387                         kvm_err("Error, CPU %d not supported!\n", cpu);
1388                         return -ENODEV;
1389                 }
1390         }
1391
1392         err = init_common_resources();
1393         if (err)
1394                 return err;
1395
1396         if (is_kernel_in_hyp_mode())
1397                 err = init_vhe_mode();
1398         else
1399                 err = init_hyp_mode();
1400         if (err)
1401                 goto out_err;
1402
1403         err = init_subsystems();
1404         if (err)
1405                 goto out_hyp;
1406
1407         return 0;
1408
1409 out_hyp:
1410         teardown_hyp_mode();
1411 out_err:
1412         teardown_common_resources();
1413         return err;
1414 }
1415
1416 /* NOP: Compiling as a module not supported */
1417 void kvm_arch_exit(void)
1418 {
1419         kvm_perf_teardown();
1420 }
1421
1422 static int arm_init(void)
1423 {
1424         int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1425         return rc;
1426 }
1427
1428 module_init(arm_init);