/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);

#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

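/*
 * Illustrative note: the cast takes the low-order byte of the word, which
 * is c[0] ('l') on a little-endian CPU and c[3] ('b') on a big-endian one.
 * setup_processor() appends this character to the machine and ELF platform
 * strings, e.g. "armv7l".
 */
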
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{ .name = "Video RAM",   .flags = IORESOURCE_MEM },
	{ .name = "Kernel code", .flags = IORESOURCE_MEM },
	{ .name = "Kernel data", .flags = IORESOURCE_MEM }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC parallel port ranges; claimed only when the machine asks. */
static struct resource io_res[] = {
	{ .name = "reserved", .start = 0x3bc, .end = 0x3be,
	  .flags = IORESOURCE_IO | IORESOURCE_BUSY },
	{ .name = "reserved", .start = 0x378, .end = 0x37f,
	  .flags = IORESOURCE_IO | IORESOURCE_BUSY },
	{ .name = "reserved", .start = 0x278, .end = 0x27f,
	  .flags = IORESOURCE_IO | IORESOURCE_BUSY }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/*
		 * Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7.
		 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);

		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

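/*
 * Worked example, using a hypothetical CCSIDR value: id_reg = 0x203fe01a
 * gives line_size = 4 << (2 + 2) = 64 bytes and num_sets = 0x1ff + 1 = 512,
 * so one way spans 64 * 512 = 32KB. With 4KB pages that exceeds PAGE_SIZE,
 * and such an I-cache would be reported as aliasing.
 */
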
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

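/*
 * Example, using a hypothetical cache type register value: cachetype =
 * 0x8444c003 has bits [31:29] = 0b100, selecting the ARMv7 register format
 * above, and L1Ip bits [15:14] = 0b11, so the I-cache would be classified
 * as PIPT.
 */
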
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

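/*
 * Example, using a hypothetical ID_ISAR5 value: isar5 = 0x00011121 has the
 * AES field (bits [7:4]) = 2, SHA1 (bits [11:8]) = 1, SHA2 (bits [15:12]) = 1
 * and CRC32 (bits [19:16]) = 1, so all five HWCAP2 flags above would be
 * advertised.
 */
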
static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

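/*
 * Example: booting on physical CPU 2 of a four-CPU system produces the map
 * { 2, 1, 0, 3 }: logical CPU 0 runs on physical CPU 2, logical CPU 2 takes
 * over physical CPU 0, and the remaining CPUs stay identity-mapped.
 */
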
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

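/*
 * Illustrative example: MPIDRs 0x000, 0x001, 0x100 and 0x101 give
 * mask = 0x101, with one significant bit at Aff0 and one at Aff1, so
 * shift_aff[0] = 0 and shift_aff[1] = MPIDR_LEVEL_BITS - 1 = 7. MPIDR 0x101
 * then hashes to (0x001 >> 0) | (0x100 >> 7) = 3, mapping the four CPUs
 * collision-free onto indices 0-3.
 */
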
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)aligned_start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)aligned_start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

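/*
 * Example (assuming PHYS_OFFSET lies below the range): start = 0x80000800
 * and size = 0x100000 round the start up to 0x80001000, shrink the size to
 * 0xff800 and mask it down to 0xff000, so the bank registered with memblock
 * is [0x80001000, 0x80100000).
 */
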
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

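/*
 * Usage examples: "mem=64M" registers 64MB starting at PHYS_OFFSET, while
 * "mem=512M@0x80000000" places the bank at 0x80000000. The first "mem="
 * option wipes the firmware-provided memory map; subsequent "mem=" options
 * append further banks.
 */
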
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

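/*
 * Illustrative example: booting with "crashkernel=64M@0x60000000" makes
 * parse_crashkernel() report crash_size = 64MB and crash_base = 0x60000000;
 * that range is then reserved from memblock and published via crashk_res so
 * kexec can load a capture kernel there.
 */
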
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

#ifdef CONFIG_MMU
	early_paging_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
	xen_early_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
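		/*
		 * Example: with HZ = 100 and loops_per_jiffy = 49817 this
		 * prints 49817 / 5000 = 9 and (49817 / 50) % 100 = 96,
		 * i.e. "BogoMIPS	: 9.96" (roughly loops_per_jiffy * HZ
		 * / 500000).
		 */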
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};