/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/mmu_context.h>
#include <asm/code-patching.h>
#include <asm/kvm_ppc.h>
#include <asm/hugetlb.h>
#include <asm/livepatch.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

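/*
 * Count of secondary CPUs still spinning in the common spinloop; each
 * secondary decrements it once it starts spinning on its own paca
 * spinloop (see generic_secondary_smp_init in head_64.S), and
 * smp_release_cpus() below waits for it to reach zero.
 */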
int spinning_secondaries;

/* Pick defaults since we might want to patch instructions
 * before we've read this from the device tree.
 */
struct ppc64_caches ppc64_caches = {
	.dline_size = 0x40,
	.log_dline_size = 6,
	.iline_size = 0x40,
	.log_iline_size = 6
};
EXPORT_SYMBOL_GPL(ppc64_caches);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;

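/*
 * Note: these values are exported to userland via the AT_DCACHEBSIZE,
 * AT_ICACHEBSIZE and AT_UCACHEBSIZE auxv entries (see ARCH_DLINFO in
 * asm/elf.h), which C libraries can use for their cache-flush helpers.
 */
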
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
static void setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca[cpu].tcd_ptr = &paca[first].tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 */
		if (smt_enabled_at_boot >= 2 &&
		    !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
		    book3e_htw_mode != PPC_HTW_E6500) {
			/* Should we panic instead? */
			WARN_ONCE(1, "%s: unsupported MMU configuration -- expect problems\n",
				  __func__);
		}
	}
}
#else
static void setup_tlb_core_data(void)
{
}
#endif

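/*
 * Note: pointing each thread's tcd_ptr at the first sibling's
 * tlb_core_data gives all threads on a core one shared TLB state and
 * lock word; the assembly TLB miss handlers take that lock through
 * tcd_ptr directly, hence the BUILD_BUG_ON above pinning the lock to
 * offset 0.
 */
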
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
static void check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);
			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;

	return 0;
}
early_param("smt-enabled", early_smt_enabled);

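/*
 * Usage: booting with "smt-enabled=off" brings each core up with a
 * single thread, "smt-enabled=2" caps each core at two threads, and
 * "smt-enabled=on" (or omitting the option) enables all
 * threads_per_core threads.
 */
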
#else
#define check_smt_enabled()
#endif /* CONFIG_SMP */

/** Fix up paca fields required for the boot cpu */
static void fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we set up percpu data */
	get_paca()->data_offset = 0;
}

static void cpu_ready_for_interrupts(void)
{
	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;

	/*
	 * Enable AIL if supported, and we are in hypervisor mode. If we are
	 * not in hypervisor mode, we enable relocation-on interrupts later
	 * in pSeries_setup_arch() using the H_SET_MODE hcall.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}
}

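/*
 * Note: AIL (Alternate Interrupt Location) mode 3 on ISA 2.07 (POWER8)
 * and later delivers interrupts with relocation enabled, vectored at
 * the usual offsets from 0xc000000000004000, so handlers can skip the
 * real-mode entry path.
 */
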
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * on.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended: be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(&paca[boot_cpuid]);
	fixup_boot_paca();

	/* Probe the machine type */
	probe_machine();

	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/* Apply all the dynamic patching */
	apply_feature_fixups();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists.
	 */
	cpu_ready_for_interrupts();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even).
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	get_paca()->soft_enabled = 0;

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}
#endif /* CONFIG_SMP */

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
static bool use_spinloop(void)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
		return true;

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */
	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC */

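/*
 * Note on the -PHYSICAL_START bias above: secondaries spin in the copy
 * of the early boot code at physical address zero, so when the kernel
 * is linked to run at a non-zero PHYSICAL_START (e.g. a kdump kernel)
 * the store must go through the zero-based alias of the symbol rather
 * than the address the kernel itself was linked at.
 */
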
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by
 * cache flush routines and/or provided to userland
 */
static void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for_each_node_by_type(np, "cpu") {
		num_cpus += 1;

		/*
		 * We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		if (num_cpus == 1) {
			const __be32 *sizep, *lsizep;
			u32 size, lsize;

			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = of_get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "d-cache-block-size",
						 NULL);
			/* fallback if block size missing */
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "d-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find dcache properties! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.dsize = size;
			ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = of_get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "i-cache-block-size",
						 NULL);
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "i-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find icache properties! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.isize = size;
			ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	DBG(" <- initialize_cache_info()\n");
}

/*
 * Do some initial setup of the system. The parameters are those which
 * were passed in from the bootloader.
 */
void __init setup_system(void)
{
	DBG(" -> setup_system()\n");

	/*
	 * Unflatten the device-tree passed by prom_init or kexec
	 */
	unflatten_device_tree();

	/*
	 * Fill the ppc64_caches & systemcfg structures with information
	 * retrieved from the device-tree.
	 */
	initialize_cache_info();

#ifdef CONFIG_PPC_RTAS
	/*
	 * Initialize RTAS if available
	 */
	rtas_initialize();
#endif /* CONFIG_PPC_RTAS */

	/*
	 * Check if we have an initrd provided via the device-tree
	 */
	check_for_initrd();

	/*
	 * Do some platform specific early initializations, which include
	 * setting up the hash table pointers. It also sets up some
	 * interrupt-mapping related options that will be used by
	 * finish_device_tree()
	 */
	if (ppc_md.init_early)
		ppc_md.init_early();

	/*
	 * We can discover serial ports now since the above did setup the
	 * hash table management for us, thus ioremap works. We do that early
	 * so that further code can be debugged
	 */
	find_legacy_serial_ports();

	/*
	 * Register early console
	 */
	register_early_udbg_console();

	/*
	 * Initialize xmon
	 */
	xmon_setup();

	smp_setup_cpu_maps();
	check_smt_enabled();
	setup_tlb_core_data();

	/*
	 * Freescale Book3e parts spin in a loop provided by firmware,
	 * so smp_release_cpus() does nothing for them
	 */
#if defined(CONFIG_SMP)
	/* Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids
	 */
	smp_release_cpus();
#endif

	pr_info("Starting Linux %s %s\n", init_utsname()->machine,
		init_utsname()->version);

	pr_info("-----------------------------------------------------\n");
	pr_info("ppc64_pft_size    = 0x%llx\n", ppc64_pft_size);
	pr_info("phys_mem_size     = 0x%llx\n", memblock_phys_mem_size());

	if (ppc64_caches.dline_size != 0x80)
		pr_info("dcache_line_size  = 0x%x\n", ppc64_caches.dline_size);
	if (ppc64_caches.iline_size != 0x80)
		pr_info("icache_line_size  = 0x%x\n", ppc64_caches.iline_size);

	pr_info("cpu_features      = 0x%016lx\n", cur_cpu_spec->cpu_features);
	pr_info("  possible        = 0x%016lx\n", CPU_FTRS_POSSIBLE);
	pr_info("  always          = 0x%016lx\n", CPU_FTRS_ALWAYS);
	pr_info("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
		cur_cpu_spec->cpu_user_features2);
	pr_info("mmu_features      = 0x%08x\n", cur_cpu_spec->mmu_features);
	pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);

#ifdef CONFIG_PPC_STD_MMU_64
	if (htab_address)
		pr_info("htab_address      = 0x%p\n", htab_address);

	pr_info("htab_hash_mask    = 0x%lx\n", htab_hash_mask);
#endif

	if (PHYSICAL_START > 0)
		pr_info("physical_start    = 0x%llx\n",
			(unsigned long long)PHYSICAL_START);
	pr_info("-----------------------------------------------------\n");

	DBG(" <- setup_system()\n");
}

/* This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;

	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS, the first segment is bolted */
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

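/*
 * Note: SID_SHIFT is 28 and SID_SHIFT_1T is 40, so the Book3S limit
 * computed above is 256MB with classic segments and 1TB when the MMU
 * supports 1TB segments.
 */
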
static void __init irqstack_early_init(void)
{
	u64 limit = safe_stack_limit();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
	}
}

#ifdef CONFIG_PPC_BOOK3E
static void __init exc_lvl_early_init(void)
{
	unsigned int i;
	unsigned long sp;

	for_each_possible_cpu(i) {
		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		critirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].crit_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].dbg_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].mc_kstack = __va(sp + THREAD_SIZE);
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#else
#define exc_lvl_early_init()
#endif

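/*
 * Note: the paca *_kstack pointers are set THREAD_SIZE above each
 * allocation because these stacks grow down from the top, while the
 * *irq_ctx pointers reference the thread_info at the bottom of the
 * same block.
 */
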
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
static void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		struct thread_info *ti;
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		klp_init_thread_info(ti);
		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for machine check exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		klp_init_thread_info(ti);
		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}

/*
 * Called from start_kernel, this initializes memblock, which is used
 * to manage page allocation until mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
	*cmdline_p = boot_command_line;

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;

	/* Reserve large chunks of memory for use by CMA for KVM */
	kvm_cma_reserve();

	/*
	 * Reserve any gigantic pages requested on the command line.
	 * memblock needs to have been initialized by the time this is
	 * called since this will reserve memory.
	 */
	reserve_hugetlb_gpages();

	if (ppc_md.panic)
		setup_panic();

	klp_init_thread_info(&init_thread_info);

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = klimit;

#ifdef CONFIG_PPC_64K_PAGES
	init_mm.context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(&init_mm.context);
#endif
	irqstack_early_init();
	exc_lvl_early_init();
	emergency_stack_init();

	initmem_init();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif
	if (ppc_md.setup_arch)
		ppc_md.setup_arch();

	paging_init();

	/* Initialize the MMU context management stuff */
	mmu_context_init();

	/* Interrupt code needs to be 64K-aligned */
	if ((unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
		      (unsigned long)_stext);
}

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca[cpu].data_offset = __per_cpu_offset[cpu];
	}
}
#endif /* CONFIG_SMP */

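/*
 * Note on setup_per_cpu_areas(): each CPU's percpu offset is mirrored
 * into paca[cpu].data_offset so that low-level code, which keeps r13
 * pointing at the paca, can locate percpu data without consulting
 * __per_cpu_offset[].
 */
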
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}

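/*
 * Note: the perf-based hard lockup detector counts CPU cycles, so the
 * sample period above is just the core frequency in Hz multiplied by
 * the watchdog threshold in seconds.
 */
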
/*
 * The hardlockup detector breaks PMU event based branches and is likely
 * to get false positives in KVM guests, so disable it by default.
 */
static int __init disable_hardlockup_detector(void)
{
	hardlockup_detector_disable();

	return 0;
}
early_initcall(disable_hardlockup_detector);
#endif