/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

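/*
 * Select pgprot attributes for /dev/mem-style mappings: device attributes
 * for pfns without a valid struct page, write-combine when the file was
 * opened with O_SYNC, and the caller's attributes otherwise.
 */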
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

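/*
 * Allocate a zeroed page-table page from memblock, before the core
 * allocators are up. The new table is zeroed through a transient fixmap
 * mapping, since it may not be mapped in the linear map yet.
 */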
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

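/*
 * Populate the pte level for [addr, end): make sure the pmd refers to a
 * pte table (splitting an existing section mapping if need be), then fill
 * in the ptes through the fixmap.
 */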
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot,
				  phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

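/*
 * remap a PUD section into pmds, preserving the original attributes
 */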
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

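/*
 * Populate the pmd level for [addr, end), using section mappings where
 * the virtual and physical addresses allow it, and pte-level mappings
 * otherwise.
 */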
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

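/*
 * 1GB blocks are only available with the 4K granule, and require the
 * virtual and physical addresses to be aligned to PUD_SIZE.
 */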
static inline bool use_1G_block(unsigned long addr, unsigned long next,
			unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  phys_addr_t (*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = pud_page_paddr(old_pud);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'phys', 'virt' and 'size'.
 */
static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
				    phys_addr_t size, pgprot_t prot,
				    phys_addr_t (*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

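/*
 * Allocate a zeroed page-table page once the core allocators are up;
 * the late counterpart of early_pgtable_alloc().
 */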
static phys_addr_t late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*alloc)(void))
{
	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}

static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     early_pgtable_alloc);
}

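/*
 * Install a mapping in the page tables of an arbitrary mm, allocating
 * translation tables from the buddy allocator (used, for example, when
 * setting up the EFI runtime services mappings).
 */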
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

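/*
 * Like create_mapping(), but for mappings created or changed after early
 * boot, when memblock allocations are no longer appropriate.
 */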
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped.
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				kernel_x_start - start,
				PAGE_KERNEL);
		create_mapping(kernel_x_start,
				__phys_to_virt(kernel_x_start),
				kernel_x_end - kernel_x_start,
				PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				__phys_to_virt(kernel_x_end),
				end - kernel_x_end,
				PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
			PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(start, end);
	}
}

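/*
 * With CONFIG_DEBUG_RODATA, the executable region was mapped above using
 * coarse SWAPPER_BLOCK_SIZE-aligned bounds; now that all of memory is
 * mapped, remap the non-text edge regions with page granularity.
 */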
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SWAPPER_BLOCK_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
				__pa(_stext) - aligned_start,
				PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
							  SWAPPER_BLOCK_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
				aligned_end - __pa(__init_end),
				PAGE_KERNEL);
	}
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
				(unsigned long)_etext - (unsigned long)_stext,
				PAGE_KERNEL_ROX);
}
#endif

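/*
 * Remap the init region as non-executable data once it is no longer
 * needed (called from free_initmem(), before the memory is released).
 */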
void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			(unsigned long)__init_end - (unsigned long)__init_begin,
			PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	map_mem();
	fixup_executable();

	bootmem_init();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

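/*
 * The vmemmap is populated with base pages when the kernel itself is
 * mapped with base pages, and with pmd-sized section mappings otherwise.
 */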
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

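/*
 * Statically allocated translation tables backing the fixmap region,
 * wired up by early_fixmap_init() before any allocator is available.
 */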
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

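/*
 * Point the fixmap's pgd/pud/pmd slots at the statically allocated
 * bm_pud/bm_pmd/bm_pte tables, and sanity-check that the boot-time
 * ioremap (FIX_BTMAP) slots all fall within the pmd covered by bm_pte.
 */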
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if (pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))
	    || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

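/*
 * Install or tear down a single fixmap entry: a non-zero pgprot installs
 * the mapping, a zero pgprot clears it and invalidates the TLB.
 */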
void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

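/*
 * Map the FDT read-only in the fixmap and reserve its backing memory,
 * returning its virtual address, or NULL if the blob is missing,
 * misaligned, malformed or too large.
 */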
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. We rely on MIN_FDT_ALIGN being at least
	 * 8 bytes so that we can always access the magic and size fields
	 * of the FDT header after mapping the first chunk; double check
	 * here that this is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
		       SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}