arm64: mm: Remove split_p*d() functions
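
Taken together, the hunks below make three related changes. First, the split_pmd()/split_pud() helpers go away and any attempt to re-map a live section with pages becomes a hard BUG(). Second, the per-call block_mappings_allowed() check is replaced by an allow_block_mappings flag that is computed once by the caller and threaded down the whole mapping hierarchy. Third, the kernel text/rodata boundaries move from __start_rodata/_etext to _etext/__init_begin. A sketch of the resulting call chain (reconstructed from the hunks; not itself part of the diff):

    create_pgd_mapping() / create_mapping_noalloc() / create_mapping_late()
        -> __create_pgd_mapping(..., alloc, allow_block_mappings)
            -> init_pgd() -> alloc_init_pud() -> alloc_init_pmd()
                -> alloc_init_pte()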
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0f85a46..9d2d7cf 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -77,7 +77,6 @@ static phys_addr_t __init early_pgtable_alloc(void)
        void *ptr;
 
        phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-       BUG_ON(!phys);
 
        /*
         * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
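
The dropped BUG_ON(!phys) was redundant: the phys_addr_t flavour of memblock_alloc() in this era does not return 0 on failure, it panics. A minimal sketch of that behaviour (an assumption about the period's mm/memblock.c; not part of this diff):

	phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align,
					       phys_addr_t max_addr)
	{
		phys_addr_t alloc = __memblock_alloc_base(size, align, max_addr);

		if (alloc == 0)		/* callers never see a failed allocation */
			panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
			      (unsigned long long)size, (unsigned long long)max_addr);
		return alloc;
	}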
@@ -97,24 +96,6 @@ static phys_addr_t __init early_pgtable_alloc(void)
        return phys;
 }
 
-/*
- * remap a PMD into pages
- */
-static void split_pmd(pmd_t *pmd, pte_t *pte)
-{
-       unsigned long pfn = pmd_pfn(*pmd);
-       int i = 0;
-
-       do {
-               /*
-                * Need to have the least restrictive permissions available
-                * permissions will be fixed up later
-                */
-               set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
-               pfn++;
-       } while (pte++, i++, i < PTRS_PER_PTE);
-}
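
split_pmd() rewrote a live section mapping in place and flushed the TLB afterwards. The architecture instead requires a break-before-make sequence whenever a live translation is changed; without it, TLB conflict aborts are possible. Roughly (illustrative sketch, not code from this patch):

	pmd_clear(pmd);					/* 1. break: invalidate the entry */
	flush_tlb_kernel_range(addr, addr + PMD_SIZE);	/* 2. flush stale translations */
	__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);	/* 3. make: install the table */

Rather than implement break-before-make here, the diff forbids splitting outright: a region that may later need page-granular permissions must be mapped with pages from the start (hence the allow_block_mappings flag introduced below).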
-
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  pgprot_t prot,
@@ -122,15 +103,13 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 {
        pte_t *pte;
 
-       if (pmd_none(*pmd) || pmd_sect(*pmd)) {
+       BUG_ON(pmd_sect(*pmd));
+       if (pmd_none(*pmd)) {
                phys_addr_t pte_phys;
                BUG_ON(!pgtable_alloc);
                pte_phys = pgtable_alloc();
                pte = pte_set_fixmap(pte_phys);
-               if (pmd_sect(*pmd))
-                       split_pmd(pmd, pte);
                __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
-               flush_tlb_all();
                pte_clear_fixmap();
        }
        BUG_ON(pmd_bad(*pmd));
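
With splitting gone, the only transition left at this level is invalid -> table, which is also why the flush_tlb_all() can be dropped: TLBs never cache invalid entries, so installing a table over a none pmd needs no maintenance. The allocation path reduces to (condensed sketch of the post-patch code; fixmap plumbing omitted):

	BUG_ON(pmd_sect(*pmd));			/* sections are never split */
	if (pmd_none(*pmd)) {			/* invalid -> table only */
		phys_addr_t pte_phys = pgtable_alloc();
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		/* no flush_tlb_all(): the old entry was invalid */
	}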
@@ -144,41 +123,10 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
        pte_clear_fixmap();
 }
 
-static void split_pud(pud_t *old_pud, pmd_t *pmd)
-{
-       unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
-       pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
-       int i = 0;
-
-       do {
-               set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
-               addr += PMD_SIZE;
-       } while (pmd++, i++, i < PTRS_PER_PMD);
-}
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
-{
-
-       /*
-        * If debug_page_alloc is enabled we must map the linear map
-        * using pages. However, other mappings created by
-        * create_mapping_noalloc must use sections in some cases. Allow
-        * sections to be used in those cases, where no pgtable_alloc
-        * function is provided.
-        */
-       return !pgtable_alloc || !debug_pagealloc_enabled();
-}
-#else
-static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
-{
-       return true;
-}
-#endif
-
 static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
-                                 phys_addr_t (*pgtable_alloc)(void))
+                                 phys_addr_t (*pgtable_alloc)(void),
+                                 bool allow_block_mappings)
 {
        pmd_t *pmd;
        unsigned long next;
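
block_mappings_allowed() re-derived its answer from pgtable_alloc and debug_pagealloc_enabled() at every level of every walk. Its replacement is a plain bool decided once at the entry points and threaded down unchanged; consolidated from the later hunks, the callers end up as:

	/* early/late kernel mappings: pages whenever debug_pagealloc is on */
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     early_pgtable_alloc, !debug_pagealloc_enabled());

	/* no-alloc early mappings: must still be allowed to use sections */
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);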
@@ -186,20 +134,13 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
-       if (pud_none(*pud) || pud_sect(*pud)) {
+       BUG_ON(pud_sect(*pud));
+       if (pud_none(*pud)) {
                phys_addr_t pmd_phys;
                BUG_ON(!pgtable_alloc);
                pmd_phys = pgtable_alloc();
                pmd = pmd_set_fixmap(pmd_phys);
-               if (pud_sect(*pud)) {
-                       /*
-                        * need to have the 1G of mappings continue to be
-                        * present
-                        */
-                       split_pud(pud, pmd);
-               }
                __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
-               flush_tlb_all();
                pmd_clear_fixmap();
        }
        BUG_ON(pud_bad(*pud));
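
The pud level gets the identical treatment. Note that the retained comment above ("... and remove them") now overstates what happens: an initial section mapping is no longer removed or split, it is a hard error. The per-level invariant is simply (sketch):

	BUG_ON(pud_sect(*pud));		/* was: split_pud(pud, pmd) */
	if (pud_none(*pud))		/* only invalid -> table is legal */
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);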
@@ -209,7 +150,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
-                     block_mappings_allowed(pgtable_alloc)) {
+                     allow_block_mappings) {
                        pmd_t old_pmd = *pmd;
                        pmd_set_huge(pmd, phys, prot);
                        /*
@@ -248,7 +189,8 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
 
 static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
-                                 phys_addr_t (*pgtable_alloc)(void))
+                                 phys_addr_t (*pgtable_alloc)(void),
+                                 bool allow_block_mappings)
 {
        pud_t *pud;
        unsigned long next;
@@ -268,8 +210,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
-               if (use_1G_block(addr, next, phys) &&
-                   block_mappings_allowed(pgtable_alloc)) {
+               if (use_1G_block(addr, next, phys) && allow_block_mappings) {
                        pud_t old_pud = *pud;
                        pud_set_huge(pud, phys, prot);
 
@@ -290,7 +231,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                        }
                } else {
                        alloc_init_pmd(pud, addr, next, phys, prot,
-                                      pgtable_alloc);
+                                      pgtable_alloc, allow_block_mappings);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
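
The caller-supplied flag now also gates 1GB pud blocks. For reference, use_1G_block(), whose signature appears in a hunk header above, reads roughly as follows at this point in the file's history (reconstructed from memory; not part of this diff):

	static inline bool use_1G_block(unsigned long addr, unsigned long next,
					unsigned long phys)
	{
		if (PAGE_SHIFT != 12)		/* 1GB blocks: 4K granule only */
			return false;

		if (((addr | next | phys) & ~PUD_MASK) != 0)	/* need 1GB alignment */
			return false;

		return true;
	}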
@@ -304,7 +245,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
  */
 static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot,
-                                   phys_addr_t (*pgtable_alloc)(void))
+                                   phys_addr_t (*pgtable_alloc)(void),
+                                   bool allow_block_mappings)
 {
        unsigned long addr, length, end, next;
 
@@ -322,7 +264,8 @@ static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
-               alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
+               alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
+                              allow_block_mappings);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
 }
@@ -340,9 +283,11 @@ static phys_addr_t late_pgtable_alloc(void)
 static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
                                 unsigned long virt, phys_addr_t size,
                                 pgprot_t prot,
-                                phys_addr_t (*alloc)(void))
+                                phys_addr_t (*alloc)(void),
+                                bool allow_block_mappings)
 {
-       init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
+       init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc,
+                allow_block_mappings);
 }
 
 /*
@@ -358,16 +303,15 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
                        &phys, virt);
                return;
        }
-       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
-                            NULL);
+       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
-                              pgprot_t prot)
+                              pgprot_t prot, bool allow_block_mappings)
 {
        __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
-                            late_pgtable_alloc);
+                            late_pgtable_alloc, allow_block_mappings);
 }
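
create_pgd_mapping() is the one entry point that exports the choice to callers outside this file. A hypothetical caller (illustrative only: md, prot and the EFI setting are assumptions, not part of this diff) would pass false when it needs to change permissions with page granularity later:

	/*
	 * Hypothetical: force page mappings so that a later pass can
	 * tighten permissions page by page without splitting blocks.
	 */
	create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
			   md->num_pages << EFI_PAGE_SHIFT, prot,
			   /* allow_block_mappings */ false);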
 
 static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -380,51 +324,54 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
        }
 
        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
-                            late_pgtable_alloc);
+                            late_pgtable_alloc, !debug_pagealloc_enabled());
 }
 
 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
        unsigned long kernel_start = __pa(_text);
-       unsigned long kernel_end = __pa(_etext);
+       unsigned long kernel_end = __pa(__init_begin);
 
        /*
         * Take care not to create a writable alias for the
         * read-only text and rodata sections of the kernel image.
         */
 
-       /* No overlap with the kernel text */
+       /* No overlap with the kernel text/rodata */
        if (end < kernel_start || start >= kernel_end) {
                __create_pgd_mapping(pgd, start, __phys_to_virt(start),
                                     end - start, PAGE_KERNEL,
-                                    early_pgtable_alloc);
+                                    early_pgtable_alloc,
+                                    !debug_pagealloc_enabled());
                return;
        }
 
        /*
-        * This block overlaps the kernel text mapping.
+        * This block overlaps the kernel text/rodata mappings.
         * Map the portion(s) which don't overlap.
         */
        if (start < kernel_start)
                __create_pgd_mapping(pgd, start,
                                     __phys_to_virt(start),
                                     kernel_start - start, PAGE_KERNEL,
-                                    early_pgtable_alloc);
+                                    early_pgtable_alloc,
+                                    !debug_pagealloc_enabled());
        if (kernel_end < end)
                __create_pgd_mapping(pgd, kernel_end,
                                     __phys_to_virt(kernel_end),
                                     end - kernel_end, PAGE_KERNEL,
-                                    early_pgtable_alloc);
+                                    early_pgtable_alloc,
+                                    !debug_pagealloc_enabled());
 
        /*
-        * Map the linear alias of the [_text, _etext) interval as
+        * Map the linear alias of the [_text, __init_begin) interval as
         * read-only/non-executable. This makes the contents of the
         * region accessible to subsystems such as hibernate, but
         * protects it from inadvertent modification or execution.
         */
        __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
                             kernel_end - kernel_start, PAGE_KERNEL_RO,
-                            early_pgtable_alloc);
+                            early_pgtable_alloc, !debug_pagealloc_enabled());
 }
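
The alias bounds change because the read-only part of the image now ends at __init_begin rather than _etext. The layout assumed by this code (sketch reconstructed from the symbols used here):

	/*
	 *   _text ...........  start of .text
	 *   _etext ..........  end of .text (relocated by a companion change;
	 *                      it used to sit after .rodata)
	 *   __start_rodata ..  .rodata, NOTES, exception table
	 *   __init_begin ....  end of the read-only region
	 *
	 * The linear alias of [_text, __init_begin) is mapped PAGE_KERNEL_RO,
	 * keeping it readable (e.g. for hibernate) without creating a
	 * writable or executable alias of the kernel image.
	 */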
 
 static void __init map_mem(pgd_t *pgd)
@@ -449,14 +396,14 @@ void mark_rodata_ro(void)
 {
        unsigned long section_size;
 
-       section_size = (unsigned long)__start_rodata - (unsigned long)_text;
+       section_size = (unsigned long)_etext - (unsigned long)_text;
        create_mapping_late(__pa(_text), (unsigned long)_text,
                            section_size, PAGE_KERNEL_ROX);
        /*
-        * mark .rodata as read only. Use _etext rather than __end_rodata to
-        * cover NOTES and EXCEPTION_TABLE.
+        * mark .rodata as read only. Use __init_begin rather than __end_rodata
+        * to cover NOTES and EXCEPTION_TABLE.
         */
-       section_size = (unsigned long)_etext - (unsigned long)__start_rodata;
+       section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
        create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
                            section_size, PAGE_KERNEL_RO);
 }
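
mark_rodata_ro() follows suit: with _etext now marking the end of .text proper, [_text, _etext) stays executable while everything from __start_rodata up to __init_begin (including NOTES and the exception table) becomes strictly read-only. Because the boot-time mappings and these late ones are created with the same allow_block_mappings value, the permission update never has to change mapping granularity, which is part of what lets the split_p*d() machinery go. The post-patch function, consolidated from the hunk above:

	void mark_rodata_ro(void)
	{
		unsigned long section_size;

		/* .text proper: read-only but executable */
		section_size = (unsigned long)_etext - (unsigned long)_text;
		create_mapping_late(__pa(_text), (unsigned long)_text,
				    section_size, PAGE_KERNEL_ROX);

		/* .rodata through __init_begin: covers NOTES, exception table */
		section_size = (unsigned long)__init_begin -
			       (unsigned long)__start_rodata;
		create_mapping_late(__pa(__start_rodata),
				    (unsigned long)__start_rodata,
				    section_size, PAGE_KERNEL_RO);
	}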
@@ -481,7 +428,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
        BUG_ON(!PAGE_ALIGNED(size));
 
        __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
-                            early_pgtable_alloc);
+                            early_pgtable_alloc, !debug_pagealloc_enabled());
 
        vma->addr       = va_start;
        vma->phys_addr  = pa_start;
@@ -499,8 +446,8 @@ static void __init map_kernel(pgd_t *pgd)
 {
        static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;
 
-       map_kernel_segment(pgd, _text, __start_rodata, PAGE_KERNEL_EXEC, &vmlinux_text);
-       map_kernel_segment(pgd, __start_rodata, _etext, PAGE_KERNEL, &vmlinux_rodata);
+       map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+       map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
        map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
                           &vmlinux_init);
        map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
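
In sum, map_kernel() lays the image out in four segments whose boundaries line up with the mark_rodata_ro() bounds above (summary sketch; the right-hand column follows from the earlier hunks):

	/*
	 * segment                          boot-time prot    after mark_rodata_ro()
	 * [_text, _etext)                  PAGE_KERNEL_EXEC  PAGE_KERNEL_ROX
	 * [__start_rodata, __init_begin)   PAGE_KERNEL       PAGE_KERNEL_RO
	 * [__init_begin, __init_end)       PAGE_KERNEL_EXEC  unchanged (freed after init)
	 * [_data, _end)                    PAGE_KERNEL       unchanged
	 */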