arch/arm64/mm/mmu.c
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * The empty_zero_page is a special page that is used for zero-initialized
 * data and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

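/*
 * Select the pgprot for a userspace mapping of physical memory (e.g. via
 * /dev/mem): non-RAM pfns get an uncached Device mapping, O_SYNC mappings
 * get write-combine, and anything else keeps the caller's attributes.
 */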
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

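/*
 * Allocate a zeroed page from memblock for use as a page table. This runs
 * before the linear mapping is complete, so the new page is cleared via a
 * transient fixmap slot.
 */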
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

/*
 * Remap a PMD-level section mapping into a table of individual pages.
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

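/*
 * Ensure that the pmd refers to a table of ptes (splitting an existing
 * section mapping if necessary), then install ptes covering [addr, end)
 * starting at the given pfn with the requested protection.
 */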
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

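/*
 * Remap a PUD-level section mapping as a table of PMD-level sections,
 * preserving the original output address and attributes.
 */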
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

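/*
 * Walk the PMD entries under a pud for [addr, end), using section
 * mappings where the virtual and physical addresses are suitably
 * aligned and falling back to alloc_init_pte() otherwise.
 */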
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * The existing 1G mapping needs to remain in place
			 * while it is being split.
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

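/*
 * 1GB blocks only exist at the pud level with a 4K granule (PAGE_SHIFT
 * == 12); the virtual range and the physical base must all be
 * 1GB-aligned.
 */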
static inline bool use_1G_block(unsigned long addr, unsigned long next,
			unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

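/*
 * Walk the PUD entries under a pgd for [addr, end), installing 1GB
 * blocks where use_1G_block() allows and recursing into
 * alloc_init_pmd() otherwise.
 */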
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block.
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = pud_page_paddr(old_pud);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'phys', 'virt' and 'size'.
 */
static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
		     phys_addr_t size, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

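/*
 * Allocate a page table page once the core allocators are up. PGALLOC_GFP
 * includes __GFP_ZERO, so the page comes back already cleared.
 */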
static phys_addr_t late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

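/*
 * Common back end for all mapping creation: look up the top-level entry
 * for 'virt' in 'pgdir' and hand off to init_pgd().
 */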
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*alloc)(void))
{
	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}

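/*
 * create_mapping() below may only be used during boot, while memblock can
 * still back new page tables; create_mapping_late() and
 * create_pgd_mapping() allocate tables from the page allocator instead.
 * create_pgd_mapping() also takes an explicit mm, so mappings can be
 * installed outside init_mm (e.g. for the EFI runtime services).
 */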
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     early_pgtable_alloc);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

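/*
 * With DEBUG_RODATA, only the block containing the kernel text and init
 * code is mapped executable; everything else gets PAGE_KERNEL. The
 * boundaries are only SWAPPER_BLOCK_SIZE-aligned here and are refined by
 * fixup_executable() once all memory is mapped.
 */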
#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped.
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				kernel_x_start - start,
				PAGE_KERNEL);
		create_mapping(kernel_x_start,
				__phys_to_virt(kernel_x_start),
				kernel_x_end - kernel_x_start,
				PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				__phys_to_virt(kernel_x_end),
				end - kernel_x_end,
				PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
			PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(start, end);
	}
}

static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/*
	 * Now that we are actually fully mapped, make the start/end more
	 * fine grained.
	 */
	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SWAPPER_BLOCK_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
				__pa(_stext) - aligned_start,
				PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
						     SWAPPER_BLOCK_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
				aligned_end - __pa(__init_end),
				PAGE_KERNEL);
	}
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
				(unsigned long)_etext - (unsigned long)_stext,
				PAGE_KERNEL_ROX);
}
#endif

void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			(unsigned long)__init_end - (unsigned long)__init_begin,
			PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	map_mem();
	fixup_executable();

	bootmem_init();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
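
/*
 * When the swapper uses section maps, the vmemmap (the struct page
 * array) is populated with PMD-sized block mappings; otherwise it is
 * populated with base pages.
 */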
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

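/*
 * Statically allocated page tables backing the fixmap region: bm_pmd is
 * only needed with more than two translation levels, bm_pud with more
 * than three.
 */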
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

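/*
 * Wire the static bm_* tables into swapper_pg_dir so that the fixmap
 * region is usable before any page table pages can be allocated.
 */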
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

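/*
 * Map (or, with an empty pgprot, unmap) a single fixmap slot. A typical
 * (hypothetical) use is mapping an early console's MMIO page:
 *
 *	__set_fixmap(FIX_EARLYCON_MEM_BASE, uart_phys, FIXMAP_PAGE_IO);
 *
 * where uart_phys is a page-aligned physical address.
 */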
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

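/*
 * Map the FDT through the fixmap in two steps: first a single
 * SWAPPER_BLOCK_SIZE chunk, enough to read the header and learn the total
 * size, then (if needed) the remainder. Returns the virtual address of
 * the FDT, or NULL if the blob is misaligned, invalid or too large.
 */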
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double check here if that
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
		       SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}