2 * Extensible Firmware Interface
4 * Based on Extensible Firmware Interface Specification version 2.4
6 * Copyright (C) 2013, 2014 Linaro Ltd.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
14 #include <linux/dmi.h>
15 #include <linux/efi.h>
16 #include <linux/export.h>
17 #include <linux/memblock.h>
18 #include <linux/bootmem.h>
20 #include <linux/of_fdt.h>
21 #include <linux/sched.h>
22 #include <linux/slab.h>
24 #include <asm/cacheflush.h>
26 #include <asm/tlbflush.h>
27 #include <asm/mmu_context.h>
/* Kernel-side copy of the UEFI memory map; fields are filled from the
 * FDT-supplied parameters in efi_init(). */
29 struct efi_memory_map memmap;
/* Firmware runtime services table, cached once the system table has been
 * remapped in arm64_enter_virtual_mode(). */
31 static efi_runtime_services_t *runtime;
/* Physical address of the EFI system table, taken from the FDT params. */
33 static u64 efi_system_table;
/* Verbosity flag controlled by the "uefi_debug" early parameter; passed to
 * efi_get_fdt_params() in efi_init(). */
35 static int uefi_debug __initdata;
/* early_param handler; body not visible in this extract -- presumably just
 * sets uefi_debug from @str. TODO confirm against full source. */
36 static int __init uefi_debug_setup(char *str)
42 early_param("uefi_debug", uefi_debug_setup);
/*
 * Report whether @md describes normal (write-back cacheable) RAM, as
 * opposed to device/IO space.  The return statements are among the
 * lines missing from this extract.
 */
44 static int __init is_normal_ram(efi_memory_desc_t *md)
46 if (md->attribute & EFI_MEMORY_WB)
/*
 * Build the identity mappings needed to call UEFI services before the
 * switch to a virtual map: every memblock memory region, plus any
 * EFI_MEMORY_RUNTIME region that is not normal RAM.
 */
51 static void __init efi_setup_idmap(void)
53 struct memblock_region *r;
54 efi_memory_desc_t *md;
55 u64 paddr, npages, size;
/* Identity-map all of system RAM first. */
57 for_each_memblock(memory, r)
58 create_id_mapping(r->base, r->size, 0)&#59;
60 /* map runtime io spaces */
61 for_each_efi_memory_desc(&memmap, md) {
/* Skip non-runtime regions and normal RAM (already covered above). */
62 if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
64 paddr = md->phys_addr;
65 npages = md->num_pages;
/* Convert EFI 4K page units to native page boundaries (may widen). */
66 memrange_efi_to_native(&paddr, &npages);
67 size = npages << PAGE_SHIFT;
/* Third arg 1 here vs 0 above -- presumably selects a device mapping;
 * TODO confirm against create_id_mapping()'s definition. */
68 create_id_mapping(paddr, size, 1);
/*
 * Early validation of the UEFI system table: map it with early_memremap(),
 * check the header signature and revision, log the firmware vendor, then
 * parse the configuration tables.  Several lines (returns, braces, the
 * vendor copy loop body, some declarations) are missing from this extract.
 */
72 static int __init uefi_init(void)
75 char vendor[100] = "unknown";
78 efi.systab = early_memremap(efi_system_table,
79 sizeof(efi_system_table_t));
80 if (efi.systab == NULL) {
81 pr_warn("Unable to map EFI system table.\n");
85 set_bit(EFI_BOOT, &efi.flags);
/* EFI_64BIT is set unconditionally -- presumably the arm64 stub only
 * boots from 64-bit firmware; TODO confirm. */
86 set_bit(EFI_64BIT, &efi.flags);
89 * Verify the EFI Table
91 if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
92 pr_err("System table signature incorrect\n");
/* Revision packs major.minor: high 16 bits major, low 16 bits minor. */
95 if ((efi.systab->hdr.revision >> 16) < 2)
96 pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
97 efi.systab->hdr.revision >> 16,
98 efi.systab->hdr.revision & 0xffff);
100 /* Show what we know for posterity */
/* fw_vendor points at a wide-character string (c16) -- copied into the
 * narrow vendor[] buffer by the loop below. */
101 c16 = early_memremap(efi.systab->fw_vendor,
104 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
109 pr_info("EFI v%u.%.02u by %s\n",
110 efi.systab->hdr.revision >> 16,
111 efi.systab->hdr.revision & 0xffff, vendor);
113 retval = efi_config_init(NULL);
115 set_bit(EFI_CONFIG_TABLES, &efi.flags);
/* Drop the temporary early mappings; permanent mappings come later in
 * arm64_enter_virtual_mode(). */
117 early_memunmap(c16, sizeof(vendor));
118 early_memunmap(efi.systab, sizeof(efi_system_table_t));
/*
 * Human-readable names indexed by EFI memory type, used by
 * reserve_regions() when logging the memory map.  Most of the
 * initializers are missing from this extract.
 */
123 static __initdata char memory_type_name[][32] = {
131 {"Conventional Memory"},
133 {"ACPI Reclaim Memory"},
135 {"Memory Mapped I/O"},
141 * Return true for RAM regions we want to permanently reserve.
143 static __init int is_reserve_region(efi_memory_desc_t *md)
/* Only normal RAM is ever a candidate for permanent reservation. */
145 if (!is_normal_ram(md))
/* RAM the firmware needs at runtime must not be handed to the kernel. */
148 if (md->attribute & EFI_MEMORY_RUNTIME)
/* ACPI reclaim tables and explicitly reserved regions are kept too.
 * (The return statements are among the lines missing from this extract.) */
151 if (md->type == EFI_ACPI_RECLAIM_MEMORY ||
152 md->type == EFI_RESERVED_TYPE)
/*
 * Walk the UEFI memory map: register all normal RAM with the early memory
 * subsystem via early_init_dt_add_memory_arch(), and memblock_reserve()
 * everything the kernel must not touch -- is_reserve_region() memory plus
 * boot services code/data, which free_boot_services() releases later.
 */
158 static __init void reserve_regions(void)
160 efi_memory_desc_t *md;
161 u64 paddr, npages, size;
164 pr_info("Processing EFI memory map:\n");
166 for_each_efi_memory_desc(&memmap, md) {
167 paddr = md->phys_addr;
168 npages = md->num_pages;
/* Log the extent in EFI page units, before native-page conversion. */
171 pr_info(" 0x%012llx-0x%012llx [%s]",
172 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
173 memory_type_name[md->type]);
/* Round to native page boundaries; this may widen the range. */
175 memrange_efi_to_native(&paddr, &npages);
176 size = npages << PAGE_SHIFT;
178 if (is_normal_ram(md))
179 early_init_dt_add_memory_arch(paddr, size);
/* Boot services regions are reserved for now so the firmware can still
 * use them until SetVirtualAddressMap has been called. */
181 if (is_reserve_region(md) ||
182 md->type == EFI_BOOT_SERVICES_CODE ||
183 md->type == EFI_BOOT_SERVICES_DATA) {
184 memblock_reserve(paddr, size);
193 set_bit(EFI_MEMMAP, &efi.flags);
/*
 * Release the single range [start, end) to the page allocator and return
 * the number of bytes freed (the return statement is among the lines
 * missing from this extract).
 */
197 static u64 __init free_one_region(u64 start, u64 end)
199 u64 size = end - start;
202 pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);
/* Late free: these pages were memblock-reserved during early boot. */
204 free_bootmem_late(start, size);
/*
 * Free [start, end) while carving out any overlap with the UEFI memory
 * map itself, which must stay resident for runtime services.  Returns
 * the total number of bytes freed.
 */
208 static u64 __init free_region(u64 start, u64 end)
210 u64 map_start, map_end, total = 0;
/* Page-align the memmap extent so whole pages containing it survive. */
215 map_start = (u64)memmap.phys_map;
216 map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
217 map_start &= PAGE_MASK;
219 if (start < map_end && end > map_start) {
220 /* region overlaps UEFI memmap */
221 if (start < map_start)
222 total += free_one_region(start, map_start);
/* Free the tail beyond the memmap; the guard for this call is among
 * the lines missing from this extract -- presumably end > map_end. */
225 total += free_one_region(map_end, end);
/* No overlap: free the whole range in one go. */
227 total += free_one_region(start, end);
/*
 * Release EFI boot services code/data regions once SetVirtualAddressMap
 * has been called, clipping at native page boundaries so that memory
 * shared with regions we must keep is preserved.
 */
232 static void __init free_boot_services(void)
235 u64 keep_end, free_start, free_end;
236 efi_memory_desc_t *md;
239 * If kernel uses larger pages than UEFI, we have to be careful
240 * not to inadvertently free memory we want to keep if there is
241 * overlap at the kernel page size alignment. We do not want to
242 * free is_reserve_region() memory nor the UEFI memmap itself.
244 * The memory map is sorted, so we keep track of the end of
245 * any previous region we want to keep, remember any region
246 * we want to free and defer freeing it until we encounter
247 * the next region we want to keep. This way, before freeing
248 * it, we can clip it as needed to avoid freeing memory we
249 * want to keep for UEFI.
255 for_each_efi_memory_desc(&memmap, md) {
256 u64 paddr, npages, size;
258 if (is_reserve_region(md)) {
260 * We don't want to free any memory from this region.
263 /* adjust free_end then free region */
/* Clip the pending free range by one page if it runs into the
 * start of this keep-region. */
264 if (free_end > md->phys_addr)
265 free_end -= PAGE_SIZE;
266 total_freed += free_region(free_start, free_end);
269 keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
273 if (md->type != EFI_BOOT_SERVICES_CODE &&
274 md->type != EFI_BOOT_SERVICES_DATA) {
275 /* no need to free this region */
280 * We want to free memory from this region.
282 paddr = md->phys_addr;
283 npages = md->num_pages;
284 memrange_efi_to_native(&paddr, &npages);
285 size = npages << PAGE_SHIFT;
/* Adjacent/overlapping free regions are coalesced by extending
 * free_end; otherwise the pending range is flushed first.
 * (Several control-flow lines are missing from this extract.) */
288 if (paddr <= free_end)
289 free_end = paddr + size;
291 total_freed += free_region(free_start, free_end);
293 free_end = paddr + size;
297 free_end = paddr + size;
/* If the new free range starts inside the previous keep-region's
 * last page, step past that page. */
299 if (free_start < keep_end) {
300 free_start += PAGE_SIZE;
301 if (free_start >= free_end)
/* Flush any remaining pending range after the loop. */
306 total_freed += free_region(free_start, free_end);
309 pr_info("Freed 0x%llx bytes of EFI boot services memory",
/*
 * Entry point for arm64 EFI support: pick up the UEFI parameters the
 * stub placed in the FDT, record the system table address, and reserve
 * plus early-map the UEFI memory map so it survives for later parsing.
 * (The tail of this function is among the lines missing from this
 * extract.)
 */
313 void __init efi_init(void)
315 struct efi_fdt_params params;
317 /* Grab UEFI information placed in FDT by stub */
/* Fixed mojibake: "&#182;ms" was an HTML-escaped "&params" (&#182; == &para;). */
318 if (!efi_get_fdt_params(&params, uefi_debug))
321 efi_system_table = params.system_table;
/* Reserve the whole-page extent of the memmap so it is not clobbered
 * before it has been parsed. */
323 memblock_reserve(params.mmap & PAGE_MASK,
324 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
325 memmap.phys_map = (void *)params.mmap;
326 memmap.map = early_memremap(params.mmap, params.mmap_size);
327 memmap.map_end = memmap.map + params.mmap_size;
328 memmap.desc_size = params.desc_size;
329 memmap.desc_version = params.desc_ver;
/*
 * Complete the identity map so UEFI code can run with TTBR0 pointing at
 * idmap_pg_dir.  The body (presumably a call to efi_setup_idmap()) is
 * among the lines missing from this extract.
 */
337 void __init efi_idmap_init(void)
339 if (!efi_enabled(EFI_BOOT))
342 /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
/*
 * Create a permanent kernel mapping for one runtime region, record the
 * chosen virtual address in the descriptor, and append the updated
 * descriptor to the buffer at *new (advancing the cursor).  Nonzero
 * return presumably indicates success -- the explicit return statements
 * are among the lines missing from this extract.
 */
346 static int __init remap_region(efi_memory_desc_t *md, void **new)
348 u64 paddr, vaddr, npages, size;
350 paddr = md->phys_addr;
351 npages = md->num_pages;
/* Round to native page boundaries before mapping. */
352 memrange_efi_to_native(&paddr, &npages);
353 size = npages << PAGE_SHIFT;
/* Normal RAM gets a cacheable mapping; everything else a device one. */
355 if (is_normal_ram(md))
356 vaddr = (__force u64)ioremap_cache(paddr, size);
358 vaddr = (__force u64)ioremap(paddr, size);
361 pr_err("Unable to remap 0x%llx pages @ %p\n",
362 npages, (void *)paddr);
366 /* adjust for any rounding when EFI and system pagesize differs */
367 md->virt_addr = vaddr + (md->phys_addr - paddr);
370 pr_info(" EFI remap 0x%012llx => %p\n",
371 md->phys_addr, (void *)md->virt_addr);
/* Append this (now virt_addr-bearing) descriptor to the virtmap. */
373 memcpy(*new, md, memmap.desc_size);
374 *new += memmap.desc_size;
380 * Switch UEFI from an identity map to a kernel virtual map
/*
 * early_initcall: builds a trimmed memory map containing only the
 * RUNTIME descriptors (each with virt_addr filled in), calls the
 * firmware's SetVirtualAddressMap through the identity map, then wires
 * up the native runtime services.  Many lines (declarations, braces,
 * error paths) are missing from this extract.
 */
382 static int __init arm64_enter_virtual_mode(void)
384 efi_memory_desc_t *md;
385 phys_addr_t virtmap_phys;
386 void *virtmap, *virt_md;
392 if (!efi_enabled(EFI_BOOT)) {
393 pr_info("EFI services will not be available.\n");
397 pr_info("Remapping and enabling EFI services.\n");
399 /* replace early memmap mapping with permanent mapping */
400 mapsize = memmap.map_end - memmap.map;
401 early_memunmap(memmap.map, mapsize);
402 memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
404 memmap.map_end = memmap.map + mapsize;
406 efi.memmap = &memmap;
408 /* Map the runtime regions */
/* virtmap receives only the RUNTIME descriptors; mapsize is a safe
 * upper bound on its size. */
409 virtmap = kmalloc(mapsize, GFP_KERNEL);
411 pr_err("Failed to allocate EFI virtual memmap\n");
/* Physical address is needed because SVAM is invoked via the idmap. */
414 virtmap_phys = virt_to_phys(virtmap);
417 for_each_efi_memory_desc(&memmap, md) {
418 if (!(md->attribute & EFI_MEMORY_RUNTIME))
420 if (!remap_region(md, &virt_md))
425 efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
428 * If we have no virtual mapping for the System Table at this
429 * point, the memory map doesn't cover the physical offset where
430 * it resides. This means the System Table will be inaccessible
431 * to Runtime Services themselves once the virtual mapping is
434 pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
437 set_bit(EFI_SYSTEM_TABLES, &efi.flags);
440 * DMI depends on EFI on arm64, and dmi_scan_machine() needs to be
441 * called early because dmi_id_init(), which is an arch_initcall itself,
442 * depends on dmi_scan_machine() having been called already.
/* Switch TTBR0 to the identity map (IRQs off) so firmware code can be
 * entered with a 1:1 view of memory. */
446 local_irq_save(flags);
447 cpu_switch_mm(idmap_pg_dir, &init_mm);
449 /* Call SetVirtualAddressMap with the physical address of the map */
450 runtime = efi.systab->runtime;
451 efi.set_virtual_address_map = runtime->set_virtual_address_map;
453 status = efi.set_virtual_address_map(count * memmap.desc_size,
456 (efi_memory_desc_t *)virtmap_phys);
/* Restore the kernel's view: reserved TTBR0, flush, IRQs back on. */
457 cpu_set_reserved_ttbr0();
459 local_irq_restore(flags);
/* Boot services memory is released whether or not SVAM succeeded. */
463 free_boot_services();
465 if (status != EFI_SUCCESS) {
466 pr_err("Failed to set EFI virtual address map! [%lx]\n",
471 /* Set up runtime services function pointers */
472 runtime = efi.systab->runtime;
473 efi_native_runtime_setup();
474 set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
476 efi.runtime_version = efi.systab->hdr.revision;
481 /* unmap all mappings that succeeded: there are 'count' of those */
482 for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
484 iounmap((__force void __iomem *)md->virt_addr);
489 early_initcall(arm64_enter_virtual_mode);