ARM/efi: Apply strict permissions for UEFI Runtime Services regions
author    Ard Biesheuvel <ard.biesheuvel@linaro.org>
          Mon, 25 Apr 2016 20:06:42 +0000 (21:06 +0100)
committer Ingo Molnar <mingo@kernel.org>
          Thu, 28 Apr 2016 09:33:53 +0000 (11:33 +0200)
Recent UEFI versions expose permission attributes for runtime services
memory regions, either in the UEFI memory map or in the separate memory
attributes table.  This allows the kernel to map these regions with
stricter permissions, rather than the RWX permissions that are used by
default. So wire this up in our mapping routine.
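
For illustration only (not part of this patch): a minimal sketch, on 32-bit
ARM, of how the two descriptor attributes translate into kernel page
protections. The helper name efi_md_pgprot() is hypothetical; the patch
itself applies the equivalent PTE bits via apply_to_page_range(), as the
diff below shows.

  #include <linux/efi.h>
  #include <asm/pgtable.h>

  /* Hypothetical helper, for illustration only: derive a pgprot from the
   * attributes of an EFI memory descriptor. Default is writable/executable. */
  static pgprot_t efi_md_pgprot(const efi_memory_desc_t *md)
  {
          pgprot_t prot = PAGE_KERNEL_EXEC;

          if (md->attribute & EFI_MEMORY_XP)      /* non-executable requested */
                  prot = PAGE_KERNEL;             /* PAGE_KERNEL adds L_PTE_XN */
          if (md->attribute & EFI_MEMORY_RO)      /* read-only requested */
                  prot = __pgprot(pgprot_val(prot) | L_PTE_RDONLY);

          return prot;
  }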

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Leif Lindholm <leif.lindholm@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Jones <pjones@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-efi@vger.kernel.org
Link: http://lkml.kernel.org/r/1461614832-17633-11-git-send-email-matt@codeblueprint.co.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/arm/include/asm/efi.h
arch/arm/kernel/efi.c

diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index e0eea72..b0c341d 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -22,6 +22,7 @@
 void efi_init(void);
 
 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 
 #define efi_call_virt(f, ...)                                          \
 ({                                                                     \
diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
index ff8a9d8..9f43ba0 100644
--- a/arch/arm/kernel/efi.c
+++ b/arch/arm/kernel/efi.c
 #include <asm/mach/map.h>
 #include <asm/mmu_context.h>
 
+static int __init set_permissions(pte_t *ptep, pgtable_t token,
+                                 unsigned long addr, void *data)
+{
+       efi_memory_desc_t *md = data;
+       pte_t pte = *ptep;
+
+       if (md->attribute & EFI_MEMORY_RO)
+               pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+       if (md->attribute & EFI_MEMORY_XP)
+               pte = set_pte_bit(pte, __pgprot(L_PTE_XN));
+       set_pte_ext(ptep, pte, PTE_EXT_NG);
+       return 0;
+}
+
+int __init efi_set_mapping_permissions(struct mm_struct *mm,
+                                      efi_memory_desc_t *md)
+{
+       unsigned long base, size;
+
+       base = md->virt_addr;
+       size = md->num_pages << EFI_PAGE_SHIFT;
+
+       /*
+        * We can only use apply_to_page_range() if we can guarantee that the
+        * entire region was mapped using pages. This should be the case if the
+        * region does not cover any naturally aligned SECTION_SIZE sized
+        * blocks.
+        */
+       if (round_down(base + size, SECTION_SIZE) <
+           round_up(base, SECTION_SIZE) + SECTION_SIZE)
+               return apply_to_page_range(mm, base, size, set_permissions, md);
+
+       return 0;
+}
+
 int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 {
        struct map_desc desc = {
@@ -34,5 +69,11 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
                desc.type = MT_DEVICE;
 
        create_mapping_late(mm, &desc, true);
+
+       /*
+        * If stricter permissions were specified, apply them now.
+        */
+       if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
+               return efi_set_mapping_permissions(mm, md);
        return 0;
 }
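
A standalone illustration (not kernel code) of the section-coverage test in
efi_set_mapping_permissions() above: permissions are only tightened when the
region cannot contain a naturally aligned SECTION_SIZE block, i.e. when
create_mapping_late() is guaranteed to have used page mappings. SECTION_SIZE
is assumed to be 1 MiB here (non-LPAE ARM) and the addresses are made-up
examples.

  #include <stdio.h>
  #include <stdbool.h>

  #define SECTION_SIZE        (1UL << 20)     /* 1 MiB, non-LPAE ARM */

  /* Same rounding semantics as the kernel helpers (power-of-two y). */
  #define round_up(x, y)      ((((x) - 1) | ((y) - 1)) + 1)
  #define round_down(x, y)    ((x) & ~((y) - 1))

  /*
   * True when [base, base + size) does not fully contain any naturally
   * aligned SECTION_SIZE block, so the region must have been mapped with
   * pages and apply_to_page_range() can safely rewrite its PTEs.
   */
  static bool pages_only(unsigned long base, unsigned long size)
  {
          return round_down(base + size, SECTION_SIZE) <
                 round_up(base, SECTION_SIZE) + SECTION_SIZE;
  }

  int main(void)
  {
          /* 64 KiB region containing no full 1 MiB section: prints 1 */
          printf("%d\n", pages_only(0xeff40000UL, 0x10000UL));
          /* 2 MiB region covering two full 1 MiB sections: prints 0 */
          printf("%d\n", pages_only(0xe0000000UL, 0x200000UL));
          return 0;
  }

In the first case the RO/XP bits can be applied to every PTE; in the second,
create_mapping_late() may have used section mappings, so the patch leaves the
region's permissions unchanged and returns 0.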