x86/mm/ASLR: Propagate base load address calculation
author     Jiri Kosina <jkosina@suse.cz>
           Fri, 13 Feb 2015 15:04:55 +0000 (16:04 +0100)
committer  Borislav Petkov <bp@suse.de>
           Thu, 19 Feb 2015 10:38:54 +0000 (11:38 +0100)
Commit:

  e2b32e678513 ("x86, kaslr: randomize module base load address")

makes the base address for modules be unconditionally randomized whenever
CONFIG_RANDOMIZE_BASE is defined and the "nokaslr" option isn't present
on the command line.
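
For reference, the module randomization logic that commit introduced in
arch/x86/kernel/module.c looks roughly like this (condensed sketch; the
relevant lines also appear as removals in the diff below):

  static unsigned long module_load_offset;
  static int randomize_modules = 1;

  /* Serializes the one-time calculation of module_load_offset. */
  static DEFINE_MUTEX(module_kaslr_mutex);

  /* Only "nokaslr" on the command line switches this off. */
  static int __init parse_nokaslr(char *p)
  {
          randomize_modules = 0;
          return 0;
  }
  early_param("nokaslr", parse_nokaslr);

  static unsigned long get_module_load_offset(void)
  {
          if (randomize_modules) {
                  mutex_lock(&module_kaslr_mutex);
                  /* Pick a random, page-aligned offset once per boot. */
                  if (module_load_offset == 0)
                          module_load_offset =
                                  (get_random_int() % 1024 + 1) * PAGE_SIZE;
                  mutex_unlock(&module_kaslr_mutex);
          }
          return module_load_offset;
  }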

This is not consistent with how choose_kernel_location() decides whether
it will randomize the kernel load base.

Namely, CONFIG_HIBERNATION disables kASLR (unless the "kaslr" option is
explicitly specified on the kernel command line), which makes the state
space larger than what the module loader is looking at. IOW,
CONFIG_HIBERNATION && CONFIG_RANDOMIZE_BASE is a valid configuration;
kASLR wouldn't be applied by default in that case, but the module loader
is not aware of that.

Instead of fixing the logic in module.c, this patch takes a more generic
approach. It introduces a new bootparam setup_data type, SETUP_KASLR, and
uses it to pass down whether kASLR has actually been applied during
kernel decompression. A global 'kaslr_enabled' variable is set
accordingly, so that any kernel code (module loading, live patching, ...)
can make decisions based on its value.
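
As a purely hypothetical illustration (not part of this patch), any
built-in code running after parse_setup_data() could key off the flag,
e.g.:

  #include <linux/init.h>
  #include <linux/printk.h>
  #include <asm/page_types.h>     /* declares kaslr_enabled */

  /*
   * Hypothetical consumer sketch: report whether the decompressor
   * actually randomized the kernel base.
   */
  static int __init report_kaslr_state(void)
  {
          pr_info("kASLR: %s\n", kaslr_enabled ? "applied" : "not applied");
          return 0;
  }
  late_initcall(report_kaslr_state);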

The x86 module loader is converted to make use of this flag.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Acked-by: Kees Cook <keescook@chromium.org>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Link: https://lkml.kernel.org/r/alpine.LNX.2.00.1502101411280.10719@pobox.suse.cz
[ Always dump correct kaslr status when panicking ]
Signed-off-by: Borislav Petkov <bp@suse.de>
arch/x86/boot/compressed/aslr.c
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/misc.h
arch/x86/include/asm/page_types.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/kernel/module.c
arch/x86/kernel/setup.c

arch/x86/boot/compressed/aslr.c
index bb13763..7083c16 100644
 static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
                LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
 
+struct kaslr_setup_data {
+       __u64 next;
+       __u32 type;
+       __u32 len;
+       __u8 data[1];
+} kaslr_setup_data;
+
 #define I8254_PORT_CONTROL     0x43
 #define I8254_PORT_COUNTER0    0x40
 #define I8254_CMD_READBACK     0xC0
@@ -295,7 +302,29 @@ static unsigned long find_random_addr(unsigned long minimum,
        return slots_fetch_random();
 }
 
-unsigned char *choose_kernel_location(unsigned char *input,
+static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled)
+{
+       struct setup_data *data;
+
+       kaslr_setup_data.type = SETUP_KASLR;
+       kaslr_setup_data.len = 1;
+       kaslr_setup_data.next = 0;
+       kaslr_setup_data.data[0] = enabled;
+
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+       while (data && data->next)
+               data = (struct setup_data *)(unsigned long)data->next;
+
+       if (data)
+               data->next = (unsigned long)&kaslr_setup_data;
+       else
+               params->hdr.setup_data = (unsigned long)&kaslr_setup_data;
+
+}
+
+unsigned char *choose_kernel_location(struct boot_params *params,
+                                     unsigned char *input,
                                      unsigned long input_size,
                                      unsigned char *output,
                                      unsigned long output_size)
@@ -306,14 +335,17 @@ unsigned char *choose_kernel_location(unsigned char *input,
 #ifdef CONFIG_HIBERNATION
        if (!cmdline_find_option_bool("kaslr")) {
                debug_putstr("KASLR disabled by default...\n");
+               add_kaslr_setup_data(params, 0);
                goto out;
        }
 #else
        if (cmdline_find_option_bool("nokaslr")) {
                debug_putstr("KASLR disabled by cmdline...\n");
+               add_kaslr_setup_data(params, 0);
                goto out;
        }
 #endif
+       add_kaslr_setup_data(params, 1);
 
        /* Record the various known unsafe memory ranges. */
        mem_avoid_init((unsigned long)input, input_size,
arch/x86/boot/compressed/misc.c
index a950864..5903089 100644
@@ -401,7 +401,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
         * the entire decompressed kernel plus relocation table, or the
         * entire decompressed kernel plus .bss and .brk sections.
         */
-       output = choose_kernel_location(input_data, input_len, output,
+       output = choose_kernel_location(real_mode, input_data, input_len,
+                                       output,
                                        output_len > run_size ? output_len
                                                              : run_size);
 
arch/x86/boot/compressed/misc.h
index 24e3e56..6d67307 100644
@@ -56,7 +56,8 @@ int cmdline_find_option_bool(const char *option);
 
 #if CONFIG_RANDOMIZE_BASE
 /* aslr.c */
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *params,
+                                     unsigned char *input,
                                      unsigned long input_size,
                                      unsigned char *output,
                                      unsigned long output_size);
@@ -64,7 +65,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
 bool has_cpuflag(int flag);
 #else
 static inline
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *params,
+                                     unsigned char *input,
                                      unsigned long input_size,
                                      unsigned char *output,
                                      unsigned long output_size)
arch/x86/include/asm/page_types.h
index f97fbe3..3d43ce3 100644
@@ -3,6 +3,7 @@
 
 #include <linux/const.h>
 #include <linux/types.h>
+#include <asm/bootparam.h>
 
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT     12
@@ -51,6 +52,8 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
+extern bool kaslr_enabled;
+
 static inline phys_addr_t get_max_mapped(void)
 {
        return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
arch/x86/include/uapi/asm/bootparam.h
index 225b098..44e6dd7 100644
@@ -7,6 +7,7 @@
 #define SETUP_DTB                      2
 #define SETUP_PCI                      3
 #define SETUP_EFI                      4
+#define SETUP_KASLR                    5
 
 /* ram_size flags */
 #define RAMDISK_IMAGE_START_MASK       0x07FF
arch/x86/kernel/module.c
index e69f988..c3c59a3 100644
@@ -32,6 +32,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/page_types.h>
 
 #if 0
 #define DEBUGP(fmt, ...)                               \
@@ -46,21 +47,13 @@ do {                                                        \
 
 #ifdef CONFIG_RANDOMIZE_BASE
 static unsigned long module_load_offset;
-static int randomize_modules = 1;
 
 /* Mutex protects the module_load_offset. */
 static DEFINE_MUTEX(module_kaslr_mutex);
 
-static int __init parse_nokaslr(char *p)
-{
-       randomize_modules = 0;
-       return 0;
-}
-early_param("nokaslr", parse_nokaslr);
-
 static unsigned long int get_module_load_offset(void)
 {
-       if (randomize_modules) {
+       if (kaslr_enabled) {
                mutex_lock(&module_kaslr_mutex);
                /*
                 * Calculate the module_load_offset the first time this
arch/x86/kernel/setup.c
index ab4734e..16b6043 100644
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
 
+bool __read_mostly kaslr_enabled = false;
+
 #ifdef CONFIG_DMI
 RESERVE_BRK(dmi_alloc, 65536);
 #endif
@@ -424,6 +426,15 @@ static void __init reserve_initrd(void)
 }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
+static void __init parse_kaslr_setup(u64 pa_data, u32 data_len)
+{
+       /* The enable flag is the payload byte right after the setup_data header. */
+       u8 *flag = early_memremap(pa_data + sizeof(struct setup_data), 1);
+
+       kaslr_enabled = !!*flag;
+       early_memunmap(flag, 1);
+}
+
 static void __init parse_setup_data(void)
 {
        struct setup_data *data;
@@ -451,6 +458,9 @@ static void __init parse_setup_data(void)
                case SETUP_EFI:
                        parse_efi_setup(pa_data, data_len);
                        break;
+               case SETUP_KASLR:
+                       parse_kaslr_setup(pa_data, data_len);
+                       break;
                default:
                        break;
                }
@@ -833,10 +843,14 @@ static void __init trim_low_memory_range(void)
 static int
 dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
-       pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
-                "(relocation range: 0x%lx-0x%lx)\n",
-                (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
-                __START_KERNEL_map, MODULES_VADDR-1);
+       if (kaslr_enabled)
+               pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
+                        (unsigned long)&_text - __START_KERNEL,
+                        __START_KERNEL,
+                        __START_KERNEL_map,
+                        MODULES_VADDR-1);
+       else
+               pr_emerg("Kernel Offset: disabled\n");
 
        return 0;
 }