Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 25 Mar 2016 02:13:59 +0000 (19:13 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 25 Mar 2016 02:13:59 +0000 (19:13 -0700)
Pull second set of arm64 updates from Catalin Marinas:

 - KASLR bug fixes: preserve SCTLR_EL1 in a callee-saved register across
   the C call, fix boot-time I-cache maintenance

 - inv_entry asm macro fix (EL0 check typo)

 - pr_notice("Virtual kernel memory layout...") splitting

 - Clean-ups: use p?d_set_huge consistently, allow preemption around
   copy_to_user_page, remove unused __local_flush_icache_all()

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mm: allow preemption in copy_to_user_page
  arm64: consistently use p?d_set_huge
  arm64: kaslr: use callee saved register to preserve SCTLR across C call
  arm64: Split pr_notice("Virtual kernel memory layout...") into multiple pr_cont()
  arm64: drop unused __local_flush_icache_all()
  arm64: fix KASLR boot-time I-cache maintenance
  arm64/kernel: fix incorrect EL0 check in inv_entry macro

arch/arm64/include/asm/cacheflush.h
arch/arm64/kernel/head.S
arch/arm64/mm/init.c

diff --combined arch/arm64/include/asm/cacheflush.h
@@@ -116,13 -116,6 +116,6 @@@ extern void copy_to_user_page(struct vm
  #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
  extern void flush_dcache_page(struct page *);
  
- static inline void __local_flush_icache_all(void)
- {
-       asm("ic iallu");
-       dsb(nsh);
-       isb();
- }
  static inline void __flush_icache_all(void)
  {
        asm("ic ialluis");
@@@ -156,4 -149,8 +149,4 @@@ int set_memory_rw(unsigned long addr, i
  int set_memory_x(unsigned long addr, int numpages);
  int set_memory_nx(unsigned long addr, int numpages);
  
 -#ifdef CONFIG_DEBUG_RODATA
 -void mark_rodata_ro(void);
 -#endif
 -
  #endif
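
For context on the cacheflush.h hunk above: the dropped __local_flush_icache_all() invalidated the instruction cache on the executing CPU only (IC IALLU with a non-shareable DSB), while the surviving __flush_icache_all() broadcasts the invalidation across the inner shareable domain (IC IALLUIS). A minimal sketch of the two sequences, with illustrative helper names rather than the kernel's own:

/*
 * Illustrative sketch only, not kernel code. IC IALLU invalidates all
 * I-cache entries on the local CPU; IC IALLUIS broadcasts the invalidation
 * to the inner shareable domain. The DSB scope (nsh vs. ish) must match so
 * the invalidation has completed before the ISB resynchronises the
 * instruction stream.
 */
static inline void icache_inval_all_local(void)	/* like the removed helper */
{
	asm volatile("ic iallu\n\tdsb nsh\n\tisb" : : : "memory");
}

static inline void icache_inval_all_shared(void)	/* like __flush_icache_all() */
{
	asm volatile("ic ialluis\n\tdsb ish\n\tisb" : : : "memory");
}

The local sequence is exactly what the head.S change below now open-codes for the KASLR path, where only the boot CPU is running.
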
diff --combined arch/arm64/kernel/head.S
@@@ -31,7 -31,6 +31,7 @@@
  #include <asm/cputype.h>
  #include <asm/elf.h>
  #include <asm/kernel-pgtable.h>
 +#include <asm/kvm_arm.h>
  #include <asm/memory.h>
  #include <asm/pgtable-hwdef.h>
  #include <asm/pgtable.h>
@@@ -535,27 -534,9 +535,27 @@@ CPU_LE(  bic     x0, x0, #(3 << 24)      )       // Cle
        isb
        ret
  
 +2:
 +#ifdef CONFIG_ARM64_VHE
 +      /*
 +       * Check for VHE being present. For the rest of the EL2 setup,
 +       * x2 being non-zero indicates that we do have VHE, and that the
 +       * kernel is intended to run at EL2.
 +       */
 +      mrs     x2, id_aa64mmfr1_el1
 +      ubfx    x2, x2, #8, #4
 +#else
 +      mov     x2, xzr
 +#endif
 +
        /* Hyp configuration. */
 -2:    mov     x0, #(1 << 31)                  // 64-bit EL1
 +      mov     x0, #HCR_RW                     // 64-bit EL1
 +      cbz     x2, set_hcr
 +      orr     x0, x0, #HCR_TGE                // Enable Host Extensions
 +      orr     x0, x0, #HCR_E2H
 +set_hcr:
        msr     hcr_el2, x0
 +      isb
  
        /* Generic timers. */
        mrs     x0, cnthctl_el2
@@@ -615,13 -596,6 +615,13 @@@ CPU_LE(  movk    x0, #0x30d0, lsl #16    )       // 
        /* Stage-2 translation */
        msr     vttbr_el2, xzr
  
 +      cbz     x2, install_el2_stub
 +
 +      mov     w20, #BOOT_CPU_MODE_EL2         // This CPU booted in EL2
 +      isb
 +      ret
 +
 +install_el2_stub:
        /* Hypervisor stub */
        adrp    x0, __hyp_stub_vectors
        add     x0, x0, #:lo12:__hyp_stub_vectors
@@@ -758,7 -732,7 +758,7 @@@ ENTRY(__early_cpu_boot_status
   */
        .section        ".idmap.text", "ax"
  __enable_mmu:
-       mrs     x18, sctlr_el1                  // preserve old SCTLR_EL1 value
+       mrs     x22, sctlr_el1                  // preserve old SCTLR_EL1 value
        mrs     x1, ID_AA64MMFR0_EL1
        ubfx    x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
        cmp     x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
         * to take into account by discarding the current kernel mapping and
         * creating a new one.
         */
-       msr     sctlr_el1, x18                  // disable the MMU
+       msr     sctlr_el1, x22                  // disable the MMU
        isb
        bl      __create_page_tables            // recreate kernel mapping
  
        msr     sctlr_el1, x19                  // re-enable the MMU
        isb
-       ic      ialluis                         // flush instructions fetched
-       isb                                     // via old mapping
+       ic      iallu                           // flush instructions fetched
+       dsb     nsh                             // via old mapping
+       isb
        add     x27, x27, x23                   // relocated __mmap_switched
  #endif
        br      x27
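
Two points stand out in the head.S diff above. The KASLR hunks move the stashed SCTLR_EL1 value from x18 to x22: under AAPCS64, x18 is the platform register and is not callee-saved, so compiled code called before the MMU is disabled again may clobber it, whereas x22 lies in the callee-saved range x19-x28. The I-cache maintenance after re-enabling the MMU also changes from IC IALLUIS + ISB to IC IALLU + DSB NSH + ISB: only the boot CPU is running at this point, so a local invalidation is enough, and the added DSB guarantees it has completed before the ISB. The surrounding context also shows the VHE detection that comes in from the other parent of the merge; a rough C sketch of that check, with an illustrative function name rather than a kernel API:

#include <stdbool.h>

/*
 * Rough C equivalent of the VHE check in el2_setup above (the kernel does
 * this in assembly with "ubfx x2, x2, #8, #4"). ID_AA64MMFR1_EL1.VH
 * occupies bits [11:8]; a non-zero field means the Virtualization Host
 * Extensions are implemented and the kernel may stay at EL2 with
 * HCR_EL2.E2H and HCR_EL2.TGE set.
 */
static inline bool cpu_has_vhe_sketch(void)
{
	unsigned long mmfr1;

	asm("mrs %0, id_aa64mmfr1_el1" : "=r" (mmfr1));
	return ((mmfr1 >> 8) & 0xf) != 0;
}
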
diff --combined arch/arm64/mm/init.c
@@@ -362,42 -362,38 +362,38 @@@ void __init mem_init(void
  #define MLG(b, t) b, t, ((t) - (b)) >> 30
  #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
  
-       pr_notice("Virtual kernel memory layout:\n"
+       pr_notice("Virtual kernel memory layout:\n");
  #ifdef CONFIG_KASAN
-                 "    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n"
+       pr_cont("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
+               MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
  #endif
-                 "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n"
-                 "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-                 "    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-                 "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-                 "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+       pr_cont("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+               MLM(MODULES_VADDR, MODULES_END));
+       pr_cont("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
+               MLG(VMALLOC_START, VMALLOC_END));
+       pr_cont("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+               "    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+               "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+               "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+               MLK_ROUNDUP(_text, __start_rodata),
+               MLK_ROUNDUP(__start_rodata, _etext),
+               MLK_ROUNDUP(__init_begin, __init_end),
+               MLK_ROUNDUP(_sdata, _edata));
  #ifdef CONFIG_SPARSEMEM_VMEMMAP
-                 "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
-                 "              0x%16lx - 0x%16lx   (%6ld MB actual)\n"
+       pr_cont("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
+               "              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
 -              MLG((unsigned long)vmemmap,
 -                  (unsigned long)vmemmap + VMEMMAP_SIZE),
++              MLG(VMEMMAP_START,
++                  VMEMMAP_START + VMEMMAP_SIZE),
+               MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
+                   (unsigned long)virt_to_page(high_memory)));
  #endif
-                 "    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n"
-                 "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
- #ifdef CONFIG_KASAN
-                 MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
- #endif
-                 MLM(MODULES_VADDR, MODULES_END),
-                 MLG(VMALLOC_START, VMALLOC_END),
-                 MLK_ROUNDUP(_text, __start_rodata),
-                 MLK_ROUNDUP(__start_rodata, _etext),
-                 MLK_ROUNDUP(__init_begin, __init_end),
-                 MLK_ROUNDUP(_sdata, _edata),
- #ifdef CONFIG_SPARSEMEM_VMEMMAP
-                 MLG(VMEMMAP_START,
-                     VMEMMAP_START + VMEMMAP_SIZE),
-                 MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
-                     (unsigned long)virt_to_page(high_memory)),
- #endif
-                 MLK(FIXADDR_START, FIXADDR_TOP),
-                 MLM(PCI_IO_START, PCI_IO_END),
-                 MLM(__phys_to_virt(memblock_start_of_DRAM()),
-                     (unsigned long)high_memory));
+       pr_cont("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
+               MLK(FIXADDR_START, FIXADDR_TOP));
+       pr_cont("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+               MLM(PCI_IO_START, PCI_IO_END));
+       pr_cont("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+               MLM(__phys_to_virt(memblock_start_of_DRAM()),
+                   (unsigned long)high_memory));
  
  #undef MLK
  #undef MLM
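
Finally, the init.c hunk replaces a single large multi-line pr_notice() with a pr_notice() header followed by one pr_cont() per region. In the old form every conditional region needed two matching #ifdef blocks, one around its format string and one around its arguments, which were easy to let drift apart; the per-line form keeps each entry self-contained. A minimal sketch of the pattern with made-up region names and addresses (CONFIG_EXAMPLE_REGION is a hypothetical option, not a real Kconfig symbol):

#include <linux/init.h>
#include <linux/printk.h>

/*
 * Illustrative only: one logical report emitted as a header plus one
 * pr_cont() per line, so each conditionally compiled entry carries its
 * own format string and arguments.
 */
static void __init print_example_layout(void)
{
	unsigned long start = 0xffff000008000000UL;
	unsigned long end   = 0xffff000010000000UL;

	pr_notice("Example memory layout:\n");
	pr_cont("    region A: 0x%16lx - 0x%16lx   (%6ld MB)\n",
		start, end, (long)((end - start) >> 20));
#ifdef CONFIG_EXAMPLE_REGION		/* hypothetical option */
	pr_cont("    region B: 0x%16lx - 0x%16lx   (%6ld MB)\n",
		end, end + 0x1000000UL, 16L);
#endif
}

The output still reads as one block in the log; only the way it is assembled changes.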