arm64: flush: use local TLB and I-cache invalidation
Author:     Will Deacon <will.deacon@arm.com>
AuthorDate: Tue, 6 Oct 2015 17:46:23 +0000 (18:46 +0100)
Commit:     Catalin Marinas <catalin.marinas@arm.com>
CommitDate: Wed, 7 Oct 2015 10:45:27 +0000 (11:45 +0100)
There are a number of places where a single CPU is running with a
private page-table and we need to perform maintenance on the TLB and
I-cache in order to ensure correctness, but do not require the operation
to be broadcast to other CPUs.

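For reference, the local and broadcast sequences differ only in the
scope of the TLBI operation and the shareability domain of the
barriers. A minimal sketch of the contrast (the broadcast sequence is
the existing flush_tlb_all, reproduced here from the kernel source; the
local sequence is what this patch adds):

	/* Broadcast: invalidate stage-1 EL1 entries on every CPU in
	 * the Inner Shareable domain. */
	dsb(ishst);		/* make prior PTE writes visible to all CPUs */
	asm("tlbi	vmalle1is");
	dsb(ish);		/* wait for the invalidation to finish everywhere */
	isb();

	/* Local: only this CPU's TLB is stale, so non-shareable
	 * barriers suffice. */
	dsb(nshst);		/* make prior PTE writes visible to this CPU's walker */
	asm("tlbi	vmalle1");
	dsb(nsh);		/* wait for the local invalidation to finish */
	isb();
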
This patch adds local variants of flush_tlb_all and __flush_icache_all
to support these use-cases, and updates the callers accordingly.
__local_flush_icache_all also implies an isb, since it is intended to be
used synchronously.
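
As a usage sketch, the call-site pattern (mirroring the flush_context()
hunk below) is: install a safe page table first, then invalidate
locally, since no other CPU can hold translations for the private
tables:

	cpu_set_reserved_ttbr0();	/* TTBR0 -> zero page; stop new walks */
	local_flush_tlb_all();		/* stale entries live on this CPU only */
	if (icache_is_aivivt())
		__local_flush_icache_all();	/* ASID-tagged VIVT I-cache: flush it too */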

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: David Daney <david.daney@cavium.com>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/tlbflush.h
arch/arm64/kernel/efi.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/suspend.c
arch/arm64/mm/context.c
arch/arm64/mm/mmu.c

diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index c75b8d0..54efeda 100644
@@ -115,6 +115,13 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
+static inline void __local_flush_icache_all(void)
+{
+       asm("ic iallu");
+       dsb(nsh);
+       isb();
+}
+
 static inline void __flush_icache_all(void)
 {
        asm("ic ialluis");
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 7bd2da0..96f944e 100644
  *             only require the D-TLB to be invalidated.
  *             - kaddr - Kernel virtual memory address
  */
+static inline void local_flush_tlb_all(void)
+{
+       dsb(nshst);
+       asm("tlbi       vmalle1");
+       dsb(nsh);
+       isb();
+}
+
 static inline void flush_tlb_all(void)
 {
        dsb(ishst);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 13671a9..4d12926 100644
@@ -344,9 +344,9 @@ static void efi_set_pgd(struct mm_struct *mm)
        else
                cpu_switch_mm(mm->pgd, mm);
 
-       flush_tlb_all();
+       local_flush_tlb_all();
        if (icache_is_aivivt())
-               __flush_icache_all();
+               __local_flush_icache_all();
 }
 
 void efi_virtmap_load(void)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index dbdaacd..fdd4d4d 100644
@@ -152,7 +152,7 @@ asmlinkage void secondary_start_kernel(void)
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
-       flush_tlb_all();
+       local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
 
        preempt_disable();
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 8297d50..3c5e4e6 100644
@@ -90,7 +90,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
                else
                        cpu_switch_mm(mm->pgd, mm);
 
-               flush_tlb_all();
+               local_flush_tlb_all();
 
                /*
                 * Restore per-cpu offset before any kernel
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index d70ff14..48b53fb 100644
@@ -48,9 +48,9 @@ static void flush_context(void)
 {
        /* set the reserved TTBR0 before flushing the TLB */
        cpu_set_reserved_ttbr0();
-       flush_tlb_all();
+       local_flush_tlb_all();
        if (icache_is_aivivt())
-               __flush_icache_all();
+               __local_flush_icache_all();
 }
 
 static void set_mm_context(struct mm_struct *mm, unsigned int asid)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9211b85..71a3104 100644
@@ -456,7 +456,7 @@ void __init paging_init(void)
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
-       flush_tlb_all();
+       local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
 }