powerpc/mm/hugetlb: Add flush_hugetlb_tlb_range
Author:     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
AuthorDate: Wed, 13 Jul 2016 09:36:43 +0000 (15:06 +0530)
Commit:     Michael Ellerman <mpe@ellerman.id.au>
CommitDate: Mon, 1 Aug 2016 01:15:13 +0000 (11:15 +1000)
Some archs, like ppc64, need to do special things when flushing the TLB
for hugepages. Add a new helper to flush a hugetlb TLB range. This helps
us avoid flushing the entire TLB mapping for the PID.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
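
For orientation, the call flow this patch sets up, sketched from the hunks
below (an annotation, not part of the patch itself):

/*
 * hugetlb_change_protection()                        mm/hugetlb.c
 *   -> flush_hugetlb_tlb_range(vma, start, end)
 *        generic: #defined to flush_tlb_range() when no arch override
 *        book3s/64 inline:
 *          radix_enabled() ? radix__flush_hugetlb_tlb_range(vma, start, end)
 *                              -> radix__flush_tlb_range_psize(mm, start,
 *                                                              end, psize)
 *                          : hash__flush_tlb_range(vma, start, end)
 */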
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
arch/powerpc/include/asm/book3s/64/tlbflush.h
arch/powerpc/mm/hugetlbpage-radix.c
mm/hugetlb.c

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 10eb0d1..6503776 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -10,6 +10,8 @@ static inline int mmu_get_ap(int psize)
        return mmu_psize_defs[psize].ap;
 }
 
+extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+                                          unsigned long start, unsigned long end);
 extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
                                         unsigned long end, int psize);
 extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 0790c4e..146b269 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -16,6 +16,16 @@ static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
        return hash__flush_tlb_range(vma, start, end);
 }
 
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+                                          unsigned long start,
+                                          unsigned long end)
+{
+       if (radix_enabled())
+               return radix__flush_hugetlb_tlb_range(vma, start, end);
+       return hash__flush_tlb_range(vma, start, end);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
 {
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 1eca0de..35254a6 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -25,6 +25,16 @@ void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long v
        radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
 }
 
+void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                                  unsigned long end)
+{
+       int psize;
+       struct hstate *hstate = hstate_file(vma->vm_file);
+
+       psize = hstate_get_psize(hstate);
+       radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
+}
+
 /*
  * A variant of hugetlb_get_unmapped_area doing topdown search
  * FIXME!! should we do as x86 does or non hugetlb area does ?
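
radix__flush_hugetlb_tlb_range() leans on hstate_get_psize(), which is not
part of this diff. A minimal sketch of what that helper has to do, assuming
the book3s/64 mmu_psize_defs[] table and the MMU_PAGE_2M/MMU_PAGE_1G size
indices (the exact in-tree body may differ):

static inline int hstate_get_psize(struct hstate *hstate)
{
	unsigned long shift = huge_page_shift(hstate);

	/* Map the hstate's page shift onto an MMU page-size index. */
	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return MMU_PAGE_2M;
	if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
		return MMU_PAGE_1G;

	WARN(1, "Unexpected huge page shift\n");
	return mmu_virtual_psize;
}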
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f904246..af2d882 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3938,6 +3938,14 @@ same_page:
        return i ? i : -EFAULT;
 }
 
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end)        flush_tlb_range(vma, addr, end)
+#endif
+
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
 {
@@ -3998,7 +4006,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * once we release i_mmap_rwsem, another task can do the final put_page
         * and that page table be reused and filled with junk.
         */
-       flush_tlb_range(vma, start, end);
+       flush_hugetlb_tlb_range(vma, start, end);
        mmu_notifier_invalidate_range(mm, start, end);
        i_mmap_unlock_write(vma->vm_file->f_mapping);
        mmu_notifier_invalidate_range_end(mm, start, end);
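
One way to exercise the changed path from userspace is mprotect() on a
hugetlb mapping, which drives hugetlb_change_protection() and hence the new
flush_hugetlb_tlb_range(). A minimal sketch; the 2MB size and the anonymous
MAP_HUGETLB mapping are assumptions (needs preallocated hugepages, e.g.
vm.nr_hugepages > 0):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;	/* assumes a 2MB default hugepage */

	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* needs vm.nr_hugepages > 0 */
		return 1;
	}

	*(volatile char *)p = 1;	/* fault the hugepage in */

	/* Drives hugetlb_change_protection() -> flush_hugetlb_tlb_range(). */
	if (mprotect(p, len, PROT_READ))
		perror("mprotect");

	munmap(p, len);
	return 0;
}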