powerpc/mm/radix: Add tlb flush of THP ptes
arch/powerpc/include/asm/book3s/64/tlbflush.h
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT  ~0UL

#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

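/*
 * Every helper below dispatches at run time: radix_enabled() selects the
 * radix__* implementation on radix hosts and falls back to the hash__*
 * implementation otherwise.
 *
 * flush_pmd_tlb_range() flushes a PMD-mapped (transparent huge page)
 * range; defining __HAVE_ARCH_FLUSH_PMD_TLB_RANGE tells generic mm code
 * to use it instead of its flush_tlb_range() fallback. Hash has no
 * dedicated PMD-range flush, so it reuses the ordinary range flush.
 */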
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
                                       unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__flush_pmd_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
}

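/* Flush user translations for a range of addresses within a VMA. */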
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__flush_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
}

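/* Flush translations for a range of kernel virtual addresses. */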
static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        if (radix_enabled())
                return radix__flush_tlb_kernel_range(start, end);
        return hash__flush_tlb_kernel_range(start, end);
}

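/*
 * The local_ variants invalidate translations on the calling CPU only;
 * they never broadcast the invalidation to other CPUs.
 */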
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__local_flush_tlb_mm(mm);
        return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__local_flush_tlb_page(vma, vmaddr);
        return hash__local_flush_tlb_page(vma, vmaddr);
}

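/*
 * "nohash" flush: used where an invalidation is only required on MMUs
 * that do not maintain the TLB through a hash page table. Radix needs a
 * real page flush here, while hash handles invalidation as part of
 * updating the hash page table entry.
 */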
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
                                         unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__flush_tlb_page(vma, vmaddr);
        return hash__flush_tlb_page_nohash(vma, vmaddr);
}

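/*
 * Flush the range batched in an mmu_gather; called from the generic
 * mmu_gather code (e.g. during unmap) rather than directly.
 */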
static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (radix_enabled())
                return radix__tlb_flush(tlb);
        return hash__tlb_flush(tlb);
}

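/*
 * On SMP the mm- and page-level flushes must reach every CPU that may
 * hold a stale translation. On UP builds the local flushes suffice, so
 * the global names simply alias them.
 */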
#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (radix_enabled())
                return radix__flush_tlb_mm(mm);
        return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__flush_tlb_page(vma, vmaddr);
        return hash__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)                local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)       local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */
/*
 * Flush the page walk cache for the address. The page walk cache (PWC)
 * is a radix concept, so this is a no-op on hash.
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
        /*
         * Flush the page table walk cache on freeing a page table. The
         * upper/higher level page table entry has already been marked
         * none (cleared) by now, so it is safe to flush the PWC here.
         */
        if (!radix_enabled())
                return;

        radix__flush_tlb_pwc(tlb, address);
}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */