1 #ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
2 #define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
4 #define MMU_NO_CONTEXT ~0UL
7 #include <asm/book3s/64/tlbflush-hash.h>
8 #include <asm/book3s/64/tlbflush-radix.h>
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
/*
 * Flush TLB entries for a PMD-level (huge page) range in a user VMA,
 * dispatching on the active MMU model.
 */
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_pmd_tlb_range(vma, start, end);
	/* hash has no PMD-specific helper; fall back to the plain range flush */
	return hash__flush_tlb_range(vma, start, end);
}
/*
 * Flush TLB entries covering a virtual-address range of a user VMA,
 * dispatching on the active MMU model (radix vs. hash).
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}
/*
 * Flush TLB entries for a kernel virtual-address range,
 * dispatching on the active MMU model (radix vs. hash).
 */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}
/*
 * Flush all TLB entries for an address space on the local CPU only,
 * dispatching on the active MMU model (radix vs. hash).
 */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}
/*
 * Flush the TLB entry for a single user page on the local CPU only,
 * dispatching on the active MMU model (radix vs. hash).
 */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}
/*
 * Flush the TLB entry for a single user page without touching the hash
 * page table. On radix this is just a normal page flush; on hash it uses
 * the dedicated nohash variant.
 */
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page_nohash(vma, vmaddr);
}
/*
 * mmu_gather finalization hook: flush the TLB entries accumulated by the
 * gather, dispatching on the active MMU model (radix vs. hash).
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}
#ifdef CONFIG_SMP
/*
 * Flush all TLB entries for an address space across CPUs,
 * dispatching on the active MMU model (radix vs. hash).
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

/*
 * Flush the TLB entry for a single user page across CPUs,
 * dispatching on the active MMU model (radix vs. hash).
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}
#else
/* UP build: the local flush is the global flush. */
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */
/*
 * flush the page walk cache for the address
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Flush the page table walk cache on freeing a page table. We already
	 * have marked the upper/higher level page table entry none by now.
	 * So it is safe to flush PWC here.
	 */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(tlb, address);
}
99 #endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */