#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 *
 * Note: We only support user read/write permissions. The supervisor
 * always has full read/write access to pages above PAGE_OFFSET (pages
 * below that always use the user access permissions).
 *
 * We could create a separate kernel read-only mode if we used the 3 PP
 * bit combinations that newer processors provide, but we currently don't.
 */
#define _PAGE_BIT_SWAP_TYPE	0

#define _PAGE_EXEC		0x00001 /* execute permission */
#define _PAGE_WRITE		0x00002 /* write access allowed */
#define _PAGE_READ		0x00004 /* read access allowed */
#define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED	0x00008 /* kernel access only */
#define _PAGE_SAO		0x00010 /* Strong access order */
#define _PAGE_NON_IDEMPOTENT	0x00020 /* non idempotent memory */
#define _PAGE_TOLERANT		0x00030 /* tolerant memory, cache inhibited */
#define _PAGE_DIRTY		0x00080 /* C: page changed */
#define _PAGE_ACCESSED		0x00100 /* R: page referenced */
/*
 * Software bits
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	0x00200 /* software: software dirty tracking */
#else
#define _PAGE_SOFT_DIRTY	0x00000
#endif
#define _PAGE_SPECIAL		0x00400 /* software: special page */
#define H_PAGE_BUSY		0x00800 /* software: PTE & hash are busy */

#define H_PAGE_F_GIX_SHIFT	57
#define H_PAGE_F_GIX		(7ul << 57)	/* HPTE index within HPTEG */
#define H_PAGE_F_SECOND		(1ul << 60)	/* HPTE is in 2ndary HPTEG */
#define H_PAGE_HASHPTE		(1ul << 61)	/* PTE has associated HPTE */
#define _PAGE_PTE		(1ul << 62)	/* distinguishes PTEs from pointers */
#define _PAGE_PRESENT		(1ul << 63)	/* pte contains a translation */
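
/*
 * A valid linux PTE therefore always has _PAGE_PTE and _PAGE_PRESENT set
 * in the top bits, with the permission bits in the low bits; e.g. a plain
 * kernel read/write mapping combines _PAGE_KERNEL_RW (defined below) with
 * these base bits.
 */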

/*
 * Drivers request cache-inhibited pte mappings using _PAGE_NO_CACHE.
 * Instead of fixing all of them, add an alternate define which
 * maps to a CI (cache-inhibited) pte mapping.
 */
#define _PAGE_NO_CACHE		_PAGE_TOLERANT
/*
 * We support a 57 bit real address in the pte. Clear everything above 57,
 * and everything below PAGE_SHIFT.
 */
#define PTE_RPN_MASK	(((1UL << 57) - 1) & (PAGE_MASK))
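
/*
 * For example, with 64K pages (PAGE_SHIFT == 16) this keeps bits 16..56
 * of the pte, i.e. PTE_RPN_MASK == 0x01ffffffffff0000.
 */
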
/*
 * Set of bits not changed in pmd_modify.
 */
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
			 _PAGE_SOFT_DIRTY)

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE	(PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
				 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE		(ASM_CONST(1) << PGTABLE_EADDR_SIZE)
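
/*
 * For example, assuming the 4K hash geometry from hash-4k.h (9 pte,
 * 7 pmd, 9 pud and 9 pgd index bits, plus a PAGE_SHIFT of 12),
 * PGTABLE_EADDR_SIZE is 46 and PGTABLE_RANGE is 2^46 bytes = 64TB.
 */
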
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PMD_CACHE_INDEX	(PMD_INDEX_SIZE + 1)
#else
#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#endif

/*
 * Define the address range of the kernel non-linear virtual area.
 */
#define KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#define KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap).
 */
#define VMALLOC_START	KERN_VIRT_START
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
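
/*
 * For example, VMALLOC_START is 0xD000000000000000, so
 * REGION_ID(VMALLOC_START) is 0xd; user addresses all sit in
 * region 0 and so match USER_REGION_ID.
 */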

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/*
 * user access blocked by key
 */
#define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO		(_PAGE_PRIVILEGED | _PAGE_READ)
#define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY | \
				 _PAGE_RW | _PAGE_EXEC)
#define _PAGE_KERNEL_ROX	(_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)

/* No page size encoding in the linux PTE */
#define _PAGE_PSIZE		0

#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7

#define _PTE_NONE_MASK	_PAGE_HPTEFLAGS
/*
 * _PAGE_CHG_MASK masks of bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
			 _PAGE_SOFT_DIRTY)
/*
 * Mask of bits returned by pte_pgprot().
 */
#define PAGE_PROT_BITS	(_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
			 H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
			 _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
			 _PAGE_SOFT_DIRTY)
/*
 * We define 2 sets of base prot bits: one for basic pages (i.e.
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
#define _PAGE_BASE	(_PAGE_BASE_NC)

/*
 * Permission masks used to generate the __P and __S table.
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now (we could make
 * write-only pages on BookE but we don't bother for now). Execute
 * permission control is possible on platforms that define _PAGE_EXEC.
 *
 * Note: due to the way vm flags are laid out, the bits are XWR.
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X
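
/*
 * The table index is the low three (XWR) vm flag bits; e.g. a shared
 * PROT_READ|PROT_WRITE mapping uses __S011 == PAGE_SHARED, while the
 * equivalent private mapping uses __P011 == PAGE_COPY so the first
 * write can be caught and the page copied.
 */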

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_TOLERANT)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NON_IDEMPOTENT)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
#define PAGE_AGP		(PAGE_KERNEL_NC)

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
#define pmd_bad(pmd)		(pmd_val(pmd) & PMD_BAD_BITS)
#define pmd_page_vaddr(pmd)	__va(pmd_val(pmd) & ~PMD_MASKED_BITS)

#define pud_bad(pud)		(pud_val(pud) & PUD_BAD_BITS)
#define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)

/* Pointers in the page table tree are physical addresses */
#define __pgtable_ptr_val(ptr)	__pa(ptr)

#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))
#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))
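
/*
 * A software page-table walk conceptually chains these together for an
 * effective address ea:
 *
 *	pgd_index(ea) -> pud_index(ea) -> pmd_index(ea) -> pte_index(ea)
 *
 * with each level's width given by the PTRS_PER_* constants from the
 * 4K/64K headers included above.
 */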

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       unsigned long set, int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
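
/*
 * The helpers below all funnel through pte_update(); e.g. clearing the
 * write bit is pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0), and the
 * returned old value lets callers test what was set beforehand.
 */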

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 */
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
	: "r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	: "cc");
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va(pgd_val(pgd) & ~PGD_MASKED_BITS);
}
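
/*
 * Note that pte_same() masks out _PAGE_HPTEFLAGS, so two ptes that
 * differ only in hash-maintenance state (e.g. one has been hashed in
 * and carries H_PAGE_HASHPTE, the other not) still compare equal.
 */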

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_WRITE); }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h. On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
		(_PAGE_PRESENT | _PAGE_PRIVILEGED);
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_PRESENT);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte((((pte_basic_t)(pfn) << PAGE_SHIFT) & PTE_RPN_MASK) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT;
}
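
/*
 * These two are inverses over the RPN field: for any pfn whose physical
 * address fits in the 57-bit RPN, pte_pfn(pfn_pte(pfn, prot)) == pfn.
 */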

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	/*
	 * write implies read, hence set both
	 */
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
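
/*
 * pte_modify() is what protection changes (e.g. the mprotect() path)
 * use: the bits in _PAGE_CHG_MASK survive, everything else is taken
 * from newprot.
 */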

/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
}

#define _PAGE_CACHE_CTL	(_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NON_IDEMPOTENT);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_TOLERANT);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL));
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
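
/*
 * A typical use is a driver mapping MMIO into userspace, e.g.:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *
 * before calling remap_pfn_range(); this replaces the cache control
 * bits with _PAGE_NON_IDEMPOTENT.
 */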

/*
 * Check whether a pte mapping has the cache-inhibited property.
 */
static inline bool pte_ci(pte_t pte)
{
	unsigned long pte_v = pte_val(pte);

	if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
	    ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
		return true;
	return false;
}
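
/*
 * For instance, a pte built as pfn_pte(pfn, pgprot_noncached(prot))
 * has its cache control field set to _PAGE_NON_IDEMPOTENT, so
 * pte_ci() returns true for it.
 */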

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */