mm, dax: convert vmf_insert_pfn_pmd() to pfn_t
[cascardo/linux.git] / include/linux/huge_mm.h
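With this change, vmf_insert_pfn_pmd() takes a pfn_t instead of a raw unsigned long pfn, so callers hand over the page frame number together with its flag bits (PFN_DEV, PFN_MAP). Below is a minimal caller sketch; the handler name, the way the physical address is obtained, and the flag choice are illustrative assumptions rather than code from this tree.

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Hypothetical DAX-style PMD fault handler: map one huge device-backed frame. */
static int example_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                                 pmd_t *pmd, unsigned int flags,
                                 phys_addr_t phys)
{
        /* Wrap the physical address in a pfn_t tagged as a device mapping. */
        pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);

        /* Install the PMD-sized mapping; the result is a VM_FAULT_* code. */
        return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
                                  flags & FAULT_FLAG_WRITE);
}
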
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                      struct vm_area_struct *vma,
                                      unsigned long address, pmd_t *pmd,
                                      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
                                  struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmd,
                                  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          unsigned long addr,
                                          pmd_t *pmd,
                                          unsigned int flags);
extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma,
                         struct vm_area_struct *new_vma,
                         unsigned long old_addr,
                         unsigned long new_addr, unsigned long old_end,
                         pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
                        int prot_numa);
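/*
 * Map a PMD-sized frame described by @pfn at @addr (used by DAX to install
 * huge mappings of device memory).  The pfn_t carries flag bits such as
 * PFN_DEV and PFN_MAP alongside the raw page frame number.
 */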
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
                        pfn_t pfn, bool write);

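/*
 * Bit numbers within transparent_hugepage_flags; they are flipped at run time
 * through the transparent_hugepage sysfs knobs (enabled, defrag,
 * use_zero_page).
 */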
enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
        TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
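/*
 * Example: on x86-64 with 4K base pages, HPAGE_PMD_SHIFT is 21 (2M), so
 * HPAGE_PMD_ORDER = 21 - 12 = 9 and HPAGE_PMD_NR = 512 base pages per
 * huge page.
 */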

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE  ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

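/*
 * THP is usable for @__vma when it is globally enabled, or enabled for
 * madvise() users and the VMA is marked VM_HUGEPAGE, provided the VMA is not
 * marked VM_NOHUGEPAGE and is not a temporary stack.
 */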
#define transparent_hugepage_enabled(__vma)                             \
        ((transparent_hugepage_flags &                                  \
          (1<<TRANSPARENT_HUGEPAGE_FLAG) ||                             \
          (transparent_hugepage_flags &                                 \
           (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&                   \
           ((__vma)->vm_flags & VM_HUGEPAGE))) &&                       \
         !((__vma)->vm_flags & VM_NOHUGEPAGE) &&                        \
         !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)                              \
        ((transparent_hugepage_flags &                                  \
          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||                     \
         (transparent_hugepage_flags &                                  \
          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&             \
          (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()                            \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()                                \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address);

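/*
 * Split the huge PMD covering @__address back into a normal page table of pte
 * entries if it currently maps a transparent huge page; otherwise do nothing.
 */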
#define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (pmd_trans_huge(*____pmd))                           \
                        __split_huge_pmd(__vma, __pmd, __address);      \
        } while (0)

#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
                            unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                    unsigned long start,
                                    unsigned long end,
                                    long adjust_next);
extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
                spinlock_t **ptl);
/* mmap_sem must be held on entry */
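/*
 * Takes the PMD lock and returns true with *ptl set when @pmd still maps a
 * transparent huge page; the caller must spin_unlock(*ptl).  Otherwise
 * returns false with no lock held.
 */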
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
                spinlock_t **ptl)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (pmd_trans_huge(*pmd))
                return __pmd_trans_huge_lock(pmd, vma, ptl);
        else
                return false;
}
static inline int hpage_nr_pages(struct page *page)
{
        if (unlikely(PageTransHuge(page)))
                return HPAGE_PMD_NR;
        return 1;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long addr, pmd_t pmd, pmd_t *pmdp);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
        return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return is_huge_zero_page(pmd_page(pmd));
}

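/*
 * Take a reference on the global huge zero page, allocating it on first use;
 * returns NULL if the allocation fails.
 */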
struct page *get_huge_zero_page(void);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
        return 0;
}
static inline int split_huge_page(struct page *page)
{
        return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
        do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
                spinlock_t **ptl)
{
        return false;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                        unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
        return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
        return false;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */