page-flags: introduce page flags policies wrt compound pages
[cascardo/linux.git] / mm / memory.c
index c387430..d4e4d37 100644
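
Two independent cleanups are visible in this file. First, the open-coded PageAnon() checks around per-mm RSS accounting are folded into mm_counter() and mm_counter_file(), so each increment and decrement picks the right counter in one place. Second, struct vm_fault grows a gfp_mask field (added elsewhere in the series), filled in by the new __get_fault_gfp_mask() helper, so ->fault and ->map_pages handlers know which allocation mask the fault path permits.
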
@@ -832,10 +832,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                } else if (is_migration_entry(entry)) {
                        page = migration_entry_to_page(entry);
 
-                       if (PageAnon(page))
-                               rss[MM_ANONPAGES]++;
-                       else
-                               rss[MM_FILEPAGES]++;
+                       rss[mm_counter(page)]++;
 
                        if (is_write_migration_entry(entry) &&
                                        is_cow_mapping(vm_flags)) {
@@ -874,10 +871,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (page) {
                get_page(page);
                page_dup_rmap(page);
-               if (PageAnon(page))
-                       rss[MM_ANONPAGES]++;
-               else
-                       rss[MM_FILEPAGES]++;
+               rss[mm_counter(page)]++;
        }
 
 out_set_pte:
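
The two hunks above replace the anon/file branching with a single lookup. The helpers themselves are introduced outside this file; a minimal sketch of what they plausibly look like, assuming mm_counter_file() also distinguishes swap-backed (shmem) pages — only the call sites are visible in this diff:

/* Sketch only: assumed helpers, e.g. in include/linux/mm.h. */
static inline int mm_counter_file(struct page *page)
{
	/* Assumption: shmem/tmpfs pages get their own counter. */
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

With this, copy_one_pte() no longer needs to know which counters exist, only that mm_counter() maps a page to the right one.
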
@@ -1113,9 +1107,8 @@ again:
                        tlb_remove_tlb_entry(tlb, pte, addr);
                        if (unlikely(!page))
                                continue;
-                       if (PageAnon(page))
-                               rss[MM_ANONPAGES]--;
-                       else {
+
+                       if (!PageAnon(page)) {
                                if (pte_dirty(ptent)) {
                                        force_flush = 1;
                                        set_page_dirty(page);
@@ -1123,8 +1116,8 @@ again:
                                if (pte_young(ptent) &&
                                    likely(!(vma->vm_flags & VM_SEQ_READ)))
                                        mark_page_accessed(page);
-                               rss[MM_FILEPAGES]--;
                        }
+                       rss[mm_counter(page)]--;
                        page_remove_rmap(page);
                        if (unlikely(page_mapcount(page) < 0))
                                print_bad_pte(vma, addr, ptent, page);
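
Note the reshuffle in zap_pte_range() above: the dirty and accessed-bit bookkeeping still applies only to file-backed pages, so the !PageAnon() branch survives, but the RSS decrement is hoisted out of it and unified through mm_counter(), covering anonymous pages in the same statement.
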
@@ -1146,11 +1139,7 @@ again:
                        struct page *page;
 
                        page = migration_entry_to_page(entry);
-
-                       if (PageAnon(page))
-                               rss[MM_ANONPAGES]--;
-                       else
-                               rss[MM_FILEPAGES]--;
+                       rss[mm_counter(page)]--;
                }
                if (unlikely(!free_swap_and_cache(entry)))
                        print_bad_pte(vma, addr, ptent, NULL);
@@ -1460,7 +1449,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 
        /* Ok, finally just insert the thing.. */
        get_page(page);
-       inc_mm_counter_fast(mm, MM_FILEPAGES);
+       inc_mm_counter_fast(mm, mm_counter_file(page));
        page_add_file_rmap(page);
        set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
@@ -1949,6 +1938,20 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
                copy_user_highpage(dst, src, va, vma);
 }
 
+static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
+{
+       struct file *vm_file = vma->vm_file;
+
+       if (vm_file)
+               return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
+
+       /*
+        * Special mappings (e.g. VDSO) do not have any file so fake
+        * a default GFP_KERNEL for them.
+        */
+       return GFP_KERNEL;
+}
+
 /*
  * Notify the address space that the page is about to become writable so that
  * it can prohibit this or wait for the page to get into an appropriate state.
@@ -1964,6 +1967,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
        vmf.virtual_address = (void __user *)(address & PAGE_MASK);
        vmf.pgoff = page->index;
        vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+       vmf.gfp_mask = __get_fault_gfp_mask(vma);
        vmf.page = page;
        vmf.cow_page = NULL;
 
@@ -2097,7 +2101,8 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
        if (likely(pte_same(*page_table, orig_pte))) {
                if (old_page) {
                        if (!PageAnon(old_page)) {
-                               dec_mm_counter_fast(mm, MM_FILEPAGES);
+                               dec_mm_counter_fast(mm,
+                                               mm_counter_file(old_page));
                                inc_mm_counter_fast(mm, MM_ANONPAGES);
                        }
                } else {
@@ -2767,6 +2772,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
        vmf.pgoff = pgoff;
        vmf.flags = flags;
        vmf.page = NULL;
+       vmf.gfp_mask = __get_fault_gfp_mask(vma);
        vmf.cow_page = cow_page;
 
        ret = vma->vm_ops->fault(vma, &vmf);
@@ -2820,7 +2826,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
                inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
                page_add_new_anon_rmap(page, vma, address);
        } else {
-               inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES);
+               inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
                page_add_file_rmap(page);
        }
        set_pte_at(vma->vm_mm, address, pte, entry);
@@ -2933,6 +2939,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
        vmf.pgoff = pgoff;
        vmf.max_pgoff = max_pgoff;
        vmf.flags = flags;
+       vmf.gfp_mask = __get_fault_gfp_mask(vma);
        vma->vm_ops->map_pages(vma, &vmf);
 }
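
do_set_pte() and do_fault_around() complete the picture: freshly faulted file pages are accounted through mm_counter_file(), and the fault-around path hands ->map_pages the same gfp_mask that __do_fault() passes to ->fault, so prefaulted pages obey identical allocation constraints.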