mm: account pmd page tables to the process
diff --git a/mm/memory.c b/mm/memory.c
index 0e9b326..bbe6a73 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -428,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
+       mm_dec_nr_pmds(tlb->mm);
 }
 
 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
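The mm_dec_nr_pmds() here pairs with the mm_inc_nr_pmds() added to __pmd_alloc() below, so the per-mm count of PMD page tables stays balanced. The accounting helpers themselves live in include/linux/mm.h and are not part of this file's diff; a rough sketch, assuming an atomic_long_t nr_pmds counter in struct mm_struct that is compiled out when the PMD level is folded:

/* Sketch only: per-mm PMD-table accounting helpers (include/linux/mm.h). */
#ifdef __PAGETABLE_PMD_FOLDED
static inline unsigned long mm_nr_pmds(struct mm_struct *mm) { return 0; }
static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
#else
static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
        return atomic_long_read(&mm->nr_pmds);
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
        atomic_long_inc(&mm->nr_pmds);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
        atomic_long_dec(&mm->nr_pmds);
}
#endif
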
@@ -754,6 +755,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
        if (HAVE_PTE_SPECIAL) {
                if (likely(!pte_special(pte)))
                        goto check_pfn;
+               if (vma->vm_ops && vma->vm_ops->find_special_page)
+                       return vma->vm_ops->find_special_page(vma, addr);
                if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                        return NULL;
                if (!is_zero_pfn(pfn))
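vm_normal_page() normally has no struct page to return for pte_special() mappings; the new ->find_special_page hook lets a driver resolve the backing page for such an address itself. A purely illustrative driver-side sketch (my_find_special_page, my_priv and my_pages are made-up names, not from this patch):

/* Illustrative only: a driver resolving its own pte_special() mappings. */
static struct page *my_find_special_page(struct vm_area_struct *vma,
                                         unsigned long addr)
{
        struct my_priv *priv = vma->vm_private_data;

        return priv->my_pages[(addr - vma->vm_start) >> PAGE_SHIFT];
}

static const struct vm_operations_struct my_vm_ops = {
        .find_special_page = my_find_special_page,
};
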
@@ -2005,7 +2008,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        pte_t entry;
        int ret = 0;
        int page_mkwrite = 0;
-       struct page *dirty_page = NULL;
+       bool dirty_shared = false;
        unsigned long mmun_start = 0;   /* For mmu_notifiers */
        unsigned long mmun_end = 0;     /* For mmu_notifiers */
        struct mem_cgroup *memcg;
@@ -2056,6 +2059,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unlock_page(old_page);
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
+               page_cache_get(old_page);
                /*
                 * Only catch write-faults on shared writable pages,
                 * read-only shared pages can get COWed by
@@ -2063,7 +2067,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 */
                if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
                        int tmp;
-                       page_cache_get(old_page);
+
                        pte_unmap_unlock(page_table, ptl);
                        tmp = do_page_mkwrite(vma, old_page, address);
                        if (unlikely(!tmp || (tmp &
@@ -2083,11 +2087,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                unlock_page(old_page);
                                goto unlock;
                        }
-
                        page_mkwrite = 1;
                }
-               dirty_page = old_page;
-               get_page(dirty_page);
+
+               dirty_shared = true;
 
 reuse:
                /*
@@ -2106,43 +2109,29 @@ reuse:
                pte_unmap_unlock(page_table, ptl);
                ret |= VM_FAULT_WRITE;
 
-               if (!dirty_page)
-                       return ret;
-
-               if (!page_mkwrite) {
+               if (dirty_shared) {
                        struct address_space *mapping;
                        int dirtied;
 
-                       lock_page(dirty_page);
-                       dirtied = set_page_dirty(dirty_page);
-                       VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
-                       mapping = dirty_page->mapping;
-                       unlock_page(dirty_page);
+                       if (!page_mkwrite)
+                               lock_page(old_page);
 
-                       if (dirtied && mapping) {
-                               /*
-                                * Some device drivers do not set page.mapping
-                                * but still dirty their pages
-                                */
-                               balance_dirty_pages_ratelimited(mapping);
-                       }
+                       dirtied = set_page_dirty(old_page);
+                       VM_BUG_ON_PAGE(PageAnon(old_page), old_page);
+                       mapping = old_page->mapping;
+                       unlock_page(old_page);
+                       page_cache_release(old_page);
 
-                       file_update_time(vma->vm_file);
-               }
-               put_page(dirty_page);
-               if (page_mkwrite) {
-                       struct address_space *mapping = dirty_page->mapping;
-
-                       set_page_dirty(dirty_page);
-                       unlock_page(dirty_page);
-                       page_cache_release(dirty_page);
-                       if (mapping)    {
+                       if ((dirtied || page_mkwrite) && mapping) {
                                /*
                                 * Some device drivers do not set page.mapping
                                 * but still dirty their pages
                                 */
                                balance_dirty_pages_ratelimited(mapping);
                        }
+
+                       if (!page_mkwrite)
+                               file_update_time(vma->vm_file);
                }
 
                return ret;
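The two !page_mkwrite checks in the merged branch above rely on do_page_mkwrite() leaving the page locked whenever ->page_mkwrite() succeeded, so only the non-mkwrite path has to take the page lock itself before dirtying. A simplified sketch of that contract (abridged, and not part of this diff):

/* Simplified contract of do_page_mkwrite(), as relied on above (sketch). */
static int do_page_mkwrite_sketch(struct vm_area_struct *vma,
                                  struct page *page, unsigned long address)
{
        struct vm_fault vmf = {
                .virtual_address = (void __user *)(address & PAGE_MASK),
                .pgoff           = page->index,
                .flags           = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
                .page            = page,
        };
        int ret = vma->vm_ops->page_mkwrite(vma, &vmf);

        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
                return ret;
        if (!(ret & VM_FAULT_LOCKED)) {
                /* The filesystem dropped the page lock; retake it. */
                lock_page(page);
                if (!page->mapping) {
                        /* Truncated under us: tell the caller to retry. */
                        unlock_page(page);
                        return 0;
                }
                ret |= VM_FAULT_LOCKED;
        }
        return ret;
}
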
@@ -3334,15 +3323,17 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 
        spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
-       if (pud_present(*pud))          /* Another has populated it */
-               pmd_free(mm, new);
-       else
+       if (!pud_present(*pud)) {
+               mm_inc_nr_pmds(mm);
                pud_populate(mm, pud, new);
-#else
-       if (pgd_present(*pud))          /* Another has populated it */
+       } else  /* Another has populated it */
                pmd_free(mm, new);
-       else
+#else
+       if (!pgd_present(*pud)) {
+               mm_inc_nr_pmds(mm);
                pgd_populate(mm, pud, new);
+       } else /* Another has populated it */
+               pmd_free(mm, new);
 #endif /* __ARCH_HAS_4LEVEL_HACK */
        spin_unlock(&mm->page_table_lock);
        return 0;
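
With the increments here and the decrement in free_pmd_range(), nr_pmds tracks the number of live PMD tables in each mm. The same series surfaces the counter to userspace; roughly, and simplified from the /proc reporting code that is not shown in this diff:

/* Sketch: each PMD table costs PTRS_PER_PMD * sizeof(pmd_t) bytes; the
 * /proc/<pid>/status code reports it in kB on the VmPMD line. */
unsigned long pmd_bytes = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
seq_printf(m, "VmPMD:\t%8lu kB\n", pmd_bytes >> 10);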