mm/huge_memory.c (cascardo/linux.git)
index 91d3efb..f0e5306 100644
@@ -671,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                set_pmd_at(mm, haddr, pmd, entry);
                prepare_pmd_huge_pte(pgtable, mm);
                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+               mm->nr_ptes++;
                spin_unlock(&mm->page_table_lock);
        }
 
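Together with the matching removals in do_huge_pmd_wp_page_fallback(), __split_huge_page_map() and collapse_huge_page() further down, this hunk moves the mm->nr_ptes accounting from split time to the moment the preallocated page table is deposited, so the counter balances even when a huge page is zapped before it is ever split. A minimal userspace sketch of that invariant; mm_model, deposit_pgtable() and withdraw_pgtable() are invented names, not kernel API:

#include <assert.h>
#include <stdio.h>

/* Invented model of the nr_ptes invariant this patch restores: the
 * counter rises when a page table is deposited at fault time and
 * falls when it is withdrawn, so it reads zero once every mapping
 * is gone, whether or not the huge page was ever split. */
struct mm_model {
        long nr_ptes;
};

static void deposit_pgtable(struct mm_model *mm)
{
        mm->nr_ptes++;          /* as __do_huge_pmd_anonymous_page() now does */
}

static void withdraw_pgtable(struct mm_model *mm)
{
        mm->nr_ptes--;          /* as zap_huge_pmd() now does */
}

int main(void)
{
        struct mm_model mm = { 0 };

        deposit_pgtable(&mm);   /* huge-page fault */
        withdraw_pgtable(&mm);  /* zap, without any split in between */

        assert(mm.nr_ptes == 0);
        printf("nr_ptes balanced at %ld\n", mm.nr_ptes);
        return 0;
}
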
@@ -789,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
        prepare_pmd_huge_pte(pgtable, dst_mm);
+       dst_mm->nr_ptes++;
 
        ret = 0;
 out_unlock:
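At fork time the copied pmd is made old and write-protected before being installed in the child, so a later write faults and copy-on-write kicks in; the deposited page table is accounted just as in the fault path above. A toy model of the bit clearing, with hypothetical flag values standing in for the real pmd bits:

#include <stdio.h>

/* Toy model of the fork-time copy: clearing hypothetical write and
 * young bits mirrors what pmd_wrprotect()/pmd_mkold() do, so the
 * child's first write faults and triggers COW of the huge page. */
#define PMD_WRITE       0x1u
#define PMD_YOUNG       0x2u

static unsigned int pmd_wrprotect(unsigned int pmd)
{
        return pmd & ~PMD_WRITE;
}

static unsigned int pmd_mkold(unsigned int pmd)
{
        return pmd & ~PMD_YOUNG;
}

int main(void)
{
        unsigned int parent = PMD_WRITE | PMD_YOUNG;
        unsigned int child = pmd_mkold(pmd_wrprotect(parent));

        printf("child writable: %s, young: %s\n",
               (child & PMD_WRITE) ? "yes" : "no",
               (child & PMD_YOUNG) ? "yes" : "no");
        return 0;
}
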
@@ -887,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        }
        kfree(pages);
 
-       mm->nr_ptes++;
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
        page_remove_rmap(page);
@@ -1030,31 +1031,23 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 {
        int ret = 0;
 
-       spin_lock(&tlb->mm->page_table_lock);
-       if (likely(pmd_trans_huge(*pmd))) {
-               if (unlikely(pmd_trans_splitting(*pmd))) {
-                       spin_unlock(&tlb->mm->page_table_lock);
-                       wait_split_huge_page(vma->anon_vma,
-                                            pmd);
-               } else {
-                       struct page *page;
-                       pgtable_t pgtable;
-                       pgtable = get_pmd_huge_pte(tlb->mm);
-                       page = pmd_page(*pmd);
-                       pmd_clear(pmd);
-                       tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
-                       page_remove_rmap(page);
-                       VM_BUG_ON(page_mapcount(page) < 0);
-                       add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-                       VM_BUG_ON(!PageHead(page));
-                       spin_unlock(&tlb->mm->page_table_lock);
-                       tlb_remove_page(tlb, page);
-                       pte_free(tlb->mm, pgtable);
-                       ret = 1;
-               }
-       } else
+       if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+               struct page *page;
+               pgtable_t pgtable;
+               pgtable = get_pmd_huge_pte(tlb->mm);
+               page = pmd_page(*pmd);
+               pmd_clear(pmd);
+               tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+               page_remove_rmap(page);
+               VM_BUG_ON(page_mapcount(page) < 0);
+               add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+               VM_BUG_ON(!PageHead(page));
+               tlb->mm->nr_ptes--;
                spin_unlock(&tlb->mm->page_table_lock);
-
+               tlb_remove_page(tlb, page);
+               pte_free(tlb->mm, pgtable);
+               ret = 1;
+       }
        return ret;
 }
 
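zap_huge_pmd() now lets the new __pmd_trans_huge_lock() helper (added at the end of this patch) take the lock and filter out the splitting case, keeping only the teardown itself, and it picks up the nr_ptes decrement that pairs with the increments above. Note the ordering: the pmd, the counters and the rmap are updated under page_table_lock, while the page and the deposited page table are freed only after the unlock. A rough userspace sketch of that defer-the-freeing ordering; every name in it is invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Rough model of the teardown ordering above: detach and account
 * under the lock, but free the page and the deposited page table
 * only after dropping it, as tlb_remove_page()/pte_free() do. */
static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static long nr_ptes = 1;

int main(void)
{
        void *page = malloc(4096);      /* stands in for the huge page */
        void *pgtable = malloc(4096);   /* the deposited page table    */

        pthread_mutex_lock(&page_table_lock);
        nr_ptes--;                      /* accounting under the lock   */
        pthread_mutex_unlock(&page_table_lock);

        free(page);                     /* deferred until after unlock */
        free(pgtable);
        printf("nr_ptes is now %ld\n", nr_ptes);
        return 0;
}
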
@@ -1064,21 +1057,15 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
        int ret = 0;
 
-       spin_lock(&vma->vm_mm->page_table_lock);
-       if (likely(pmd_trans_huge(*pmd))) {
-               ret = !pmd_trans_splitting(*pmd);
-               spin_unlock(&vma->vm_mm->page_table_lock);
-               if (unlikely(!ret))
-                       wait_split_huge_page(vma->anon_vma, pmd);
-               else {
-                       /*
-                        * All logical pages in the range are present
-                        * if backed by a huge page.
-                        */
-                       memset(vec, 1, (end - addr) >> PAGE_SHIFT);
-               }
-       } else
+       if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+               /*
+                * All logical pages in the range are present
+                * if backed by a huge page.
+                */
                spin_unlock(&vma->vm_mm->page_table_lock);
+               memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+               ret = 1;
+       }
 
        return ret;
 }
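Because a resident huge page backs every base page in its range, mincore can report the whole range present with a single memset of one status byte per 4 KiB page. A standalone illustration of the (end - addr) >> PAGE_SHIFT arithmetic, assuming x86-64's 4 KiB base and 2 MiB huge pages:

#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT      12              /* 4 KiB base pages   */
#define HPAGE_PMD_SIZE  (1UL << 21)     /* 2 MiB thp (x86-64) */

int main(void)
{
        unsigned long addr = 0, end = HPAGE_PMD_SIZE;
        unsigned char vec[HPAGE_PMD_SIZE >> PAGE_SHIFT];

        /* One mincore() status byte per base page: a resident huge
         * page answers for all 512 of them with a single memset. */
        memset(vec, 1, (end - addr) >> PAGE_SHIFT);
        printf("%zu pages marked present\n", sizeof(vec));
        return 0;
}
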
@@ -1108,20 +1095,11 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
                goto out;
        }
 
-       spin_lock(&mm->page_table_lock);
-       if (likely(pmd_trans_huge(*old_pmd))) {
-               if (pmd_trans_splitting(*old_pmd)) {
-                       spin_unlock(&mm->page_table_lock);
-                       wait_split_huge_page(vma->anon_vma, old_pmd);
-                       ret = -1;
-               } else {
-                       pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
-                       VM_BUG_ON(!pmd_none(*new_pmd));
-                       set_pmd_at(mm, new_addr, new_pmd, pmd);
-                       spin_unlock(&mm->page_table_lock);
-                       ret = 1;
-               }
-       } else {
+       ret = __pmd_trans_huge_lock(old_pmd, vma);
+       if (ret == 1) {
+               pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
+               VM_BUG_ON(!pmd_none(*new_pmd));
+               set_pmd_at(mm, new_addr, new_pmd, pmd);
                spin_unlock(&mm->page_table_lock);
        }
 out:
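move_huge_pmd() is the caller that uses the helper's return value directly as its own: 1 means the huge pmd was moved, -1 means it was under splitting and the caller falls back, 0 means there was no thp to begin with. A stub sketch of handling that three-way contract; fake_trans_huge_lock() is a stand-in, not the kernel function:

#include <stdio.h>

/* Stub illustration of the three-way contract: 1 = stable thp
 * (returned with the lock held), 0 = no thp here, -1 = thp under
 * splitting. */
static int fake_trans_huge_lock(int state)
{
        return state;
}

static const char *handle(int state)
{
        switch (fake_trans_huge_lock(state)) {
        case 1:
                return "move the huge pmd, then unlock";
        case -1:
                return "under splitting: fall back to the pte path";
        default:
                return "no thp: use the normal page table path";
        }
}

int main(void)
{
        printf("%s\n%s\n%s\n", handle(1), handle(-1), handle(0));
        return 0;
}
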
@@ -1134,24 +1112,41 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        struct mm_struct *mm = vma->vm_mm;
        int ret = 0;
 
-       spin_lock(&mm->page_table_lock);
+       if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+               pmd_t entry;
+               entry = pmdp_get_and_clear(mm, addr, pmd);
+               entry = pmd_modify(entry, newprot);
+               set_pmd_at(mm, addr, pmd, entry);
+               spin_unlock(&vma->vm_mm->page_table_lock);
+               ret = 1;
+       }
+
+       return ret;
+}
+
+/*
+ * Returns 1 if a given pmd maps a stable (not under splitting) thp.
+ * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
+ *
+ * Note that on a return value of 1 this routine returns with the page
+ * table lock still held; the caller is responsible for unlocking it.
+ */
+int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
+{
+       spin_lock(&vma->vm_mm->page_table_lock);
        if (likely(pmd_trans_huge(*pmd))) {
                if (unlikely(pmd_trans_splitting(*pmd))) {
-                       spin_unlock(&mm->page_table_lock);
+                       spin_unlock(&vma->vm_mm->page_table_lock);
                        wait_split_huge_page(vma->anon_vma, pmd);
+                       return -1;
                } else {
-                       pmd_t entry;
-
-                       entry = pmdp_get_and_clear(mm, addr, pmd);
-                       entry = pmd_modify(entry, newprot);
-                       set_pmd_at(mm, addr, pmd, entry);
-                       spin_unlock(&vma->vm_mm->page_table_lock);
-                       ret = 1;
+                       /* The thp mapped by 'pmd' is stable, so we
+                        * can handle it as it is. */
+                       return 1;
                }
-       } else
-               spin_unlock(&vma->vm_mm->page_table_lock);
-
-       return ret;
+       }
+       spin_unlock(&vma->vm_mm->page_table_lock);
+       return 0;
 }
 
 pmd_t *page_check_address_pmd(struct page *page,
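The contract above is the unusual part: on a return of 1 the helper comes back still holding page_table_lock, so the success path in each converted caller owns the unlock. A minimal pthread analogue of that return-locked-on-success pattern; obj, is_huge and obj_trans_lock() are invented names:

#include <pthread.h>
#include <stdio.h>

/* Minimal analogue of __pmd_trans_huge_lock()'s contract: on 1 the
 * helper keeps the lock so the caller can work on a stable object
 * and must unlock; otherwise the lock is already dropped. */
struct obj {
        pthread_mutex_t lock;
        int is_huge;            /* stands in for pmd_trans_huge() */
};

static int obj_trans_lock(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        if (o->is_huge)
                return 1;       /* still locked: caller must unlock */
        pthread_mutex_unlock(&o->lock);
        return 0;
}

int main(void)
{
        struct obj o = { PTHREAD_MUTEX_INITIALIZER, 1 };

        if (obj_trans_lock(&o) == 1) {
                o.is_huge = 0;  /* operate on the stable mapping */
                pthread_mutex_unlock(&o.lock);
                puts("handled under the helper's lock");
        }
        return 0;
}
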
@@ -1375,7 +1370,6 @@ static int __split_huge_page_map(struct page *page,
                        pte_unmap(pte);
                }
 
-               mm->nr_ptes++;
                smp_wmb(); /* make pte visible before pmd */
                /*
                 * Up to this point the pmd is present and huge and
@@ -1988,7 +1982,6 @@ static void collapse_huge_page(struct mm_struct *mm,
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache(vma, address, _pmd);
        prepare_pmd_huge_pte(pgtable, mm);
-       mm->nr_ptes--;
        spin_unlock(&mm->page_table_lock);
 
 #ifndef CONFIG_NUMA