Merge tag 'dm-4.8-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device...
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9ed5853..343a2b7 100644
@@ -1624,14 +1624,9 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (next - addr != HPAGE_PMD_SIZE) {
                get_page(page);
                spin_unlock(ptl);
-               if (split_huge_page(page)) {
-                       put_page(page);
-                       unlock_page(page);
-                       goto out_unlocked;
-               }
+               split_huge_page(page);
                put_page(page);
                unlock_page(page);
-               ret = 1;
                goto out_unlocked;
        }
 
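For reference, this branch of madvise_free_huge_pmd() after the patch reads as below (reconstructed from the hunk above; the surrounding function is not shown). The return value of split_huge_page() is now ignored, both outcomes take the same put_page()/unlock_page() cleanup, and ret is no longer set to 1 on this path.

        if (next - addr != HPAGE_PMD_SIZE) {
                get_page(page);
                spin_unlock(ptl);
                split_huge_page(page);  /* result ignored: cleanup is identical either way */
                put_page(page);
                unlock_page(page);
                goto out_unlocked;
        }
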
@@ -2989,7 +2984,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-               unsigned long address, bool freeze)
+               unsigned long address, bool freeze, struct page *page)
 {
        spinlock_t *ptl;
        struct mm_struct *mm = vma->vm_mm;
@@ -2997,8 +2992,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
        mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
        ptl = pmd_lock(mm, pmd);
+
+       /*
+        * If the caller asks to set up migration entries, we need a page to
+        * check the pmd against; otherwise we could replace the wrong page.
+        */
+       VM_BUG_ON(freeze && !page);
+       if (page && page != pmd_page(*pmd))
+               goto out;
+
        if (pmd_trans_huge(*pmd)) {
-               struct page *page = pmd_page(*pmd);
+               page = pmd_page(*pmd);
                if (PageMlocked(page))
                        clear_page_mlock(page);
        } else if (!pmd_devmap(*pmd))
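Read together, the two hunks above leave the entry of __split_huge_pmd() looking roughly like this. This is reconstructed from this diff only: the haddr declaration, the body of the final else-if, and the out: label sit outside the hunks and are elided. Note that the page identity check now runs with the pmd lock held, whereas split_huge_pmd_address() previously performed it before the lock was taken.

        void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long address, bool freeze, struct page *page)
        {
                spinlock_t *ptl;
                struct mm_struct *mm = vma->vm_mm;
                /* ... haddr setup not part of this diff ... */

                mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
                ptl = pmd_lock(mm, pmd);

                /*
                 * If the caller asks to set up migration entries, we need a page to
                 * check the pmd against; otherwise we could replace the wrong page.
                 */
                VM_BUG_ON(freeze && !page);
                if (page && page != pmd_page(*pmd))
                        goto out;       /* raced: the pmd no longer maps the expected page */

                if (pmd_trans_huge(*pmd)) {
                        page = pmd_page(*pmd);
                        if (PageMlocked(page))
                                clear_page_mlock(page);
                } else if (!pmd_devmap(*pmd))
                        /* ... remainder, including the out: label, lies past this hunk ... */
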
@@ -3025,24 +3029,8 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                return;
 
        pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
-               return;
 
-       /*
-        * If caller asks to setup a migration entries, we need a page to check
-        * pmd against. Otherwise we can end up replacing wrong page.
-        */
-       VM_BUG_ON(freeze && !page);
-       if (page && page != pmd_page(*pmd))
-               return;
-
-       /*
-        * Caller holds the mmap_sem write mode or the anon_vma lock,
-        * so a huge pmd cannot materialize from under us (khugepaged
-        * holds both the mmap_sem write mode and the anon_vma lock
-        * write mode).
-        */
-       __split_huge_pmd(vma, pmd, address, freeze);
+       __split_huge_pmd(vma, pmd, address, freeze, page);
 }
 
 void vma_adjust_trans_huge(struct vm_area_struct *vma,
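
With the check moved, split_huge_pmd_address() is reduced to resolving the pmd and delegating to __split_huge_pmd(). An illustrative call pattern follows, assuming the full signature is split_huge_pmd_address(vma, address, freeze, page) as the hunks suggest, with 'page' being the locked page the caller expects to find mapped at 'address':

        /*
         * Freeze: set up migration entries. 'page' must be non-NULL
         * (VM_BUG_ON(freeze && !page)), and if the pmd no longer maps it,
         * __split_huge_pmd() backs out without splitting.
         */
        split_huge_pmd_address(vma, address, true, page);

        /* Plain split: no freeze, and passing NULL skips the identity check. */
        split_huge_pmd_address(vma, address, false, NULL);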