diff --git a/mm/rmap.c b/mm/rmap.c
index 02f0bfc..c399a0d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1431,6 +1431,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
                goto out;
 
+       if (flags & TTU_SPLIT_HUGE_PMD) {
+               split_huge_pmd_address(vma, address,
+                               flags & TTU_MIGRATION, page);
+               /* check if we have anything to do after split */
+               if (page_mapcount(page) == 0)
+                       goto out;
+       }
+
        pte = page_check_address(page, mm, address, &ptl, 0);
        if (!pte)
                goto out;
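
The hunk above lets try_to_unmap_one() accept a PMD-mapped THP: the PMD is split in place before the PTE-level work starts, and TTU_MIGRATION is forwarded to split_huge_pmd_address() as its "freeze" argument, so the split installs migration entries rather than leaving the PTEs live. A minimal caller-side sketch; the helper name and exact flag mix are illustrative, not taken from this patch:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical caller: unmap a THP head, splitting huge PMDs on the way. */
static int unmap_thp(struct page *head)
{
	enum ttu_flags flags = TTU_MIGRATION | TTU_IGNORE_MLOCK |
			       TTU_IGNORE_ACCESS | TTU_SPLIT_HUGE_PMD;

	/* If the split already dropped the last mapping of this page,
	 * the new page_mapcount() check above exits early. */
	return try_to_unmap(head, flags);
}
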
@@ -1576,10 +1584,10 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
        return is_vma_temporary_stack(vma);
 }
 
-static int page_not_mapped(struct page *page)
+static int page_mapcount_is_zero(struct page *page)
 {
-       return !page_mapped(page);
-};
+       return !page_mapcount(page);
+}
 
 /**
  * try_to_unmap - try to remove all page table mappings to a page
@@ -1606,12 +1614,10 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
        struct rmap_walk_control rwc = {
                .rmap_one = try_to_unmap_one,
                .arg = &rp,
-               .done = page_not_mapped,
+               .done = page_mapcount_is_zero,
                .anon_lock = page_lock_anon_vma_read,
        };
 
-       VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
-
        /*
         * During exec, a temporary VMA is setup and later moved.
         * The VMA is moved under the anon_vma lock but not the
@@ -1623,9 +1629,12 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
        if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
                rwc.invalid_vma = invalid_migration_vma;
 
-       ret = rmap_walk(page, &rwc);
+       if (flags & TTU_RMAP_LOCKED)
+               ret = rmap_walk_locked(page, &rwc);
+       else
+               ret = rmap_walk(page, &rwc);
 
-       if (ret != SWAP_MLOCK && !page_mapped(page)) {
+       if (ret != SWAP_MLOCK && !page_mapcount(page)) {
                ret = SWAP_SUCCESS;
                if (rp.lazyfreed && !PageDirty(page))
                        ret = SWAP_LZFREE;
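
This hunk adds two things: TTU_RMAP_LOCKED, a caller's promise that it already holds the rmap lock the walk would otherwise take, and a success test based on page_mapcount() rather than page_mapped(), so unmapping this exact page can succeed even while the compound page as a whole still has other subpages mapped. A hedged sketch of the locked calling convention for an anon page (names illustrative, not from this patch):

#include <linux/rmap.h>

static int unmap_with_rmap_held(struct page *page,
				struct anon_vma *anon_vma)
{
	int ret;

	anon_vma_lock_write(anon_vma);
	/* the walk below must not retake the lock we already hold */
	ret = try_to_unmap(page, TTU_MIGRATION | TTU_RMAP_LOCKED);
	anon_vma_unlock_write(anon_vma);
	return ret;
}
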
@@ -1633,6 +1642,11 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
        return ret;
 }
 
+static int page_not_mapped(struct page *page)
+{
+       return !page_mapped(page);
+}
+
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked
@@ -1715,14 +1729,21 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
  * LOCKED.
  */
-static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
+static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
+               bool locked)
 {
        struct anon_vma *anon_vma;
        pgoff_t pgoff;
        struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;
 
-       anon_vma = rmap_walk_anon_lock(page, rwc);
+       if (locked) {
+               anon_vma = page_anon_vma(page);
+               /* did anon_vma disappear under us? */
+               VM_BUG_ON_PAGE(!anon_vma, page);
+       } else {
+               anon_vma = rmap_walk_anon_lock(page, rwc);
+       }
        if (!anon_vma)
                return ret;
 
@@ -1742,7 +1763,9 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
                if (rwc->done && rwc->done(page))
                        break;
        }
-       anon_vma_unlock_read(anon_vma);
+
+       if (!locked)
+               anon_vma_unlock_read(anon_vma);
        return ret;
 }
 
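In the locked case the anon walk neither takes nor drops the anon_vma lock: the caller has pinned the anon_vma and holds its rwsem for the whole walk, which is why page_anon_vma() can be asserted non-NULL above. A sketch of that obligation, using the rmap_walk_locked() entry point added at the end of this patch (hypothetical wrapper; assumes the read side is sufficient for the walk in question):

#include <linux/rmap.h>

static void walk_anon_under_lock(struct page *page,
				 struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma = page_anon_vma(page);

	if (!anon_vma)
		return;		/* not anonymous */

	anon_vma_lock_read(anon_vma);
	rmap_walk_locked(page, rwc);	/* reuses the lock we hold */
	anon_vma_unlock_read(anon_vma);
}
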
@@ -1759,9 +1782,10 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
  * LOCKED.
  */
-static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
+static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
+               bool locked)
 {
-       struct address_space *mapping = page->mapping;
+       struct address_space *mapping = page_mapping(page);
        pgoff_t pgoff;
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;
@@ -1778,7 +1802,8 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
                return ret;
 
        pgoff = page_to_pgoff(page);
-       i_mmap_lock_read(mapping);
+       if (!locked)
+               i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma_address(page, vma);
 
@@ -1795,7 +1820,8 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
        }
 
 done:
-       i_mmap_unlock_read(mapping);
+       if (!locked)
+               i_mmap_unlock_read(mapping);
        return ret;
 }
 
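Two parallel changes on the file side: the mapping is read via page_mapping() instead of the raw page->mapping pointer, and i_mmap_rwsem is only taken and released when the caller does not already hold it. On the accessor: page->mapping is overloaded, carrying an anon_vma pointer tagged with PAGE_MAPPING_ANON in its low bits for anonymous pages, and page_mapping() decodes that instead of trusting the raw value. A tiny illustration (hypothetical helper, behavior as sketched above):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* True only for pages with a real file address_space behind them. */
static bool page_backed_by_file(struct page *page)
{
	return !PageAnon(page) && page_mapping(page) != NULL;
}
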
@@ -1804,9 +1830,20 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
        if (unlikely(PageKsm(page)))
                return rmap_walk_ksm(page, rwc);
        else if (PageAnon(page))
-               return rmap_walk_anon(page, rwc);
+               return rmap_walk_anon(page, rwc, false);
+       else
+               return rmap_walk_file(page, rwc, false);
+}
+
+/* Like rmap_walk, but caller holds relevant rmap lock */
+int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
+{
+       /* no KSM support for now */
+       VM_BUG_ON_PAGE(PageKsm(page), page);
+       if (PageAnon(page))
+               return rmap_walk_anon(page, rwc, true);
        else
-               return rmap_walk_file(page, rwc);
+               return rmap_walk_file(page, rwc, true);
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
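
rmap_walk_locked() completes the interface: same rmap_walk_control, same callbacks, but the rmap lock (anon_vma lock for anon pages, i_mmap_rwsem for file pages) is owned by the caller, and KSM pages are rejected for now. A self-contained usage sketch with a trivial callback; every name here is hypothetical:

#include <linux/rmap.h>

/* Count the VMAs mapping @page; SWAP_AGAIN keeps the walk going. */
static int count_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	(*(int *)arg)++;
	return SWAP_AGAIN;
}

/* Caller must already hold the relevant rmap lock. */
static int count_mappings_locked(struct page *page)
{
	int nr = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_one,
		.arg = &nr,
	};

	rmap_walk_locked(page, &rwc);
	return nr;
}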