rmap: add argument to charge compound page
index b5cd647..b4f7b69 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -740,8 +740,7 @@ static int remove_stable_node(struct stable_node *stable_node)
 
 static int remove_all_stable_nodes(void)
 {
-       struct stable_node *stable_node;
-       struct list_head *this, *next;
+       struct stable_node *stable_node, *next;
        int nid;
        int err = 0;
 
@@ -756,8 +755,7 @@ static int remove_all_stable_nodes(void)
                        cond_resched();
                }
        }
-       list_for_each_safe(this, next, &migrate_nodes) {
-               stable_node = list_entry(this, struct stable_node, list);
+       list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
                if (remove_stable_node(stable_node))
                        err = -EBUSY;
                cond_resched();
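
The hunks touching remove_all_stable_nodes(), scan_get_next_rmap_item() and ksm_check_stable_tree() are the same cleanup: the migrate_nodes walk switches from open-coded list_for_each_safe() plus list_entry() to list_for_each_entry_safe(), dropping the temporary struct list_head pointers while still caching the next element so the current one may be removed mid-walk. Below is a minimal user-space sketch of that iterate-while-deleting pattern; the list_head layout and the _safe macro are re-declared only for the demo, and names such as my_node and demo_list are invented, not taken from mm/ksm.c.

/*
 * User-space sketch of list_for_each_entry_safe(): "next" is read before
 * the loop body runs, so the body may unlink "pos" from the list.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_for_each_entry_safe(pos, n, head, member)                     \
        for (pos = list_entry((head)->next, typeof(*pos), member),         \
             n = list_entry(pos->member.next, typeof(*pos), member);       \
             &pos->member != (head);                                       \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

struct my_node {
        int id;
        struct list_head list;
};

int main(void)
{
        struct list_head demo_list = LIST_HEAD_INIT(demo_list);
        struct my_node nodes[3];
        struct my_node *pos, *next;
        int i;

        /* Link three nodes onto the tail of the circular list. */
        for (i = 0; i < 3; i++) {
                nodes[i].id = i;
                nodes[i].list.next = &demo_list;
                nodes[i].list.prev = demo_list.prev;
                demo_list.prev->next = &nodes[i].list;
                demo_list.prev = &nodes[i].list;
        }

        /*
         * Unlinking pos inside the body is safe because next was fetched
         * before the body ran -- the point of the _safe variant used in
         * the hunks above.
         */
        list_for_each_entry_safe(pos, next, &demo_list, list) {
                printf("removing node %d\n", pos->id);
                pos->list.prev->next = pos->list.next;
                pos->list.next->prev = pos->list.prev;
        }
        return 0;
}
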
@@ -958,13 +956,13 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
        }
 
        get_page(kpage);
-       page_add_anon_rmap(kpage, vma, addr);
+       page_add_anon_rmap(kpage, vma, addr, false);
 
        flush_cache_page(vma, addr, pte_pfn(*ptep));
        ptep_clear_flush_notify(vma, addr, ptep);
        set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 
-       page_remove_rmap(page);
+       page_remove_rmap(page, false);
        if (!page_mapped(page))
                try_to_free_swap(page);
        put_page(page);
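
The replace_page() hunk above is the interface change named in the commit title: page_add_anon_rmap() and page_remove_rmap() now take an extra boolean saying whether the page is mapped as a compound (huge) page, and KSM only ever merges small pages, so both calls pass false. The toy user-space model below is a hedged sketch, not kernel code; every name in it (toy_page, toy_add_anon_rmap, SUBPAGES_PER_HUGE, ...) is invented, and it only mirrors the idea that a compound mapping is charged once against the whole huge page while a small mapping is charged against a single subpage.

/*
 * Toy model of the "compound" flag added to the rmap helpers.  The names
 * are invented for this sketch and only illustrate the bookkeeping split
 * between whole-huge-page mappings and per-subpage mappings.
 */
#include <stdbool.h>
#include <stdio.h>

#define SUBPAGES_PER_HUGE 512   /* e.g. 2 MiB huge page / 4 KiB base pages */

struct toy_page {
        int compound_mapcount;                    /* maps of the whole huge page */
        int subpage_mapcount[SUBPAGES_PER_HUGE];  /* maps of each base page */
};

/* Mirrors page_add_anon_rmap(page, vma, addr, compound) in spirit only. */
static void toy_add_anon_rmap(struct toy_page *page, int subpage, bool compound)
{
        if (compound)
                page->compound_mapcount++;          /* one PMD maps all subpages */
        else
                page->subpage_mapcount[subpage]++;  /* one PTE maps one subpage */
}

static void toy_remove_rmap(struct toy_page *page, int subpage, bool compound)
{
        if (compound)
                page->compound_mapcount--;
        else
                page->subpage_mapcount[subpage]--;
}

int main(void)
{
        struct toy_page page = { 0 };

        /* KSM-style caller: always a small mapping, so compound == false. */
        toy_add_anon_rmap(&page, 0, false);
        printf("subpage 0 mapcount: %d\n", page.subpage_mapcount[0]);

        /* A caller mapping the whole unit at once would pass true instead. */
        toy_add_anon_rmap(&page, 0, true);
        printf("compound mapcount: %d\n", page.compound_mapcount);

        toy_remove_rmap(&page, 0, false);
        toy_remove_rmap(&page, 0, true);
        return 0;
}
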
@@ -1583,13 +1581,11 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
                 * so prune them once before each full scan.
                 */
                if (!ksm_merge_across_nodes) {
-                       struct stable_node *stable_node;
-                       struct list_head *this, *next;
+                       struct stable_node *stable_node, *next;
                        struct page *page;
 
-                       list_for_each_safe(this, next, &migrate_nodes) {
-                               stable_node = list_entry(this,
-                                               struct stable_node, list);
+                       list_for_each_entry_safe(stable_node, next,
+                                                &migrate_nodes, list) {
                                page = get_ksm_page(stable_node, false);
                                if (page)
                                        put_page(page);
@@ -1903,7 +1899,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 
                SetPageDirty(new_page);
                __SetPageUptodate(new_page);
-               __set_page_locked(new_page);
+               __SetPageLocked(new_page);
        }
 
        return new_page;
@@ -2012,8 +2008,7 @@ static void wait_while_offlining(void)
 static void ksm_check_stable_tree(unsigned long start_pfn,
                                  unsigned long end_pfn)
 {
-       struct stable_node *stable_node;
-       struct list_head *this, *next;
+       struct stable_node *stable_node, *next;
        struct rb_node *node;
        int nid;
 
@@ -2034,8 +2029,7 @@ static void ksm_check_stable_tree(unsigned long start_pfn,
                        cond_resched();
                }
        }
-       list_for_each_safe(this, next, &migrate_nodes) {
-               stable_node = list_entry(this, struct stable_node, list);
+       list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
                if (stable_node->kpfn >= start_pfn &&
                    stable_node->kpfn < end_pfn)
                        remove_node_from_stable_tree(stable_node);