mm/zsmalloc: use helper to clear page->flags bit
diff --git a/mm/compaction.c b/mm/compaction.c
index 7bc0477..e5995f3 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
 #include <linux/backing-dev.h>
 #include <linux/sysctl.h>
 #include <linux/sysfs.h>
-#include <linux/balloon_compaction.h>
 #include <linux/page-isolation.h>
 #include <linux/kasan.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -65,13 +65,27 @@ static unsigned long release_freepages(struct list_head *freelist)
 
 static void map_pages(struct list_head *list)
 {
-       struct page *page;
+       unsigned int i, order, nr_pages;
+       struct page *page, *next;
+       LIST_HEAD(tmp_list);
+
+       list_for_each_entry_safe(page, next, list, lru) {
+               list_del(&page->lru);
 
-       list_for_each_entry(page, list, lru) {
-               arch_alloc_page(page, 0);
-               kernel_map_pages(page, 1, 1);
-               kasan_alloc_pages(page, 0);
+               order = page_private(page);
+               nr_pages = 1 << order;
+
+               post_alloc_hook(page, order, __GFP_MOVABLE);
+               if (order)
+                       split_page(page, order);
+
+               for (i = 0; i < nr_pages; i++) {
+                       list_add(&page->lru, &tmp_list);
+                       page++;
+               }
        }
+
+       list_splice(&tmp_list, list);
 }
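
Note: post_alloc_hook() consolidates the per-page hooks that map_pages() previously open-coded (arch_alloc_page(), kernel_map_pages(), kasan_alloc_pages()) and also runs the page_owner hook, which is why <linux/page_owner.h> is now included above. A rough sketch of what it does, not the exact upstream body:

void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags)
{
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kasan_alloc_pages(page, order);
	set_page_owner(page, order, gfp_flags);
	/* other debug hooks omitted in this sketch */
}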
 
 static inline bool migrate_async_suitable(int migratetype)
@@ -81,6 +95,44 @@ static inline bool migrate_async_suitable(int migratetype)
 
 #ifdef CONFIG_COMPACTION
 
+int PageMovable(struct page *page)
+{
+       struct address_space *mapping;
+
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       if (!__PageMovable(page))
+               return 0;
+
+       mapping = page_mapping(page);
+       if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
+               return 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(PageMovable);
+
+void __SetPageMovable(struct page *page, struct address_space *mapping)
+{
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
+       page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__SetPageMovable);
+
+void __ClearPageMovable(struct page *page)
+{
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(!PageMovable(page), page);
+       /*
+        * Clear the registered address_space value while keeping the
+        * PAGE_MAPPING_MOVABLE flag so the VM can catch a page the driver
+        * released after isolation; migration then won't try to put it back.
+        */
+       page->mapping = (void *)((unsigned long)page->mapping &
+                               PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__ClearPageMovable);
+
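
Note: the three helpers above form the driver-facing half of non-LRU movable page migration. A driver marks its pages movable by pointing page->mapping at an address_space whose a_ops provide isolate_page/migratepage/putback_page. A minimal usage sketch (driver_aops, driver_mapping and the callbacks are hypothetical names, not part of this patch):

static const struct address_space_operations driver_aops = {
	.isolate_page	= driver_isolate_page,
	.migratepage	= driver_migratepage,
	.putback_page	= driver_putback_page,
};

/* when allocating a page the driver wants compaction to migrate */
lock_page(page);
__SetPageMovable(page, driver_mapping);	/* driver_mapping->a_ops == &driver_aops */
unlock_page(page);

/* when the driver frees the page (it may currently be isolated) */
lock_page(page);
__ClearPageMovable(page);
unlock_page(page);
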
 /* Do not skip compaction more than 64 times */
 #define COMPACT_MAX_DEFER_SHIFT 6
 
@@ -368,12 +420,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
        unsigned long flags = 0;
        bool locked = false;
        unsigned long blockpfn = *start_pfn;
+       unsigned int order;
 
        cursor = pfn_to_page(blockpfn);
 
        /* Isolate free pages. */
        for (; blockpfn < end_pfn; blockpfn++, cursor++) {
-               int isolated, i;
+               int isolated;
                struct page *page = cursor;
 
                /*
@@ -439,17 +492,17 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                                goto isolate_fail;
                }
 
-               /* Found a free page, break it into order-0 pages */
-               isolated = split_free_page(page);
+               /* Found a free page, will break it into order-0 pages */
+               order = page_order(page);
+               isolated = __isolate_free_page(page, order);
                if (!isolated)
                        break;
+               set_page_private(page, order);
 
                total_isolated += isolated;
                cc->nr_freepages += isolated;
-               for (i = 0; i < isolated; i++) {
-                       list_add(&page->lru, freelist);
-                       page++;
-               }
+               list_add_tail(&page->lru, freelist);
+
                if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
                        blockpfn += isolated;
                        break;
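
Note: the removed split_free_page() did the isolation and the order-0 split in one go, under zone->lock. Roughly (a sketch of the old helper, not its exact body):

int split_free_page(struct page *page)
{
	unsigned int order = page_order(page);
	int nr_pages;

	nr_pages = __isolate_free_page(page, order);
	if (!nr_pages)
		return 0;

	/* split into individual order-0 pages */
	set_page_refcounted(page);
	split_page(page, order);
	return nr_pages;
}

By calling only __isolate_free_page() here and recording the order in page_private, the splitting (and the post_alloc_hook() work) is deferred to map_pages(), outside the zone lock.
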
@@ -568,7 +621,7 @@ isolate_freepages_range(struct compact_control *cc,
                 */
        }
 
-       /* split_free_page does not map the pages */
+       /* __isolate_free_page() does not map the pages */
        map_pages(&freelist);
 
        if (pfn < end_pfn) {
@@ -593,8 +646,8 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
        list_for_each_entry(page, &cc->migratepages, lru)
                count[!!page_is_file_cache(page)]++;
 
-       mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
-       mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+       mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, count[0]);
+       mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, count[1]);
 }
 
 /* Similar to reclaim, but different enough that they don't share logic */
@@ -602,12 +655,12 @@ static bool too_many_isolated(struct zone *zone)
 {
        unsigned long active, inactive, isolated;
 
-       inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
-                                       zone_page_state(zone, NR_INACTIVE_ANON);
-       active = zone_page_state(zone, NR_ACTIVE_FILE) +
-                                       zone_page_state(zone, NR_ACTIVE_ANON);
-       isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
-                                       zone_page_state(zone, NR_ISOLATED_ANON);
+       inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
+                       node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
+       active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
+                       node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
+       isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
+                       node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
 
        return isolated > (inactive + active) / 2;
 }
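
For example, on a node with 800 inactive and 400 active pages, too_many_isolated() starts returning true once more than (800 + 400) / 2 = 600 pages have been isolated.
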
@@ -670,7 +723,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
        /* Time to isolate some pages for migration */
        for (; low_pfn < end_pfn; low_pfn++) {
-               bool is_lru;
 
                if (skip_on_failure && low_pfn >= next_skip_pfn) {
                        /*
@@ -700,7 +752,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                 * if contended.
                 */
                if (!(low_pfn % SWAP_CLUSTER_MAX)
-                   && compact_unlock_should_abort(&zone->lru_lock, flags,
+                   && compact_unlock_should_abort(zone_lru_lock(zone), flags,
                                                                &locked, cc))
                        break;
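
Note: zone_lru_lock() comes from the node-lru conversion; the LRU lock now lives in the node's pglist_data rather than in struct zone, and the helper simply resolves to it. Roughly, as added in the mm headers:

static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}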
 
@@ -732,21 +784,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                        continue;
                }
 
-               /*
-                * Check may be lockless but that's ok as we recheck later.
-                * It's possible to migrate LRU pages and balloon pages
-                * Skip any other type of page
-                */
-               is_lru = PageLRU(page);
-               if (!is_lru) {
-                       if (unlikely(balloon_page_movable(page))) {
-                               if (balloon_page_isolate(page)) {
-                                       /* Successfully isolated */
-                                       goto isolate_success;
-                               }
-                       }
-               }
-
                /*
                 * Regardless of being on LRU, compound pages such as THP and
                 * hugetlbfs are not to be compacted. We can potentially save
@@ -763,8 +800,30 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                        goto isolate_fail;
                }
 
-               if (!is_lru)
+               /*
+                * The check may be lockless but that's OK as we recheck later.
+                * It's possible to migrate LRU pages and non-LRU movable pages;
+                * skip any other type of page.
+                */
+               if (!PageLRU(page)) {
+                       /*
+                        * __PageMovable can return false positive so we need
+                        * to verify it under page_lock.
+                        */
+                       if (unlikely(__PageMovable(page)) &&
+                                       !PageIsolated(page)) {
+                               if (locked) {
+                                       spin_unlock_irqrestore(zone_lru_lock(zone),
+                                                                       flags);
+                                       locked = false;
+                               }
+
+                               if (isolate_movable_page(page, isolate_mode))
+                                       goto isolate_success;
+                       }
+
                        goto isolate_fail;
+               }
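
Note: isolate_movable_page() performs the locked recheck that the comment above describes: take a reference, trylock the page, confirm PageMovable() and !PageIsolated() under the lock, then hand the page to the driver's isolate_page callback. A simplified sketch of its flow (error labels collapsed, not the exact body):

bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/* skip pages already on their way back to the allocator */
	if (unlikely(!get_page_unless_zero(page)))
		return false;

	if (!__PageMovable(page) || !trylock_page(page)) {
		put_page(page);
		return false;
	}

	/* recheck under the page lock, then ask the driver to isolate */
	if (!PageMovable(page) || PageIsolated(page))
		goto fail;

	mapping = page_mapping(page);
	if (!mapping->a_ops->isolate_page(page, mode))
		goto fail;

	__SetPageIsolated(page);
	unlock_page(page);
	return true;	/* reference is kept while the page is isolated */

fail:
	unlock_page(page);
	put_page(page);
	return false;
}
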
 
                /*
                 * Migration will fail if an anonymous page is pinned in memory,
@@ -777,7 +836,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
                /* If we already hold the lock, we can skip some rechecking */
                if (!locked) {
-                       locked = compact_trylock_irqsave(&zone->lru_lock,
+                       locked = compact_trylock_irqsave(zone_lru_lock(zone),
                                                                &flags, cc);
                        if (!locked)
                                break;
@@ -797,7 +856,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                        }
                }
 
-               lruvec = mem_cgroup_page_lruvec(page, zone);
+               lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
 
                /* Try isolate the page */
                if (__isolate_lru_page(page, isolate_mode) != 0)
@@ -840,7 +899,7 @@ isolate_fail:
                 */
                if (nr_isolated) {
                        if (locked) {
-                               spin_unlock_irqrestore(&zone->lru_lock, flags);
+                               spin_unlock_irqrestore(zone_lru_lock(zone), flags);
                                locked = false;
                        }
                        acct_isolated(zone, cc);
@@ -868,7 +927,7 @@ isolate_fail:
                low_pfn = end_pfn;
 
        if (locked)
-               spin_unlock_irqrestore(&zone->lru_lock, flags);
+               spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 
        /*
         * Update the pageblock-skip information and cached scanner pfn,
@@ -1059,7 +1118,7 @@ static void isolate_freepages(struct compact_control *cc)
                }
        }
 
-       /* split_free_page does not map the pages */
+       /* __isolate_free_page() does not map the pages */
        map_pages(freelist);
 
        /*
@@ -1622,7 +1681,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
        *contended = COMPACT_CONTENDED_NONE;
 
        /* Check if the GFP flags allow compaction */
-       if (!order || !may_enter_fs || !may_perform_io)
+       if (!may_enter_fs || !may_perform_io)
                return COMPACT_SKIPPED;
 
        trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);