diff --git a/mm/vmscan.c b/mm/vmscan.c
index b93968b..88c5fed 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -128,7 +128,7 @@ struct scan_control {
  * From 0 .. 100.  Higher means more swappy.
  */
 int vm_swappiness = 60;
-long vm_total_pages;   /* The total number of pages which the VM controls */
+unsigned long vm_total_pages;  /* The total number of pages which the VM controls */
 
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
@@ -1579,16 +1579,6 @@ static inline int inactive_anon_is_low(struct lruvec *lruvec)
 }
 #endif
 
-static int inactive_file_is_low_global(struct zone *zone)
-{
-       unsigned long active, inactive;
-
-       active = zone_page_state(zone, NR_ACTIVE_FILE);
-       inactive = zone_page_state(zone, NR_INACTIVE_FILE);
-
-       return (active > inactive);
-}
-
 /**
  * inactive_file_is_low - check if file pages need to be deactivated
  * @lruvec: LRU vector to check
@@ -1605,10 +1595,13 @@ static int inactive_file_is_low_global(struct zone *zone)
  */
 static int inactive_file_is_low(struct lruvec *lruvec)
 {
-       if (!mem_cgroup_disabled())
-               return mem_cgroup_inactive_file_is_low(lruvec);
+       unsigned long inactive;
+       unsigned long active;
+
+       inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE);
+       active = get_lru_size(lruvec, LRU_ACTIVE_FILE);
 
-       return inactive_file_is_low_global(lruvec_zone(lruvec));
+       return active > inactive;
 }
 
 static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
@@ -1684,7 +1677,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
                force_scan = true;
 
        /* If we have no swap space, do not bother scanning anon pages. */
-       if (!sc->may_swap || (nr_swap_pages <= 0)) {
+       if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
                scan_balance = SCAN_FILE;
                goto out;
        }
@@ -1933,7 +1926,7 @@ static inline bool should_continue_reclaim(struct zone *zone,
         */
        pages_for_compaction = (2UL << sc->order);
        inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
-       if (nr_swap_pages > 0)
+       if (get_nr_swap_pages() > 0)
                inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
        if (sc->nr_reclaimed < pages_for_compaction &&
                        inactive_lru_pages > pages_for_compaction)
@@ -2201,6 +2194,13 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                if (sc->nr_reclaimed >= sc->nr_to_reclaim)
                        goto out;
 
+               /*
+                * If we're getting trouble reclaiming, start doing
+                * writepage even in laptop mode.
+                */
+               if (sc->priority < DEF_PRIORITY - 2)
+                       sc->may_writepage = 1;
+
                /*
                 * Try to write back as many pages as we just scanned.  This
                 * tends to cause slow streaming writers to write data to the
@@ -2352,7 +2352,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 {
        unsigned long nr_reclaimed;
        struct scan_control sc = {
-               .gfp_mask = gfp_mask,
+               .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
                .may_writepage = !laptop_mode,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .may_unmap = 1,
@@ -2772,12 +2772,10 @@ loop_again:
                        }
 
                        /*
-                        * If we've done a decent amount of scanning and
-                        * the reclaim ratio is low, start doing writepage
-                        * even in laptop mode
+                        * If we're getting trouble reclaiming, start doing
+                        * writepage even in laptop mode.
                         */
-                       if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
-                           total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
+                       if (sc.priority < DEF_PRIORITY - 2)
                                sc.may_writepage = 1;
 
                        if (zone->all_unreclaimable) {
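
A note on the new writepage trigger in the two hunks above: sc.priority starts at DEF_PRIORITY (12 in this era's include/linux/mmzone.h) and is lowered by one on each reclaim pass that fails to meet its target, so "priority < DEF_PRIORITY - 2" enables writepage from roughly the third unsuccessful pass onward. The small standalone model below only prints where that threshold falls; it is an illustration of the check, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY 12	/* matches include/linux/mmzone.h of this era */

int main(void)
{
	/* Model of the reclaim loop's priority countdown: may_writepage is
	 * switched on once we are more than two passes in without success. */
	for (int priority = DEF_PRIORITY; priority >= 0; priority--) {
		bool may_writepage = priority < DEF_PRIORITY - 2;
		printf("priority %2d -> may_writepage %d\n",
		       priority, may_writepage);
	}
	return 0;
}
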
@@ -3085,7 +3083,7 @@ unsigned long global_reclaimable_pages(void)
        nr = global_page_state(NR_ACTIVE_FILE) +
             global_page_state(NR_INACTIVE_FILE);
 
-       if (nr_swap_pages > 0)
+       if (get_nr_swap_pages() > 0)
                nr += global_page_state(NR_ACTIVE_ANON) +
                      global_page_state(NR_INACTIVE_ANON);
 
@@ -3099,7 +3097,7 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
        nr = zone_page_state(zone, NR_ACTIVE_FILE) +
             zone_page_state(zone, NR_INACTIVE_FILE);
 
-       if (nr_swap_pages > 0)
+       if (get_nr_swap_pages() > 0)
                nr += zone_page_state(zone, NR_ACTIVE_ANON) +
                      zone_page_state(zone, NR_INACTIVE_ANON);
 
@@ -3313,7 +3311,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
                .may_swap = 1,
                .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
-               .gfp_mask = gfp_mask,
+               .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
                .order = order,
                .priority = ZONE_RECLAIM_PRIORITY,
        };
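
For reference, the two helpers this patch switches callers to look roughly like the following in kernels of this era. This is a paraphrased sketch of include/linux/swap.h and include/linux/sched.h, not part of the diff above, so treat the exact bodies as approximate.

/* Sketch only, approximating Linux ~3.9 headers; not verbatim. */

/* include/linux/swap.h: nr_swap_pages became an atomic_long_t, so readers
 * go through an accessor instead of comparing the variable directly. */
extern atomic_long_t nr_swap_pages;

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

/* include/linux/sched.h: a task that sets PF_MEMALLOC_NOIO must not recurse
 * into I/O or filesystem activity while allocating, so its gfp_mask is
 * filtered before use. */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	return flags;
}

This is why try_to_free_pages() and __zone_reclaim() above filter gfp_mask with memalloc_noio_flags() before storing it in struct scan_control, and why callers that used to test nr_swap_pages directly now call get_nr_swap_pages().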