Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dbb5386..bb90971 100644
@@ -1799,6 +1799,22 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
 }
 
+static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
+{
+       return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
+}
+
+static void __paginginit init_zone_allows_reclaim(int nid)
+{
+       int i;
+
+       for_each_online_node(i)
+               if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
+                       node_set(i, NODE_DATA(nid)->reclaim_nodes);
+                       zone_reclaim_mode = 1;
+               }
+}
+
 #else  /* CONFIG_NUMA */
 
 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
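The two new helpers above precompute, for every node, which other nodes are within node_distance() <= RECLAIM_DISTANCE and therefore worth reclaiming from before falling back off-node; zone_allows_reclaim() then reduces to a single nodemask test in the zonelist scan hunk below, replacing the distance check removed from build_zonelists() further down. What follows is a minimal userspace sketch of that idea (not kernel code), assuming an invented symmetric 4-node distance matrix, RECLAIM_DISTANCE of 30 (the usual kernel default), and a plain bitmask standing in for the pgdat nodemask:

#include <stdio.h>

#define MAX_NUMNODES     4
#define RECLAIM_DISTANCE 30

/* Invented SLIT-style distance matrix: nodes 0/1 and 2/3 are close pairs. */
static const int distance[MAX_NUMNODES][MAX_NUMNODES] = {
	{ 10, 20, 40, 40 },
	{ 20, 10, 40, 40 },
	{ 40, 40, 10, 20 },
	{ 40, 40, 20, 10 },
};

static unsigned int reclaim_nodes[MAX_NUMNODES];	/* bit i set: node i is close enough */

/* Mirrors init_zone_allows_reclaim(): remember every node within RECLAIM_DISTANCE. */
static void init_reclaim_nodes(int nid)
{
	for (int i = 0; i < MAX_NUMNODES; i++)
		if (distance[nid][i] <= RECLAIM_DISTANCE)
			reclaim_nodes[nid] |= 1u << i;
}

/* Mirrors zone_allows_reclaim(): may an allocation local to @local reclaim from @target? */
static int allows_reclaim(int local, int target)
{
	return (reclaim_nodes[target] >> local) & 1;
}

int main(void)
{
	for (int nid = 0; nid < MAX_NUMNODES; nid++)
		init_reclaim_nodes(nid);

	printf("node0 may reclaim from node1: %d\n", allows_reclaim(0, 1));	/* 1: distance 20 */
	printf("node0 may reclaim from node2: %d\n", allows_reclaim(0, 2));	/* 0: distance 40 */
	return 0;
}
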
@@ -1819,6 +1835,15 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
 static void zlc_clear_zones_full(struct zonelist *zonelist)
 {
 }
+
+static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
+{
+       return true;
+}
+
+static inline void init_zone_allows_reclaim(int nid)
+{
+}
 #endif /* CONFIG_NUMA */
 
 /*
@@ -1903,7 +1928,8 @@ zonelist_scan:
                                did_zlc_setup = 1;
                        }
 
-                       if (zone_reclaim_mode == 0)
+                       if (zone_reclaim_mode == 0 ||
+                           !zone_allows_reclaim(preferred_zone, zone))
                                goto this_zone_full;
 
                        /*
@@ -3364,21 +3390,13 @@ static void build_zonelists(pg_data_t *pgdat)
        j = 0;
 
        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
-               int distance = node_distance(local_node, node);
-
-               /*
-                * If another node is sufficiently far away then it is better
-                * to reclaim pages in a zone before going off node.
-                */
-               if (distance > RECLAIM_DISTANCE)
-                       zone_reclaim_mode = 1;
-
                /*
                 * We don't want to pressure a particular node.
                 * So adding penalty to the first node in same
                 * distance group to make it round-robin.
                 */
-               if (distance != node_distance(local_node, prev_node))
+               if (node_distance(local_node, node) !=
+                   node_distance(local_node, prev_node))
                        node_load[node] = load;
 
                prev_node = node;
@@ -4552,6 +4570,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
+       init_zone_allows_reclaim(nid);
        calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
        alloc_node_mem_map(pgdat);
@@ -5655,7 +5674,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
                                        unsigned long start, unsigned long end)
 {
        /* This function is based on compact_zone() from compaction.c. */
-
+       unsigned long nr_reclaimed;
        unsigned long pfn = start;
        unsigned int tries = 0;
        int ret = 0;
@@ -5671,7 +5690,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
                if (list_empty(&cc->migratepages)) {
                        cc->nr_migratepages = 0;
                        pfn = isolate_migratepages_range(cc->zone, cc,
-                                                        pfn, end);
+                                                        pfn, end, true);
                        if (!pfn) {
                                ret = -EINTR;
                                break;
@@ -5682,7 +5701,9 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
                        break;
                }
 
-               reclaim_clean_pages_from_list(cc->zone, &cc->migratepages);
+               nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
+                                                       &cc->migratepages);
+               cc->nr_migratepages -= nr_reclaimed;
 
                ret = migrate_pages(&cc->migratepages,
                                    alloc_migrate_target,
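In the hunk above, reclaim_clean_pages_from_list() now reports how many clean pages it freed straight off cc->migratepages, and the caller subtracts that from cc->nr_migratepages so the counter keeps matching what is actually left on the list for migrate_pages(). Below is a minimal userspace sketch of that accounting pattern; fake_page, reclaim_clean and the page contents are all invented:

#include <stdio.h>

#define NR_PAGES 8

struct fake_page {
	int dirty;
	int freed;
};

/* Stand-in for reclaim_clean_pages_from_list(): free clean pages, return how many. */
static unsigned long reclaim_clean(struct fake_page *pages, int nr)
{
	unsigned long nr_reclaimed = 0;

	for (int i = 0; i < nr; i++)
		if (!pages[i].dirty && !pages[i].freed) {
			pages[i].freed = 1;
			nr_reclaimed++;
		}
	return nr_reclaimed;
}

int main(void)
{
	struct fake_page pages[NR_PAGES] = {
		{ .dirty = 1 }, { 0 }, { 0 }, { .dirty = 1 },
		{ 0 }, { .dirty = 1 }, { 0 }, { 0 },
	};
	unsigned long nr_migratepages = NR_PAGES;	/* cc->nr_migratepages analogue */

	/* Without the subtraction the counter would overstate the remaining work. */
	nr_migratepages -= reclaim_clean(pages, NR_PAGES);
	printf("pages still needing migration: %lu\n", nr_migratepages);	/* 3 dirty pages left */
	return 0;
}
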
@@ -5897,6 +5918,7 @@ static int __meminit __zone_pcp_update(void *data)
                local_irq_save(flags);
                if (pcp->count > 0)
                        free_pcppages_bulk(zone, pcp->count, pcp);
+               drain_zonestat(zone, pset);
                setup_pageset(pset, batch);
                local_irq_restore(flags);
        }
@@ -5913,10 +5935,16 @@ void __meminit zone_pcp_update(struct zone *zone)
 void zone_pcp_reset(struct zone *zone)
 {
        unsigned long flags;
+       int cpu;
+       struct per_cpu_pageset *pset;
 
        /* avoid races with drain_pages()  */
        local_irq_save(flags);
        if (zone->pageset != &boot_pageset) {
+               for_each_online_cpu(cpu) {
+                       pset = per_cpu_ptr(zone->pageset, cpu);
+                       drain_zonestat(zone, pset);
+               }
                free_percpu(zone->pageset);
                zone->pageset = &boot_pageset;
        }
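Both pcp teardown paths touched above (__zone_pcp_update() and zone_pcp_reset()) now call drain_zonestat() so that the per-cpu vmstat deltas held in each pageset are folded back into the zone counters before the pageset is reset or freed; otherwise those pending deltas would simply be lost. A minimal userspace sketch of that drain-before-free pattern follows, with invented names (global_count, per_cpu_diff, drain_one) and a fixed CPU count:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

static long global_count;	/* zone counter analogue */
static long *per_cpu_diff;	/* per-cpu pageset delta analogue */

/* drain_zonestat() analogue: fold one CPU's pending delta into the global counter. */
static void drain_one(int cpu)
{
	global_count += per_cpu_diff[cpu];
	per_cpu_diff[cpu] = 0;
}

int main(void)
{
	per_cpu_diff = calloc(NR_CPUS, sizeof(*per_cpu_diff));
	if (!per_cpu_diff)
		return 1;

	/* Pretend some events were so far only accounted in per-cpu deltas. */
	per_cpu_diff[0] = 3;
	per_cpu_diff[2] = -1;

	/* Fold every CPU's delta *before* the per-cpu storage goes away. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		drain_one(cpu);

	free(per_cpu_diff);				/* free_percpu() analogue */
	printf("global_count = %ld\n", global_count);	/* 2: nothing was lost */
	return 0;
}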