projects
/
cascardo
/
linux.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
mm/page_alloc.c: fix comment in zlc_setup()
[cascardo/linux.git]
/
mm
/
page_alloc.c
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dd886fa..580a5f0 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -234,8 +234,8 @@ int page_group_by_mobility_disabled __read_mostly;
void set_pageblock_migratetype(struct page *page, int migratetype)
{
void set_pageblock_migratetype(struct page *page, int migratetype)
{
-	if (unlikely(page_group_by_mobility_disabled))
+	if (unlikely(page_group_by_mobility_disabled &&
+		     migratetype < MIGRATE_PCPTYPES))
migratetype = MIGRATE_UNMOVABLE;
set_pageblock_flags_group(page, (unsigned long)migratetype,
migratetype = MIGRATE_UNMOVABLE;
set_pageblock_flags_group(page, (unsigned long)migratetype,
@@ -626,7 +626,7 @@ static inline int free_pages_check(struct page *page)
bad_page(page);
return 1;
}
bad_page(page);
return 1;
}
-	page_nid_reset_last(page);
+	page_cpupid_reset_last(page);
if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
return 0;
if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
return 0;
@@ -1027,6 +1027,10 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
{
int current_order = page_order(page);
{
int current_order = page_order(page);
+ /*
+ * When borrowing from MIGRATE_CMA, we need to release the excess
+ * buddy pages to CMA itself.
+ */
if (is_migrate_cma(fallback_type))
return fallback_type;
if (is_migrate_cma(fallback_type))
return fallback_type;
@@ -1091,21 +1095,11 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
list_del(&page->lru);
rmv_page_order(page);
list_del(&page->lru);
rmv_page_order(page);
- /*
- * Borrow the excess buddy pages as well, irrespective
- * of whether we stole freepages, or took ownership of
- * the pageblock or not.
- *
- * Exception: When borrowing from MIGRATE_CMA, release
- * the excess buddy pages to CMA itself.
- */
expand(zone, page, order, current_order, area,
expand(zone, page, order, current_order, area,
-			       is_migrate_cma(migratetype)
-			     ? migratetype : start_migratetype);
+			       new_type);
-		trace_mm_page_alloc_extfrag(page, order,
-			current_order, start_migratetype, migratetype,
-			new_type == start_migratetype);
+		trace_mm_page_alloc_extfrag(page, order, current_order,
+			start_migratetype, migratetype, new_type);
return page;
}
return page;
}
@@ -1711,7 +1705,7 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
* comments in mmzone.h. Reduces cache footprint of zonelist scans
* that have to skip over a lot of full or unallowed zones.
*
* comments in mmzone.h. Reduces cache footprint of zonelist scans
* that have to skip over a lot of full or unallowed zones.
*
- * If the zonelist cache is present in the passed in zonelist, then
+ * If the zonelist cache is present in the passed zonelist, then
* returns a pointer to the allowed node mask (either the current
* tasks mems_allowed, or node_states[N_MEMORY].)
*
* returns a pointer to the allowed node mask (either the current
* tasks mems_allowed, or node_states[N_MEMORY].)
*
@@ -2593,7 +2587,7 @@ rebalance:
* running out of options and have to consider going OOM
*/
if (!did_some_progress) {
* running out of options and have to consider going OOM
*/
if (!did_some_progress) {
-		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+		if (oom_gfp_allowed(gfp_mask)) {
if (oom_killer_disabled)
goto nopage;
/* Coredumps can quickly deplete all memory reserves */
if (oom_killer_disabled)
goto nopage;
/* Coredumps can quickly deplete all memory reserves */
@@ -3881,8 +3875,6 @@ static inline unsigned long wait_table_bits(unsigned long size)
return ffz(~size);
}
return ffz(~size);
}
-#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
-
/*
* Check if a pageblock contains reserved pages
*/
/*
* Check if a pageblock contains reserved pages
*/
@@ -4015,7 +4007,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
mminit_verify_page_links(page, zone, nid, pfn);
init_page_count(page);
page_mapcount_reset(page);
mminit_verify_page_links(page, zone, nid, pfn);
init_page_count(page);
page_mapcount_reset(page);
-		page_nid_reset_last(page);
+		page_cpupid_reset_last(page);
SetPageReserved(page);
/*
* Mark the block movable so that blocks are reserved for
SetPageReserved(page);
/*
* Mark the block movable so that blocks are reserved for
@@ -4266,7 +4258,7 @@ static __meminit void zone_pcp_init(struct zone *zone)
*/
zone->pageset = &boot_pageset;
*/
zone->pageset = &boot_pageset;
-	if (zone->present_pages)
+	if (populated_zone(zone))
printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
zone->name, zone->present_pages,
zone_batchsize(zone));
printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
zone->name, zone->present_pages,
zone_batchsize(zone));
@@ -5160,7 +5152,7 @@ static void check_for_memory(pg_data_t *pgdat, int nid)
for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
struct zone *zone = &pgdat->node_zones[zone_type];
for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
struct zone *zone = &pgdat->node_zones[zone_type];
-		if (zone->present_pages) {
+		if (populated_zone(zone)) {
node_set_state(nid, N_HIGH_MEMORY);
if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
zone_type <= ZONE_NORMAL)
node_set_state(nid, N_HIGH_MEMORY);
if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
zone_type <= ZONE_NORMAL)