1 /*
2  * linux/mm/compaction.c
3  *
4  * Memory compaction for the reduction of external fragmentation. Note that
5  * this heavily depends upon page migration to do all the real heavy
6  * lifting.
7  *
8  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
9  */
10 #include <linux/cpu.h>
11 #include <linux/swap.h>
12 #include <linux/migrate.h>
13 #include <linux/compaction.h>
14 #include <linux/mm_inline.h>
15 #include <linux/backing-dev.h>
16 #include <linux/sysctl.h>
17 #include <linux/sysfs.h>
18 #include <linux/page-isolation.h>
19 #include <linux/kasan.h>
20 #include <linux/kthread.h>
21 #include <linux/freezer.h>
22 #include "internal.h"
23
24 #ifdef CONFIG_COMPACTION
25 static inline void count_compact_event(enum vm_event_item item)
26 {
27         count_vm_event(item);
28 }
29
30 static inline void count_compact_events(enum vm_event_item item, long delta)
31 {
32         count_vm_events(item, delta);
33 }
34 #else
35 #define count_compact_event(item) do { } while (0)
36 #define count_compact_events(item, delta) do { } while (0)
37 #endif
38
39 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
40
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/compaction.h>
43
44 #define block_start_pfn(pfn, order)     round_down(pfn, 1UL << (order))
45 #define block_end_pfn(pfn, order)       ALIGN((pfn) + 1, 1UL << (order))
46 #define pageblock_start_pfn(pfn)        block_start_pfn(pfn, pageblock_order)
47 #define pageblock_end_pfn(pfn)          block_end_pfn(pfn, pageblock_order)
48
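/*
 * Free the pages on @freelist back to the buddy allocator and return the
 * highest PFN that was released (0 if the list was empty), so the caller
 * can update the cached free-scanner position.
 */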
49 static unsigned long release_freepages(struct list_head *freelist)
50 {
51         struct page *page, *next;
52         unsigned long high_pfn = 0;
53
54         list_for_each_entry_safe(page, next, freelist, lru) {
55                 unsigned long pfn = page_to_pfn(page);
56                 list_del(&page->lru);
57                 __free_page(page);
58                 if (pfn > high_pfn)
59                         high_pfn = pfn;
60         }
61
62         return high_pfn;
63 }
64
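/*
 * Re-initialise pages captured by the free scanner: restore the refcount,
 * run the arch/KASAN/debug-pagealloc hooks, split any high-order page into
 * order-0 pages and splice everything back onto @list. The page order was
 * stashed in page_private() by isolate_freepages_block().
 */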
65 static void map_pages(struct list_head *list)
66 {
67         unsigned int i, order, nr_pages;
68         struct page *page, *next;
69         LIST_HEAD(tmp_list);
70
71         list_for_each_entry_safe(page, next, list, lru) {
72                 list_del(&page->lru);
73
74                 order = page_private(page);
75                 nr_pages = 1 << order;
76                 set_page_private(page, 0);
77                 set_page_refcounted(page);
78
79                 arch_alloc_page(page, order);
80                 kernel_map_pages(page, nr_pages, 1);
81                 kasan_alloc_pages(page, order);
82                 if (order)
83                         split_page(page, order);
84
85                 for (i = 0; i < nr_pages; i++) {
86                         list_add(&page->lru, &tmp_list);
87                         page++;
88                 }
89         }
90
91         list_splice(&tmp_list, list);
92 }
93
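/*
 * MIGRATE_CMA and MIGRATE_MOVABLE pageblocks are the ones compaction
 * considers suitable, e.g. when async compaction chooses blocks to scan.
 */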
94 static inline bool migrate_async_suitable(int migratetype)
95 {
96         return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
97 }
98
99 #ifdef CONFIG_COMPACTION
100
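/*
 * A non-LRU page is movable if its driver marked it with __SetPageMovable()
 * and the registered address_space provides an isolate_page() method.
 * The page must be locked while this is checked.
 */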
101 int PageMovable(struct page *page)
102 {
103         struct address_space *mapping;
104
105         VM_BUG_ON_PAGE(!PageLocked(page), page);
106         if (!__PageMovable(page))
107                 return 0;
108
109         mapping = page_mapping(page);
110         if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
111                 return 1;
112
113         return 0;
114 }
115 EXPORT_SYMBOL(PageMovable);
116
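/*
 * Tag page->mapping with PAGE_MAPPING_MOVABLE so the VM can recognise the
 * page as driver-movable. The caller must hold the page lock.
 */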
117 void __SetPageMovable(struct page *page, struct address_space *mapping)
118 {
119         VM_BUG_ON_PAGE(!PageLocked(page), page);
120         VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
121         page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
122 }
123 EXPORT_SYMBOL(__SetPageMovable);
124
125 void __ClearPageMovable(struct page *page)
126 {
127         VM_BUG_ON_PAGE(!PageLocked(page), page);
128         VM_BUG_ON_PAGE(!PageMovable(page), page);
129         /*
130          * Clear the registered address_space value but keep the PAGE_MAPPING_MOVABLE
131          * flag, so the VM can tell that the driver has released the page after
132          * isolation and migration does not try to put it back.
133          */
134         page->mapping = (void *)((unsigned long)page->mapping &
135                                 PAGE_MAPPING_MOVABLE);
136 }
137 EXPORT_SYMBOL(__ClearPageMovable);
138
139 /* Do not skip compaction more than 64 times */
140 #define COMPACT_MAX_DEFER_SHIFT 6
141
142 /*
143  * Compaction is deferred when compaction fails to result in a page
144  * allocation success. The next 1 << compact_defer_shift compactions are
145  * skipped, up to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
146  */
147 void defer_compaction(struct zone *zone, int order)
148 {
149         zone->compact_considered = 0;
150         zone->compact_defer_shift++;
151
152         if (order < zone->compact_order_failed)
153                 zone->compact_order_failed = order;
154
155         if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
156                 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
157
158         trace_mm_compaction_defer_compaction(zone, order);
159 }
160
161 /* Returns true if compaction should be skipped this time */
162 bool compaction_deferred(struct zone *zone, int order)
163 {
164         unsigned long defer_limit = 1UL << zone->compact_defer_shift;
165
166         if (order < zone->compact_order_failed)
167                 return false;
168
169         /* Avoid possible overflow */
170         if (++zone->compact_considered > defer_limit)
171                 zone->compact_considered = defer_limit;
172
173         if (zone->compact_considered >= defer_limit)
174                 return false;
175
176         trace_mm_compaction_deferred(zone, order);
177
178         return true;
179 }
180
181 /*
182  * Update defer tracking counters after successful compaction of the given order,
183  * which means an allocation either succeeded (alloc_success == true) or is
184  * expected to succeed.
185  */
186 void compaction_defer_reset(struct zone *zone, int order,
187                 bool alloc_success)
188 {
189         if (alloc_success) {
190                 zone->compact_considered = 0;
191                 zone->compact_defer_shift = 0;
192         }
193         if (order >= zone->compact_order_failed)
194                 zone->compact_order_failed = order + 1;
195
196         trace_mm_compaction_defer_reset(zone, order);
197 }
198
199 /* Returns true if restarting compaction after many failures */
200 bool compaction_restarting(struct zone *zone, int order)
201 {
202         if (order < zone->compact_order_failed)
203                 return false;
204
205         return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
206                 zone->compact_considered >= 1UL << zone->compact_defer_shift;
207 }
208
209 /* Returns true if the pageblock should be scanned for pages to isolate. */
210 static inline bool isolation_suitable(struct compact_control *cc,
211                                         struct page *page)
212 {
213         if (cc->ignore_skip_hint)
214                 return true;
215
216         return !get_pageblock_skip(page);
217 }
218
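/*
 * Reset the cached scanner positions: the migration scanner restarts at the
 * zone start, the free scanner at the zone's last pageblock.
 */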
219 static void reset_cached_positions(struct zone *zone)
220 {
221         zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
222         zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
223         zone->compact_cached_free_pfn =
224                                 pageblock_start_pfn(zone_end_pfn(zone) - 1);
225 }
226
227 /*
228  * This function is called to clear all cached information on pageblocks that
229  * should be skipped for page isolation when the migrate and free page scanner
230  * meet.
231  */
232 static void __reset_isolation_suitable(struct zone *zone)
233 {
234         unsigned long start_pfn = zone->zone_start_pfn;
235         unsigned long end_pfn = zone_end_pfn(zone);
236         unsigned long pfn;
237
238         zone->compact_blockskip_flush = false;
239
240         /* Walk the zone and mark every pageblock as suitable for isolation */
241         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
242                 struct page *page;
243
244                 cond_resched();
245
246                 if (!pfn_valid(pfn))
247                         continue;
248
249                 page = pfn_to_page(pfn);
250                 if (zone != page_zone(page))
251                         continue;
252
253                 clear_pageblock_skip(page);
254         }
255
256         reset_cached_positions(zone);
257 }
258
259 void reset_isolation_suitable(pg_data_t *pgdat)
260 {
261         int zoneid;
262
263         for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
264                 struct zone *zone = &pgdat->node_zones[zoneid];
265                 if (!populated_zone(zone))
266                         continue;
267
268                 /* Only flush if a full compaction finished recently */
269                 if (zone->compact_blockskip_flush)
270                         __reset_isolation_suitable(zone);
271         }
272 }
273
274 /*
275  * If no pages were isolated then mark this pageblock to be skipped in the
276  * future. The information is later cleared by __reset_isolation_suitable().
277  */
278 static void update_pageblock_skip(struct compact_control *cc,
279                         struct page *page, unsigned long nr_isolated,
280                         bool migrate_scanner)
281 {
282         struct zone *zone = cc->zone;
283         unsigned long pfn;
284
285         if (cc->ignore_skip_hint)
286                 return;
287
288         if (!page)
289                 return;
290
291         if (nr_isolated)
292                 return;
293
294         set_pageblock_skip(page);
295
296         pfn = page_to_pfn(page);
297
298         /* Update where async and sync compaction should restart */
299         if (migrate_scanner) {
300                 if (pfn > zone->compact_cached_migrate_pfn[0])
301                         zone->compact_cached_migrate_pfn[0] = pfn;
302                 if (cc->mode != MIGRATE_ASYNC &&
303                     pfn > zone->compact_cached_migrate_pfn[1])
304                         zone->compact_cached_migrate_pfn[1] = pfn;
305         } else {
306                 if (pfn < zone->compact_cached_free_pfn)
307                         zone->compact_cached_free_pfn = pfn;
308         }
309 }
310 #else
311 static inline bool isolation_suitable(struct compact_control *cc,
312                                         struct page *page)
313 {
314         return true;
315 }
316
317 static void update_pageblock_skip(struct compact_control *cc,
318                         struct page *page, unsigned long nr_isolated,
319                         bool migrate_scanner)
320 {
321 }
322 #endif /* CONFIG_COMPACTION */
323
324 /*
325  * Compaction requires the taking of some coarse locks that are potentially
326  * very heavily contended. For async compaction, back out if the lock cannot
327  * be taken immediately. For sync compaction, spin on the lock if needed.
328  *
329  * Returns true if the lock is held
330  * Returns false if the lock is not held and compaction should abort
331  */
332 static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
333                                                 struct compact_control *cc)
334 {
335         if (cc->mode == MIGRATE_ASYNC) {
336                 if (!spin_trylock_irqsave(lock, *flags)) {
337                         cc->contended = COMPACT_CONTENDED_LOCK;
338                         return false;
339                 }
340         } else {
341                 spin_lock_irqsave(lock, *flags);
342         }
343
344         return true;
345 }
346
347 /*
348  * Compaction requires the taking of some coarse locks that are potentially
349  * very heavily contended. The lock should be periodically unlocked to avoid
350  * having disabled IRQs for a long time, even when there is nobody waiting on
351  * the lock. It might also be that allowing the IRQs will result in
352  * need_resched() becoming true. If scheduling is needed, async compaction
353  * aborts. Sync compaction schedules.
354  * Either compaction type will also abort if a fatal signal is pending.
355  * In either case if the lock was locked, it is dropped and not regained.
356  *
357  * Returns true if compaction should abort due to fatal signal pending, or
358  *              async compaction due to need_resched()
359  * Returns false when compaction can continue (sync compaction might have
360  *              scheduled)
361  */
362 static bool compact_unlock_should_abort(spinlock_t *lock,
363                 unsigned long flags, bool *locked, struct compact_control *cc)
364 {
365         if (*locked) {
366                 spin_unlock_irqrestore(lock, flags);
367                 *locked = false;
368         }
369
370         if (fatal_signal_pending(current)) {
371                 cc->contended = COMPACT_CONTENDED_SCHED;
372                 return true;
373         }
374
375         if (need_resched()) {
376                 if (cc->mode == MIGRATE_ASYNC) {
377                         cc->contended = COMPACT_CONTENDED_SCHED;
378                         return true;
379                 }
380                 cond_resched();
381         }
382
383         return false;
384 }
385
386 /*
387  * Aside from avoiding lock contention, compaction also periodically checks
388  * need_resched() and either schedules in sync compaction or aborts async
389  * compaction. This is similar to what compact_unlock_should_abort() does, but
390  * is used where no lock is concerned.
391  *
392  * Returns false when no scheduling was needed, or sync compaction scheduled.
393  * Returns true when async compaction should abort.
394  */
395 static inline bool compact_should_abort(struct compact_control *cc)
396 {
397         /* async compaction aborts if contended */
398         if (need_resched()) {
399                 if (cc->mode == MIGRATE_ASYNC) {
400                         cc->contended = COMPACT_CONTENDED_SCHED;
401                         return true;
402                 }
403
404                 cond_resched();
405         }
406
407         return false;
408 }
409
410 /*
411  * Isolate free pages onto a private freelist. If @strict is true, will abort
412  * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
413  * (even though it may still end up isolating some pages).
414  */
415 static unsigned long isolate_freepages_block(struct compact_control *cc,
416                                 unsigned long *start_pfn,
417                                 unsigned long end_pfn,
418                                 struct list_head *freelist,
419                                 bool strict)
420 {
421         int nr_scanned = 0, total_isolated = 0;
422         struct page *cursor, *valid_page = NULL;
423         unsigned long flags = 0;
424         bool locked = false;
425         unsigned long blockpfn = *start_pfn;
426         unsigned int order;
427
428         cursor = pfn_to_page(blockpfn);
429
430         /* Isolate free pages. */
431         for (; blockpfn < end_pfn; blockpfn++, cursor++) {
432                 int isolated;
433                 struct page *page = cursor;
434
435                 /*
436                  * Periodically drop the lock (if held) regardless of its
437                  * contention, to give a chance to IRQs. Abort if a fatal signal
438                  * is pending or async compaction detects need_resched().
439                  */
440                 if (!(blockpfn % SWAP_CLUSTER_MAX)
441                     && compact_unlock_should_abort(&cc->zone->lock, flags,
442                                                                 &locked, cc))
443                         break;
444
445                 nr_scanned++;
446                 if (!pfn_valid_within(blockpfn))
447                         goto isolate_fail;
448
449                 if (!valid_page)
450                         valid_page = page;
451
452                 /*
453                  * For compound pages such as THP and hugetlbfs, we can save
454                  * potentially a lot of iterations if we skip them at once.
455                  * The check is racy, but we can consider only valid values
456                  * and the only danger is skipping too much.
457                  */
458                 if (PageCompound(page)) {
459                         unsigned int comp_order = compound_order(page);
460
461                         if (likely(comp_order < MAX_ORDER)) {
462                                 blockpfn += (1UL << comp_order) - 1;
463                                 cursor += (1UL << comp_order) - 1;
464                         }
465
466                         goto isolate_fail;
467                 }
468
469                 if (!PageBuddy(page))
470                         goto isolate_fail;
471
472                 /*
473                  * If we already hold the lock, we can skip some rechecking.
474                  * Note that if we hold the lock now, checked_pageblock was
475                  * already set in some previous iteration (or strict is true),
476                  * so it is correct to skip the suitable migration target
477                  * recheck as well.
478                  */
479                 if (!locked) {
480                         /*
481                          * The zone lock must be held to isolate freepages.
482                          * Unfortunately this is a very coarse lock and can be
483                          * heavily contended if there are parallel allocations
484                          * or parallel compactions. For async compaction do not
485                          * spin on the lock and we acquire the lock as late as
486                          * possible.
487                          */
488                         locked = compact_trylock_irqsave(&cc->zone->lock,
489                                                                 &flags, cc);
490                         if (!locked)
491                                 break;
492
493                         /* Recheck this is a buddy page under lock */
494                         if (!PageBuddy(page))
495                                 goto isolate_fail;
496                 }
497
498                 /* Found a free page, will break it into order-0 pages */
499                 order = page_order(page);
500                 isolated = __isolate_free_page(page, order);
501                 if (!isolated)
502                         break;
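                /* Stash the order so map_pages() can split the page later */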
503                 set_page_private(page, order);
504
505                 total_isolated += isolated;
506                 cc->nr_freepages += isolated;
507                 list_add_tail(&page->lru, freelist);
508
509                 if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
510                         blockpfn += isolated;
511                         break;
512                 }
513                 /* Advance to the end of split page */
514                 blockpfn += isolated - 1;
515                 cursor += isolated - 1;
516                 continue;
517
518 isolate_fail:
519                 if (strict)
520                         break;
521                 else
522                         continue;
523
524         }
525
526         if (locked)
527                 spin_unlock_irqrestore(&cc->zone->lock, flags);
528
529         /*
530          * There is a tiny chance that we have read bogus compound_order(),
531          * so be careful to not go outside of the pageblock.
532          */
533         if (unlikely(blockpfn > end_pfn))
534                 blockpfn = end_pfn;
535
536         trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
537                                         nr_scanned, total_isolated);
538
539         /* Record how far we have got within the block */
540         *start_pfn = blockpfn;
541
542         /*
543          * If strict isolation is requested by CMA then check that all the
544          * pages requested were isolated. If there were any failures, 0 is
545          * returned and CMA will fail.
546          */
547         if (strict && blockpfn < end_pfn)
548                 total_isolated = 0;
549
550         /* Update the pageblock-skip if the whole pageblock was scanned */
551         if (blockpfn == end_pfn)
552                 update_pageblock_skip(cc, valid_page, total_isolated, false);
553
554         count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
555         if (total_isolated)
556                 count_compact_events(COMPACTISOLATED, total_isolated);
557         return total_isolated;
558 }
559
560 /**
561  * isolate_freepages_range() - isolate free pages.
562  * @start_pfn: The first PFN to start isolating.
563  * @end_pfn:   The one-past-last PFN.
564  *
565  * Non-free pages, invalid PFNs, or zone boundaries within the
566  * [start_pfn, end_pfn) range are considered errors and cause the function
567  * to undo its actions and return zero.
568  *
569  * Otherwise, the function returns the one-past-the-last PFN of the isolated
570  * pages (which may be greater than end_pfn if the end fell in the middle of
571  * a free page).
572  */
573 unsigned long
574 isolate_freepages_range(struct compact_control *cc,
575                         unsigned long start_pfn, unsigned long end_pfn)
576 {
577         unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
578         LIST_HEAD(freelist);
579
580         pfn = start_pfn;
581         block_start_pfn = pageblock_start_pfn(pfn);
582         if (block_start_pfn < cc->zone->zone_start_pfn)
583                 block_start_pfn = cc->zone->zone_start_pfn;
584         block_end_pfn = pageblock_end_pfn(pfn);
585
586         for (; pfn < end_pfn; pfn += isolated,
587                                 block_start_pfn = block_end_pfn,
588                                 block_end_pfn += pageblock_nr_pages) {
589                 /* Protect pfn from changing by isolate_freepages_block */
590                 unsigned long isolate_start_pfn = pfn;
591
592                 block_end_pfn = min(block_end_pfn, end_pfn);
593
594                 /*
595                  * pfn could pass block_end_pfn if an isolated free page is
596                  * larger than a pageblock. In that case, adjust the scanning
597                  * range to the pageblock that contains pfn.
598                  */
599                 if (pfn >= block_end_pfn) {
600                         block_start_pfn = pageblock_start_pfn(pfn);
601                         block_end_pfn = pageblock_end_pfn(pfn);
602                         block_end_pfn = min(block_end_pfn, end_pfn);
603                 }
604
605                 if (!pageblock_pfn_to_page(block_start_pfn,
606                                         block_end_pfn, cc->zone))
607                         break;
608
609                 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
610                                                 block_end_pfn, &freelist, true);
611
612                 /*
613                  * In strict mode, isolate_freepages_block() returns 0 if
614                  * there are any holes in the block (ie. invalid PFNs or
615                  * non-free pages).
616                  */
617                 if (!isolated)
618                         break;
619
620                 /*
621                  * If we managed to isolate pages, it is always (1 << n) *
622                  * pageblock_nr_pages for some non-negative n. (A maximum-order
623                  * page may span two pageblocks.)
624                  */
625         }
626
627         /* __isolate_free_page() does not map the pages */
628         map_pages(&freelist);
629
630         if (pfn < end_pfn) {
631                 /* Loop terminated early, cleanup. */
632                 release_freepages(&freelist);
633                 return 0;
634         }
635
636         /* We don't use freelists for anything. */
637         return pfn;
638 }
639
640 /* Update the number of anon and file isolated pages in the zone */
641 static void acct_isolated(struct zone *zone, struct compact_control *cc)
642 {
643         struct page *page;
644         unsigned int count[2] = { 0, };
645
646         if (list_empty(&cc->migratepages))
647                 return;
648
649         list_for_each_entry(page, &cc->migratepages, lru)
650                 count[!!page_is_file_cache(page)]++;
651
652         mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
653         mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
654 }
655
656 /* Similar to reclaim, but different enough that they don't share logic */
657 static bool too_many_isolated(struct zone *zone)
658 {
659         unsigned long active, inactive, isolated;
660
661         inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
662                                         zone_page_state(zone, NR_INACTIVE_ANON);
663         active = zone_page_state(zone, NR_ACTIVE_FILE) +
664                                         zone_page_state(zone, NR_ACTIVE_ANON);
665         isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
666                                         zone_page_state(zone, NR_ISOLATED_ANON);
667
668         return isolated > (inactive + active) / 2;
669 }
670
671 /**
672  * isolate_migratepages_block() - isolate all migrate-able pages within
673  *                                a single pageblock
674  * @cc:         Compaction control structure.
675  * @low_pfn:    The first PFN to isolate
676  * @end_pfn:    The one-past-the-last PFN to isolate, within same pageblock
677  * @isolate_mode: Isolation mode to be used.
678  *
679  * Isolate all pages that can be migrated from the range specified by
680  * [low_pfn, end_pfn). The range is expected to be within same pageblock.
681  * Returns zero if there is a fatal signal pending, otherwise PFN of the
682  * first page that was not scanned (which may be less than, equal to, or more
683  * than end_pfn).
684  *
685  * The pages are isolated on cc->migratepages list (not required to be empty),
686  * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
687  * is neither read nor updated.
688  */
689 static unsigned long
690 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
691                         unsigned long end_pfn, isolate_mode_t isolate_mode)
692 {
693         struct zone *zone = cc->zone;
694         unsigned long nr_scanned = 0, nr_isolated = 0;
695         struct lruvec *lruvec;
696         unsigned long flags = 0;
697         bool locked = false;
698         struct page *page = NULL, *valid_page = NULL;
699         unsigned long start_pfn = low_pfn;
700         bool skip_on_failure = false;
701         unsigned long next_skip_pfn = 0;
702
703         /*
704          * Ensure that there are not too many pages isolated from the LRU
705          * list by either parallel reclaimers or compaction. If there are,
706          * delay for some time until fewer pages are isolated
707          */
708         while (unlikely(too_many_isolated(zone))) {
709                 /* async migration should just abort */
710                 if (cc->mode == MIGRATE_ASYNC)
711                         return 0;
712
713                 congestion_wait(BLK_RW_ASYNC, HZ/10);
714
715                 if (fatal_signal_pending(current))
716                         return 0;
717         }
718
719         if (compact_should_abort(cc))
720                 return 0;
721
722         if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
723                 skip_on_failure = true;
724                 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
725         }
726
727         /* Time to isolate some pages for migration */
728         for (; low_pfn < end_pfn; low_pfn++) {
729
730                 if (skip_on_failure && low_pfn >= next_skip_pfn) {
731                         /*
732                          * We have isolated all migration candidates in the
733                          * previous order-aligned block, and did not skip it due
734                          * to failure. We should migrate the pages now and
735                          * hopefully succeed compaction.
736                          */
737                         if (nr_isolated)
738                                 break;
739
740                         /*
741                          * We failed to isolate in the previous order-aligned
742                          * block. Set the new boundary to the end of the
743                          * current block. Note we can't simply increase
744                          * next_skip_pfn by 1 << order, as low_pfn might have
745                          * been incremented by a higher number due to skipping
746                          * a compound or a high-order buddy page in the
747                          * previous loop iteration.
748                          */
749                         next_skip_pfn = block_end_pfn(low_pfn, cc->order);
750                 }
751
752                 /*
753                  * Periodically drop the lock (if held) regardless of its
754                  * contention, to give a chance to IRQs. Abort async compaction
755                  * if contended.
756                  */
757                 if (!(low_pfn % SWAP_CLUSTER_MAX)
758                     && compact_unlock_should_abort(&zone->lru_lock, flags,
759                                                                 &locked, cc))
760                         break;
761
762                 if (!pfn_valid_within(low_pfn))
763                         goto isolate_fail;
764                 nr_scanned++;
765
766                 page = pfn_to_page(low_pfn);
767
768                 if (!valid_page)
769                         valid_page = page;
770
771                 /*
772                  * Skip if free. We read page order here without zone lock
773                  * which is generally unsafe, but the race window is small and
774                  * the worst thing that can happen is that we skip some
775                  * potential isolation targets.
776                  */
777                 if (PageBuddy(page)) {
778                         unsigned long freepage_order = page_order_unsafe(page);
779
780                         /*
781                          * Without lock, we cannot be sure that what we got is
782                          * a valid page order. Consider only values in the
783                          * valid order range to prevent low_pfn overflow.
784                          */
785                         if (freepage_order > 0 && freepage_order < MAX_ORDER)
786                                 low_pfn += (1UL << freepage_order) - 1;
787                         continue;
788                 }
789
790                 /*
791                  * Regardless of being on LRU, compound pages such as THP and
792                  * hugetlbfs are not to be compacted. We can potentially save
793                  * a lot of iterations if we skip them at once. The check is
794                  * racy, but we can consider only valid values and the only
795                  * danger is skipping too much.
796                  */
797                 if (PageCompound(page)) {
798                         unsigned int comp_order = compound_order(page);
799
800                         if (likely(comp_order < MAX_ORDER))
801                                 low_pfn += (1UL << comp_order) - 1;
802
803                         goto isolate_fail;
804                 }
805
806                 /*
807                  * The check may be lockless but that's ok as we recheck later.
808                  * It's possible to migrate LRU and non-LRU movable pages.
809                  * Skip any other type of page.
810                  */
811                 if (!PageLRU(page)) {
812                         /*
813                          * __PageMovable can return false positive so we need
814                          * to verify it under page_lock.
815                          */
816                         if (unlikely(__PageMovable(page)) &&
817                                         !PageIsolated(page)) {
818                                 if (locked) {
819                                         spin_unlock_irqrestore(&zone->lru_lock,
820                                                                         flags);
821                                         locked = false;
822                                 }
823
824                                 if (isolate_movable_page(page, isolate_mode))
825                                         goto isolate_success;
826                         }
827
828                         goto isolate_fail;
829                 }
830
831                 /*
832                  * Migration will fail if an anonymous page is pinned in memory,
833                  * so avoid taking lru_lock and isolating it unnecessarily in an
834                  * admittedly racy check.
835                  */
836                 if (!page_mapping(page) &&
837                     page_count(page) > page_mapcount(page))
838                         goto isolate_fail;
839
840                 /* If we already hold the lock, we can skip some rechecking */
841                 if (!locked) {
842                         locked = compact_trylock_irqsave(&zone->lru_lock,
843                                                                 &flags, cc);
844                         if (!locked)
845                                 break;
846
847                         /* Recheck PageLRU and PageCompound under lock */
848                         if (!PageLRU(page))
849                                 goto isolate_fail;
850
851                         /*
852                          * The page became compound since the non-locked check,
853                          * and it's on LRU. It can only be a THP so the order
854                          * is safe to read and it's 0 for tail pages.
855                          */
856                         if (unlikely(PageCompound(page))) {
857                                 low_pfn += (1UL << compound_order(page)) - 1;
858                                 goto isolate_fail;
859                         }
860                 }
861
862                 lruvec = mem_cgroup_page_lruvec(page, zone);
863
864                 /* Try isolate the page */
865                 if (__isolate_lru_page(page, isolate_mode) != 0)
866                         goto isolate_fail;
867
868                 VM_BUG_ON_PAGE(PageCompound(page), page);
869
870                 /* Successfully isolated */
871                 del_page_from_lru_list(page, lruvec, page_lru(page));
872
873 isolate_success:
874                 list_add(&page->lru, &cc->migratepages);
875                 cc->nr_migratepages++;
876                 nr_isolated++;
877
878                 /*
879                  * Record where we could have freed pages by migration but have
880                  * not yet flushed them to the buddy allocator.
881                  * This is the lowest page that was isolated and is likely to be
882                  * freed by migration afterwards.
883                  */
884                 if (!cc->last_migrated_pfn)
885                         cc->last_migrated_pfn = low_pfn;
886
887                 /* Avoid isolating too much */
888                 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
889                         ++low_pfn;
890                         break;
891                 }
892
893                 continue;
894 isolate_fail:
895                 if (!skip_on_failure)
896                         continue;
897
898                 /*
899                  * We have isolated some pages, but then failed. Release them
900                  * instead of migrating, as we cannot form the cc->order buddy
901                  * page anyway.
902                  */
903                 if (nr_isolated) {
904                         if (locked) {
905                                 spin_unlock_irqrestore(&zone->lru_lock, flags);
906                                 locked = false;
907                         }
908                         acct_isolated(zone, cc);
909                         putback_movable_pages(&cc->migratepages);
910                         cc->nr_migratepages = 0;
911                         cc->last_migrated_pfn = 0;
912                         nr_isolated = 0;
913                 }
914
915                 if (low_pfn < next_skip_pfn) {
916                         low_pfn = next_skip_pfn - 1;
917                         /*
918                          * The check near the loop beginning would have updated
919                          * next_skip_pfn too, but this is a bit simpler.
920                          */
921                         next_skip_pfn += 1UL << cc->order;
922                 }
923         }
924
925         /*
926          * The PageBuddy() check could have potentially brought us outside
927          * the range to be scanned.
928          */
929         if (unlikely(low_pfn > end_pfn))
930                 low_pfn = end_pfn;
931
932         if (locked)
933                 spin_unlock_irqrestore(&zone->lru_lock, flags);
934
935         /*
936          * Update the pageblock-skip information and cached scanner pfn,
937          * if the whole pageblock was scanned without isolating any page.
938          */
939         if (low_pfn == end_pfn)
940                 update_pageblock_skip(cc, valid_page, nr_isolated, true);
941
942         trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
943                                                 nr_scanned, nr_isolated);
944
945         count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
946         if (nr_isolated)
947                 count_compact_events(COMPACTISOLATED, nr_isolated);
948
949         return low_pfn;
950 }
951
952 /**
953  * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
954  * @cc:        Compaction control structure.
955  * @start_pfn: The first PFN to start isolating.
956  * @end_pfn:   The one-past-last PFN.
957  *
958  * Returns zero if isolation fails fatally due to e.g. pending signal.
959  * Otherwise, the function returns the one-past-the-last PFN of the isolated
960  * pages (which may be greater than end_pfn if the end fell in the middle of a THP).
961  */
962 unsigned long
963 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
964                                                         unsigned long end_pfn)
965 {
966         unsigned long pfn, block_start_pfn, block_end_pfn;
967
968         /* Scan block by block. First and last block may be incomplete */
969         pfn = start_pfn;
970         block_start_pfn = pageblock_start_pfn(pfn);
971         if (block_start_pfn < cc->zone->zone_start_pfn)
972                 block_start_pfn = cc->zone->zone_start_pfn;
973         block_end_pfn = pageblock_end_pfn(pfn);
974
975         for (; pfn < end_pfn; pfn = block_end_pfn,
976                                 block_start_pfn = block_end_pfn,
977                                 block_end_pfn += pageblock_nr_pages) {
978
979                 block_end_pfn = min(block_end_pfn, end_pfn);
980
981                 if (!pageblock_pfn_to_page(block_start_pfn,
982                                         block_end_pfn, cc->zone))
983                         continue;
984
985                 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
986                                                         ISOLATE_UNEVICTABLE);
987
988                 if (!pfn)
989                         break;
990
991                 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
992                         break;
993         }
994         acct_isolated(cc->zone, cc);
995
996         return pfn;
997 }
998
999 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
1000 #ifdef CONFIG_COMPACTION
1001
1002 /* Returns true if the page is within a block suitable for migration to */
1003 static bool suitable_migration_target(struct page *page)
1004 {
1005         /* If the page is a large free page, then disallow migration */
1006         if (PageBuddy(page)) {
1007                 /*
1008                  * We are checking page_order without zone->lock taken. But
1009                  * the only small danger is that we skip a potentially suitable
1010                  * pageblock, so it's not worth checking the order for a valid range.
1011                  */
1012                 if (page_order_unsafe(page) >= pageblock_order)
1013                         return false;
1014         }
1015
1016         /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
1017         if (migrate_async_suitable(get_pageblock_migratetype(page)))
1018                 return true;
1019
1020         /* Otherwise skip the block */
1021         return false;
1022 }
1023
1024 /*
1025  * Test whether the free scanner has reached the same or lower pageblock than
1026  * the migration scanner, and compaction should thus terminate.
1027  */
1028 static inline bool compact_scanners_met(struct compact_control *cc)
1029 {
1030         return (cc->free_pfn >> pageblock_order)
1031                 <= (cc->migrate_pfn >> pageblock_order);
1032 }
1033
1034 /*
1035  * Based on information in the current compact_control, find blocks
1036  * suitable for isolating free pages from and then isolate them.
1037  */
1038 static void isolate_freepages(struct compact_control *cc)
1039 {
1040         struct zone *zone = cc->zone;
1041         struct page *page;
1042         unsigned long block_start_pfn;  /* start of current pageblock */
1043         unsigned long isolate_start_pfn; /* exact pfn we start at */
1044         unsigned long block_end_pfn;    /* end of current pageblock */
1045         unsigned long low_pfn;       /* lowest pfn scanner is able to scan */
1046         struct list_head *freelist = &cc->freepages;
1047
1048         /*
1049          * Initialise the free scanner. The starting point is where we last
1050          * successfully isolated from, zone-cached value, or the end of the
1051          * zone when isolating for the first time. For looping we also need
1052          * this pfn aligned down to the pageblock boundary, because we do
1053          * block_start_pfn -= pageblock_nr_pages in the for loop.
1054          * For the ending point, take care when isolating in the last pageblock
1055          * of a zone which ends in the middle of a pageblock.
1056          * The low boundary is the end of the pageblock the migration scanner
1057          * is using.
1058          */
1059         isolate_start_pfn = cc->free_pfn;
1060         block_start_pfn = pageblock_start_pfn(cc->free_pfn);
1061         block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1062                                                 zone_end_pfn(zone));
1063         low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1064
1065         /*
1066          * Isolate free pages until enough are available to migrate the
1067          * pages on cc->migratepages. We stop searching if the migrate
1068          * and free page scanners meet or enough free pages are isolated.
1069          */
1070         for (; block_start_pfn >= low_pfn;
1071                                 block_end_pfn = block_start_pfn,
1072                                 block_start_pfn -= pageblock_nr_pages,
1073                                 isolate_start_pfn = block_start_pfn) {
1074                 /*
1075                  * This can iterate a massively long zone without finding any
1076                  * suitable migration targets, so periodically check if we need
1077                  * to schedule, or even abort async compaction.
1078                  */
1079                 if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1080                                                 && compact_should_abort(cc))
1081                         break;
1082
1083                 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1084                                                                         zone);
1085                 if (!page)
1086                         continue;
1087
1088                 /* Check the block is suitable for migration */
1089                 if (!suitable_migration_target(page))
1090                         continue;
1091
1092                 /* If isolation recently failed, do not retry */
1093                 if (!isolation_suitable(cc, page))
1094                         continue;
1095
1096                 /* Found a block suitable for isolating free pages from. */
1097                 isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
1098                                         freelist, false);
1099
1100                 /*
1101                  * If we isolated enough freepages, or aborted due to lock
1102                  * contention, terminate.
1103                  */
1104                 if ((cc->nr_freepages >= cc->nr_migratepages)
1105                                                         || cc->contended) {
1106                         if (isolate_start_pfn >= block_end_pfn) {
1107                                 /*
1108                                  * Restart at previous pageblock if more
1109                                  * freepages can be isolated next time.
1110                                  */
1111                                 isolate_start_pfn =
1112                                         block_start_pfn - pageblock_nr_pages;
1113                         }
1114                         break;
1115                 } else if (isolate_start_pfn < block_end_pfn) {
1116                         /*
1117                          * If isolation failed early, do not continue
1118                          * needlessly.
1119                          */
1120                         break;
1121                 }
1122         }
1123
1124         /* __isolate_free_page() does not map the pages */
1125         map_pages(freelist);
1126
1127         /*
1128          * Record where the free scanner will restart next time. Either we
1129          * broke from the loop and set isolate_start_pfn based on the last
1130          * call to isolate_freepages_block(), or we met the migration scanner
1131          * and the loop terminated due to isolate_start_pfn < low_pfn
1132          */
1133         cc->free_pfn = isolate_start_pfn;
1134 }
1135
1136 /*
1137  * This is a migrate-callback that "allocates" freepages by taking pages
1138  * from the isolated freelists in the block we are migrating to.
1139  */
1140 static struct page *compaction_alloc(struct page *migratepage,
1141                                         unsigned long data,
1142                                         int **result)
1143 {
1144         struct compact_control *cc = (struct compact_control *)data;
1145         struct page *freepage;
1146
1147         /*
1148          * Isolate free pages if necessary, and if we are not aborting due to
1149          * contention.
1150          */
1151         if (list_empty(&cc->freepages)) {
1152                 if (!cc->contended)
1153                         isolate_freepages(cc);
1154
1155                 if (list_empty(&cc->freepages))
1156                         return NULL;
1157         }
1158
1159         freepage = list_entry(cc->freepages.next, struct page, lru);
1160         list_del(&freepage->lru);
1161         cc->nr_freepages--;
1162
1163         return freepage;
1164 }
1165
1166 /*
1167  * This is a migrate-callback that "frees" freepages back to the isolated
1168  * freelist.  All pages on the freelist are from the same zone, so there is no
1169  * special handling needed for NUMA.
1170  */
1171 static void compaction_free(struct page *page, unsigned long data)
1172 {
1173         struct compact_control *cc = (struct compact_control *)data;
1174
1175         list_add(&page->lru, &cc->freepages);
1176         cc->nr_freepages++;
1177 }
1178
1179 /* possible outcome of isolate_migratepages */
1180 typedef enum {
1181         ISOLATE_ABORT,          /* Abort compaction now */
1182         ISOLATE_NONE,           /* No pages isolated, continue scanning */
1183         ISOLATE_SUCCESS,        /* Pages isolated, migrate */
1184 } isolate_migrate_t;
1185
1186 /*
1187  * Allow userspace to control policy on scanning the unevictable LRU for
1188  * compactable pages.
1189  */
1190 int sysctl_compact_unevictable_allowed __read_mostly = 1;
1191
1192 /*
1193  * Isolate all pages that can be migrated from the first suitable block,
1194  * starting at the block pointed to by the migrate scanner pfn within
1195  * compact_control.
1196  */
1197 static isolate_migrate_t isolate_migratepages(struct zone *zone,
1198                                         struct compact_control *cc)
1199 {
1200         unsigned long block_start_pfn;
1201         unsigned long block_end_pfn;
1202         unsigned long low_pfn;
1203         struct page *page;
1204         const isolate_mode_t isolate_mode =
1205                 (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
1206                 (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1207
1208         /*
1209          * Start at where we last stopped, or beginning of the zone as
1210          * initialized by compact_zone()
1211          */
1212         low_pfn = cc->migrate_pfn;
1213         block_start_pfn = pageblock_start_pfn(low_pfn);
1214         if (block_start_pfn < zone->zone_start_pfn)
1215                 block_start_pfn = zone->zone_start_pfn;
1216
1217         /* Only scan within a pageblock boundary */
1218         block_end_pfn = pageblock_end_pfn(low_pfn);
1219
1220         /*
1221          * Iterate over whole pageblocks until we find the first suitable.
1222          * Do not cross the free scanner.
1223          */
1224         for (; block_end_pfn <= cc->free_pfn;
1225                         low_pfn = block_end_pfn,
1226                         block_start_pfn = block_end_pfn,
1227                         block_end_pfn += pageblock_nr_pages) {
1228
1229                 /*
1230                  * This can potentially iterate a massively long zone with
1231                  * many pageblocks unsuitable, so periodically check if we
1232                  * need to schedule, or even abort async compaction.
1233                  */
1234                 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1235                                                 && compact_should_abort(cc))
1236                         break;
1237
1238                 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1239                                                                         zone);
1240                 if (!page)
1241                         continue;
1242
1243                 /* If isolation recently failed, do not retry */
1244                 if (!isolation_suitable(cc, page))
1245                         continue;
1246
1247                 /*
1248                  * For async compaction, also only scan in MOVABLE blocks.
1249                  * Async compaction is optimistic to see if the minimum amount
1250                  * of work satisfies the allocation.
1251                  */
1252                 if (cc->mode == MIGRATE_ASYNC &&
1253                     !migrate_async_suitable(get_pageblock_migratetype(page)))
1254                         continue;
1255
1256                 /* Perform the isolation */
1257                 low_pfn = isolate_migratepages_block(cc, low_pfn,
1258                                                 block_end_pfn, isolate_mode);
1259
1260                 if (!low_pfn || cc->contended) {
1261                         acct_isolated(zone, cc);
1262                         return ISOLATE_ABORT;
1263                 }
1264
1265                 /*
1266                  * Either we isolated something and proceed with migration. Or
1267                  * we failed and compact_zone should decide if we should
1268                  * continue or not.
1269                  */
1270                 break;
1271         }
1272
1273         acct_isolated(zone, cc);
1274         /* Record where migration scanner will be restarted. */
1275         cc->migrate_pfn = low_pfn;
1276
1277         return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1278 }
1279
1280 /*
1281  * order == -1 is expected when compacting via
1282  * /proc/sys/vm/compact_memory
1283  */
1284 static inline bool is_via_compact_memory(int order)
1285 {
1286         return order == -1;
1287 }
1288
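/*
 * Decide whether this compaction run can stop: because it was contended or a
 * fatal signal is pending, because the scanners have met, or because a free
 * page of the requested order (and a usable migratetype) is now available.
 */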
1289 static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc,
1290                             const int migratetype)
1291 {
1292         unsigned int order;
1293         unsigned long watermark;
1294
1295         if (cc->contended || fatal_signal_pending(current))
1296                 return COMPACT_CONTENDED;
1297
1298         /* Compaction run completes if the migrate and free scanner meet */
1299         if (compact_scanners_met(cc)) {
1300                 /* Let the next compaction start anew. */
1301                 reset_cached_positions(zone);
1302
1303                 /*
1304                  * Mark that the PG_migrate_skip information should be cleared
1305                  * by kswapd when it goes to sleep. kcompactd does not set the
1306                  * flag itself as the decision to be clear should be directly
1307                  * based on an allocation request.
1308                  */
1309                 if (cc->direct_compaction)
1310                         zone->compact_blockskip_flush = true;
1311
1312                 if (cc->whole_zone)
1313                         return COMPACT_COMPLETE;
1314                 else
1315                         return COMPACT_PARTIAL_SKIPPED;
1316         }
1317
1318         if (is_via_compact_memory(cc->order))
1319                 return COMPACT_CONTINUE;
1320
1321         /* Compaction run is not finished if the watermark is not met */
1322         watermark = low_wmark_pages(zone);
1323
1324         if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
1325                                                         cc->alloc_flags))
1326                 return COMPACT_CONTINUE;
1327
1328         /* Direct compactor: Is a suitable page free? */
1329         for (order = cc->order; order < MAX_ORDER; order++) {
1330                 struct free_area *area = &zone->free_area[order];
1331                 bool can_steal;
1332
1333                 /* Job done if page is free of the right migratetype */
1334                 if (!list_empty(&area->free_list[migratetype]))
1335                         return COMPACT_PARTIAL;
1336
1337 #ifdef CONFIG_CMA
1338                 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1339                 if (migratetype == MIGRATE_MOVABLE &&
1340                         !list_empty(&area->free_list[MIGRATE_CMA]))
1341                         return COMPACT_PARTIAL;
1342 #endif
1343                 /*
1344                  * Job done if allocation would steal freepages from
1345                  * other migratetype buddy lists.
1346                  */
1347                 if (find_suitable_fallback(area, order, migratetype,
1348                                                 true, &can_steal) != -1)
1349                         return COMPACT_PARTIAL;
1350         }
1351
1352         return COMPACT_NO_SUITABLE_PAGE;
1353 }
1354
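/*
 * Wrapper around __compact_finished() that traces the raw result and maps
 * COMPACT_NO_SUITABLE_PAGE back to COMPACT_CONTINUE for callers.
 */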
1355 static enum compact_result compact_finished(struct zone *zone,
1356                         struct compact_control *cc,
1357                         const int migratetype)
1358 {
1359         int ret;
1360
1361         ret = __compact_finished(zone, cc, migratetype);
1362         trace_mm_compaction_finished(zone, cc->order, ret);
1363         if (ret == COMPACT_NO_SUITABLE_PAGE)
1364                 ret = COMPACT_CONTINUE;
1365
1366         return ret;
1367 }
1368
1369 /*
1370  * compaction_suitable: Is this suitable to run compaction on this zone now?
1371  * Returns
1372  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
1373  *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
1374  *   COMPACT_CONTINUE - If compaction should run now
1375  */
1376 static enum compact_result __compaction_suitable(struct zone *zone, int order,
1377                                         unsigned int alloc_flags,
1378                                         int classzone_idx,
1379                                         unsigned long wmark_target)
1380 {
1381         int fragindex;
1382         unsigned long watermark;
1383
1384         if (is_via_compact_memory(order))
1385                 return COMPACT_CONTINUE;
1386
1387         watermark = low_wmark_pages(zone);
1388         /*
1389          * If watermarks for high-order allocation are already met, there
1390          * should be no need for compaction at all.
1391          */
1392         if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1393                                                                 alloc_flags))
1394                 return COMPACT_PARTIAL;
1395
1396         /*
1397          * Watermarks for order-0 must be met for compaction. Note the 2UL:
1398          * during migration, copies of pages are allocated, so the footprint
1399          * is briefly higher (e.g. an order-3 request adds 2UL << 3 = 16 pages).
1400          */
1401         watermark += (2UL << order);
1402         if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
1403                                  alloc_flags, wmark_target))
1404                 return COMPACT_SKIPPED;
1405
1406         /*
1407          * fragmentation index determines if allocation failures are due to
1408          * low memory or external fragmentation
1409          *
1410          * index of -1000 would imply allocations might succeed depending on
1411          * watermarks, but we already failed the high-order watermark check;
1412          * index towards 0 implies failure is due to lack of memory;
1413          * index towards 1000 implies failure is due to fragmentation.
1414          *
1415          * Only compact if a failure would be due to fragmentation.
1416          */
1417         fragindex = fragmentation_index(zone, order);
1418         if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1419                 return COMPACT_NOT_SUITABLE_ZONE;
1420
1421         return COMPACT_CONTINUE;
1422 }
1423
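/*
 * Traced wrapper around __compaction_suitable() that uses the zone's current
 * NR_FREE_PAGES count. COMPACT_NOT_SUITABLE_ZONE is reported to the
 * tracepoint only; callers see it as COMPACT_SKIPPED.
 */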
1424 enum compact_result compaction_suitable(struct zone *zone, int order,
1425                                         unsigned int alloc_flags,
1426                                         int classzone_idx)
1427 {
1428         enum compact_result ret;
1429
1430         ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
1431                                     zone_page_state(zone, NR_FREE_PAGES));
1432         trace_mm_compaction_suitable(zone, order, ret);
1433         if (ret == COMPACT_NOT_SUITABLE_ZONE)
1434                 ret = COMPACT_SKIPPED;
1435
1436         return ret;
1437 }
1438
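/*
 * Returns true if at least one zone in the allocation context's zonelist
 * could pass __compaction_suitable(), given an estimate of how much memory
 * further reclaim could free. Callers use this to decide whether retrying
 * reclaim and compaction is worthwhile.
 */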
1439 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
1440                 int alloc_flags)
1441 {
1442         struct zone *zone;
1443         struct zoneref *z;
1444
1445         /*
1446          * Make sure at least one zone would pass __compaction_suitable if we continue
1447          * retrying the reclaim.
1448          */
1449         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1450                                         ac->nodemask) {
1451                 unsigned long available;
1452                 enum compact_result compact_result;
1453
1454                 /*
1455                  * Do not consider all the reclaimable memory because we do not
1456                  * want to thrash just for a single high-order allocation, which
1457                  * is not even guaranteed to succeed even if __compaction_suitable
1458                  * is happy with the watermark check.
1459                  */
1460                 available = zone_reclaimable_pages(zone) / order;
1461                 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
1462                 compact_result = __compaction_suitable(zone, order, alloc_flags,
1463                                 ac_classzone_idx(ac), available);
1464                 if (compact_result != COMPACT_SKIPPED &&
1465                                 compact_result != COMPACT_NOT_SUITABLE_ZONE)
1466                         return true;
1467         }
1468
1469         return false;
1470 }
1471
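/*
 * Compact a single zone: check that compaction is suitable, initialise the
 * migrate and free scanners from the cached positions, then repeatedly
 * isolate and migrate pages until compact_finished() reports a result.
 */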
1472 static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
1473 {
1474         enum compact_result ret;
1475         unsigned long start_pfn = zone->zone_start_pfn;
1476         unsigned long end_pfn = zone_end_pfn(zone);
1477         const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1478         const bool sync = cc->mode != MIGRATE_ASYNC;
1479
1480         ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1481                                                         cc->classzone_idx);
1482         /* Compaction is unnecessary (PARTIAL) or cannot proceed (SKIPPED) */
1483         if (ret == COMPACT_PARTIAL || ret == COMPACT_SKIPPED)
1484                 return ret;
1485
1486         /* huh, compaction_suitable is returning something unexpected */
1487         VM_BUG_ON(ret != COMPACT_CONTINUE);
1488
1489         /*
1490          * Clear pageblock skip if there were failures recently and compaction
1491          * is about to be retried after being deferred.
1492          */
1493         if (compaction_restarting(zone, cc->order))
1494                 __reset_isolation_suitable(zone);
1495
1496         /*
1497          * Set up to move all movable pages to the end of the zone. Use cached
1498          * information on where the scanners should start, but check that it
1499          * is initialised by ensuring the values are within zone boundaries.
1500          */
1501         cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1502         cc->free_pfn = zone->compact_cached_free_pfn;
1503         if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
1504                 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1505                 zone->compact_cached_free_pfn = cc->free_pfn;
1506         }
1507         if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
1508                 cc->migrate_pfn = start_pfn;
1509                 zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
1510                 zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1511         }
1512
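        /* Starting at the zone's first pfn means the whole zone gets scanned */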
1513         if (cc->migrate_pfn == start_pfn)
1514                 cc->whole_zone = true;
1515
1516         cc->last_migrated_pfn = 0;
1517
1518         trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
1519                                 cc->free_pfn, end_pfn, sync);
1520
1521         migrate_prep_local();
1522
1523         while ((ret = compact_finished(zone, cc, migratetype)) ==
1524                                                 COMPACT_CONTINUE) {
1525                 int err;
1526
1527                 switch (isolate_migratepages(zone, cc)) {
1528                 case ISOLATE_ABORT:
1529                         ret = COMPACT_CONTENDED;
1530                         putback_movable_pages(&cc->migratepages);
1531                         cc->nr_migratepages = 0;
1532                         goto out;
1533                 case ISOLATE_NONE:
1534                         /*
1535                          * We haven't isolated and migrated anything, but
1536                          * there might still be unflushed migrations from
1537                          * the previous cc->order-aligned block.
1538                          */
1539                         goto check_drain;
1540                 case ISOLATE_SUCCESS:
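                        /* Pages were isolated; fall through and migrate them */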
1541                         ;
1542                 }
1543
1544                 err = migrate_pages(&cc->migratepages, compaction_alloc,
1545                                 compaction_free, (unsigned long)cc, cc->mode,
1546                                 MR_COMPACTION);
1547
1548                 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1549                                                         &cc->migratepages);
1550
1551                 /* All pages were either migrated or will be released */
1552                 cc->nr_migratepages = 0;
1553                 if (err) {
1554                         putback_movable_pages(&cc->migratepages);
1555                         /*
1556                          * migrate_pages() may return -ENOMEM when scanners meet
1557                          * and we want compact_finished() to detect it
1558                          */
1559                         if (err == -ENOMEM && !compact_scanners_met(cc)) {
1560                                 ret = COMPACT_CONTENDED;
1561                                 goto out;
1562                         }
1563                         /*
1564                          * We failed to migrate at least one page in the current
1565                          * order-aligned block, so skip the rest of it.
1566                          */
1567                         if (cc->direct_compaction &&
1568                                                 (cc->mode == MIGRATE_ASYNC)) {
1569                                 cc->migrate_pfn = block_end_pfn(
1570                                                 cc->migrate_pfn - 1, cc->order);
1571                                 /* Draining pcplists is useless in this case */
1572                                 cc->last_migrated_pfn = 0;
1573
1574                         }
1575                 }
1576
1577 check_drain:
1578                 /*
1579                  * Has the migration scanner moved away from the previous
1580                  * cc->order aligned block where we migrated from? If yes,
1581                  * flush the pages that were freed, so that they can merge and
1582                  * compact_finished() can detect immediately if allocation
1583                  * would succeed.
1584                  */
1585                 if (cc->order > 0 && cc->last_migrated_pfn) {
1586                         int cpu;
1587                         unsigned long current_block_start =
1588                                 block_start_pfn(cc->migrate_pfn, cc->order);
1589
1590                         if (cc->last_migrated_pfn < current_block_start) {
1591                                 cpu = get_cpu();
1592                                 lru_add_drain_cpu(cpu);
1593                                 drain_local_pages(zone);
1594                                 put_cpu();
1595                                 /* No more flushing until we migrate again */
1596                                 cc->last_migrated_pfn = 0;
1597                         }
1598                 }
1599
1600         }
1601
1602 out:
1603         /*
1604          * Release free pages and update where the free scanner should restart,
1605          * so we don't leave any returned pages behind in the next attempt.
1606          */
1607         if (cc->nr_freepages > 0) {
1608                 unsigned long free_pfn = release_freepages(&cc->freepages);
1609
1610                 cc->nr_freepages = 0;
1611                 VM_BUG_ON(free_pfn == 0);
1612                 /* The cached pfn is always the first in a pageblock */
1613                 free_pfn = pageblock_start_pfn(free_pfn);
1614                 /*
1615                  * Only go back, not forward. The cached pfn might already have
1616                  * been reset to the zone end in compact_finished().
1617                  */
1618                 if (free_pfn > zone->compact_cached_free_pfn)
1619                         zone->compact_cached_free_pfn = free_pfn;
1620         }
1621
1622         trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
1623                                 cc->free_pfn, end_pfn, sync, ret);
1624
1625         if (ret == COMPACT_CONTENDED)
1626                 ret = COMPACT_PARTIAL;
1627
1628         return ret;
1629 }
1630
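/*
 * Run compaction of one zone on behalf of a direct (allocation-path)
 * compaction request and report back whether the run was contended.
 */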
1631 static enum compact_result compact_zone_order(struct zone *zone, int order,
1632                 gfp_t gfp_mask, enum migrate_mode mode, int *contended,
1633                 unsigned int alloc_flags, int classzone_idx)
1634 {
1635         enum compact_result ret;
1636         struct compact_control cc = {
1637                 .nr_freepages = 0,
1638                 .nr_migratepages = 0,
1639                 .order = order,
1640                 .gfp_mask = gfp_mask,
1641                 .zone = zone,
1642                 .mode = mode,
1643                 .alloc_flags = alloc_flags,
1644                 .classzone_idx = classzone_idx,
1645                 .direct_compaction = true,
1646         };
1647         INIT_LIST_HEAD(&cc.freepages);
1648         INIT_LIST_HEAD(&cc.migratepages);
1649
1650         ret = compact_zone(zone, &cc);
1651
1652         VM_BUG_ON(!list_empty(&cc.freepages));
1653         VM_BUG_ON(!list_empty(&cc.migratepages));
1654
1655         *contended = cc.contended;
1656         return ret;
1657 }
1658
1659 int sysctl_extfrag_threshold = 500;
1660
1661 /**
1662  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
1663  * @gfp_mask: The GFP mask of the current allocation
1664  * @order: The order of the current allocation
1665  * @alloc_flags: The allocation flags of the current allocation
1666  * @ac: The context of current allocation
1667  * @mode: The migration mode for async, sync light, or sync migration
1668  * @contended: Return value that determines if compaction was aborted due to
1669  *             need_resched() or lock contention
1670  *
1671  * This is the main entry point for direct page compaction.
1672  */
1673 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1674                 unsigned int alloc_flags, const struct alloc_context *ac,
1675                 enum migrate_mode mode, int *contended)
1676 {
1677         int may_enter_fs = gfp_mask & __GFP_FS;
1678         int may_perform_io = gfp_mask & __GFP_IO;
1679         struct zoneref *z;
1680         struct zone *zone;
1681         enum compact_result rc = COMPACT_SKIPPED;
1682         int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
1683
1684         *contended = COMPACT_CONTENDED_NONE;
1685
1686         /* Check if the GFP flags allow compaction */
1687         if (!order || !may_enter_fs || !may_perform_io)
1688                 return COMPACT_SKIPPED;
1689
1690         trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);
1691
1692         /* Compact each zone in the list */
1693         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1694                                                                 ac->nodemask) {
1695                 enum compact_result status;
1696                 int zone_contended;
1697
1698                 if (compaction_deferred(zone, order)) {
1699                         rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
1700                         continue;
1701                 }
1702
1703                 status = compact_zone_order(zone, order, gfp_mask, mode,
1704                                 &zone_contended, alloc_flags,
1705                                 ac_classzone_idx(ac));
1706                 rc = max(status, rc);
1707                 /*
1708                  * It takes at least one zone that wasn't lock contended
1709                  * to clear all_zones_contended.
1710                  */
1711                 all_zones_contended &= zone_contended;
1712
1713                 /* If a normal allocation would succeed, stop compacting */
1714                 if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
1715                                         ac_classzone_idx(ac), alloc_flags)) {
1716                         /*
1717                          * We think the allocation will succeed in this zone,
1718                          * but it is not certain, hence the false. The caller
1719                          * will repeat this with true if allocation indeed
1720                          * succeeds in this zone.
1721                          */
1722                         compaction_defer_reset(zone, order, false);
1723                         /*
1724                          * It is possible that async compaction aborted due to
1725                          * need_resched() and the watermarks were ok thanks to
1726                          * somebody else freeing memory. The allocation can
1727                          * however still fail so we better signal the
1728                          * need_resched() contention anyway (this will not
1729                          * prevent the allocation attempt).
1730                          */
1731                         if (zone_contended == COMPACT_CONTENDED_SCHED)
1732                                 *contended = COMPACT_CONTENDED_SCHED;
1733
1734                         goto break_loop;
1735                 }
1736
1737                 if (mode != MIGRATE_ASYNC && (status == COMPACT_COMPLETE ||
1738                                         status == COMPACT_PARTIAL_SKIPPED)) {
1739                         /*
1740                          * We think that allocation won't succeed in this zone
1741                          * so we defer compaction there. If it ends up
1742                          * succeeding after all, it will be reset.
1743                          */
1744                         defer_compaction(zone, order);
1745                 }
1746
1747                 /*
1748                  * We might have stopped compacting due to need_resched()
1749                  * in async compaction, or because a fatal signal is pending.
1750                  * In that case do not try further zones and signal
1751                  * need_resched() contention.
1752                  */
1753                 if ((zone_contended == COMPACT_CONTENDED_SCHED)
1754                                         || fatal_signal_pending(current)) {
1755                         *contended = COMPACT_CONTENDED_SCHED;
1756                         goto break_loop;
1757                 }
1758
1759                 continue;
1760 break_loop:
1761                 /*
1762                  * We might not have tried all the zones, so be conservative
1763                  * and assume they are not all lock contended.
1764                  */
1765                 all_zones_contended = 0;
1766                 break;
1767         }
1768
1769         /*
1770          * If at least one zone wasn't deferred or skipped, we report if all
1771          * zones that were tried were lock contended.
1772          */
1773         if (rc > COMPACT_INACTIVE && all_zones_contended)
1774                 *contended = COMPACT_CONTENDED_LOCK;
1775
1776         return rc;
1777 }
1778
1779
1780 /* Compact all zones within a node */
1781 static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
1782 {
1783         int zoneid;
1784         struct zone *zone;
1785
1786         for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1787
1788                 zone = &pgdat->node_zones[zoneid];
1789                 if (!populated_zone(zone))
1790                         continue;
1791
1792                 cc->nr_freepages = 0;
1793                 cc->nr_migratepages = 0;
1794                 cc->zone = zone;
1795                 INIT_LIST_HEAD(&cc->freepages);
1796                 INIT_LIST_HEAD(&cc->migratepages);
1797
1798                 /*
1799                  * When called via /proc/sys/vm/compact_memory
1800                  * this makes sure we compact the whole zone regardless of
1801                  * cached scanner positions.
1802                  */
1803                 if (is_via_compact_memory(cc->order))
1804                         __reset_isolation_suitable(zone);
1805
1806                 if (is_via_compact_memory(cc->order) ||
1807                                 !compaction_deferred(zone, cc->order))
1808                         compact_zone(zone, cc);
1809
1810                 VM_BUG_ON(!list_empty(&cc->freepages));
1811                 VM_BUG_ON(!list_empty(&cc->migratepages));
1812
1813                 if (is_via_compact_memory(cc->order))
1814                         continue;
1815
1816                 if (zone_watermark_ok(zone, cc->order,
1817                                 low_wmark_pages(zone), 0, 0))
1818                         compaction_defer_reset(zone, cc->order, false);
1819         }
1820 }
1821
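/*
 * Asynchronously compact all zones of a node for the given order.
 * An order-0 request is a no-op.
 */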
1822 void compact_pgdat(pg_data_t *pgdat, int order)
1823 {
1824         struct compact_control cc = {
1825                 .order = order,
1826                 .mode = MIGRATE_ASYNC,
1827         };
1828
1829         if (!order)
1830                 return;
1831
1832         __compact_pgdat(pgdat, &cc);
1833 }
1834
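/*
 * Fully compact one node with sync migration, ignoring pageblock skip hints.
 * The order of -1 marks this as a whole-node request in the style of
 * /proc/sys/vm/compact_memory.
 */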
1835 static void compact_node(int nid)
1836 {
1837         struct compact_control cc = {
1838                 .order = -1,
1839                 .mode = MIGRATE_SYNC,
1840                 .ignore_skip_hint = true,
1841         };
1842
1843         __compact_pgdat(NODE_DATA(nid), &cc);
1844 }
1845
1846 /* Compact all nodes in the system */
1847 static void compact_nodes(void)
1848 {
1849         int nid;
1850
1851         /* Flush pending updates to the LRU lists */
1852         lru_add_drain_all();
1853
1854         for_each_online_node(nid)
1855                 compact_node(nid);
1856 }
1857
1858 /* The written value is actually unused; all memory is compacted */
1859 int sysctl_compact_memory;
1860
1861 /*
1862  * This is the entry point for compacting all nodes via
1863  * /proc/sys/vm/compact_memory
1864  */
1865 int sysctl_compaction_handler(struct ctl_table *table, int write,
1866                         void __user *buffer, size_t *length, loff_t *ppos)
1867 {
1868         if (write)
1869                 compact_nodes();
1870
1871         return 0;
1872 }
1873
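/* Handler for the vm.extfrag_threshold sysctl */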
1874 int sysctl_extfrag_handler(struct ctl_table *table, int write,
1875                         void __user *buffer, size_t *length, loff_t *ppos)
1876 {
1877         proc_dointvec_minmax(table, write, buffer, length, ppos);
1878
1879         return 0;
1880 }
1881
1882 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
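/* Compact a whole node when something is written to its 'compact' sysfs file */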
1883 static ssize_t sysfs_compact_node(struct device *dev,
1884                         struct device_attribute *attr,
1885                         const char *buf, size_t count)
1886 {
1887         int nid = dev->id;
1888
1889         if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1890                 /* Flush pending updates to the LRU lists */
1891                 lru_add_drain_all();
1892
1893                 compact_node(nid);
1894         }
1895
1896         return count;
1897 }
1898 static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1899
1900 int compaction_register_node(struct node *node)
1901 {
1902         return device_create_file(&node->dev, &dev_attr_compact);
1903 }
1904
1905 void compaction_unregister_node(struct node *node)
1906 {
1907         return device_remove_file(&node->dev, &dev_attr_compact);
1908 }
1909 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1910
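/*
 * True when a wakeup has recorded pending work (a non-zero order) or the
 * thread is being stopped; used as the wait condition in kcompactd().
 */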
1911 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
1912 {
1913         return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
1914 }
1915
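/*
 * Check whether at least one populated zone, up to the requested
 * classzone_idx, looks suitable for compaction at the requested order, so
 * that kcompactd is only woken when it has something useful to do.
 */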
1916 static bool kcompactd_node_suitable(pg_data_t *pgdat)
1917 {
1918         int zoneid;
1919         struct zone *zone;
1920         enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
1921
1922         for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
1923                 zone = &pgdat->node_zones[zoneid];
1924
1925                 if (!populated_zone(zone))
1926                         continue;
1927
1928                 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
1929                                         classzone_idx) == COMPACT_CONTINUE)
1930                         return true;
1931         }
1932
1933         return false;
1934 }
1935
1936 static void kcompactd_do_work(pg_data_t *pgdat)
1937 {
1938         /*
1939          * With no special task, compact all zones so that a page of the
1940          * requested order is allocatable.
1941          */
1942         int zoneid;
1943         struct zone *zone;
1944         struct compact_control cc = {
1945                 .order = pgdat->kcompactd_max_order,
1946                 .classzone_idx = pgdat->kcompactd_classzone_idx,
1947                 .mode = MIGRATE_SYNC_LIGHT,
1948                 .ignore_skip_hint = true,
1949
1950         };
1951         bool success = false;
1952
1953         trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1954                                                         cc.classzone_idx);
1955         count_vm_event(KCOMPACTD_WAKE);
1956
1957         for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
1958                 int status;
1959
1960                 zone = &pgdat->node_zones[zoneid];
1961                 if (!populated_zone(zone))
1962                         continue;
1963
1964                 if (compaction_deferred(zone, cc.order))
1965                         continue;
1966
1967                 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1968                                                         COMPACT_CONTINUE)
1969                         continue;
1970
1971                 cc.nr_freepages = 0;
1972                 cc.nr_migratepages = 0;
1973                 cc.zone = zone;
1974                 INIT_LIST_HEAD(&cc.freepages);
1975                 INIT_LIST_HEAD(&cc.migratepages);
1976
1977                 if (kthread_should_stop())
1978                         return;
1979                 status = compact_zone(zone, &cc);
1980
1981                 if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
1982                                                 cc.classzone_idx, 0)) {
1983                         success = true;
1984                         compaction_defer_reset(zone, cc.order, false);
1985                 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
1986                         /*
1987                          * We use sync migration mode here, so we defer like
1988                          * sync direct compaction does.
1989                          */
1990                         defer_compaction(zone, cc.order);
1991                 }
1992
1993                 VM_BUG_ON(!list_empty(&cc.freepages));
1994                 VM_BUG_ON(!list_empty(&cc.migratepages));
1995         }
1996
1997         /*
1998          * Regardless of success, we are done until woken up next. But remember
1999          * the requested order/classzone_idx in case they were higher/tighter
2000          * than our current ones.
2001          */
2002         if (pgdat->kcompactd_max_order <= cc.order)
2003                 pgdat->kcompactd_max_order = 0;
2004         if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
2005                 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2006 }
2007
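/*
 * Record a compaction request for this node (the highest order seen and the
 * lowest classzone_idx) and wake kcompactd if it is sleeping and at least
 * one zone looks suitable.
 */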
2008 void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
2009 {
2010         if (!order)
2011                 return;
2012
2013         if (pgdat->kcompactd_max_order < order)
2014                 pgdat->kcompactd_max_order = order;
2015
2016         if (pgdat->kcompactd_classzone_idx > classzone_idx)
2017                 pgdat->kcompactd_classzone_idx = classzone_idx;
2018
2019         if (!waitqueue_active(&pgdat->kcompactd_wait))
2020                 return;
2021
2022         if (!kcompactd_node_suitable(pgdat))
2023                 return;
2024
2025         trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
2026                                                         classzone_idx);
2027         wake_up_interruptible(&pgdat->kcompactd_wait);
2028 }
2029
2030 /*
2031  * The background compaction daemon, started as a kernel thread
2032  * from the init process.
2033  */
2034 static int kcompactd(void *p)
2035 {
2036         pg_data_t *pgdat = (pg_data_t*)p;
2037         struct task_struct *tsk = current;
2038
2039         const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2040
2041         if (!cpumask_empty(cpumask))
2042                 set_cpus_allowed_ptr(tsk, cpumask);
2043
2044         set_freezable();
2045
2046         pgdat->kcompactd_max_order = 0;
2047         pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2048
2049         while (!kthread_should_stop()) {
2050                 trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
2051                 wait_event_freezable(pgdat->kcompactd_wait,
2052                                 kcompactd_work_requested(pgdat));
2053
2054                 kcompactd_do_work(pgdat);
2055         }
2056
2057         return 0;
2058 }
2059
2060 /*
2061  * This kcompactd start function will be called by init and node-hot-add.
2062  * On node hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
2063  */
2064 int kcompactd_run(int nid)
2065 {
2066         pg_data_t *pgdat = NODE_DATA(nid);
2067         int ret = 0;
2068
2069         if (pgdat->kcompactd)
2070                 return 0;
2071
2072         pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2073         if (IS_ERR(pgdat->kcompactd)) {
2074                 pr_err("Failed to start kcompactd on node %d\n", nid);
2075                 ret = PTR_ERR(pgdat->kcompactd);
2076                 pgdat->kcompactd = NULL;
2077         }
2078         return ret;
2079 }
2080
2081 /*
2082  * Called by memory hotplug when all memory in a node is offlined. Caller must
2083  * hold mem_hotplug_begin/end().
2084  */
2085 void kcompactd_stop(int nid)
2086 {
2087         struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2088
2089         if (kcompactd) {
2090                 kthread_stop(kcompactd);
2091                 NODE_DATA(nid)->kcompactd = NULL;
2092         }
2093 }
2094
2095 /*
2096  * It's optimal to keep kcompactd on the same CPUs as its node's memory,
2097  * but not required for correctness. So if the last cpu in a node goes
2098  * away, kcompactd is allowed to run anywhere; as the first one comes
2099  * back, restore its cpu binding.
2100  */
2101 static int cpu_callback(struct notifier_block *nfb, unsigned long action,
2102                         void *hcpu)
2103 {
2104         int nid;
2105
2106         if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2107                 for_each_node_state(nid, N_MEMORY) {
2108                         pg_data_t *pgdat = NODE_DATA(nid);
2109                         const struct cpumask *mask;
2110
2111                         mask = cpumask_of_node(pgdat->node_id);
2112
2113                         if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2114                                 /* One of our CPUs online: restore mask */
2115                                 set_cpus_allowed_ptr(pgdat->kcompactd, mask);
2116                 }
2117         }
2118         return NOTIFY_OK;
2119 }
2120
2121 static int __init kcompactd_init(void)
2122 {
2123         int nid;
2124
2125         for_each_node_state(nid, N_MEMORY)
2126                 kcompactd_run(nid);
2127         hotcpu_notifier(cpu_callback, 0);
2128         return 0;
2129 }
2130 subsys_initcall(kcompactd_init)
2131
2132 #endif /* CONFIG_COMPACTION */