hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages
mm/hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/tlb.h>
28
29 #include <linux/io.h>
30 #include <linux/hugetlb.h>
31 #include <linux/node.h>
32 #include "internal.h"
33
34 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
35 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
36 unsigned long hugepages_treat_as_movable;
37
38 static int hugetlb_max_hstate;
39 unsigned int default_hstate_idx;
40 struct hstate hstates[HUGE_MAX_HSTATE];
41
42 __initdata LIST_HEAD(huge_boot_pages);
43
44 /* for command line parsing */
45 static struct hstate * __initdata parsed_hstate;
46 static unsigned long __initdata default_hstate_max_huge_pages;
47 static unsigned long __initdata default_hstate_size;
48
49 #define for_each_hstate(h) \
50         for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
51
52 /*
53  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
54  */
55 static DEFINE_SPINLOCK(hugetlb_lock);
56
57 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
58 {
59         bool free = (spool->count == 0) && (spool->used_hpages == 0);
60
61         spin_unlock(&spool->lock);
62
63         /* If no pages are used, and no other handles to the subpool
64          * remain, free the subpool */
65         if (free)
66                 kfree(spool);
67 }
68
69 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
70 {
71         struct hugepage_subpool *spool;
72
73         spool = kmalloc(sizeof(*spool), GFP_KERNEL);
74         if (!spool)
75                 return NULL;
76
77         spin_lock_init(&spool->lock);
78         spool->count = 1;
79         spool->max_hpages = nr_blocks;
80         spool->used_hpages = 0;
81
82         return spool;
83 }
84
85 void hugepage_put_subpool(struct hugepage_subpool *spool)
86 {
87         spin_lock(&spool->lock);
88         BUG_ON(!spool->count);
89         spool->count--;
90         unlock_or_release_subpool(spool);
91 }
92
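/*
 * Charge @delta pages against the subpool's limit.  Returns 0 on success
 * (or when there is no subpool) and -ENOMEM if the limit would be exceeded.
 */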
93 static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
94                                       long delta)
95 {
96         int ret = 0;
97
98         if (!spool)
99                 return 0;
100
101         spin_lock(&spool->lock);
102         if ((spool->used_hpages + delta) <= spool->max_hpages) {
103                 spool->used_hpages += delta;
104         } else {
105                 ret = -ENOMEM;
106         }
107         spin_unlock(&spool->lock);
108
109         return ret;
110 }
111
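/*
 * Uncharge @delta pages from the subpool.  If the subpool is no longer
 * referenced and now has no pages in use, it is freed here.
 */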
112 static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
113                                        long delta)
114 {
115         if (!spool)
116                 return;
117
118         spin_lock(&spool->lock);
119         spool->used_hpages -= delta;
120         /* If hugetlbfs_put_super couldn't free spool due to
121          * an outstanding quota reference, free it now. */
122         unlock_or_release_subpool(spool);
123 }
124
125 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
126 {
127         return HUGETLBFS_SB(inode->i_sb)->spool;
128 }
129
130 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
131 {
132         return subpool_inode(vma->vm_file->f_dentry->d_inode);
133 }
134
135 /*
136  * Region tracking -- allows tracking of reservations and instantiated pages
137  *                    across the pages in a mapping.
138  *
139  * The region data structures are protected by a combination of the mmap_sem
140  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
141  * must either hold the mmap_sem for write, or the mmap_sem for read and
142  * the hugetlb_instantiation mutex:
143  *
144  *      down_write(&mm->mmap_sem);
145  * or
146  *      down_read(&mm->mmap_sem);
147  *      mutex_lock(&hugetlb_instantiation_mutex);
148  */
149 struct file_region {
150         struct list_head link;
151         long from;
152         long to;
153 };
154
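/*
 * Add the range [f, t) to the region list by extending the first existing
 * region it reaches and absorbing any further regions it overlaps.
 */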
155 static long region_add(struct list_head *head, long f, long t)
156 {
157         struct file_region *rg, *nrg, *trg;
158
159         /* Locate the region we are either in or before. */
160         list_for_each_entry(rg, head, link)
161                 if (f <= rg->to)
162                         break;
163
164         /* Round our left edge to the current segment if it encloses us. */
165         if (f > rg->from)
166                 f = rg->from;
167
168         /* Check for and consume any regions we now overlap with. */
169         nrg = rg;
170         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
171                 if (&rg->link == head)
172                         break;
173                 if (rg->from > t)
174                         break;
175
176                 /* If this area reaches higher then extend our area to
177                  * include it completely.  If this is not the first area
178                  * which we intend to reuse, free it. */
179                 if (rg->to > t)
180                         t = rg->to;
181                 if (rg != nrg) {
182                         list_del(&rg->link);
183                         kfree(rg);
184                 }
185         }
186         nrg->from = f;
187         nrg->to = t;
188         return 0;
189 }
190
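/*
 * Return the number of pages in [f, t) that are not already covered by the
 * region list.  A zero-sized placeholder region is inserted if needed so
 * that a following region_add() for the same range cannot fail.
 */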
191 static long region_chg(struct list_head *head, long f, long t)
192 {
193         struct file_region *rg, *nrg;
194         long chg = 0;
195
196         /* Locate the region we are before or in. */
197         list_for_each_entry(rg, head, link)
198                 if (f <= rg->to)
199                         break;
200
201         /* If we are below the current region then a new region is required.
202          * Subtle: allocate a new region at the position but make it zero
203          * size such that we can guarantee to record the reservation. */
204         if (&rg->link == head || t < rg->from) {
205                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
206                 if (!nrg)
207                         return -ENOMEM;
208                 nrg->from = f;
209                 nrg->to   = f;
210                 INIT_LIST_HEAD(&nrg->link);
211                 list_add(&nrg->link, rg->link.prev);
212
213                 return t - f;
214         }
215
216         /* Round our left edge to the current segment if it encloses us. */
217         if (f > rg->from)
218                 f = rg->from;
219         chg = t - f;
220
221         /* Check for and consume any regions we now overlap with. */
222         list_for_each_entry(rg, rg->link.prev, link) {
223                 if (&rg->link == head)
224                         break;
225                 if (rg->from > t)
226                         return chg;
227
228                 /* We overlap with this area; if it extends further than
229                  * us then we must extend ourselves.  Account for its
230                  * existing reservation. */
231                 if (rg->to > t) {
232                         chg += rg->to - t;
233                         t = rg->to;
234                 }
235                 chg -= rg->to - rg->from;
236         }
237         return chg;
238 }
239
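/*
 * Drop every region from @end onwards, trimming any region that straddles
 * @end, and return the number of pages removed from the map.
 */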
240 static long region_truncate(struct list_head *head, long end)
241 {
242         struct file_region *rg, *trg;
243         long chg = 0;
244
245         /* Locate the region we are either in or before. */
246         list_for_each_entry(rg, head, link)
247                 if (end <= rg->to)
248                         break;
249         if (&rg->link == head)
250                 return 0;
251
252         /* If we are in the middle of a region then adjust it. */
253         if (end > rg->from) {
254                 chg = rg->to - end;
255                 rg->to = end;
256                 rg = list_entry(rg->link.next, typeof(*rg), link);
257         }
258
259         /* Drop any remaining regions. */
260         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
261                 if (&rg->link == head)
262                         break;
263                 chg += rg->to - rg->from;
264                 list_del(&rg->link);
265                 kfree(rg);
266         }
267         return chg;
268 }
269
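/* Return the number of pages in [f, t) covered by existing regions. */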
270 static long region_count(struct list_head *head, long f, long t)
271 {
272         struct file_region *rg;
273         long chg = 0;
274
275         /* Locate each segment we overlap with, and count that overlap. */
276         list_for_each_entry(rg, head, link) {
277                 long seg_from;
278                 long seg_to;
279
280                 if (rg->to <= f)
281                         continue;
282                 if (rg->from >= t)
283                         break;
284
285                 seg_from = max(rg->from, f);
286                 seg_to = min(rg->to, t);
287
288                 chg += seg_to - seg_from;
289         }
290
291         return chg;
292 }
293
294 /*
295  * Convert the address within this vma to the page offset within
296  * the mapping, in pagecache page units; huge pages here.
297  */
298 static pgoff_t vma_hugecache_offset(struct hstate *h,
299                         struct vm_area_struct *vma, unsigned long address)
300 {
301         return ((address - vma->vm_start) >> huge_page_shift(h)) +
302                         (vma->vm_pgoff >> huge_page_order(h));
303 }
304
305 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
306                                      unsigned long address)
307 {
308         return vma_hugecache_offset(hstate_vma(vma), vma, address);
309 }
310
311 /*
312  * Return the size of the pages allocated when backing a VMA. In the majority
313  * of cases this will be the same size as that used by the page table entries.
314  */
315 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
316 {
317         struct hstate *hstate;
318
319         if (!is_vm_hugetlb_page(vma))
320                 return PAGE_SIZE;
321
322         hstate = hstate_vma(vma);
323
324         return 1UL << (hstate->order + PAGE_SHIFT);
325 }
326 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
327
328 /*
329  * Return the page size being used by the MMU to back a VMA. In the majority
330  * of cases, the page size used by the kernel matches the MMU size. On
331  * architectures where it differs, an architecture-specific version of this
332  * function is required.
333  */
334 #ifndef vma_mmu_pagesize
335 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
336 {
337         return vma_kernel_pagesize(vma);
338 }
339 #endif
340
341 /*
342  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
343  * bits of the reservation map pointer, which are always clear due to
344  * alignment.
345  */
346 #define HPAGE_RESV_OWNER    (1UL << 0)
347 #define HPAGE_RESV_UNMAPPED (1UL << 1)
348 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
349
350 /*
351  * These helpers are used to track how many pages are reserved for
352  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
353  * is guaranteed to have its future faults succeed.
354  *
355  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
356  * the reserve counters are updated with the hugetlb_lock held. It is safe
357  * to reset the VMA at fork() time as it is not in use yet and there is no
358  * chance of the global counters getting corrupted as a result of the values.
359  *
360  * The private mapping reservation is represented in a subtly different
361  * manner to a shared mapping.  A shared mapping has a region map associated
362  * with the underlying file; this region map represents the backing file
363  * pages which have ever had a reservation assigned, and it persists even
364  * after the page is instantiated.  A private mapping has a region map
365  * associated with the original mmap which is attached to all VMAs which
366  * reference it; this region map represents those offsets which have consumed
367  * a reservation, i.e. where pages have been instantiated.
368  */
369 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
370 {
371         return (unsigned long)vma->vm_private_data;
372 }
373
374 static void set_vma_private_data(struct vm_area_struct *vma,
375                                                         unsigned long value)
376 {
377         vma->vm_private_data = (void *)value;
378 }
379
380 struct resv_map {
381         struct kref refs;
382         struct list_head regions;
383 };
384
385 static struct resv_map *resv_map_alloc(void)
386 {
387         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
388         if (!resv_map)
389                 return NULL;
390
391         kref_init(&resv_map->refs);
392         INIT_LIST_HEAD(&resv_map->regions);
393
394         return resv_map;
395 }
396
397 static void resv_map_release(struct kref *ref)
398 {
399         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
400
401         /* Clear out any active regions before we release the map. */
402         region_truncate(&resv_map->regions, 0);
403         kfree(resv_map);
404 }
405
406 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
407 {
408         VM_BUG_ON(!is_vm_hugetlb_page(vma));
409         if (!(vma->vm_flags & VM_MAYSHARE))
410                 return (struct resv_map *)(get_vma_private_data(vma) &
411                                                         ~HPAGE_RESV_MASK);
412         return NULL;
413 }
414
415 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
416 {
417         VM_BUG_ON(!is_vm_hugetlb_page(vma));
418         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
419
420         set_vma_private_data(vma, (get_vma_private_data(vma) &
421                                 HPAGE_RESV_MASK) | (unsigned long)map);
422 }
423
424 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
425 {
426         VM_BUG_ON(!is_vm_hugetlb_page(vma));
427         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
428
429         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
430 }
431
432 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
433 {
434         VM_BUG_ON(!is_vm_hugetlb_page(vma));
435
436         return (get_vma_private_data(vma) & flag) != 0;
437 }
438
439 /* Decrement the reserved pages in the hugepage pool by one */
440 static void decrement_hugepage_resv_vma(struct hstate *h,
441                         struct vm_area_struct *vma)
442 {
443         if (vma->vm_flags & VM_NORESERVE)
444                 return;
445
446         if (vma->vm_flags & VM_MAYSHARE) {
447                 /* Shared mappings always use reserves */
448                 h->resv_huge_pages--;
449         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
450                 /*
451                  * Only the process that called mmap() has reserves for
452                  * private mappings.
453                  */
454                 h->resv_huge_pages--;
455         }
456 }
457
458 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
459 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
460 {
461         VM_BUG_ON(!is_vm_hugetlb_page(vma));
462         if (!(vma->vm_flags & VM_MAYSHARE))
463                 vma->vm_private_data = (void *)0;
464 }
465
466 /* Returns true if the VMA has associated reserve pages */
467 static int vma_has_reserves(struct vm_area_struct *vma)
468 {
469         if (vma->vm_flags & VM_MAYSHARE)
470                 return 1;
471         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
472                 return 1;
473         return 0;
474 }
475
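/*
 * Copy a gigantic page one subpage at a time; mem_map_next() is needed
 * because the tail pages of a gigantic page are not guaranteed to be
 * contiguous in the mem_map.
 */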
476 static void copy_gigantic_page(struct page *dst, struct page *src)
477 {
478         int i;
479         struct hstate *h = page_hstate(src);
480         struct page *dst_base = dst;
481         struct page *src_base = src;
482
483         for (i = 0; i < pages_per_huge_page(h); ) {
484                 cond_resched();
485                 copy_highpage(dst, src);
486
487                 i++;
488                 dst = mem_map_next(dst, dst_base, i);
489                 src = mem_map_next(src, src_base, i);
490         }
491 }
492
493 void copy_huge_page(struct page *dst, struct page *src)
494 {
495         int i;
496         struct hstate *h = page_hstate(src);
497
498         if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
499                 copy_gigantic_page(dst, src);
500                 return;
501         }
502
503         might_sleep();
504         for (i = 0; i < pages_per_huge_page(h); i++) {
505                 cond_resched();
506                 copy_highpage(dst + i, src + i);
507         }
508 }
509
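/*
 * Put a free huge page on its node's free list and update the free page
 * counters.  Called with hugetlb_lock held.
 */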
510 static void enqueue_huge_page(struct hstate *h, struct page *page)
511 {
512         int nid = page_to_nid(page);
513         list_add(&page->lru, &h->hugepage_freelists[nid]);
514         h->free_huge_pages++;
515         h->free_huge_pages_node[nid]++;
516 }
517
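/*
 * Take a huge page off @nid's free list, if one is available, and return it
 * with a fresh reference.  Called with hugetlb_lock held.
 */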
518 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
519 {
520         struct page *page;
521
522         if (list_empty(&h->hugepage_freelists[nid]))
523                 return NULL;
524         page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
525         list_del(&page->lru);
526         set_page_refcounted(page);
527         h->free_huge_pages--;
528         h->free_huge_pages_node[nid]--;
529         return page;
530 }
531
532 static struct page *dequeue_huge_page_vma(struct hstate *h,
533                                 struct vm_area_struct *vma,
534                                 unsigned long address, int avoid_reserve)
535 {
536         struct page *page = NULL;
537         struct mempolicy *mpol;
538         nodemask_t *nodemask;
539         struct zonelist *zonelist;
540         struct zone *zone;
541         struct zoneref *z;
542         unsigned int cpuset_mems_cookie;
543
544 retry_cpuset:
545         cpuset_mems_cookie = get_mems_allowed();
546         zonelist = huge_zonelist(vma, address,
547                                         htlb_alloc_mask, &mpol, &nodemask);
548         /*
549          * A child process with MAP_PRIVATE mappings created by its parent
550          * has no page reserves. This check ensures that reservations are
551          * not "stolen". The child may still get SIGKILLed
552          */
553         if (!vma_has_reserves(vma) &&
554                         h->free_huge_pages - h->resv_huge_pages == 0)
555                 goto err;
556
557         /* If reserves cannot be used, ensure enough pages are in the pool */
558         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
559                 goto err;
560
561         for_each_zone_zonelist_nodemask(zone, z, zonelist,
562                                                 MAX_NR_ZONES - 1, nodemask) {
563                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
564                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
565                         if (page) {
566                                 if (!avoid_reserve)
567                                         decrement_hugepage_resv_vma(h, vma);
568                                 break;
569                         }
570                 }
571         }
572
573         mpol_cond_put(mpol);
574         if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
575                 goto retry_cpuset;
576         return page;
577
578 err:
579         mpol_cond_put(mpol);
580         return NULL;
581 }
582
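/*
 * Return a huge page to the buddy allocator: drop it from the hstate
 * accounting, clear the subpage flags and the compound destructor, and free
 * the pages.
 */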
583 static void update_and_free_page(struct hstate *h, struct page *page)
584 {
585         int i;
586
587         VM_BUG_ON(h->order >= MAX_ORDER);
588
589         h->nr_huge_pages--;
590         h->nr_huge_pages_node[page_to_nid(page)]--;
591         for (i = 0; i < pages_per_huge_page(h); i++) {
592                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
593                                 1 << PG_referenced | 1 << PG_dirty |
594                                 1 << PG_active | 1 << PG_reserved |
595                                 1 << PG_private | 1 << PG_writeback);
596         }
597         set_compound_page_dtor(page, NULL);
598         set_page_refcounted(page);
599         arch_release_hugepage(page);
600         __free_pages(page, huge_page_order(h));
601 }
602
603 struct hstate *size_to_hstate(unsigned long size)
604 {
605         struct hstate *h;
606
607         for_each_hstate(h) {
608                 if (huge_page_size(h) == size)
609                         return h;
610         }
611         return NULL;
612 }
613
614 static void free_huge_page(struct page *page)
615 {
616         /*
617          * Can't pass hstate in here because it is called from the
618          * compound page destructor.
619          */
620         struct hstate *h = page_hstate(page);
621         int nid = page_to_nid(page);
622         struct hugepage_subpool *spool =
623                 (struct hugepage_subpool *)page_private(page);
624
625         set_page_private(page, 0);
626         page->mapping = NULL;
627         BUG_ON(page_count(page));
628         BUG_ON(page_mapcount(page));
629         INIT_LIST_HEAD(&page->lru);
630
631         spin_lock(&hugetlb_lock);
632         if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
633                 update_and_free_page(h, page);
634                 h->surplus_huge_pages--;
635                 h->surplus_huge_pages_node[nid]--;
636         } else {
637                 enqueue_huge_page(h, page);
638         }
639         spin_unlock(&hugetlb_lock);
640         hugepage_subpool_put_pages(spool, 1);
641 }
642
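/*
 * Account a freshly allocated compound page as a huge page of @h and release
 * it into the huge page pool via its compound destructor.
 */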
643 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
644 {
645         set_compound_page_dtor(page, free_huge_page);
646         spin_lock(&hugetlb_lock);
647         h->nr_huge_pages++;
648         h->nr_huge_pages_node[nid]++;
649         spin_unlock(&hugetlb_lock);
650         put_page(page); /* free it into the hugepage allocator */
651 }
652
653 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
654 {
655         int i;
656         int nr_pages = 1 << order;
657         struct page *p = page + 1;
658
659         /* we rely on prep_new_huge_page to set the destructor */
660         set_compound_order(page, order);
661         __SetPageHead(page);
662         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
663                 __SetPageTail(p);
664                 set_page_count(p, 0);
665                 p->first_page = page;
666         }
667 }
668
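/*
 * Test whether a page is a hugetlb page by checking for the hugetlb compound
 * destructor.
 */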
669 int PageHuge(struct page *page)
670 {
671         compound_page_dtor *dtor;
672
673         if (!PageCompound(page))
674                 return 0;
675
676         page = compound_head(page);
677         dtor = get_compound_page_dtor(page);
678
679         return dtor == free_huge_page;
680 }
681 EXPORT_SYMBOL_GPL(PageHuge);
682
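/*
 * Allocate a fresh huge page from the buddy allocator on @nid and add it to
 * the pool.  Returns NULL for gigantic orders or on allocation failure.
 */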
683 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
684 {
685         struct page *page;
686
687         if (h->order >= MAX_ORDER)
688                 return NULL;
689
690         page = alloc_pages_exact_node(nid,
691                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
692                                                 __GFP_REPEAT|__GFP_NOWARN,
693                 huge_page_order(h));
694         if (page) {
695                 if (arch_prepare_hugepage(page)) {
696                         __free_pages(page, huge_page_order(h));
697                         return NULL;
698                 }
699                 prep_new_huge_page(h, page, nid);
700         }
701
702         return page;
703 }
704
705 /*
706  * common helper functions for hstate_next_node_to_{alloc|free}.
707  * We may have allocated or freed a huge page based on a different
708  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
709  * be outside of *nodes_allowed.  Ensure that we use an allowed
710  * node for alloc or free.
711  */
712 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
713 {
714         nid = next_node(nid, *nodes_allowed);
715         if (nid == MAX_NUMNODES)
716                 nid = first_node(*nodes_allowed);
717         VM_BUG_ON(nid >= MAX_NUMNODES);
718
719         return nid;
720 }
721
722 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
723 {
724         if (!node_isset(nid, *nodes_allowed))
725                 nid = next_node_allowed(nid, nodes_allowed);
726         return nid;
727 }
728
729 /*
730  * returns the previously saved node ["this node"] from which to
731  * allocate a persistent huge page for the pool and advance the
732  * next node from which to allocate, handling wrap at end of node
733  * mask.
734  */
735 static int hstate_next_node_to_alloc(struct hstate *h,
736                                         nodemask_t *nodes_allowed)
737 {
738         int nid;
739
740         VM_BUG_ON(!nodes_allowed);
741
742         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
743         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
744
745         return nid;
746 }
747
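/*
 * Try the allowed nodes in round-robin order, starting from the hstate's
 * next node, until one fresh huge page has been allocated.  Returns 1 on
 * success, 0 if no node could satisfy the allocation.
 */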
748 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
749 {
750         struct page *page;
751         int start_nid;
752         int next_nid;
753         int ret = 0;
754
755         start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
756         next_nid = start_nid;
757
758         do {
759                 page = alloc_fresh_huge_page_node(h, next_nid);
760                 if (page) {
761                         ret = 1;
762                         break;
763                 }
764                 next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
765         } while (next_nid != start_nid);
766
767         if (ret)
768                 count_vm_event(HTLB_BUDDY_PGALLOC);
769         else
770                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
771
772         return ret;
773 }
774
775 /*
776  * helper for free_pool_huge_page() - return the previously saved
777  * node ["this node"] from which to free a huge page.  Advance the
778  * next node id whether or not we find a free huge page to free so
779  * that the next attempt to free addresses the next node.
780  */
781 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
782 {
783         int nid;
784
785         VM_BUG_ON(!nodes_allowed);
786
787         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
788         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
789
790         return nid;
791 }
792
793 /*
794  * Free huge page from pool from next node to free.
795  * Attempt to keep persistent huge pages more or less
796  * balanced over allowed nodes.
797  * Called with hugetlb_lock locked.
798  */
799 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
800                                                          bool acct_surplus)
801 {
802         int start_nid;
803         int next_nid;
804         int ret = 0;
805
806         start_nid = hstate_next_node_to_free(h, nodes_allowed);
807         next_nid = start_nid;
808
809         do {
810                 /*
811                  * If we're returning unused surplus pages, only examine
812                  * nodes with surplus pages.
813                  */
814                 if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
815                     !list_empty(&h->hugepage_freelists[next_nid])) {
816                         struct page *page =
817                                 list_entry(h->hugepage_freelists[next_nid].next,
818                                           struct page, lru);
819                         list_del(&page->lru);
820                         h->free_huge_pages--;
821                         h->free_huge_pages_node[next_nid]--;
822                         if (acct_surplus) {
823                                 h->surplus_huge_pages--;
824                                 h->surplus_huge_pages_node[next_nid]--;
825                         }
826                         update_and_free_page(h, page);
827                         ret = 1;
828                         break;
829                 }
830                 next_nid = hstate_next_node_to_free(h, nodes_allowed);
831         } while (next_nid != start_nid);
832
833         return ret;
834 }
835
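/*
 * Allocate a surplus huge page directly from the buddy allocator, subject to
 * the overcommit limit.  @nid may be NUMA_NO_NODE for no node preference.
 */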
836 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
837 {
838         struct page *page;
839         unsigned int r_nid;
840
841         if (h->order >= MAX_ORDER)
842                 return NULL;
843
844         /*
845          * Assume we will successfully allocate the surplus page to
846          * prevent racing processes from causing the surplus to exceed
847          * overcommit
848          *
849          * This however introduces a different race, where a process B
850          * tries to grow the static hugepage pool while alloc_pages() is
851          * called by process A. B will only examine the per-node
852          * counters in determining if surplus huge pages can be
853          * converted to normal huge pages in adjust_pool_surplus(). A
854          * won't be able to increment the per-node counter, until the
855          * lock is dropped by B, but B doesn't drop hugetlb_lock until
856          * no more huge pages can be converted from surplus to normal
857          * state (and doesn't try to convert again). Thus, we have a
858          * case where a surplus huge page exists, the pool is grown, and
859          * the surplus huge page still exists after, even though it
860          * should just have been converted to a normal huge page. This
861          * does not leak memory, though, as the hugepage will be freed
862          * once it is out of use. It also does not allow the counters to
863          * go out of whack in adjust_pool_surplus() as we don't modify
864          * the node values until we've gotten the hugepage and only the
865          * per-node value is checked there.
866          */
867         spin_lock(&hugetlb_lock);
868         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
869                 spin_unlock(&hugetlb_lock);
870                 return NULL;
871         } else {
872                 h->nr_huge_pages++;
873                 h->surplus_huge_pages++;
874         }
875         spin_unlock(&hugetlb_lock);
876
877         if (nid == NUMA_NO_NODE)
878                 page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
879                                    __GFP_REPEAT|__GFP_NOWARN,
880                                    huge_page_order(h));
881         else
882                 page = alloc_pages_exact_node(nid,
883                         htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
884                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
885
886         if (page && arch_prepare_hugepage(page)) {
887                 __free_pages(page, huge_page_order(h));
888                 page = NULL;
889         }
890
891         spin_lock(&hugetlb_lock);
892         if (page) {
893                 r_nid = page_to_nid(page);
894                 set_compound_page_dtor(page, free_huge_page);
895                 /*
896                  * We incremented the global counters already
897                  */
898                 h->nr_huge_pages_node[r_nid]++;
899                 h->surplus_huge_pages_node[r_nid]++;
900                 __count_vm_event(HTLB_BUDDY_PGALLOC);
901         } else {
902                 h->nr_huge_pages--;
903                 h->surplus_huge_pages--;
904                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
905         }
906         spin_unlock(&hugetlb_lock);
907
908         return page;
909 }
910
911 /*
912  * This allocation function is useful in the context where vma is irrelevant.
913  * E.g. soft-offlining uses this function because it only cares about the
914  * physical address of the error page.
915  */
916 struct page *alloc_huge_page_node(struct hstate *h, int nid)
917 {
918         struct page *page;
919
920         spin_lock(&hugetlb_lock);
921         page = dequeue_huge_page_node(h, nid);
922         spin_unlock(&hugetlb_lock);
923
924         if (!page)
925                 page = alloc_buddy_huge_page(h, nid);
926
927         return page;
928 }
929
930 /*
931  * Increase the hugetlb pool such that it can accommodate a reservation
932  * of size 'delta'.
933  */
934 static int gather_surplus_pages(struct hstate *h, int delta)
935 {
936         struct list_head surplus_list;
937         struct page *page, *tmp;
938         int ret, i;
939         int needed, allocated;
940         bool alloc_ok = true;
941
942         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
943         if (needed <= 0) {
944                 h->resv_huge_pages += delta;
945                 return 0;
946         }
947
948         allocated = 0;
949         INIT_LIST_HEAD(&surplus_list);
950
951         ret = -ENOMEM;
952 retry:
953         spin_unlock(&hugetlb_lock);
954         for (i = 0; i < needed; i++) {
955                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
956                 if (!page) {
957                         alloc_ok = false;
958                         break;
959                 }
960                 list_add(&page->lru, &surplus_list);
961         }
962         allocated += i;
963
964         /*
965          * After retaking hugetlb_lock, we need to recalculate 'needed'
966          * because either resv_huge_pages or free_huge_pages may have changed.
967          */
968         spin_lock(&hugetlb_lock);
969         needed = (h->resv_huge_pages + delta) -
970                         (h->free_huge_pages + allocated);
971         if (needed > 0) {
972                 if (alloc_ok)
973                         goto retry;
974                 /*
975                  * We were not able to allocate enough pages to
976                  * satisfy the entire reservation so we free what
977                  * we've allocated so far.
978                  */
979                 goto free;
980         }
981         /*
982          * The surplus_list now contains _at_least_ the number of extra pages
983          * needed to accommodate the reservation.  Add the appropriate number
984          * of pages to the hugetlb pool and free the extras back to the buddy
985          * allocator.  Commit the entire reservation here to prevent another
986          * process from stealing the pages as they are added to the pool but
987          * before they are reserved.
988          */
989         needed += allocated;
990         h->resv_huge_pages += delta;
991         ret = 0;
992
993         /* Free the needed pages to the hugetlb pool */
994         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
995                 if ((--needed) < 0)
996                         break;
997                 list_del(&page->lru);
998                 /*
999                  * This page is now managed by the hugetlb allocator and has
1000                  * no users -- drop the buddy allocator's reference.
1001                  */
1002                 put_page_testzero(page);
1003                 VM_BUG_ON(page_count(page));
1004                 enqueue_huge_page(h, page);
1005         }
1006 free:
1007         spin_unlock(&hugetlb_lock);
1008
1009         /* Free unnecessary surplus pages to the buddy allocator */
1010         if (!list_empty(&surplus_list)) {
1011                 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1012                         list_del(&page->lru);
1013                         put_page(page);
1014                 }
1015         }
1016         spin_lock(&hugetlb_lock);
1017
1018         return ret;
1019 }
1020
1021 /*
1022  * When releasing a hugetlb pool reservation, any surplus pages that were
1023  * allocated to satisfy the reservation must be explicitly freed if they were
1024  * never used.
1025  * Called with hugetlb_lock held.
1026  */
1027 static void return_unused_surplus_pages(struct hstate *h,
1028                                         unsigned long unused_resv_pages)
1029 {
1030         unsigned long nr_pages;
1031
1032         /* Uncommit the reservation */
1033         h->resv_huge_pages -= unused_resv_pages;
1034
1035         /* Cannot return gigantic pages currently */
1036         if (h->order >= MAX_ORDER)
1037                 return;
1038
1039         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1040
1041         /*
1042          * We want to release as many surplus pages as possible, spread
1043          * evenly across all nodes with memory. Iterate across these nodes
1044          * until we can no longer free unreserved surplus pages. This occurs
1045          * when the nodes with surplus pages have no free pages.
1046          * free_pool_huge_page() will balance the freed pages across the
1047          * on-line nodes with memory and will handle the hstate accounting.
1048          */
1049         while (nr_pages--) {
1050                 if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
1051                         break;
1052         }
1053 }
1054
1055 /*
1056  * Determine if the huge page at addr within the vma has an associated
1057  * reservation.  Where it does not we will need to logically increase
1058  * reservation and actually increase subpool usage before an allocation
1059  * can occur.  Where any new reservation would be required the
1060  * reservation change is prepared, but not committed.  Once the page
1061  * has been allocated from the subpool and instantiated the change should
1062  * be committed via vma_commit_reservation.  No action is required on
1063  * failure.
1064  */
1065 static long vma_needs_reservation(struct hstate *h,
1066                         struct vm_area_struct *vma, unsigned long addr)
1067 {
1068         struct address_space *mapping = vma->vm_file->f_mapping;
1069         struct inode *inode = mapping->host;
1070
1071         if (vma->vm_flags & VM_MAYSHARE) {
1072                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1073                 return region_chg(&inode->i_mapping->private_list,
1074                                                         idx, idx + 1);
1075
1076         } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1077                 return 1;
1078
1079         } else  {
1080                 long err;
1081                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1082                 struct resv_map *reservations = vma_resv_map(vma);
1083
1084                 err = region_chg(&reservations->regions, idx, idx + 1);
1085                 if (err < 0)
1086                         return err;
1087                 return 0;
1088         }
1089 }
1090 static void vma_commit_reservation(struct hstate *h,
1091                         struct vm_area_struct *vma, unsigned long addr)
1092 {
1093         struct address_space *mapping = vma->vm_file->f_mapping;
1094         struct inode *inode = mapping->host;
1095
1096         if (vma->vm_flags & VM_MAYSHARE) {
1097                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1098                 region_add(&inode->i_mapping->private_list, idx, idx + 1);
1099
1100         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1101                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1102                 struct resv_map *reservations = vma_resv_map(vma);
1103
1104                 /* Mark this page used in the map. */
1105                 region_add(&reservations->regions, idx, idx + 1);
1106         }
1107 }
1108
1109 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1110                                     unsigned long addr, int avoid_reserve)
1111 {
1112         struct hugepage_subpool *spool = subpool_vma(vma);
1113         struct hstate *h = hstate_vma(vma);
1114         struct page *page;
1115         long chg;
1116
1117         /*
1118          * Processes that did not create the mapping will have no
1119          * reserves and will not have accounted against subpool
1120          * limit. Check that the subpool limit can be made before
1121          * satisfying the allocation.  MAP_NORESERVE mappings may also
1122          * need pages and the subpool limit allocated if no reserve
1123          * mapping overlaps.
1124          */
1125         chg = vma_needs_reservation(h, vma, addr);
1126         if (chg < 0)
1127                 return ERR_PTR(-ENOMEM);
1128         if (chg)
1129                 if (hugepage_subpool_get_pages(spool, chg))
1130                         return ERR_PTR(-ENOSPC);
1131
1132         spin_lock(&hugetlb_lock);
1133         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1134         spin_unlock(&hugetlb_lock);
1135
1136         if (!page) {
1137                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1138                 if (!page) {
1139                         hugepage_subpool_put_pages(spool, chg);
1140                         return ERR_PTR(-ENOSPC);
1141                 }
1142         }
1143
1144         set_page_private(page, (unsigned long)spool);
1145
1146         vma_commit_reservation(h, vma, addr);
1147
1148         return page;
1149 }
1150
1151 int __weak alloc_bootmem_huge_page(struct hstate *h)
1152 {
1153         struct huge_bootmem_page *m;
1154         int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1155
1156         while (nr_nodes) {
1157                 void *addr;
1158
1159                 addr = __alloc_bootmem_node_nopanic(
1160                                 NODE_DATA(hstate_next_node_to_alloc(h,
1161                                                 &node_states[N_HIGH_MEMORY])),
1162                                 huge_page_size(h), huge_page_size(h), 0);
1163
1164                 if (addr) {
1165                         /*
1166                          * Use the beginning of the huge page to store the
1167                          * huge_bootmem_page struct (until gather_bootmem
1168                          * puts them into the mem_map).
1169                          */
1170                         m = addr;
1171                         goto found;
1172                 }
1173                 nr_nodes--;
1174         }
1175         return 0;
1176
1177 found:
1178         BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1179         /* Put them into a private list first because mem_map is not up yet */
1180         list_add(&m->list, &huge_boot_pages);
1181         m->hstate = h;
1182         return 1;
1183 }
1184
1185 static void prep_compound_huge_page(struct page *page, int order)
1186 {
1187         if (unlikely(order > (MAX_ORDER - 1)))
1188                 prep_compound_gigantic_page(page, order);
1189         else
1190                 prep_compound_page(page, order);
1191 }
1192
1193 /* Put bootmem huge pages into the standard lists after mem_map is up */
1194 static void __init gather_bootmem_prealloc(void)
1195 {
1196         struct huge_bootmem_page *m;
1197
1198         list_for_each_entry(m, &huge_boot_pages, list) {
1199                 struct hstate *h = m->hstate;
1200                 struct page *page;
1201
1202 #ifdef CONFIG_HIGHMEM
1203                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1204                 free_bootmem_late((unsigned long)m,
1205                                   sizeof(struct huge_bootmem_page));
1206 #else
1207                 page = virt_to_page(m);
1208 #endif
1209                 __ClearPageReserved(page);
1210                 WARN_ON(page_count(page) != 1);
1211                 prep_compound_huge_page(page, h->order);
1212                 prep_new_huge_page(h, page, page_to_nid(page));
1213                 /*
1214                  * If we had gigantic hugepages allocated at boot time, we need
1215                  * to restore the 'stolen' pages to totalram_pages in order to
1216                  * fix confusing memory reports from free(1) and other
1217                  * side-effects, like CommitLimit going negative.
1218                  */
1219                 if (h->order > (MAX_ORDER - 1))
1220                         totalram_pages += 1 << h->order;
1221         }
1222 }
1223
1224 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1225 {
1226         unsigned long i;
1227
1228         for (i = 0; i < h->max_huge_pages; ++i) {
1229                 if (h->order >= MAX_ORDER) {
1230                         if (!alloc_bootmem_huge_page(h))
1231                                 break;
1232                 } else if (!alloc_fresh_huge_page(h,
1233                                          &node_states[N_HIGH_MEMORY]))
1234                         break;
1235         }
1236         h->max_huge_pages = i;
1237 }
1238
1239 static void __init hugetlb_init_hstates(void)
1240 {
1241         struct hstate *h;
1242
1243         for_each_hstate(h) {
1244                 /* oversize hugepages were init'ed in early boot */
1245                 if (h->order < MAX_ORDER)
1246                         hugetlb_hstate_alloc_pages(h);
1247         }
1248 }
1249
1250 static char * __init memfmt(char *buf, unsigned long n)
1251 {
1252         if (n >= (1UL << 30))
1253                 sprintf(buf, "%lu GB", n >> 30);
1254         else if (n >= (1UL << 20))
1255                 sprintf(buf, "%lu MB", n >> 20);
1256         else
1257                 sprintf(buf, "%lu KB", n >> 10);
1258         return buf;
1259 }
1260
1261 static void __init report_hugepages(void)
1262 {
1263         struct hstate *h;
1264
1265         for_each_hstate(h) {
1266                 char buf[32];
1267                 printk(KERN_INFO "HugeTLB registered %s page size, "
1268                                  "pre-allocated %ld pages\n",
1269                         memfmt(buf, huge_page_size(h)),
1270                         h->free_huge_pages);
1271         }
1272 }
1273
1274 #ifdef CONFIG_HIGHMEM
1275 static void try_to_free_low(struct hstate *h, unsigned long count,
1276                                                 nodemask_t *nodes_allowed)
1277 {
1278         int i;
1279
1280         if (h->order >= MAX_ORDER)
1281                 return;
1282
1283         for_each_node_mask(i, *nodes_allowed) {
1284                 struct page *page, *next;
1285                 struct list_head *freel = &h->hugepage_freelists[i];
1286                 list_for_each_entry_safe(page, next, freel, lru) {
1287                         if (count >= h->nr_huge_pages)
1288                                 return;
1289                         if (PageHighMem(page))
1290                                 continue;
1291                         list_del(&page->lru);
1292                         update_and_free_page(h, page);
1293                         h->free_huge_pages--;
1294                         h->free_huge_pages_node[page_to_nid(page)]--;
1295                 }
1296         }
1297 }
1298 #else
1299 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1300                                                 nodemask_t *nodes_allowed)
1301 {
1302 }
1303 #endif
1304
1305 /*
1306  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1307  * balanced by operating on them in a round-robin fashion.
1308  * Returns 1 if an adjustment was made.
1309  */
1310 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1311                                 int delta)
1312 {
1313         int start_nid, next_nid;
1314         int ret = 0;
1315
1316         VM_BUG_ON(delta != -1 && delta != 1);
1317
1318         if (delta < 0)
1319                 start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1320         else
1321                 start_nid = hstate_next_node_to_free(h, nodes_allowed);
1322         next_nid = start_nid;
1323
1324         do {
1325                 int nid = next_nid;
1326                 if (delta < 0)  {
1327                         /*
1328                          * To shrink on this node, there must be a surplus page
1329                          */
1330                         if (!h->surplus_huge_pages_node[nid]) {
1331                                 next_nid = hstate_next_node_to_alloc(h,
1332                                                                 nodes_allowed);
1333                                 continue;
1334                         }
1335                 }
1336                 if (delta > 0) {
1337                         /*
1338                          * Surplus cannot exceed the total number of pages
1339                          */
1340                         if (h->surplus_huge_pages_node[nid] >=
1341                                                 h->nr_huge_pages_node[nid]) {
1342                                 next_nid = hstate_next_node_to_free(h,
1343                                                                 nodes_allowed);
1344                                 continue;
1345                         }
1346                 }
1347
1348                 h->surplus_huge_pages += delta;
1349                 h->surplus_huge_pages_node[nid] += delta;
1350                 ret = 1;
1351                 break;
1352         } while (next_nid != start_nid);
1353
1354         return ret;
1355 }
1356
1357 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1358 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1359                                                 nodemask_t *nodes_allowed)
1360 {
1361         unsigned long min_count, ret;
1362
1363         if (h->order >= MAX_ORDER)
1364                 return h->max_huge_pages;
1365
1366         /*
1367          * Increase the pool size
1368          * First take pages out of surplus state.  Then make up the
1369          * remaining difference by allocating fresh huge pages.
1370          *
1371          * We might race with alloc_buddy_huge_page() here and be unable
1372          * to convert a surplus huge page to a normal huge page. That is
1373          * not critical, though, it just means the overall size of the
1374          * pool might be one hugepage larger than it needs to be, but
1375          * within all the constraints specified by the sysctls.
1376          */
1377         spin_lock(&hugetlb_lock);
1378         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1379                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1380                         break;
1381         }
1382
1383         while (count > persistent_huge_pages(h)) {
1384                 /*
1385                  * If this allocation races such that we no longer need the
1386                  * page, free_huge_page will handle it by freeing the page
1387                  * and reducing the surplus.
1388                  */
1389                 spin_unlock(&hugetlb_lock);
1390                 ret = alloc_fresh_huge_page(h, nodes_allowed);
1391                 spin_lock(&hugetlb_lock);
1392                 if (!ret)
1393                         goto out;
1394
1395                 /* Bail for signals. Probably ctrl-c from user */
1396                 if (signal_pending(current))
1397                         goto out;
1398         }
1399
1400         /*
1401          * Decrease the pool size
1402          * First return free pages to the buddy allocator (being careful
1403          * to keep enough around to satisfy reservations).  Then place
1404          * pages into surplus state as needed so the pool will shrink
1405          * to the desired size as pages become free.
1406          *
1407          * By placing pages into the surplus state independent of the
1408          * overcommit value, we are allowing the surplus pool size to
1409          * exceed overcommit. There are few sane options here. Since
1410          * alloc_buddy_huge_page() is checking the global counter,
1411          * though, we'll note that we're not allowed to exceed surplus
1412          * and won't grow the pool anywhere else. Not until one of the
1413          * sysctls are changed, or the surplus pages go out of use.
1414          */
1415         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1416         min_count = max(count, min_count);
1417         try_to_free_low(h, min_count, nodes_allowed);
1418         while (min_count < persistent_huge_pages(h)) {
1419                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1420                         break;
1421         }
1422         while (count < persistent_huge_pages(h)) {
1423                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1424                         break;
1425         }
1426 out:
1427         ret = persistent_huge_pages(h);
1428         spin_unlock(&hugetlb_lock);
1429         return ret;
1430 }
1431
1432 #define HSTATE_ATTR_RO(_name) \
1433         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1434
1435 #define HSTATE_ATTR(_name) \
1436         static struct kobj_attribute _name##_attr = \
1437                 __ATTR(_name, 0644, _name##_show, _name##_store)
1438
1439 static struct kobject *hugepages_kobj;
1440 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1441
1442 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1443
1444 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1445 {
1446         int i;
1447
1448         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1449                 if (hstate_kobjs[i] == kobj) {
1450                         if (nidp)
1451                                 *nidp = NUMA_NO_NODE;
1452                         return &hstates[i];
1453                 }
1454
1455         return kobj_to_node_hstate(kobj, nidp);
1456 }
1457
1458 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1459                                         struct kobj_attribute *attr, char *buf)
1460 {
1461         struct hstate *h;
1462         unsigned long nr_huge_pages;
1463         int nid;
1464
1465         h = kobj_to_hstate(kobj, &nid);
1466         if (nid == NUMA_NO_NODE)
1467                 nr_huge_pages = h->nr_huge_pages;
1468         else
1469                 nr_huge_pages = h->nr_huge_pages_node[nid];
1470
1471         return sprintf(buf, "%lu\n", nr_huge_pages);
1472 }
1473
1474 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1475                         struct kobject *kobj, struct kobj_attribute *attr,
1476                         const char *buf, size_t len)
1477 {
1478         int err;
1479         int nid;
1480         unsigned long count;
1481         struct hstate *h;
1482         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1483
1484         err = strict_strtoul(buf, 10, &count);
1485         if (err)
1486                 goto out;
1487
1488         h = kobj_to_hstate(kobj, &nid);
1489         if (h->order >= MAX_ORDER) {
1490                 err = -EINVAL;
1491                 goto out;
1492         }
1493
1494         if (nid == NUMA_NO_NODE) {
1495                 /*
1496                  * global hstate attribute
1497                  */
1498                 if (!(obey_mempolicy &&
1499                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1500                         NODEMASK_FREE(nodes_allowed);
1501                         nodes_allowed = &node_states[N_HIGH_MEMORY];
1502                 }
1503         } else if (nodes_allowed) {
1504                 /*
1505                  * per node hstate attribute: adjust count to global,
1506                  * but restrict alloc/free to the specified node.
1507                  */
1508                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1509                 init_nodemask_of_node(nodes_allowed, nid);
1510         } else
1511                 nodes_allowed = &node_states[N_HIGH_MEMORY];
1512
1513         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1514
1515         if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1516                 NODEMASK_FREE(nodes_allowed);
1517
1518         return len;
1519 out:
1520         NODEMASK_FREE(nodes_allowed);
1521         return err;
1522 }
1523
1524 static ssize_t nr_hugepages_show(struct kobject *kobj,
1525                                        struct kobj_attribute *attr, char *buf)
1526 {
1527         return nr_hugepages_show_common(kobj, attr, buf);
1528 }
1529
1530 static ssize_t nr_hugepages_store(struct kobject *kobj,
1531                struct kobj_attribute *attr, const char *buf, size_t len)
1532 {
1533         return nr_hugepages_store_common(false, kobj, attr, buf, len);
1534 }
1535 HSTATE_ATTR(nr_hugepages);
1536
1537 #ifdef CONFIG_NUMA
1538
1539 /*
1540  * hstate attribute for optionally mempolicy-based constraint on persistent
1541  * huge page alloc/free.
1542  */
1543 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1544                                        struct kobj_attribute *attr, char *buf)
1545 {
1546         return nr_hugepages_show_common(kobj, attr, buf);
1547 }
1548
1549 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1550                struct kobj_attribute *attr, const char *buf, size_t len)
1551 {
1552         return nr_hugepages_store_common(true, kobj, attr, buf, len);
1553 }
1554 HSTATE_ATTR(nr_hugepages_mempolicy);
1555 #endif
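/*
 * Illustrative usage of the two attributes above (assuming a 2MB hstate, so
 * the directory is hugepages-2048kB; nr_hugepages_mempolicy exists only with
 * CONFIG_NUMA):
 *
 *	echo 512 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	numactl -m 1 sh -c \
 *	    'echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 *
 * The first write adjusts the global persistent pool on all allowed nodes;
 * the second adjusts it toward 64 pages but, via init_nodemask_of_mempolicy()
 * in nr_hugepages_store_common(), allocates/frees only on node 1.
 */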
1556
1557
1558 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1559                                         struct kobj_attribute *attr, char *buf)
1560 {
1561         struct hstate *h = kobj_to_hstate(kobj, NULL);
1562         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1563 }
1564
1565 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1566                 struct kobj_attribute *attr, const char *buf, size_t count)
1567 {
1568         int err;
1569         unsigned long input;
1570         struct hstate *h = kobj_to_hstate(kobj, NULL);
1571
1572         if (h->order >= MAX_ORDER)
1573                 return -EINVAL;
1574
1575         err = strict_strtoul(buf, 10, &input);
1576         if (err)
1577                 return err;
1578
1579         spin_lock(&hugetlb_lock);
1580         h->nr_overcommit_huge_pages = input;
1581         spin_unlock(&hugetlb_lock);
1582
1583         return count;
1584 }
1585 HSTATE_ATTR(nr_overcommit_hugepages);
1586
1587 static ssize_t free_hugepages_show(struct kobject *kobj,
1588                                         struct kobj_attribute *attr, char *buf)
1589 {
1590         struct hstate *h;
1591         unsigned long free_huge_pages;
1592         int nid;
1593
1594         h = kobj_to_hstate(kobj, &nid);
1595         if (nid == NUMA_NO_NODE)
1596                 free_huge_pages = h->free_huge_pages;
1597         else
1598                 free_huge_pages = h->free_huge_pages_node[nid];
1599
1600         return sprintf(buf, "%lu\n", free_huge_pages);
1601 }
1602 HSTATE_ATTR_RO(free_hugepages);
1603
1604 static ssize_t resv_hugepages_show(struct kobject *kobj,
1605                                         struct kobj_attribute *attr, char *buf)
1606 {
1607         struct hstate *h = kobj_to_hstate(kobj, NULL);
1608         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1609 }
1610 HSTATE_ATTR_RO(resv_hugepages);
1611
1612 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1613                                         struct kobj_attribute *attr, char *buf)
1614 {
1615         struct hstate *h;
1616         unsigned long surplus_huge_pages;
1617         int nid;
1618
1619         h = kobj_to_hstate(kobj, &nid);
1620         if (nid == NUMA_NO_NODE)
1621                 surplus_huge_pages = h->surplus_huge_pages;
1622         else
1623                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1624
1625         return sprintf(buf, "%lu\n", surplus_huge_pages);
1626 }
1627 HSTATE_ATTR_RO(surplus_hugepages);
1628
1629 static struct attribute *hstate_attrs[] = {
1630         &nr_hugepages_attr.attr,
1631         &nr_overcommit_hugepages_attr.attr,
1632         &free_hugepages_attr.attr,
1633         &resv_hugepages_attr.attr,
1634         &surplus_hugepages_attr.attr,
1635 #ifdef CONFIG_NUMA
1636         &nr_hugepages_mempolicy_attr.attr,
1637 #endif
1638         NULL,
1639 };
1640
1641 static struct attribute_group hstate_attr_group = {
1642         .attrs = hstate_attrs,
1643 };
1644
1645 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1646                                     struct kobject **hstate_kobjs,
1647                                     struct attribute_group *hstate_attr_group)
1648 {
1649         int retval;
1650         int hi = hstate_index(h);
1651
1652         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1653         if (!hstate_kobjs[hi])
1654                 return -ENOMEM;
1655
1656         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1657         if (retval)
1658                 kobject_put(hstate_kobjs[hi]);
1659
1660         return retval;
1661 }
1662
1663 static void __init hugetlb_sysfs_init(void)
1664 {
1665         struct hstate *h;
1666         int err;
1667
1668         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1669         if (!hugepages_kobj)
1670                 return;
1671
1672         for_each_hstate(h) {
1673                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1674                                          hstate_kobjs, &hstate_attr_group);
1675                 if (err)
1676                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
1677                                                                 h->name);
1678         }
1679 }
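/*
 * Resulting layout, for illustration (assuming a system with 2MB and 1GB
 * hstates; mm_kobj corresponds to /sys/kernel/mm):
 *
 *	/sys/kernel/mm/hugepages/
 *		hugepages-2048kB/
 *			nr_hugepages nr_overcommit_hugepages free_hugepages
 *			resv_hugepages surplus_hugepages [nr_hugepages_mempolicy]
 *		hugepages-1048576kB/
 *			...
 *
 * One directory per hstate, named from h->name and populated with
 * hstate_attr_group by hugetlb_sysfs_add_hstate().
 */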
1680
1681 #ifdef CONFIG_NUMA
1682
1683 /*
1684  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1685  * with node devices in node_devices[] using a parallel array.  The array
1686  * index of a node device or _hstate == node id.
1687  * This is here to avoid any static dependency of the node device driver, in
1688  * the base kernel, on the hugetlb module.
1689  */
1690 struct node_hstate {
1691         struct kobject          *hugepages_kobj;
1692         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1693 };
1694 struct node_hstate node_hstates[MAX_NUMNODES];
1695
1696 /*
1697  * A subset of global hstate attributes for node devices
1698  */
1699 static struct attribute *per_node_hstate_attrs[] = {
1700         &nr_hugepages_attr.attr,
1701         &free_hugepages_attr.attr,
1702         &surplus_hugepages_attr.attr,
1703         NULL,
1704 };
1705
1706 static struct attribute_group per_node_hstate_attr_group = {
1707         .attrs = per_node_hstate_attrs,
1708 };
1709
1710 /*
1711  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
1712  * Returns node id via non-NULL nidp.
1713  */
1714 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1715 {
1716         int nid;
1717
1718         for (nid = 0; nid < nr_node_ids; nid++) {
1719                 struct node_hstate *nhs = &node_hstates[nid];
1720                 int i;
1721                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1722                         if (nhs->hstate_kobjs[i] == kobj) {
1723                                 if (nidp)
1724                                         *nidp = nid;
1725                                 return &hstates[i];
1726                         }
1727         }
1728
1729         BUG();
1730         return NULL;
1731 }
1732
1733 /*
1734  * Unregister hstate attributes from a single node device.
1735  * No-op if no hstate attributes attached.
1736  */
1737 void hugetlb_unregister_node(struct node *node)
1738 {
1739         struct hstate *h;
1740         struct node_hstate *nhs = &node_hstates[node->dev.id];
1741
1742         if (!nhs->hugepages_kobj)
1743                 return;         /* no hstate attributes */
1744
1745         for_each_hstate(h) {
1746                 int idx = hstate_index(h);
1747                 if (nhs->hstate_kobjs[idx]) {
1748                         kobject_put(nhs->hstate_kobjs[idx]);
1749                         nhs->hstate_kobjs[idx] = NULL;
1750                 }
1751         }
1752
1753         kobject_put(nhs->hugepages_kobj);
1754         nhs->hugepages_kobj = NULL;
1755 }
1756
1757 /*
1758  * hugetlb module exit:  unregister hstate attributes from node devices
1759  * that have them.
1760  */
1761 static void hugetlb_unregister_all_nodes(void)
1762 {
1763         int nid;
1764
1765         /*
1766          * disable node device registrations.
1767          */
1768         register_hugetlbfs_with_node(NULL, NULL);
1769
1770         /*
1771          * remove hstate attributes from any nodes that have them.
1772          */
1773         for (nid = 0; nid < nr_node_ids; nid++)
1774                 hugetlb_unregister_node(&node_devices[nid]);
1775 }
1776
1777 /*
1778  * Register hstate attributes for a single node device.
1779  * No-op if attributes already registered.
1780  */
1781 void hugetlb_register_node(struct node *node)
1782 {
1783         struct hstate *h;
1784         struct node_hstate *nhs = &node_hstates[node->dev.id];
1785         int err;
1786
1787         if (nhs->hugepages_kobj)
1788                 return;         /* already allocated */
1789
1790         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1791                                                         &node->dev.kobj);
1792         if (!nhs->hugepages_kobj)
1793                 return;
1794
1795         for_each_hstate(h) {
1796                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1797                                                 nhs->hstate_kobjs,
1798                                                 &per_node_hstate_attr_group);
1799                 if (err) {
1800                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1801                                         " for node %d\n",
1802                                                 h->name, node->dev.id);
1803                         hugetlb_unregister_node(node);
1804                         break;
1805                 }
1806         }
1807 }
1808
1809 /*
1810  * hugetlb init time:  register hstate attributes for all registered node
1811  * devices of nodes that have memory.  All on-line nodes should have
1812  * registered their associated device by this time.
1813  */
1814 static void hugetlb_register_all_nodes(void)
1815 {
1816         int nid;
1817
1818         for_each_node_state(nid, N_HIGH_MEMORY) {
1819                 struct node *node = &node_devices[nid];
1820                 if (node->dev.id == nid)
1821                         hugetlb_register_node(node);
1822         }
1823
1824         /*
1825          * Let the node device driver know we're here so it can
1826          * [un]register hstate attributes on node hotplug.
1827          */
1828         register_hugetlbfs_with_node(hugetlb_register_node,
1829                                      hugetlb_unregister_node);
1830 }
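/*
 * For illustration, with the registration above each memory node also exposes
 * the reduced attribute set (path assumes node devices appear under
 * /sys/devices/system/node):
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/
 *		nr_hugepages free_hugepages surplus_hugepages
 *
 * A write to this per-node nr_hugepages is folded into the global count by
 * nr_hugepages_store_common() and restricted to node 0 via
 * init_nodemask_of_node().
 */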
1831 #else   /* !CONFIG_NUMA */
1832
1833 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1834 {
1835         BUG();
1836         if (nidp)
1837                 *nidp = -1;
1838         return NULL;
1839 }
1840
1841 static void hugetlb_unregister_all_nodes(void) { }
1842
1843 static void hugetlb_register_all_nodes(void) { }
1844
1845 #endif
1846
1847 static void __exit hugetlb_exit(void)
1848 {
1849         struct hstate *h;
1850
1851         hugetlb_unregister_all_nodes();
1852
1853         for_each_hstate(h) {
1854                 kobject_put(hstate_kobjs[hstate_index(h)]);
1855         }
1856
1857         kobject_put(hugepages_kobj);
1858 }
1859 module_exit(hugetlb_exit);
1860
1861 static int __init hugetlb_init(void)
1862 {
1863         /* Some platforms decide whether they support huge pages at boot
1864          * time. On those (such as powerpc), HPAGE_SHIFT is set to 0 when
1865          * there is no such support.
1866          */
1867         if (HPAGE_SHIFT == 0)
1868                 return 0;
1869
1870         if (!size_to_hstate(default_hstate_size)) {
1871                 default_hstate_size = HPAGE_SIZE;
1872                 if (!size_to_hstate(default_hstate_size))
1873                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1874         }
1875         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
1876         if (default_hstate_max_huge_pages)
1877                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1878
1879         hugetlb_init_hstates();
1880
1881         gather_bootmem_prealloc();
1882
1883         report_hugepages();
1884
1885         hugetlb_sysfs_init();
1886
1887         hugetlb_register_all_nodes();
1888
1889         return 0;
1890 }
1891 module_init(hugetlb_init);
1892
1893 /* Should be called when parsing a hugepagesz=... option */
1894 void __init hugetlb_add_hstate(unsigned order)
1895 {
1896         struct hstate *h;
1897         unsigned long i;
1898
1899         if (size_to_hstate(PAGE_SIZE << order)) {
1900                 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1901                 return;
1902         }
1903         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
1904         BUG_ON(order == 0);
1905         h = &hstates[hugetlb_max_hstate++];
1906         h->order = order;
1907         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1908         h->nr_huge_pages = 0;
1909         h->free_huge_pages = 0;
1910         for (i = 0; i < MAX_NUMNODES; ++i)
1911                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1912         h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1913         h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1914         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1915                                         huge_page_size(h)/1024);
1916
1917         parsed_hstate = h;
1918 }
1919
1920 static int __init hugetlb_nrpages_setup(char *s)
1921 {
1922         unsigned long *mhp;
1923         static unsigned long *last_mhp;
1924
1925         /*
1926          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
1927          * so this hugepages= parameter goes to the "default hstate".
1928          */
1929         if (!hugetlb_max_hstate)
1930                 mhp = &default_hstate_max_huge_pages;
1931         else
1932                 mhp = &parsed_hstate->max_huge_pages;
1933
1934         if (mhp == last_mhp) {
1935                 printk(KERN_WARNING "hugepages= specified twice without "
1936                         "interleaving hugepagesz=, ignoring\n");
1937                 return 1;
1938         }
1939
1940         if (sscanf(s, "%lu", mhp) <= 0)
1941                 *mhp = 0;
1942
1943         /*
1944          * Global state is always initialized later in hugetlb_init.
1945          * But we need to allocate >= MAX_ORDER hstates here early to still
1946          * use the bootmem allocator.
1947          */
1948         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
1949                 hugetlb_hstate_alloc_pages(parsed_hstate);
1950
1951         last_mhp = mhp;
1952
1953         return 1;
1954 }
1955 __setup("hugepages=", hugetlb_nrpages_setup);
1956
1957 static int __init hugetlb_default_setup(char *s)
1958 {
1959         default_hstate_size = memparse(s, &s);
1960         return 1;
1961 }
1962 __setup("default_hugepagesz=", hugetlb_default_setup);
1963
1964 static unsigned int cpuset_mems_nr(unsigned int *array)
1965 {
1966         int node;
1967         unsigned int nr = 0;
1968
1969         for_each_node_mask(node, cpuset_current_mems_allowed)
1970                 nr += array[node];
1971
1972         return nr;
1973 }
1974
1975 #ifdef CONFIG_SYSCTL
1976 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1977                          struct ctl_table *table, int write,
1978                          void __user *buffer, size_t *length, loff_t *ppos)
1979 {
1980         struct hstate *h = &default_hstate;
1981         unsigned long tmp;
1982         int ret;
1983
1984         tmp = h->max_huge_pages;
1985
1986         if (write && h->order >= MAX_ORDER)
1987                 return -EINVAL;
1988
1989         table->data = &tmp;
1990         table->maxlen = sizeof(unsigned long);
1991         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1992         if (ret)
1993                 goto out;
1994
1995         if (write) {
1996                 NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1997                                                 GFP_KERNEL | __GFP_NORETRY);
1998                 if (!(obey_mempolicy &&
1999                                init_nodemask_of_mempolicy(nodes_allowed))) {
2000                         NODEMASK_FREE(nodes_allowed);
2001                         nodes_allowed = &node_states[N_HIGH_MEMORY];
2002                 }
2003                 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
2004
2005                 if (nodes_allowed != &node_states[N_HIGH_MEMORY])
2006                         NODEMASK_FREE(nodes_allowed);
2007         }
2008 out:
2009         return ret;
2010 }
2011
2012 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2013                           void __user *buffer, size_t *length, loff_t *ppos)
2014 {
2015
2016         return hugetlb_sysctl_handler_common(false, table, write,
2017                                                         buffer, length, ppos);
2018 }
2019
2020 #ifdef CONFIG_NUMA
2021 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2022                           void __user *buffer, size_t *length, loff_t *ppos)
2023 {
2024         return hugetlb_sysctl_handler_common(true, table, write,
2025                                                         buffer, length, ppos);
2026 }
2027 #endif /* CONFIG_NUMA */
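/*
 * These handlers back the vm.nr_hugepages and, with CONFIG_NUMA,
 * vm.nr_hugepages_mempolicy sysctls; the ctl_table entries themselves live
 * in kernel/sysctl.c.  Illustrative usage, equivalent to the sysfs knob of
 * the default hstate:
 *
 *	sysctl vm.nr_hugepages=256
 *	echo 256 > /proc/sys/vm/nr_hugepages
 */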
2028
2029 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
2030                         void __user *buffer,
2031                         size_t *length, loff_t *ppos)
2032 {
2033         proc_dointvec(table, write, buffer, length, ppos);
2034         if (hugepages_treat_as_movable)
2035                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
2036         else
2037                 htlb_alloc_mask = GFP_HIGHUSER;
2038         return 0;
2039 }
2040
2041 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2042                         void __user *buffer,
2043                         size_t *length, loff_t *ppos)
2044 {
2045         struct hstate *h = &default_hstate;
2046         unsigned long tmp;
2047         int ret;
2048
2049         tmp = h->nr_overcommit_huge_pages;
2050
2051         if (write && h->order >= MAX_ORDER)
2052                 return -EINVAL;
2053
2054         table->data = &tmp;
2055         table->maxlen = sizeof(unsigned long);
2056         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2057         if (ret)
2058                 goto out;
2059
2060         if (write) {
2061                 spin_lock(&hugetlb_lock);
2062                 h->nr_overcommit_huge_pages = tmp;
2063                 spin_unlock(&hugetlb_lock);
2064         }
2065 out:
2066         return ret;
2067 }
2068
2069 #endif /* CONFIG_SYSCTL */
2070
2071 void hugetlb_report_meminfo(struct seq_file *m)
2072 {
2073         struct hstate *h = &default_hstate;
2074         seq_printf(m,
2075                         "HugePages_Total:   %5lu\n"
2076                         "HugePages_Free:    %5lu\n"
2077                         "HugePages_Rsvd:    %5lu\n"
2078                         "HugePages_Surp:    %5lu\n"
2079                         "Hugepagesize:   %8lu kB\n",
2080                         h->nr_huge_pages,
2081                         h->free_huge_pages,
2082                         h->resv_huge_pages,
2083                         h->surplus_huge_pages,
2084                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2085 }
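/*
 * Example of the block this adds to /proc/meminfo (values are illustrative;
 * Hugepagesize is the default hstate's page size in kB):
 *
 *	HugePages_Total:     512
 *	HugePages_Free:      384
 *	HugePages_Rsvd:       64
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */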
2086
2087 int hugetlb_report_node_meminfo(int nid, char *buf)
2088 {
2089         struct hstate *h = &default_hstate;
2090         return sprintf(buf,
2091                 "Node %d HugePages_Total: %5u\n"
2092                 "Node %d HugePages_Free:  %5u\n"
2093                 "Node %d HugePages_Surp:  %5u\n",
2094                 nid, h->nr_huge_pages_node[nid],
2095                 nid, h->free_huge_pages_node[nid],
2096                 nid, h->surplus_huge_pages_node[nid]);
2097 }
2098
2099 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2100 unsigned long hugetlb_total_pages(void)
2101 {
2102         struct hstate *h = &default_hstate;
2103         return h->nr_huge_pages * pages_per_huge_page(h);
2104 }
2105
2106 static int hugetlb_acct_memory(struct hstate *h, long delta)
2107 {
2108         int ret = -ENOMEM;
2109
2110         spin_lock(&hugetlb_lock);
2111         /*
2112          * When cpusets are configured, they break the strict hugetlb page
2113          * reservation as the accounting is done on a global variable. Such
2114          * a reservation is essentially meaningless in the presence of
2115          * cpusets because it is not checked against page availability for
2116          * the current cpuset. An application can still potentially be
2117          * OOM'ed by the kernel if free hugetlb pages run out in the cpuset
2118          * the task is in. Attempting to enforce strict accounting with
2119          * cpusets is almost impossible (or too ugly) because cpusets are
2120          * too fluid: tasks and memory nodes can be moved between cpusets
2121          * at any time.
2122          *
2123          * This change of semantics for shared hugetlb mappings with cpusets
2124          * is undesirable. However, in order to preserve some of the
2125          * semantics, we fall back to checking current free page availability
2126          * as a best effort, hopefully minimizing the impact of the change.
2127          */
2128         if (delta > 0) {
2129                 if (gather_surplus_pages(h, delta) < 0)
2130                         goto out;
2131
2132                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2133                         return_unused_surplus_pages(h, delta);
2134                         goto out;
2135                 }
2136         }
2137
2138         ret = 0;
2139         if (delta < 0)
2140                 return_unused_surplus_pages(h, (unsigned long) -delta);
2141
2142 out:
2143         spin_unlock(&hugetlb_lock);
2144         return ret;
2145 }
2146
2147 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2148 {
2149         struct resv_map *reservations = vma_resv_map(vma);
2150
2151         /*
2152          * This new VMA should share its sibling's reservation map if present.
2153          * The VMA will only ever have a valid reservation map pointer where
2154          * it is being copied for another still existing VMA.  As that VMA
2155          * has a reference to the reservation map it cannot disappear until
2156          * after this open call completes.  It is therefore safe to take a
2157          * new reference here without additional locking.
2158          */
2159         if (reservations)
2160                 kref_get(&reservations->refs);
2161 }
2162
2163 static void resv_map_put(struct vm_area_struct *vma)
2164 {
2165         struct resv_map *reservations = vma_resv_map(vma);
2166
2167         if (!reservations)
2168                 return;
2169         kref_put(&reservations->refs, resv_map_release);
2170 }
2171
2172 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2173 {
2174         struct hstate *h = hstate_vma(vma);
2175         struct resv_map *reservations = vma_resv_map(vma);
2176         struct hugepage_subpool *spool = subpool_vma(vma);
2177         unsigned long reserve;
2178         unsigned long start;
2179         unsigned long end;
2180
2181         if (reservations) {
2182                 start = vma_hugecache_offset(h, vma, vma->vm_start);
2183                 end = vma_hugecache_offset(h, vma, vma->vm_end);
2184
2185                 reserve = (end - start) -
2186                         region_count(&reservations->regions, start, end);
2187
2188                 resv_map_put(vma);
2189
2190                 if (reserve) {
2191                         hugetlb_acct_memory(h, -reserve);
2192                         hugepage_subpool_put_pages(spool, reserve);
2193                 }
2194         }
2195 }
2196
2197 /*
2198  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2199  * handle_mm_fault() to try to instantiate regular-sized pages in the
2200  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2201  * this far.
2202  */
2203 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2204 {
2205         BUG();
2206         return 0;
2207 }
2208
2209 const struct vm_operations_struct hugetlb_vm_ops = {
2210         .fault = hugetlb_vm_op_fault,
2211         .open = hugetlb_vm_op_open,
2212         .close = hugetlb_vm_op_close,
2213 };
2214
2215 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2216                                 int writable)
2217 {
2218         pte_t entry;
2219
2220         if (writable) {
2221                 entry =
2222                     pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2223         } else {
2224                 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2225         }
2226         entry = pte_mkyoung(entry);
2227         entry = pte_mkhuge(entry);
2228         entry = arch_make_huge_pte(entry, vma, page, writable);
2229
2230         return entry;
2231 }
2232
2233 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2234                                    unsigned long address, pte_t *ptep)
2235 {
2236         pte_t entry;
2237
2238         entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2239         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2240                 update_mmu_cache(vma, address, ptep);
2241 }
2242
2243
2244 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2245                             struct vm_area_struct *vma)
2246 {
2247         pte_t *src_pte, *dst_pte, entry;
2248         struct page *ptepage;
2249         unsigned long addr;
2250         int cow;
2251         struct hstate *h = hstate_vma(vma);
2252         unsigned long sz = huge_page_size(h);
2253
2254         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2255
2256         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2257                 src_pte = huge_pte_offset(src, addr);
2258                 if (!src_pte)
2259                         continue;
2260                 dst_pte = huge_pte_alloc(dst, addr, sz);
2261                 if (!dst_pte)
2262                         goto nomem;
2263
2264                 /* If the page tables are shared, don't copy or take references */
2265                 if (dst_pte == src_pte)
2266                         continue;
2267
2268                 spin_lock(&dst->page_table_lock);
2269                 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2270                 if (!huge_pte_none(huge_ptep_get(src_pte))) {
2271                         if (cow)
2272                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2273                         entry = huge_ptep_get(src_pte);
2274                         ptepage = pte_page(entry);
2275                         get_page(ptepage);
2276                         page_dup_rmap(ptepage);
2277                         set_huge_pte_at(dst, addr, dst_pte, entry);
2278                 }
2279                 spin_unlock(&src->page_table_lock);
2280                 spin_unlock(&dst->page_table_lock);
2281         }
2282         return 0;
2283
2284 nomem:
2285         return -ENOMEM;
2286 }
2287
2288 static int is_hugetlb_entry_migration(pte_t pte)
2289 {
2290         swp_entry_t swp;
2291
2292         if (huge_pte_none(pte) || pte_present(pte))
2293                 return 0;
2294         swp = pte_to_swp_entry(pte);
2295         if (non_swap_entry(swp) && is_migration_entry(swp))
2296                 return 1;
2297         else
2298                 return 0;
2299 }
2300
2301 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2302 {
2303         swp_entry_t swp;
2304
2305         if (huge_pte_none(pte) || pte_present(pte))
2306                 return 0;
2307         swp = pte_to_swp_entry(pte);
2308         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2309                 return 1;
2310         else
2311                 return 0;
2312 }
2313
2314 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2315                             unsigned long start, unsigned long end,
2316                             struct page *ref_page)
2317 {
2318         int force_flush = 0;
2319         struct mm_struct *mm = vma->vm_mm;
2320         unsigned long address;
2321         pte_t *ptep;
2322         pte_t pte;
2323         struct page *page;
2324         struct hstate *h = hstate_vma(vma);
2325         unsigned long sz = huge_page_size(h);
2326
2327         WARN_ON(!is_vm_hugetlb_page(vma));
2328         BUG_ON(start & ~huge_page_mask(h));
2329         BUG_ON(end & ~huge_page_mask(h));
2330
2331         tlb_start_vma(tlb, vma);
2332         mmu_notifier_invalidate_range_start(mm, start, end);
2333 again:
2334         spin_lock(&mm->page_table_lock);
2335         for (address = start; address < end; address += sz) {
2336                 ptep = huge_pte_offset(mm, address);
2337                 if (!ptep)
2338                         continue;
2339
2340                 if (huge_pmd_unshare(mm, &address, ptep))
2341                         continue;
2342
2343                 pte = huge_ptep_get(ptep);
2344                 if (huge_pte_none(pte))
2345                         continue;
2346
2347                 /*
2348                  * A HWPoisoned hugepage is already unmapped and its reference dropped
2349                  */
2350                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2351                         continue;
2352
2353                 page = pte_page(pte);
2354                 /*
2355                  * If a reference page is supplied, it is because a specific
2356                  * page is being unmapped, not a range. Ensure the page we
2357                  * are about to unmap is the actual page of interest.
2358                  */
2359                 if (ref_page) {
2360                         if (page != ref_page)
2361                                 continue;
2362
2363                         /*
2364                          * Mark the VMA as having unmapped its page so that
2365                          * future faults in this VMA will fail rather than
2366                          * looking like data was lost
2367                          */
2368                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2369                 }
2370
2371                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2372                 tlb_remove_tlb_entry(tlb, ptep, address);
2373                 if (pte_dirty(pte))
2374                         set_page_dirty(page);
2375
2376                 page_remove_rmap(page);
2377                 force_flush = !__tlb_remove_page(tlb, page);
2378                 if (force_flush)
2379                         break;
2380                 /* Bail out after unmapping reference page if supplied */
2381                 if (ref_page)
2382                         break;
2383         }
2384         spin_unlock(&mm->page_table_lock);
2385         /*
2386          * mmu_gather ran out of room to batch pages; we break out of
2387          * the PTE lock to avoid doing the potentially expensive TLB invalidate
2388          * and page-free while holding it.
2389          */
2390         if (force_flush) {
2391                 force_flush = 0;
2392                 tlb_flush_mmu(tlb);
2393                 if (address < end && !ref_page)
2394                         goto again;
2395         }
2396         mmu_notifier_invalidate_range_end(mm, start, end);
2397         tlb_end_vma(tlb, vma);
2398 }
2399
2400 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2401                           unsigned long end, struct page *ref_page)
2402 {
2403         struct mm_struct *mm;
2404         struct mmu_gather tlb;
2405
2406         mm = vma->vm_mm;
2407
2408         tlb_gather_mmu(&tlb, mm, 0);
2409         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2410         tlb_finish_mmu(&tlb, start, end);
2411 }
2412
2413 /*
2414  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2415  * mapping it owns the reserve page for. The intention is to unmap the page
2416  * from other VMAs and let the children be SIGKILLed if they are faulting the
2417  * same region.
2418  */
2419 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2420                                 struct page *page, unsigned long address)
2421 {
2422         struct hstate *h = hstate_vma(vma);
2423         struct vm_area_struct *iter_vma;
2424         struct address_space *mapping;
2425         struct prio_tree_iter iter;
2426         pgoff_t pgoff;
2427
2428         /*
2429          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2430          * from page cache lookup which is in HPAGE_SIZE units.
2431          */
2432         address = address & huge_page_mask(h);
2433         pgoff = vma_hugecache_offset(h, vma, address);
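        /*
         * Worked example: with 2MB huge pages and 4kB base pages, a page at
         * byte offset 4MB in the file has index 1024 in PAGE_SIZE units
         * (vm_pgoff) but index 2 here in huge-page-sized units (pgoff).
         */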
2434         mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
2435
2436         /*
2437          * Take the mapping lock for the duration of the table walk. As
2438          * this mapping should be shared between all the VMAs,
2439          * unmap_hugepage_range() is called with the lock already held.
2440          */
2441         mutex_lock(&mapping->i_mmap_mutex);
2442         vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2443                 /* Do not unmap the current VMA */
2444                 if (iter_vma == vma)
2445                         continue;
2446
2447                 /*
2448                  * Unmap the page from other VMAs without their own reserves.
2449                  * They get marked to be SIGKILLed if they fault in these
2450                  * areas. This is because a future no-page fault on this VMA
2451                  * could insert a zeroed page instead of the data existing
2452                  * from the time of fork. This would look like data corruption
2453                  */
2454                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2455                         unmap_hugepage_range(iter_vma, address,
2456                                              address + huge_page_size(h), page);
2457         }
2458         mutex_unlock(&mapping->i_mmap_mutex);
2459
2460         return 1;
2461 }
2462
2463 /*
2464  * hugetlb_cow() should be called with the page lock of the original hugepage held.
2465  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2466  * cannot race with other handlers or page migration.
2467  * Keep the pte_same checks anyway to make transition from the mutex easier.
2468  */
2469 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2470                         unsigned long address, pte_t *ptep, pte_t pte,
2471                         struct page *pagecache_page)
2472 {
2473         struct hstate *h = hstate_vma(vma);
2474         struct page *old_page, *new_page;
2475         int avoidcopy;
2476         int outside_reserve = 0;
2477
2478         old_page = pte_page(pte);
2479
2480 retry_avoidcopy:
2481         /* If no-one else is actually using this page, avoid the copy
2482          * and just make the page writable */
2483         avoidcopy = (page_mapcount(old_page) == 1);
2484         if (avoidcopy) {
2485                 if (PageAnon(old_page))
2486                         page_move_anon_rmap(old_page, vma, address);
2487                 set_huge_ptep_writable(vma, address, ptep);
2488                 return 0;
2489         }
2490
2491         /*
2492          * If the process that created a MAP_PRIVATE mapping is about to
2493          * perform a COW due to a shared page count, attempt to satisfy
2494          * the allocation without using the existing reserves. The pagecache
2495          * page is used to determine if the reserve at this address was
2496          * consumed or not. If reserves were used, a partial faulted mapping
2497          * at the time of fork() could consume its reserves on COW instead
2498          * of the full address range.
2499          */
2500         if (!(vma->vm_flags & VM_MAYSHARE) &&
2501                         is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2502                         old_page != pagecache_page)
2503                 outside_reserve = 1;
2504
2505         page_cache_get(old_page);
2506
2507         /* Drop page_table_lock as buddy allocator may be called */
2508         spin_unlock(&mm->page_table_lock);
2509         new_page = alloc_huge_page(vma, address, outside_reserve);
2510
2511         if (IS_ERR(new_page)) {
2512                 long err = PTR_ERR(new_page);
2513                 page_cache_release(old_page);
2514
2515                 /*
2516                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2517                  * it is due to references held by a child and an insufficient
2518                  * huge page pool. To guarantee the original mapper's
2519                  * reliability, unmap the page from child processes. The child
2520                  * may get SIGKILLed if it later faults.
2521                  */
2522                 if (outside_reserve) {
2523                         BUG_ON(huge_pte_none(pte));
2524                         if (unmap_ref_private(mm, vma, old_page, address)) {
2525                                 BUG_ON(huge_pte_none(pte));
2526                                 spin_lock(&mm->page_table_lock);
2527                                 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2528                                 if (likely(pte_same(huge_ptep_get(ptep), pte)))
2529                                         goto retry_avoidcopy;
2530                                 /*
2531                                  * A race occurred while re-acquiring page_table_lock, and
2532                                  * our job is done.
2533                                  */
2534                                 return 0;
2535                         }
2536                         WARN_ON_ONCE(1);
2537                 }
2538
2539                 /* Caller expects lock to be held */
2540                 spin_lock(&mm->page_table_lock);
2541                 if (err == -ENOMEM)
2542                         return VM_FAULT_OOM;
2543                 else
2544                         return VM_FAULT_SIGBUS;
2545         }
2546
2547         /*
2548          * When the original hugepage is a shared one, it does not have
2549          * an anon_vma prepared.
2550          */
2551         if (unlikely(anon_vma_prepare(vma))) {
2552                 page_cache_release(new_page);
2553                 page_cache_release(old_page);
2554                 /* Caller expects lock to be held */
2555                 spin_lock(&mm->page_table_lock);
2556                 return VM_FAULT_OOM;
2557         }
2558
2559         copy_user_huge_page(new_page, old_page, address, vma,
2560                             pages_per_huge_page(h));
2561         __SetPageUptodate(new_page);
2562
2563         /*
2564          * Retake the page_table_lock to check for racing updates
2565          * before the page tables are altered
2566          */
2567         spin_lock(&mm->page_table_lock);
2568         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2569         if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2570                 /* Break COW */
2571                 mmu_notifier_invalidate_range_start(mm,
2572                         address & huge_page_mask(h),
2573                         (address & huge_page_mask(h)) + huge_page_size(h));
2574                 huge_ptep_clear_flush(vma, address, ptep);
2575                 set_huge_pte_at(mm, address, ptep,
2576                                 make_huge_pte(vma, new_page, 1));
2577                 page_remove_rmap(old_page);
2578                 hugepage_add_new_anon_rmap(new_page, vma, address);
2579                 /* Make the old page be freed below */
2580                 new_page = old_page;
2581                 mmu_notifier_invalidate_range_end(mm,
2582                         address & huge_page_mask(h),
2583                         (address & huge_page_mask(h)) + huge_page_size(h));
2584         }
2585         page_cache_release(new_page);
2586         page_cache_release(old_page);
2587         return 0;
2588 }
2589
2590 /* Return the pagecache page at a given address within a VMA */
2591 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2592                         struct vm_area_struct *vma, unsigned long address)
2593 {
2594         struct address_space *mapping;
2595         pgoff_t idx;
2596
2597         mapping = vma->vm_file->f_mapping;
2598         idx = vma_hugecache_offset(h, vma, address);
2599
2600         return find_lock_page(mapping, idx);
2601 }
2602
2603 /*
2604  * Return whether there is a pagecache page to back given address within VMA.
2605  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2606  */
2607 static bool hugetlbfs_pagecache_present(struct hstate *h,
2608                         struct vm_area_struct *vma, unsigned long address)
2609 {
2610         struct address_space *mapping;
2611         pgoff_t idx;
2612         struct page *page;
2613
2614         mapping = vma->vm_file->f_mapping;
2615         idx = vma_hugecache_offset(h, vma, address);
2616
2617         page = find_get_page(mapping, idx);
2618         if (page)
2619                 put_page(page);
2620         return page != NULL;
2621 }
2622
2623 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2624                         unsigned long address, pte_t *ptep, unsigned int flags)
2625 {
2626         struct hstate *h = hstate_vma(vma);
2627         int ret = VM_FAULT_SIGBUS;
2628         int anon_rmap = 0;
2629         pgoff_t idx;
2630         unsigned long size;
2631         struct page *page;
2632         struct address_space *mapping;
2633         pte_t new_pte;
2634
2635         /*
2636          * Currently, we are forced to kill the process in the event the
2637          * original mapper has unmapped pages from the child due to a failed
2638          * COW. Warn that such a situation has occurred as it may not be obvious.
2639          */
2640         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2641                 printk(KERN_WARNING
2642                         "PID %d killed due to inadequate hugepage pool\n",
2643                         current->pid);
2644                 return ret;
2645         }
2646
2647         mapping = vma->vm_file->f_mapping;
2648         idx = vma_hugecache_offset(h, vma, address);
2649
2650         /*
2651          * Use page lock to guard against racing truncation
2652          * before we get page_table_lock.
2653          */
2654 retry:
2655         page = find_lock_page(mapping, idx);
2656         if (!page) {
2657                 size = i_size_read(mapping->host) >> huge_page_shift(h);
2658                 if (idx >= size)
2659                         goto out;
2660                 page = alloc_huge_page(vma, address, 0);
2661                 if (IS_ERR(page)) {
2662                         ret = PTR_ERR(page);
2663                         if (ret == -ENOMEM)
2664                                 ret = VM_FAULT_OOM;
2665                         else
2666                                 ret = VM_FAULT_SIGBUS;
2667                         goto out;
2668                 }
2669                 clear_huge_page(page, address, pages_per_huge_page(h));
2670                 __SetPageUptodate(page);
2671
2672                 if (vma->vm_flags & VM_MAYSHARE) {
2673                         int err;
2674                         struct inode *inode = mapping->host;
2675
2676                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2677                         if (err) {
2678                                 put_page(page);
2679                                 if (err == -EEXIST)
2680                                         goto retry;
2681                                 goto out;
2682                         }
2683
2684                         spin_lock(&inode->i_lock);
2685                         inode->i_blocks += blocks_per_huge_page(h);
2686                         spin_unlock(&inode->i_lock);
2687                 } else {
2688                         lock_page(page);
2689                         if (unlikely(anon_vma_prepare(vma))) {
2690                                 ret = VM_FAULT_OOM;
2691                                 goto backout_unlocked;
2692                         }
2693                         anon_rmap = 1;
2694                 }
2695         } else {
2696                 /*
2697                  * If a memory error occurs between mmap() and fault, some processes
2698                  * don't have a hwpoisoned swap entry for the errored virtual address.
2699                  * So we need to block hugepage faults with a PG_hwpoison bit check.
2700                  */
2701                 if (unlikely(PageHWPoison(page))) {
2702                         ret = VM_FAULT_HWPOISON |
2703                                 VM_FAULT_SET_HINDEX(hstate_index(h));
2704                         goto backout_unlocked;
2705                 }
2706         }
2707
2708         /*
2709          * If we are going to COW a private mapping later, we examine the
2710          * pending reservations for this page now. This will ensure that
2711          * any allocations necessary to record that reservation occur outside
2712          * the spinlock.
2713          */
2714         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2715                 if (vma_needs_reservation(h, vma, address) < 0) {
2716                         ret = VM_FAULT_OOM;
2717                         goto backout_unlocked;
2718                 }
2719
2720         spin_lock(&mm->page_table_lock);
2721         size = i_size_read(mapping->host) >> huge_page_shift(h);
2722         if (idx >= size)
2723                 goto backout;
2724
2725         ret = 0;
2726         if (!huge_pte_none(huge_ptep_get(ptep)))
2727                 goto backout;
2728
2729         if (anon_rmap)
2730                 hugepage_add_new_anon_rmap(page, vma, address);
2731         else
2732                 page_dup_rmap(page);
2733         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2734                                 && (vma->vm_flags & VM_SHARED)));
2735         set_huge_pte_at(mm, address, ptep, new_pte);
2736
2737         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2738                 /* Optimization, do the COW without a second fault */
2739                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2740         }
2741
2742         spin_unlock(&mm->page_table_lock);
2743         unlock_page(page);
2744 out:
2745         return ret;
2746
2747 backout:
2748         spin_unlock(&mm->page_table_lock);
2749 backout_unlocked:
2750         unlock_page(page);
2751         put_page(page);
2752         goto out;
2753 }
2754
2755 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2756                         unsigned long address, unsigned int flags)
2757 {
2758         pte_t *ptep;
2759         pte_t entry;
2760         int ret;
2761         struct page *page = NULL;
2762         struct page *pagecache_page = NULL;
2763         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2764         struct hstate *h = hstate_vma(vma);
2765
2766         address &= huge_page_mask(h);
2767
2768         ptep = huge_pte_offset(mm, address);
2769         if (ptep) {
2770                 entry = huge_ptep_get(ptep);
2771                 if (unlikely(is_hugetlb_entry_migration(entry))) {
2772                         migration_entry_wait(mm, (pmd_t *)ptep, address);
2773                         return 0;
2774                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2775                         return VM_FAULT_HWPOISON_LARGE |
2776                                 VM_FAULT_SET_HINDEX(hstate_index(h));
2777         }
2778
2779         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2780         if (!ptep)
2781                 return VM_FAULT_OOM;
2782
2783         /*
2784          * Serialize hugepage allocation and instantiation, so that we don't
2785          * get spurious allocation failures if two CPUs race to instantiate
2786          * the same page in the page cache.
2787          */
2788         mutex_lock(&hugetlb_instantiation_mutex);
2789         entry = huge_ptep_get(ptep);
2790         if (huge_pte_none(entry)) {
2791                 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2792                 goto out_mutex;
2793         }
2794
2795         ret = 0;
2796
2797         /*
2798          * If we are going to COW the mapping later, we examine the pending
2799          * reservations for this page now. This will ensure that any
2800          * allocations necessary to record that reservation occur outside the
2801          * spinlock. For private mappings, we also lookup the pagecache
2802          * page now as it is used to determine if a reservation has been
2803          * consumed.
2804          */
2805         if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2806                 if (vma_needs_reservation(h, vma, address) < 0) {
2807                         ret = VM_FAULT_OOM;
2808                         goto out_mutex;
2809                 }
2810
2811                 if (!(vma->vm_flags & VM_MAYSHARE))
2812                         pagecache_page = hugetlbfs_pagecache_page(h,
2813                                                                 vma, address);
2814         }
2815
2816         /*
2817          * hugetlb_cow() requires page locks of pte_page(entry) and
2818                  * pagecache_page, so here we need to take the former one
2819          * when page != pagecache_page or !pagecache_page.
2820          * Note that locking order is always pagecache_page -> page,
2821          * so no worry about deadlock.
2822          */
2823         page = pte_page(entry);
2824         get_page(page);
2825         if (page != pagecache_page)
2826                 lock_page(page);
2827
2828         spin_lock(&mm->page_table_lock);
2829         /* Check for a racing update before calling hugetlb_cow */
2830         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2831                 goto out_page_table_lock;
2832
2833
2834         if (flags & FAULT_FLAG_WRITE) {
2835                 if (!pte_write(entry)) {
2836                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
2837                                                         pagecache_page);
2838                         goto out_page_table_lock;
2839                 }
2840                 entry = pte_mkdirty(entry);
2841         }
2842         entry = pte_mkyoung(entry);
2843         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2844                                                 flags & FAULT_FLAG_WRITE))
2845                 update_mmu_cache(vma, address, ptep);
2846
2847 out_page_table_lock:
2848         spin_unlock(&mm->page_table_lock);
2849
2850         if (pagecache_page) {
2851                 unlock_page(pagecache_page);
2852                 put_page(pagecache_page);
2853         }
2854         if (page != pagecache_page)
2855                 unlock_page(page);
2856         put_page(page);
2857
2858 out_mutex:
2859         mutex_unlock(&hugetlb_instantiation_mutex);
2860
2861         return ret;
2862 }
2863
2864 /* Can be overridden by architectures */
2865 __attribute__((weak)) struct page *
2866 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2867                pud_t *pud, int write)
2868 {
2869         BUG();
2870         return NULL;
2871 }
2872
2873 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2874                         struct page **pages, struct vm_area_struct **vmas,
2875                         unsigned long *position, int *length, int i,
2876                         unsigned int flags)
2877 {
2878         unsigned long pfn_offset;
2879         unsigned long vaddr = *position;
2880         int remainder = *length;
2881         struct hstate *h = hstate_vma(vma);
2882
2883         spin_lock(&mm->page_table_lock);
2884         while (vaddr < vma->vm_end && remainder) {
2885                 pte_t *pte;
2886                 int absent;
2887                 struct page *page;
2888
2889                 /*
2890                  * Some archs (sparc64, sh*) have multiple pte_ts for
2891                  * each hugepage.  We have to make sure we get the
2892                  * first, for the page indexing below to work.
2893                  */
2894                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2895                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
2896
2897                 /*
2898                  * When coredumping, it suits get_dump_page if we just return
2899                  * an error where there's an empty slot with no huge pagecache
2900                  * to back it.  This way, we avoid allocating a hugepage, and
2901                  * the sparse dumpfile avoids allocating disk blocks, but its
2902                  * huge holes still show up with zeroes where they need to be.
2903                  */
2904                 if (absent && (flags & FOLL_DUMP) &&
2905                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2906                         remainder = 0;
2907                         break;
2908                 }
2909
2910                 if (absent ||
2911                     ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2912                         int ret;
2913
2914                         spin_unlock(&mm->page_table_lock);
2915                         ret = hugetlb_fault(mm, vma, vaddr,
2916                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2917                         spin_lock(&mm->page_table_lock);
2918                         if (!(ret & VM_FAULT_ERROR))
2919                                 continue;
2920
2921                         remainder = 0;
2922                         break;
2923                 }
2924
2925                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2926                 page = pte_page(huge_ptep_get(pte));
2927 same_page:
2928                 if (pages) {
2929                         pages[i] = mem_map_offset(page, pfn_offset);
2930                         get_page(pages[i]);
2931                 }
2932
2933                 if (vmas)
2934                         vmas[i] = vma;
2935
2936                 vaddr += PAGE_SIZE;
2937                 ++pfn_offset;
2938                 --remainder;
2939                 ++i;
2940                 if (vaddr < vma->vm_end && remainder &&
2941                                 pfn_offset < pages_per_huge_page(h)) {
2942                         /*
2943                          * We use pfn_offset to avoid touching the pageframes
2944                          * of this compound page.
2945                          */
2946                         goto same_page;
2947                 }
2948         }
2949         spin_unlock(&mm->page_table_lock);
2950         *length = remainder;
2951         *position = vaddr;
2952
2953         return i ? i : -EFAULT;
2954 }
2955
2956 void hugetlb_change_protection(struct vm_area_struct *vma,
2957                 unsigned long address, unsigned long end, pgprot_t newprot)
2958 {
2959         struct mm_struct *mm = vma->vm_mm;
2960         unsigned long start = address;
2961         pte_t *ptep;
2962         pte_t pte;
2963         struct hstate *h = hstate_vma(vma);
2964
2965         BUG_ON(address >= end);
2966         flush_cache_range(vma, address, end);
2967
2968         mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2969         spin_lock(&mm->page_table_lock);
2970         for (; address < end; address += huge_page_size(h)) {
2971                 ptep = huge_pte_offset(mm, address);
2972                 if (!ptep)
2973                         continue;
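                /*
                 * huge_pmd_unshare() detaches a PMD page table shared
                 * with other mappings from this mm; its entries remain
                 * valid for the other sharers, so there is no pte of
                 * our own to modify here.
                 */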
2974                 if (huge_pmd_unshare(mm, &address, ptep))
2975                         continue;
2976                 if (!huge_pte_none(huge_ptep_get(ptep))) {
2977                         pte = huge_ptep_get_and_clear(mm, address, ptep);
2978                         pte = pte_mkhuge(pte_modify(pte, newprot));
2979                         set_huge_pte_at(mm, address, ptep, pte);
2980                 }
2981         }
2982         spin_unlock(&mm->page_table_lock);
2983         mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2984
2985         flush_tlb_range(vma, start, end);
2986 }
2987
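/*
 * Reserve huge pages for the range [from, to) of the file, given in
 * units of huge pages, charging the inode's subpool and the global
 * pool.  Returns 0 on success or a negative error code.
 */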
2988 int hugetlb_reserve_pages(struct inode *inode,
2989                                         long from, long to,
2990                                         struct vm_area_struct *vma,
2991                                         vm_flags_t vm_flags)
2992 {
2993         long ret, chg;
2994         struct hstate *h = hstate_inode(inode);
2995         struct hugepage_subpool *spool = subpool_inode(inode);
2996
2997         /*
2998          * Only apply hugepage reservation if asked. At fault time, an
2999          * attempt will be made for VM_NORESERVE mappings to allocate a
3000          * page without using reserves.
3001          */
3002         if (vm_flags & VM_NORESERVE)
3003                 return 0;
3004
3005         /*
3006          * Shared mappings base their reservation on the number of pages that
3007          * are already allocated on behalf of the file. Private mappings need
3008          * to reserve the full area even if read-only, as mprotect() may later
3009          * be called to make the mapping read-write. Assume !vma is a shm mapping.
3010          */
3011         if (!vma || vma->vm_flags & VM_MAYSHARE)
3012                 chg = region_chg(&inode->i_mapping->private_list, from, to);
3013         else {
3014                 struct resv_map *resv_map = resv_map_alloc();
3015                 if (!resv_map)
3016                         return -ENOMEM;
3017
3018                 chg = to - from;
3019
3020                 set_vma_resv_map(vma, resv_map);
3021                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3022         }
3023
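        /*
         * At this point chg is the number of additional huge pages to
         * charge (or a negative error from region_chg()): the full
         * range for a private mapping or, for a shared mapping, only
         * those pages in [from, to) not already reserved by an earlier
         * mapping of the file.
         */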
3024         if (chg < 0) {
3025                 ret = chg;
3026                 goto out_err;
3027         }
3028
3029         /* There must be enough pages in the subpool for the mapping */
3030         if (hugepage_subpool_get_pages(spool, chg)) {
3031                 ret = -ENOSPC;
3032                 goto out_err;
3033         }
3034
3035         /*
3036          * Check that enough hugepages are available for the reservation.
3037          * Hand the pages back to the subpool if there are not enough.
3038          */
3039         ret = hugetlb_acct_memory(h, chg);
3040         if (ret < 0) {
3041                 hugepage_subpool_put_pages(spool, chg);
3042                 goto out_err;
3043         }
3044
3045         /*
3046          * Account for the reservations made. Shared mappings record regions
3047          * that have reservations as they are shared by multiple VMAs.
3048          * When the last VMA disappears, the region map says how much
3049          * the reservation was and the page cache tells how much of
3050          * the reservation was consumed. Private mappings are per-VMA and
3051          * only the consumed reservations are tracked. When the VMA
3052          * disappears, the original reservation is the VMA size and the
3053          * consumed reservations are stored in the map. Hence, nothing
3054          * else has to be done for private mappings here
3055          */
3056         if (!vma || vma->vm_flags & VM_MAYSHARE)
3057                 region_add(&inode->i_mapping->private_list, from, to);
3058         return 0;
3059 out_err:
3060         if (vma)
3061                 resv_map_put(vma);
3062         return ret;
3063 }
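
/*
 * For orientation only: a caller such as hugetlbfs_file_mmap() is
 * expected to convert its mapping to huge-page units before reserving,
 * roughly along the lines of the sketch below (illustrative; the exact
 * call site may differ):
 *
 *	struct hstate *h = hstate_inode(inode);
 *
 *	if (hugetlb_reserve_pages(inode,
 *				  vma->vm_pgoff >> huge_page_order(h),
 *				  len >> huge_page_shift(h),
 *				  vma, vma->vm_flags))
 *		return -ENOMEM;
 */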
3064
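/*
 * Called when truncating a hugetlbfs inode: trim the reservation map
 * from 'offset' (in huge pages) onwards, adjust the inode block count
 * for the 'freed' pages actually released, and return any reservation
 * that was charged but never consumed (chg - freed) to the subpool and
 * the global pool.
 */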
3065 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3066 {
3067         struct hstate *h = hstate_inode(inode);
3068         long chg = region_truncate(&inode->i_mapping->private_list, offset);
3069         struct hugepage_subpool *spool = subpool_inode(inode);
3070
3071         spin_lock(&inode->i_lock);
3072         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3073         spin_unlock(&inode->i_lock);
3074
3075         hugepage_subpool_put_pages(spool, (chg - freed));
3076         hugetlb_acct_memory(h, -(chg - freed));
3077 }
3078
3079 #ifdef CONFIG_MEMORY_FAILURE
3080
3081 /* Should be called with hugetlb_lock held */
3082 static int is_hugepage_on_freelist(struct page *hpage)
3083 {
3084         struct page *page;
3086         struct hstate *h = page_hstate(hpage);
3087         int nid = page_to_nid(hpage);
3088
3089         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
3090                 if (page == hpage)
3091                         return 1;
3092         return 0;
3093 }
3094
3095 /*
3096  * This function is called from the memory-failure code.
3097  * The caller is assumed to hold the page lock of the head page.
3098  */
3099 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3100 {
3101         struct hstate *h = page_hstate(hpage);
3102         int nid = page_to_nid(hpage);
3103         int ret = -EBUSY;
3104
3105         spin_lock(&hugetlb_lock);
3106         if (is_hugepage_on_freelist(hpage)) {
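                /*
                 * Take the poisoned page off the free list and give it
                 * a reference so it is accounted as in use and can
                 * never be handed out by the allocator again.
                 */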
3107                 list_del(&hpage->lru);
3108                 set_page_refcounted(hpage);
3109                 h->free_huge_pages--;
3110                 h->free_huge_pages_node[nid]--;
3111                 ret = 0;
3112         }
3113         spin_unlock(&hugetlb_lock);
3114         return ret;
3115 }
3116 #endif