mm, hugetlb: convert hugetlbfs to use split pmd lock
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cca80d9..c2082ab 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
 #include "internal.h"
 
 /*
- * By default transparent hugepage support is enabled for all mappings
- * and khugepaged scans all mappings. Defrag is only invoked by
- * khugepaged hugepage allocations and by page faults inside
- * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
- * allocations.
+ * By default transparent hugepage support is disabled, to avoid the risk
+ * of increasing the memory footprint of applications without a guaranteed
+ * benefit. When transparent hugepage support is enabled, it applies to all
+ * mappings, and khugepaged scans all mappings.
+ * Defrag is invoked by khugepaged hugepage allocations and by page faults
+ * for all hugepage allocations.
  */
 unsigned long transparent_hugepage_flags __read_mostly =
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
@@ -737,7 +738,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
                set_pmd_at(mm, haddr, pmd, entry);
                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
-               mm->nr_ptes++;
+               atomic_long_inc(&mm->nr_ptes);
                spin_unlock(&mm->page_table_lock);
        }
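
Why the counter becomes atomic: with split PMD locks, two faults in the same
mm can update page-table bookkeeping under different spinlocks, so an
increment under mm->page_table_lock no longer serializes all writers. A
minimal sketch, assuming the companion mm_types.h change that turns nr_ptes
into an atomic_long_t:

    /*
     * Hedged sketch, not part of this hunk. With per-PMD-table locks,
     * a plain read-modify-write can lose updates:
     *
     *   CPU0: spin_lock(ptl_A); mm->nr_ptes++;   /* reads N, writes N+1 */
     *   CPU1: spin_lock(ptl_B); mm->nr_ptes++;   /* may also read N     */
     *
     * so every site switches to an indivisible atomic op:
     */
    atomic_long_inc(&mm->nr_ptes);                /* table installed */
    atomic_long_dec(&mm->nr_ptes);                /* table removed   */
    count = atomic_long_read(&mm->nr_ptes);       /* lock-free read  */
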
 
@@ -758,14 +759,6 @@ static inline struct page *alloc_hugepage_vma(int defrag,
                               HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
-#ifndef CONFIG_NUMA
-static inline struct page *alloc_hugepage(int defrag)
-{
-       return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
-                          HPAGE_PMD_ORDER);
-}
-#endif
-
 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
                struct page *zero_page)
@@ -778,7 +771,7 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
        entry = pmd_mkhuge(entry);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
-       mm->nr_ptes++;
+       atomic_long_inc(&mm->nr_ptes);
        return true;
 }
 
@@ -903,7 +896,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
-       dst_mm->nr_ptes++;
+       atomic_long_inc(&dst_mm->nr_ptes);
 
        ret = 0;
 out_unlock:
@@ -1282,19 +1275,32 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *page;
        unsigned long haddr = addr & HPAGE_PMD_MASK;
        int page_nid = -1, this_nid = numa_node_id();
-       int target_nid;
+       int target_nid, last_cpupid = -1;
        bool page_locked;
        bool migrated = false;
+       int flags = 0;
 
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(pmd, *pmdp)))
                goto out_unlock;
 
        page = pmd_page(pmd);
+       BUG_ON(is_huge_zero_page(page));
        page_nid = page_to_nid(page);
+       last_cpupid = page_cpupid_last(page);
        count_vm_numa_event(NUMA_HINT_FAULTS);
-       if (page_nid == this_nid)
+       if (page_nid == this_nid) {
                count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+               flags |= TNF_FAULT_LOCAL;
+       }
+
+       /*
+        * Avoid grouping on DSO/COW pages specifically, and on RO pages
+        * in general; RO pages shouldn't hurt as much anyway, since
+        * they can be in shared cache state.
+        */
+       if (!pmd_write(pmd))
+               flags |= TNF_NO_GROUP;
 
        /*
         * Acquire the page lock to serialise THP migrations but avoid dropping
@@ -1325,7 +1331,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                lock_page(page);
        anon_vma = page_lock_anon_vma_read(page);
 
-       /* Confirm the PTE did not while locked */
+       /* Confirm the PMD did not change while page_table_lock was released */
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(pmd, *pmdp))) {
                unlock_page(page);
@@ -1341,8 +1347,10 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spin_unlock(&mm->page_table_lock);
        migrated = migrate_misplaced_transhuge_page(mm, vma,
                                pmdp, pmd, addr, page, target_nid);
-       if (migrated)
+       if (migrated) {
+               flags |= TNF_MIGRATED;
                page_nid = target_nid;
+       }
 
        goto out;
 clear_pmdnuma:
@@ -1360,7 +1368,7 @@ out:
                page_unlock_anon_vma_read(anon_vma);
 
        if (page_nid != -1)
-               task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
+               task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
 
        return 0;
 }
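
The hunks above thread NUMA-hinting statistics through to task_numa_fault().
A condensed map of how the new flags accumulate, using only names that
appear in this patch; this is a sketch of the flow, not the full function:

    /* Condensed sketch of the flag flow in do_huge_pmd_numa_page(). */
    int flags = 0;
    int last_cpupid = page_cpupid_last(page); /* last CPU+pid to touch it */

    if (page_nid == this_nid)
            flags |= TNF_FAULT_LOCAL;         /* fault was node-local     */
    if (!pmd_write(pmd))
            flags |= TNF_NO_GROUP;            /* skip grouping on RO pmds */
    if (migrated)
            flags |= TNF_MIGRATED;            /* THP moved to target_nid  */

    task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
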
@@ -1368,9 +1376,10 @@ out:
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
 {
+       spinlock_t *ptl;
        int ret = 0;
 
-       if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+       if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                struct page *page;
                pgtable_t pgtable;
                pmd_t orig_pmd;
@@ -1384,8 +1393,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
                pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
                if (is_huge_zero_pmd(orig_pmd)) {
-                       tlb->mm->nr_ptes--;
-                       spin_unlock(&tlb->mm->page_table_lock);
+                       atomic_long_dec(&tlb->mm->nr_ptes);
+                       spin_unlock(ptl);
                        put_huge_zero_page();
                } else {
                        page = pmd_page(orig_pmd);
@@ -1393,8 +1402,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        VM_BUG_ON(page_mapcount(page) < 0);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                        VM_BUG_ON(!PageHead(page));
-                       tlb->mm->nr_ptes--;
-                       spin_unlock(&tlb->mm->page_table_lock);
+                       atomic_long_dec(&tlb->mm->nr_ptes);
+                       spin_unlock(ptl);
                        tlb_remove_page(tlb, page);
                }
                pte_free(tlb->mm, pgtable);
@@ -1407,14 +1416,15 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end,
                unsigned char *vec)
 {
+       spinlock_t *ptl;
        int ret = 0;
 
-       if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+       if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                /*
                 * All logical pages in the range are present
                 * if backed by a huge page.
                 */
-               spin_unlock(&vma->vm_mm->page_table_lock);
+               spin_unlock(ptl);
                memset(vec, 1, (end - addr) >> PAGE_SHIFT);
                ret = 1;
        }
@@ -1427,6 +1437,7 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
                  unsigned long new_addr, unsigned long old_end,
                  pmd_t *old_pmd, pmd_t *new_pmd)
 {
+       spinlock_t *old_ptl, *new_ptl;
        int ret = 0;
        pmd_t pmd;
 
@@ -1447,41 +1458,69 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
                goto out;
        }
 
-       ret = __pmd_trans_huge_lock(old_pmd, vma);
+       /*
+        * We don't have to worry about the ordering of src and dst
+        * ptlocks because exclusive mmap_sem prevents deadlock.
+        */
+       ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
        if (ret == 1) {
+               new_ptl = pmd_lockptr(mm, new_pmd);
+               if (new_ptl != old_ptl)
+                       spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
                pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
                VM_BUG_ON(!pmd_none(*new_pmd));
                set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
-               spin_unlock(&mm->page_table_lock);
+               if (new_ptl != old_ptl)
+                       spin_unlock(new_ptl);
+               spin_unlock(old_ptl);
        }
 out:
        return ret;
 }
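
With one lock per PMD table, mremap() of a huge pmd may need two ptlocks at
once; the second is taken only when it is actually a different lock, with the
nesting annotated for lockdep. A condensed sketch of the idiom (in the real
code above, old_ptl is obtained through __pmd_trans_huge_lock()):

    /* Two-ptlock idiom, condensed from move_huge_pmd() above. */
    old_ptl = pmd_lock(mm, old_pmd);        /* lock and return old ptl */
    new_ptl = pmd_lockptr(mm, new_pmd);     /* look up, don't lock yet */
    if (new_ptl != old_ptl)
            spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

    /* ... transfer the entry from old_pmd to new_pmd ... */

    if (new_ptl != old_ptl)
            spin_unlock(new_ptl);
    spin_unlock(old_ptl);

No fixed lock ordering is needed because, as the comment in the hunk notes,
the exclusive mmap_sem already prevents two tasks from taking these locks in
opposite order.
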
 
+/*
+ * Returns
+ *  - 0 if PMD could not be locked
+ *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
+ *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
+ */
 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, pgprot_t newprot, int prot_numa)
 {
        struct mm_struct *mm = vma->vm_mm;
+       spinlock_t *ptl;
        int ret = 0;
 
-       if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+       if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                pmd_t entry;
-               entry = pmdp_get_and_clear(mm, addr, pmd);
+               ret = 1;
                if (!prot_numa) {
+                       entry = pmdp_get_and_clear(mm, addr, pmd);
                        entry = pmd_modify(entry, newprot);
+                       ret = HPAGE_PMD_NR;
                        BUG_ON(pmd_write(entry));
                } else {
                        struct page *page = pmd_page(*pmd);
 
-                       /* only check non-shared pages */
-                       if (page_mapcount(page) == 1 &&
+                       /*
+                        * Do not trap faults against the zero page. The
+                        * read-only data is likely to be read-cached on the
+                        * local CPU cache and it is less useful to know about
+                        * local vs remote hits on the zero page.
+                        */
+                       if (!is_huge_zero_page(page) &&
                            !pmd_numa(*pmd)) {
+                               entry = pmdp_get_and_clear(mm, addr, pmd);
                                entry = pmd_mknuma(entry);
+                               ret = HPAGE_PMD_NR;
                        }
                }
-               set_pmd_at(mm, addr, pmd, entry);
-               spin_unlock(&vma->vm_mm->page_table_lock);
-               ret = 1;
+
+               /* Set PMD if cleared earlier */
+               if (ret == HPAGE_PMD_NR)
+                       set_pmd_at(mm, addr, pmd, entry);
+
+               spin_unlock(ptl);
        }
 
        return ret;
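
A hedged sketch of how a pmd walker such as mprotect's might consume the
tri-state return value; the surrounding loop and the pages counter are
illustrative, not part of this hunk:

    /* Illustrative caller; variable names here are hypothetical. */
    if (pmd_trans_huge(*pmd)) {
            int nr = change_huge_pmd(vma, pmd, addr, newprot, prot_numa);

            if (nr) {                /* 1 or HPAGE_PMD_NR: THP handled  */
                    if (nr == HPAGE_PMD_NR)
                            pages += HPAGE_PMD_NR;  /* needs TLB flush */
                    continue;        /* skip the PTE loop either way   */
            }
            /* nr == 0: pmd was not a stable THP, fall through to PTEs */
    }
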
@@ -1494,12 +1533,13 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
  * Note that if it returns 1, this routine returns without unlocking page
  * table locks. So callers must unlock them.
  */
-int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
+int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+               spinlock_t **ptl)
 {
-       spin_lock(&vma->vm_mm->page_table_lock);
+       *ptl = pmd_lock(vma->vm_mm, pmd);
        if (likely(pmd_trans_huge(*pmd))) {
                if (unlikely(pmd_trans_splitting(*pmd))) {
-                       spin_unlock(&vma->vm_mm->page_table_lock);
+                       spin_unlock(*ptl);
                        wait_split_huge_page(vma->anon_vma, pmd);
                        return -1;
                } else {
@@ -1508,27 +1548,37 @@ int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
                        return 1;
                }
        }
-       spin_unlock(&vma->vm_mm->page_table_lock);
+       spin_unlock(*ptl);
        return 0;
 }
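
Every caller converted in this patch follows the same contract: on success
the helper leaves the per-pmd lock held and reports it through @ptl. Sketched:

    /* Caller contract for the reworked __pmd_trans_huge_lock(). */
    spinlock_t *ptl;

    switch (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
    case 1:          /* stable huge pmd; *ptl is held by us          */
            /* ... operate on the huge pmd ... */
            spin_unlock(ptl);
            break;
    case -1:         /* pmd was splitting; helper waited, no lock    */
    case 0:          /* not a huge pmd; no lock held                 */
            break;
    }
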
 
+/*
+ * This function checks whether a given @page is mapped at @address in the
+ * virtual address space of @mm.
+ *
+ * If it is, it returns the pmd with the page table lock held, passing the
+ * lock back to the caller via @ptl.
+ * If it is not, it returns NULL without holding the page table lock.
+ */
 pmd_t *page_check_address_pmd(struct page *page,
                              struct mm_struct *mm,
                              unsigned long address,
-                             enum page_check_address_pmd_flag flag)
+                             enum page_check_address_pmd_flag flag,
+                             spinlock_t **ptl)
 {
-       pmd_t *pmd, *ret = NULL;
+       pmd_t *pmd;
 
        if (address & ~HPAGE_PMD_MASK)
-               goto out;
+               return NULL;
 
        pmd = mm_find_pmd(mm, address);
        if (!pmd)
-               goto out;
+               return NULL;
+       *ptl = pmd_lock(mm, pmd);
        if (pmd_none(*pmd))
-               goto out;
+               goto unlock;
        if (pmd_page(*pmd) != page)
-               goto out;
+               goto unlock;
        /*
         * split_vma() may create temporary aliased mappings. There is
         * no risk as long as all huge pmd are found and have their
@@ -1538,14 +1588,15 @@ pmd_t *page_check_address_pmd(struct page *page,
         */
        if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
            pmd_trans_splitting(*pmd))
-               goto out;
+               goto unlock;
        if (pmd_trans_huge(*pmd)) {
                VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
                          !pmd_trans_splitting(*pmd));
-               ret = pmd;
+               return pmd;
        }
-out:
-       return ret;
+unlock:
+       spin_unlock(*ptl);
+       return NULL;
 }
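
The same lock-handover contract applies here: a non-NULL return means the
pmd maps @page and *ptl is held; a NULL return means the lock was never
taken or has already been dropped. A sketch of the caller pattern (the two
real callers follow in the next hunks):

    /* Sketch of the page_check_address_pmd() caller pattern. */
    spinlock_t *ptl;
    pmd_t *pmd;

    pmd = page_check_address_pmd(page, mm, address, flag, &ptl);
    if (pmd) {
            /* ... pmd maps page, *ptl is held ... */
            spin_unlock(ptl);
    }
    /* on NULL: nothing to unlock */
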
 
 static int __split_huge_page_splitting(struct page *page,
@@ -1553,6 +1604,7 @@ static int __split_huge_page_splitting(struct page *page,
                                       unsigned long address)
 {
        struct mm_struct *mm = vma->vm_mm;
+       spinlock_t *ptl;
        pmd_t *pmd;
        int ret = 0;
        /* For mmu_notifiers */
@@ -1560,9 +1612,8 @@ static int __split_huge_page_splitting(struct page *page,
        const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;
 
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-       spin_lock(&mm->page_table_lock);
        pmd = page_check_address_pmd(page, mm, address,
-                                    PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
+                       PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
        if (pmd) {
                /*
                 * We can't temporarily set the pmd to null in order
@@ -1573,8 +1624,8 @@ static int __split_huge_page_splitting(struct page *page,
                 */
                pmdp_splitting_flush(vma, address, pmd);
                ret = 1;
+               spin_unlock(ptl);
        }
-       spin_unlock(&mm->page_table_lock);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
        return ret;
@@ -1662,7 +1713,7 @@ static void __split_huge_page_refcount(struct page *page,
                page_tail->mapping = page->mapping;
 
                page_tail->index = page->index + i;
-               page_nid_xchg_last(page_tail, page_nid_last(page));
+               page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
 
                BUG_ON(!PageAnon(page_tail));
                BUG_ON(!PageUptodate(page_tail));
@@ -1705,14 +1756,14 @@ static int __split_huge_page_map(struct page *page,
                                 unsigned long address)
 {
        struct mm_struct *mm = vma->vm_mm;
+       spinlock_t *ptl;
        pmd_t *pmd, _pmd;
        int ret = 0, i;
        pgtable_t pgtable;
        unsigned long haddr;
 
-       spin_lock(&mm->page_table_lock);
        pmd = page_check_address_pmd(page, mm, address,
-                                    PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
+                       PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
        if (pmd) {
                pgtable = pgtable_trans_huge_withdraw(mm, pmd);
                pmd_populate(mm, &_pmd, pgtable);
@@ -1767,8 +1818,8 @@ static int __split_huge_page_map(struct page *page,
                pmdp_invalidate(vma, address, pmd);
                pmd_populate(mm, pmd, pgtable);
                ret = 1;
+               spin_unlock(ptl);
        }
-       spin_unlock(&mm->page_table_lock);
 
        return ret;
 }
@@ -2165,7 +2216,34 @@ static void khugepaged_alloc_sleep(void)
                        msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 }
 
+static int khugepaged_node_load[MAX_NUMNODES];
+
 #ifdef CONFIG_NUMA
+static int khugepaged_find_target_node(void)
+{
+       static int last_khugepaged_target_node = NUMA_NO_NODE;
+       int nid, target_node = 0, max_value = 0;
+
+       /* find the first node with the max normal-page hit count */
+       for (nid = 0; nid < MAX_NUMNODES; nid++)
+               if (khugepaged_node_load[nid] > max_value) {
+                       max_value = khugepaged_node_load[nid];
+                       target_node = nid;
+               }
+
+       /* do some balancing if several nodes have the same hit count */
+       if (target_node <= last_khugepaged_target_node)
+               for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
+                               nid++)
+                       if (max_value == khugepaged_node_load[nid]) {
+                               target_node = nid;
+                               break;
+                       }
+
+       last_khugepaged_target_node = target_node;
+       return target_node;
+}
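
The second pass is what rotates allocations when several nodes tie for the
maximum. A small userspace mirror of the rule, with a worked trace (the load
values are hypothetical):

    /* Userspace mirror of khugepaged_find_target_node(); illustrative. */
    static int pick_node(const int *load, int nnodes, int *last)
    {
            int nid, target = 0, max = 0;

            for (nid = 0; nid < nnodes; nid++)   /* first node with max  */
                    if (load[nid] > max) {
                            max = load[nid];
                            target = nid;
                    }
            if (target <= *last)                 /* rotate among ties    */
                    for (nid = *last + 1; nid < nnodes; nid++)
                            if (load[nid] == max) {
                                    target = nid;
                                    break;
                            }
            return *last = target;
    }

    /* With load = {3, 3, 1} and *last starting at -1, successive calls
     * return 0, 1, 0, 1, ... so the tied nodes share the allocations
     * instead of node 0 absorbing them all. */
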
+
 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 {
        if (IS_ERR(*hpage)) {
@@ -2199,9 +2277,8 @@ static struct page
         * mmap_sem in read mode is good idea also to allow greater
         * scalability.
         */
-       *hpage  = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
-                                     node, __GFP_OTHER_NODE);
-
+       *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
+               khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
        /*
         * After allocating the hugepage, release the mmap_sem read lock in
         * preparation for taking it in write mode.
@@ -2217,6 +2294,17 @@ static struct page
        return *hpage;
 }
 #else
+static int khugepaged_find_target_node(void)
+{
+       return 0;
+}
+
+static inline struct page *alloc_hugepage(int defrag)
+{
+       return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
+                          HPAGE_PMD_ORDER);
+}
+
 static struct page *khugepaged_alloc_hugepage(bool *wait)
 {
        struct page *hpage;
@@ -2423,6 +2511,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
        if (pmd_trans_huge(*pmd))
                goto out;
 
+       memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
             _pte++, _address += PAGE_SIZE) {
@@ -2439,12 +2528,13 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                if (unlikely(!page))
                        goto out_unmap;
                /*
-                * Chose the node of the first page. This could
-                * be more sophisticated and look at more pages,
-                * but isn't for now.
+                * Record which node the original page is from and save this
+                * information to khugepaged_node_load[].
+                * Khugepaged will allocate a hugepage from the node with
+                * the max hit count.
                 */
-               if (node == NUMA_NO_NODE)
-                       node = page_to_nid(page);
+               node = page_to_nid(page);
+               khugepaged_node_load[node]++;
                VM_BUG_ON(PageCompound(page));
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
                        goto out_unmap;
@@ -2459,9 +2549,11 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                ret = 1;
 out_unmap:
        pte_unmap_unlock(pte, ptl);
-       if (ret)
+       if (ret) {
+               node = khugepaged_find_target_node();
                /* collapse_huge_page will return with the mmap_sem released */
                collapse_huge_page(mm, address, hpage, vma, node);
+       }
 out:
        return ret;
 }