Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[cascardo/linux.git] / arch / powerpc / mm / hash_utils_64.c
index e303a6d..bde8b55 100644 (file)
@@ -807,7 +807,7 @@ void __init early_init_mmu(void)
 }
 
 #ifdef CONFIG_SMP
-void __cpuinit early_init_mmu_secondary(void)
+void early_init_mmu_secondary(void)
 {
        /* Initialize hash table for that CPU */
        if (!firmware_has_feature(FW_FEATURE_LPAR))
@@ -907,7 +907,7 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)
 
        if (ea >= spt->maxaddr)
                return 0;
-       if (ea < 0x100000000) {
+       if (ea < 0x100000000UL) {
                /* addresses below 4GB use spt->low_prot */
                sbpm = spt->low_prot;
        } else {
@@ -1050,13 +1050,26 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                goto bail;
        }
 
-#ifdef CONFIG_HUGETLB_PAGE
        if (hugeshift) {
-               rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
-                                       ssize, hugeshift, psize);
+               if (pmd_trans_huge(*(pmd_t *)ptep))
+                       rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
+                                            trap, local, ssize, psize);
+#ifdef CONFIG_HUGETLB_PAGE
+               else
+                       rc = __hash_page_huge(ea, access, vsid, ptep, trap,
+                                             local, ssize, hugeshift, psize);
+#else
+               else {
+                       /*
+                        * If we have a hugeshift and the entry is not transhuge
+                        * while hugetlb is disabled, something is really wrong.
+                        */
+                       rc = 1;
+                       WARN_ON(1);
+               }
+#endif
                goto bail;
        }
-#endif /* CONFIG_HUGETLB_PAGE */
 
 #ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
@@ -1145,6 +1158,7 @@ EXPORT_SYMBOL_GPL(hash_page);
 void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
 {
+       int hugepage_shift;
        unsigned long vsid;
        pgd_t *pgdir;
        pte_t *ptep;
@@ -1166,10 +1180,27 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return;
-       ptep = find_linux_pte(pgdir, ea);
-       if (!ptep)
+
+       /* Get VSID */
+       ssize = user_segment_size(ea);
+       vsid = get_vsid(mm->context.id, ea, ssize);
+       if (!vsid)
                return;
+       /*
+        * Hash doesn't like irqs. Walking linux page table with irq disabled
+        * saves us from holding multiple locks.
+        */
+       local_irq_save(flags);
+
+       /*
+        * THP pages use update_mmu_cache_pmd. We don't do
+        * hash preload there. Hence can ignore THP here
+        */
+       ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
+       if (!ptep)
+               goto out_exit;
 
+       WARN_ON(hugepage_shift);
 #ifdef CONFIG_PPC_64K_PAGES
        /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
         * a 64K kernel), then we don't preload, hash_page() will take
@@ -1178,18 +1209,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
         * page size demotion here
         */
        if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
-               return;
+               goto out_exit;
 #endif /* CONFIG_PPC_64K_PAGES */
 
-       /* Get VSID */
-       ssize = user_segment_size(ea);
-       vsid = get_vsid(mm->context.id, ea, ssize);
-       if (!vsid)
-               return;
-
-       /* Hash doesn't like irqs */
-       local_irq_save(flags);
-
        /* Is that local to this CPU ? */
        if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                local = 1;
@@ -1211,7 +1233,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
                                   mm->context.user_psize,
                                   mm->context.user_psize,
                                   pte_val(*ptep));
-
+out_exit:
        local_irq_restore(flags);
 }
 
@@ -1232,7 +1254,11 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;
                DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
-               ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
+               /*
+                * We use the same base page size and actual psize because we
+                * don't use these functions for hugepages.
+                */
+               ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local);
        } pte_iterate_hashed_end();
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -1365,7 +1391,8 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
                hash = ~hash;
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot += hidx & _PTEIDX_GROUP_IX;
-       ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0);
+       ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize,
+                              mmu_kernel_ssize, 0);
 }
 
 void kernel_map_pages(struct page *page, int numpages, int enable)