powerpc/mm: Fix vma_mmu_pagesize() for radix
[cascardo/linux.git] / arch/powerpc/mm/hugetlbpage.c
index 6dd272b..7d677ef 100644
@@ -413,13 +413,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
 {
        struct hugepd_freelist **batchp;
 
-       batchp = this_cpu_ptr(&hugepd_freelist_cur);
+       batchp = &get_cpu_var(hugepd_freelist_cur);
 
        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpumask_equal(mm_cpumask(tlb->mm),
                          cpumask_of(smp_processor_id()))) {
                kmem_cache_free(hugepte_cache, hugepte);
-        put_cpu_var(hugepd_freelist_cur);
+               put_cpu_var(hugepd_freelist_cur);
                return;
        }
 
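The hunk above replaces this_cpu_ptr() with the get_cpu_var()/put_cpu_var() pair, which keeps the task on the current CPU (by disabling preemption) while the per-CPU freelist pointer is in use, and it also fixes the indentation of the early put_cpu_var() call. A minimal sketch of that access pattern follows; example_counter and example_bump() are made-up names for illustration, while DEFINE_PER_CPU(), get_cpu_var() and put_cpu_var() are the real <linux/percpu.h> interfaces.

    #include <linux/percpu.h>

    /* Hypothetical per-CPU variable, standing in for hugepd_freelist_cur. */
    static DEFINE_PER_CPU(int, example_counter);

    static void example_bump(void)
    {
            int *p;

            /* get_cpu_var() disables preemption and returns this CPU's copy. */
            p = &get_cpu_var(example_counter);
            (*p)++;

            /* Every exit path must balance it with put_cpu_var(). */
            put_cpu_var(example_counter);
    }
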
@@ -719,14 +719,14 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 {
 #ifdef CONFIG_PPC_MM_SLICES
        unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
-
-       return 1UL << mmu_psize_to_shift(psize);
-#else
+       /* With radix we don't use slices, so derive it from the vma */
+       if (!radix_enabled())
+               return 1UL << mmu_psize_to_shift(psize);
+#endif
        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;
 
        return huge_page_size(hstate_vma(vma));
-#endif
 }
 
 static inline bool is_power_of_4(unsigned long x)
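Reassembling the + and context lines of the hunk above gives roughly the following post-patch body for vma_mmu_pagesize(); treat it as a sketch of the result rather than a verbatim copy of the file:

    unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
    {
    #ifdef CONFIG_PPC_MM_SLICES
            unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

            /* With radix we don't use slices, so derive it from the vma */
            if (!radix_enabled())
                    return 1UL << mmu_psize_to_shift(psize);
    #endif
            if (!is_vm_hugetlb_page(vma))
                    return PAGE_SIZE;

            return huge_page_size(hstate_vma(vma));
    }

On radix the slice psize is not meaningful, so the function now falls through to the generic path: PAGE_SIZE for ordinary VMAs, and the hstate size for hugetlb VMAs.
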
@@ -1003,9 +1003,9 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                end = pte_end;
 
        pte = READ_ONCE(*ptep);
-       mask = _PAGE_PRESENT | _PAGE_USER;
+       mask = _PAGE_PRESENT | _PAGE_READ;
        if (write)
-               mask |= _PAGE_RW;
+               mask |= _PAGE_WRITE;
 
        if ((pte_val(pte) & mask) != mask)
                return 0;
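
The last hunk switches the gup fast-path permission mask from the old _PAGE_USER/_PAGE_RW bits to the _PAGE_READ/_PAGE_WRITE encoding. The check requires every bit in the mask to be present in the PTE, with write access simply adding _PAGE_WRITE to the required set. A minimal sketch of that test is below; the helper name example_pte_allows_access() is made up for illustration, only the pte accessors and _PAGE_* bits come from the powerpc headers.

    static inline bool example_pte_allows_access(pte_t pte, int write)
    {
            unsigned long mask = _PAGE_PRESENT | _PAGE_READ;

            if (write)
                    mask |= _PAGE_WRITE;

            /* All required bits must be set, not just some of them. */
            return (pte_val(pte) & mask) == mask;
    }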