powerpc/mm/book3s: Rename hash specific PTE bits to carry H_ prefix

diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index eb2accd..ba3fc22 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -37,20 +37,20 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 
                old_pmd = pmd_val(pmd);
                /* If PMD busy, retry the access */
-               if (unlikely(old_pmd & _PAGE_BUSY))
+               if (unlikely(old_pmd & H_PAGE_BUSY))
                        return 0;
                /* If PMD permissions don't match, take page fault */
-               if (unlikely(access & ~old_pmd))
+               if (unlikely(!check_pte_access(access, old_pmd)))
                        return 1;
                /*
                 * Try to lock the PTE, add ACCESSED and DIRTY if it was
                 * a write access
                 */
-               new_pmd = old_pmd | _PAGE_BUSY | _PAGE_ACCESSED;
-               if (access & _PAGE_RW)
+               new_pmd = old_pmd | H_PAGE_BUSY | _PAGE_ACCESSED;
+               if (access & _PAGE_WRITE)
                        new_pmd |= _PAGE_DIRTY;
-       } while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
-                                         old_pmd, new_pmd));
+       } while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd)));
+
        rflags = htab_convert_pte_flags(new_pmd);
 
 #if 0
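
Aside, for illustration only (not part of the patch): the loop above treats
H_PAGE_BUSY as a per-PMD lock bit -- if the bit is already set the fault is
simply retried, otherwise the update races with other CPUs through an atomic
compare-and-exchange until it wins.  A minimal user-space sketch of the same
pattern, using C11 atomics in place of pmd_xchg() and invented EX_PAGE_* bit
values, might look like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_BUSY     0x1UL   /* hypothetical "entry being updated" bit */
#define EX_PAGE_ACCESSED 0x2UL   /* hypothetical referenced bit */
#define EX_PAGE_DIRTY    0x4UL   /* hypothetical dirty bit */

/*
 * Set the busy bit together with ACCESSED (and DIRTY for a write) in one
 * atomic update.  Returns false if another updater already holds the busy
 * bit, mirroring the "retry the access" case above.
 */
bool lock_entry(_Atomic uint64_t *entry, bool is_write, uint64_t *out)
{
        uint64_t old, new;

        do {
                old = atomic_load(entry);
                if (old & EX_PAGE_BUSY)
                        return false;    /* busy: caller retries the access */
                new = old | EX_PAGE_BUSY | EX_PAGE_ACCESSED;
                if (is_write)
                        new |= EX_PAGE_DIRTY;
        } while (!atomic_compare_exchange_weak(entry, &old, new));

        *out = new;
        return true;
}

int main(void)
{
        _Atomic uint64_t entry = 0;
        uint64_t new_val;

        if (lock_entry(&entry, true, &new_val))
                printf("locked, new value 0x%llx\n",
                       (unsigned long long)new_val);
        return 0;
}

The real code clears the busy bit again at the end of __hash_page_thp(), once
the hash slot information has been recorded (see the smp_wmb() further down).
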
@@ -78,7 +78,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                 * base page size. This is because demote_segment won't flush
                 * hash page table entries.
                 */
-               if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) {
+               if ((old_pmd & H_PAGE_HASHPTE) && !(old_pmd & H_PAGE_COMBO)) {
                        flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
                                            ssize, flags);
                        /*
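
For illustration only (not part of the patch): the condition above asks
whether the hugepage was previously hashed with a 64K base page size --
H_PAGE_HASHPTE says hash PTEs exist, and the absence of H_PAGE_COMBO says they
were not inserted as a combination of 4K entries, so a switch to 4K now has to
flush them explicitly.  Written as a stand-alone predicate, with invented
EX_PAGE_* bit values:

#include <stdbool.h>
#include <stdint.h>

#define EX_PAGE_HASHPTE 0x10UL  /* hypothetical: hash PTE(s) were inserted */
#define EX_PAGE_COMBO   0x20UL  /* hypothetical: inserted with 4K base pages */

/* True if the entry was hashed with the full 64K base page size. */
bool hashed_with_64k_base(uint64_t old_pmd)
{
        return (old_pmd & EX_PAGE_HASHPTE) && !(old_pmd & EX_PAGE_COMBO);
}
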
@@ -125,7 +125,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                hash = hpt_hash(vpn, shift, ssize);
                /* insert new entry */
                pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
-               new_pmd |= _PAGE_HASHPTE;
+               new_pmd |= H_PAGE_HASHPTE;
 
 repeat:
                hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
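
For illustration only (not part of the patch): hpt_hash() produces a hash
value, and the line above turns it into the index of the first slot of the
primary HPTE group: the hash is reduced modulo the number of groups with
htab_hash_mask, scaled by HPTES_PER_GROUP (8 slots per group), and kept
group-aligned by & ~0x7UL.  A worked example with a made-up mask:

#include <stdio.h>

#define EX_HPTES_PER_GROUP 8    /* 8 slots per HPTE group on book3s hash */

int main(void)
{
        unsigned long hash = 0x2468acfUL;            /* hypothetical hpt_hash() result */
        unsigned long ex_htab_hash_mask = 0xfffffUL; /* hypothetical: 2^20 groups */
        unsigned long hpte_group;

        /* group number = hash mod number of groups; *8 gives its first slot */
        hpte_group = ((hash & ex_htab_hash_mask) * EX_HPTES_PER_GROUP) & ~0x7UL;
        printf("primary group starts at slot %lu\n", hpte_group);
        return 0;
}
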
@@ -169,17 +169,17 @@ repeat:
                mark_hpte_slot_valid(hpte_slot_array, index, slot);
        }
        /*
-        * Mark the pte with _PAGE_COMBO, if we are trying to hash it with
+        * Mark the pte with H_PAGE_COMBO, if we are trying to hash it with
         * base page size 4k.
         */
        if (psize == MMU_PAGE_4K)
-               new_pmd |= _PAGE_COMBO;
+               new_pmd |= H_PAGE_COMBO;
        /*
         * The hpte valid is stored in the pgtable whose address is in the
         * second half of the PMD. Order this against clearing of the busy bit in
         * huge pmd.
         */
        smp_wmb();
-       *pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
+       *pmdp = __pmd(new_pmd & ~H_PAGE_BUSY);
        return 0;
 }
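
For illustration only (not part of the patch): the smp_wmb() above makes sure
the HPTE slot information written into the pgtable referenced from the second
half of the PMD is visible before the PMD is stored back with H_PAGE_BUSY
cleared, so any CPU that sees the unlocked PMD also sees valid slot data.  A
user-space analogue of that publish step, using a C11 release store and
invented names:

#include <stdatomic.h>
#include <stdint.h>

#define EX_PAGE_BUSY 0x1UL                 /* hypothetical busy/lock bit */

struct ex_hugepage {
        unsigned char hpte_slot_array[16]; /* hypothetical per-subpage slot info */
        _Atomic uint64_t pmd;              /* the hugepage PMD itself */
};

void publish_update(struct ex_hugepage *hp, unsigned int index,
                    unsigned char slot, uint64_t new_pmd)
{
        /* record where the HPTE went ... */
        hp->hpte_slot_array[index] = slot;

        /*
         * ... then clear the busy bit with a release store, the analogue of
         * smp_wmb() followed by the plain store to *pmdp above.
         */
        atomic_store_explicit(&hp->pmd, new_pmd & ~EX_PAGE_BUSY,
                              memory_order_release);
}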