author     Michael Ellerman <mpe@ellerman.id.au>    2016-04-29 23:25:27 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>    2016-05-01 18:32:16 +1000
commit     3910a7f485e1222de2176ef5bf3a1d8de3152920 (patch)
tree       66de016468eb80717dbe0fc4a3d9b0081031b98c /arch/powerpc/mm
parent     4bece39b503c2340de154c82bf6d928e72d74d4e (diff)
download   linux-3910a7f485e1222de2176ef5bf3a1d8de3152920.tar.gz
powerpc/mm: Add pte_xchg() helper
We have five locations in 64-bit hash MMU code that do a cmpxchg() of a
PTE. Currently doing it inline is OK, but in a future patch we will be
converting the PTEs to __be64 in some configs. In that case we will need
casts at every cmpxchg() site in order to keep sparse happy.

So move the logic into a helper; this is a reasonably nice cleanup on its
own.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
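The helper itself is introduced outside arch/powerpc/mm, so its definition
does not appear in the diffstat below. Inferred from the converted call
sites, a minimal sketch of what pte_xchg() looks like (a sketch only; the
exact definition is not part of this patch excerpt):

static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
{
	unsigned long *p = (unsigned long *)ptep;

	/*
	 * Compare-and-swap the PTE: install new only if *ptep still equals
	 * old. Return true on success so callers can loop until they win
	 * the race, exactly as the old inline __cmpxchg_u64() test did.
	 */
	return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
}

Each converted site then becomes a single, cast-free line, e.g.
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
and the anticipated __be64 conversion can keep any sparse-required casts
inside this one helper rather than at all five call sites.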
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash64_4k.c           4
-rw-r--r--  arch/powerpc/mm/hash64_64k.c          7
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c  4
3 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 47d1b26effc6..dbd3133a8562 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -47,8 +47,8 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
if (access & _PAGE_RW)
new_pte |= _PAGE_DIRTY;
- } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
- old_pte, new_pte));
+ } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
+
/*
* PP bits. _PAGE_USER is already PP bit 0x2, so we only
* need to add in 0x1 if it's a read-only user page
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index b2d659cf51c6..6fbf983b3ae5 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -79,8 +79,8 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_COMBO;
if (access & _PAGE_RW)
new_pte |= _PAGE_DIRTY;
- } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
- old_pte, new_pte));
+ } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
+
/*
* Handle the subpage protection bits
*/
@@ -254,8 +254,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
if (access & _PAGE_RW)
new_pte |= _PAGE_DIRTY;
- } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
- old_pte, new_pte));
+ } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
rflags = htab_convert_pte_flags(new_pte);
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 8555fce902fe..9c0518761279 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -57,8 +57,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
if (access & _PAGE_RW)
new_pte |= _PAGE_DIRTY;
- } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
- old_pte, new_pte));
+ } while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
+
rflags = htab_convert_pte_flags(new_pte);
sz = ((1UL) << shift);