author		Michael Ellerman <mpe@ellerman.id.au>	2016-04-29 23:25:27 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-05-01 18:32:16 +1000
commit		3910a7f485e1222de2176ef5bf3a1d8de3152920 (patch)
tree		66de016468eb80717dbe0fc4a3d9b0081031b98c /arch/powerpc/mm/hash64_64k.c
parent		4bece39b503c2340de154c82bf6d928e72d74d4e (diff)
powerpc/mm: Add pte_xchg() helper
We have five locations in the 64-bit hash MMU code that do a cmpxchg() of a
PTE. Currently doing it inline is OK, but in a future patch we will be
converting the PTEs to __be64 in some configs. In that case we will need
casts at every cmpxchg() site in order to keep sparse happy.
So move the logic into a helper; this is a reasonably nice cleanup on
its own.
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
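
Note: the pte_xchg() definition itself lands outside this file, so it does
not appear in the diff below. As a rough sketch, assuming the helper simply
wraps __cmpxchg_u64() on the raw PTE value and reports success as a bool, it
would look something like:

	/*
	 * Sketch of the helper, inferred from the call sites below;
	 * not taken from this diff. Returns true if *ptep still held
	 * 'old' and was atomically replaced with 'new'.
	 */
	static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
	{
		unsigned long *p = (unsigned long *)ptep;

		/* __cmpxchg_u64() returns the prior contents of *p */
		return pte_val(old) == __cmpxchg_u64(p, pte_val(old),
						     pte_val(new));
	}

The do { ... } while (!pte_xchg(...)) loops in the diff then retry until the
PTE is updated without a concurrent modification racing in between.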
Diffstat (limited to 'arch/powerpc/mm/hash64_64k.c')
-rw-r--r--	arch/powerpc/mm/hash64_64k.c	7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index b2d659cf51c6..6fbf983b3ae5 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -79,8 +79,8 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_COMBO;
 		if (access & _PAGE_RW)
 			new_pte |= _PAGE_DIRTY;
-	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
-					  old_pte, new_pte));
+	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
+
 	/*
 	 * Handle the subpage protection bits
 	 */
@@ -254,8 +254,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
 		if (access & _PAGE_RW)
 			new_pte |= _PAGE_DIRTY;
-	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
-					  old_pte, new_pte));
+	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

 	rflags = htab_convert_pte_flags(new_pte);