From 6da7539734d4e0f5caeea1e0efbb74d7b83db1ca Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Thu, 11 Jul 2013 07:42:14 -0700
Subject: alpha: Improve atomic_add_unless

Use ll/sc loops instead of C loops around cmpxchg.
Update the atomic64_add_unless block comment to match the code.

Reviewed-and-Tested-by: Matt Turner
Signed-off-by: Matt Turner
Signed-off-by: Richard Henderson
---
 arch/alpha/include/asm/atomic.h | 60 +++++++++++++++++++++++++----------------
 1 file changed, 37 insertions(+), 23 deletions(-)

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index c2cbe4fc391c..0dc18fc4d925 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -186,17 +186,24 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  */
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c;
+	int c, new, old;
+	smp_mb();
+	__asm__ __volatile__(
+	"1:	ldl_l	%[old],%[mem]\n"
+	"	cmpeq	%[old],%[u],%[c]\n"
+	"	addl	%[old],%[a],%[new]\n"
+	"	bne	%[c],2f\n"
+	"	stl_c	%[new],%[mem]\n"
+	"	beq	%[new],3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3:	br	1b\n"
+	".previous"
+	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
+	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
+	: "memory");
+	smp_mb();
+	return old;
 }
 
 
@@ -207,21 +214,28 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
+ * Returns true iff @v was not @u.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-	long c, old;
-	c = atomic64_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic64_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
+	long c, tmp;
+	smp_mb();
+	__asm__ __volatile__(
+	"1:	ldq_l	%[tmp],%[mem]\n"
+	"	cmpeq	%[tmp],%[u],%[c]\n"
+	"	addq	%[tmp],%[a],%[tmp]\n"
+	"	bne	%[c],2f\n"
+	"	stq_c	%[tmp],%[mem]\n"
+	"	beq	%[tmp],3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3:	br	1b\n"
+	".previous"
+	: [tmp] "=&r"(tmp), [c] "=&r"(c)
+	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
+	: "memory");
+	smp_mb();
+	return !c;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
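
A note on the two return-value contracts, with a minimal usage sketch. None
of this is from the patch itself: get_ref() and get_ref64() are hypothetical
helper names, shown only to illustrate how a caller consumes each convention.
__atomic_add_unless() returns the old value of the counter (the add happened
iff that value was not @u), while atomic64_add_unless() now returns a boolean
directly, as the updated block comment says:

	/*
	 * Hypothetical callers, assuming only the two functions from the
	 * patch; a sketch of the calling conventions, not kernel code.
	 */
	static inline int get_ref(atomic_t *refcount)
	{
		/* With u == 0, the old value is nonzero iff the add happened. */
		return __atomic_add_unless(refcount, 1, 0) != 0;
	}

	static inline int get_ref64(atomic64_t *refcount)
	{
		/* atomic64_add_unless() already returns true iff it added. */
		return atomic64_add_unless(refcount, 1, 0);
	}

The asymmetry is deliberate: the generic atomic_add_unless() wrapper compares
the old value that __atomic_add_unless() returns against @u, whereas
atomic64_inc_not_zero() wants the boolean, so atomic64_add_unless() produces
it directly; on Alpha that is just !c, the negated cmpeq result, with no
extra comparison in C.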