author     Linus Torvalds <torvalds@linux-foundation.org>   2018-08-13 12:23:39 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-08-13 12:23:39 -0700
commit     de5d1b39ea0b38a9f4dfb08966042b7b91e2df30 (patch)
tree       3591bdac4fe6756b4e3dc68b2ed1c792c4104218 /arch/alpha
parent     1c594774283a7cfe6dc0f8ffdfb2dbfc497502c4 (diff)
parent     fd2efaa4eb5317c3a86357a83a7d456a1b86a0ac (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking/atomics update from Thomas Gleixner:
"The locking, atomics and memory model brains delivered:
- A larger update to the atomics code which reworks the ordering
barriers, consolidates the atomic primitives, provides the new
atomic64_fetch_add_unless() primitive (sketched below) and cleans up the include
hell.
- Simplify cmpxchg() instrumentation and add instrumentation for
xchg() and cmpxchg_double().
- Updates to the memory model and documentation"
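To make the new primitive's contract concrete, here is a minimal C sketch of what atomic64_fetch_add_unless() guarantees, modelled on the kernel's generic cmpxchg()-based fallback; the _sketch suffix marks it as illustrative rather than the in-tree code:

        /*
         * Illustrative sketch, kernel-internal context assumed: atomically
         * add @a to @v unless @v equals @u, and return the value @v held
         * beforehand (the "fetch" part of the name).
         */
        static inline s64 atomic64_fetch_add_unless_sketch(atomic64_t *v, s64 a, s64 u)
        {
                s64 c = atomic64_read(v);

                do {
                        if (c == u)     /* hit the excluded value: no add */
                                break;
                } while (!atomic64_try_cmpxchg(v, &c, c + a));

                return c;               /* old value; callers compare against @u */
        }

Callers that only need the old boolean behaviour can test the return value against @u, which is how the generic layer derives the add_unless() form (see the note after the diff below).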
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
locking/atomics: Rework ordering barriers
locking/atomics: Instrument cmpxchg_double*()
locking/atomics: Instrument xchg()
locking/atomics: Simplify cmpxchg() instrumentation
locking/atomics/x86: Reduce arch_cmpxchg64*() instrumentation
tools/memory-model: Rename litmus tests to comply to norm7
tools/memory-model/Documentation: Fix typo, smb->smp
sched/Documentation: Update wake_up() & co. memory-barrier guarantees
locking/spinlock, sched/core: Clarify requirements for smp_mb__after_spinlock()
sched/core: Use smp_mb() in wake_woken_function()
tools/memory-model: Add informal LKMM documentation to MAINTAINERS
locking/atomics/Documentation: Describe atomic_set() as a write operation
tools/memory-model: Make scripts executable
tools/memory-model: Remove ACCESS_ONCE() from model
tools/memory-model: Remove ACCESS_ONCE() from recipes
locking/memory-barriers.txt/kokr: Update Korean translation to fix broken DMA vs. MMIO ordering example
MAINTAINERS: Add Daniel Lustig as an LKMM reviewer
tools/memory-model: Fix ISA2+pooncelock+pooncelock+pombonce name
tools/memory-model: Add litmus test for full multicopy atomicity
locking/refcount: Always allow checked forms
...
Diffstat (limited to 'arch/alpha')
-rw-r--r--  arch/alpha/include/asm/atomic.h  64
1 file changed, 20 insertions(+), 44 deletions(-)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 767bfdd42992..150a1c5d6a2c 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -18,11 +18,11 @@
  * To ensure dependency ordering is preserved for the _relaxed and
  * _release atomics, an smp_read_barrier_depends() is unconditionally
  * inserted into the _relaxed variants, which are used to build the
- * barriered versions. To avoid redundant back-to-back fences, we can
- * define the _acquire and _fence versions explicitly.
+ * barriered versions. Avoid redundant back-to-back fences in the
+ * _acquire and _fence versions.
  */
-#define __atomic_op_acquire(op, args...)        op##_relaxed(args)
-#define __atomic_op_fence                       __atomic_op_release
+#define __atomic_acquire_fence()
+#define __atomic_post_full_fence()
 
 #define ATOMIC_INIT(i)          { (i) }
 #define ATOMIC64_INIT(i)        { (i) }
@@ -206,7 +206,7 @@ ATOMIC_OPS(xor, xor)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -214,7 +214,7 @@ ATOMIC_OPS(xor, xor)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
         int c, new, old;
         smp_mb();
@@ -235,38 +235,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
         smp_mb();
         return old;
 }
-
+#define atomic_fetch_add_unless atomic_fetch_add_unless
 
 /**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
 *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns true iff @v was not @u.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
 {
-        long c, tmp;
+        long c, new, old;
         smp_mb();
         __asm__ __volatile__(
-        "1:     ldq_l   %[tmp],%[mem]\n"
-        "       cmpeq   %[tmp],%[u],%[c]\n"
-        "       addq    %[tmp],%[a],%[tmp]\n"
+        "1:     ldq_l   %[old],%[mem]\n"
+        "       cmpeq   %[old],%[u],%[c]\n"
+        "       addq    %[old],%[a],%[new]\n"
         "       bne     %[c],2f\n"
-        "       stq_c   %[tmp],%[mem]\n"
-        "       beq     %[tmp],3f\n"
+        "       stq_c   %[new],%[mem]\n"
+        "       beq     %[new],3f\n"
         "2:\n"
         ".subsection 2\n"
         "3:     br      1b\n"
         ".previous"
-        : [tmp] "=&r"(tmp), [c] "=&r"(c)
+        : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
         : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
         : "memory");
         smp_mb();
-        return !c;
+        return old;
 }
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 /*
  * atomic64_dec_if_positive - decrement by 1 if old value positive
@@ -295,31 +296,6 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
         smp_mb();
         return old - 1;
 }
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
-
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-#define atomic64_inc_return(v) atomic64_add_return(1,(v))
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
-
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1,(v))
-#define atomic64_inc(v) atomic64_add(1,(v))
-
-#define atomic_dec(v) atomic_sub(1,(v))
-#define atomic64_dec(v) atomic64_sub(1,(v))
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 
 #endif /* _ALPHA_ATOMIC_H */
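Two idioms in this diff are easy to miss. Defining a symbol to itself (e.g. "#define atomic64_fetch_add_unless atomic64_fetch_add_unless") lets the generic atomic headers detect with #ifdef that the architecture supplies its own implementation; and once the fetch-style primitive exists, the block of helpers deleted at the bottom of the file can be provided by common code instead of per-arch macros. A minimal sketch of that generic-layer shape, assumed here for illustration (simplified, not quoted from include/linux/atomic.h):

        /* Supply a fallback only when the arch header did not define one. */
        #ifndef atomic64_fetch_add_unless
        static inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
        {
                long c = atomic64_read(v);

                while (c != u) {
                        long old = atomic64_cmpxchg(v, c, c + a);

                        if (old == c)   /* add committed; c is the old value */
                                break;
                        c = old;        /* lost a race; retry with fresh value */
                }
                return c;
        }
        #define atomic64_fetch_add_unless atomic64_fetch_add_unless
        #endif

        /* The boolean-style API is then derived from the fetch form... */
        static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
        {
                return atomic64_fetch_add_unless(v, a, u) != u;
        }

        /* ...as are helpers such as the removed atomic64_inc_not_zero(). */
        #define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1, 0)

This is why the Alpha patch can delete twenty-odd lines of macros while adding only the two self-defines: everything removed is reconstructed once, centrally, on top of the fetch primitives.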