author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-09-03 15:46:07 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-09-03 15:46:07 -0700
commit | ca520cab25e0e8da717c596ccaa2c2b3650cfa09 (patch) |
tree | 883eb497642d98635817f9cf954ac98e043fb573 /include/linux/atomic.h |
parent | 4c12ab7e5e2e892fa94df500f96001837918a281 (diff) |
parent | d420acd816c07c7be31bd19d09cbcb16e5572fa6 (diff) |
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and atomic updates from Ingo Molnar:
"Main changes in this cycle are:
- Extend atomic primitives with coherent logic op primitives
(atomic_{or,and,xor}()) and deprecate the old partial APIs
(atomic_{set,clear}_mask())
The old ops were incoherent with incompatible signatures across
architectures and with incomplete support. Now every architecture
supports the primitives consistently (by Peter Zijlstra)
- Generic support for 'relaxed atomics':
- _acquire/release/relaxed() flavours of xchg(), cmpxchg() and {add,sub}_return()
- atomic_read_acquire()
- atomic_set_release()
This came out of porting qrwlock code to arm64 (by Will Deacon)
- Clean up the fragile static_key APIs that were causing repeat bugs,
by introducing a new one:
DEFINE_STATIC_KEY_TRUE(name);
DEFINE_STATIC_KEY_FALSE(name);
which define a key of different types with an initial true/false
value.
Then allow:
static_branch_likely()
static_branch_unlikely()
to take a key of either type and emit the right instruction for the
case. To be able to know the 'type' of the static key we encode it
in the jump entry (by Peter Zijlstra)
- Static key self-tests (by Jason Baron)
- qrwlock optimizations (by Waiman Long)
- small futex enhancements (by Davidlohr Bueso)
- ... and misc other changes"
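As context for the static-key item in the message above, here is a minimal sketch of how the new interface is intended to read. The key name and the two worker functions are hypothetical, invented purely for illustration; only DEFINE_STATIC_KEY_FALSE(), static_branch_unlikely() and static_branch_enable() are the macros introduced by this series.

```c
#include <linux/jump_label.h>

/* Hypothetical key guarding a rarely enabled slow path; defaults to false. */
static DEFINE_STATIC_KEY_FALSE(use_debug_path);

static void do_fast_work(void) { }	/* hypothetical helpers, illustration only */
static void do_debug_work(void) { }

static void hot_path(void)
{
	/* Default (false) key: the fast path is straight-line code with a patchable NOP. */
	if (static_branch_unlikely(&use_debug_path))
		do_debug_work();
	else
		do_fast_work();
}

/* Flip the branch at runtime, e.g. from a sysctl or debugfs write handler. */
static void enable_debug_path(void)
{
	static_branch_enable(&use_debug_path);
}
```

Because the key's type (true/false) is now encoded in the jump entry, static_branch_likely()/static_branch_unlikely() can emit the correct instruction for either key type, which is exactly the fragility the old static_key API suffered from.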
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
jump_label/x86: Work around asm build bug on older/backported GCCs
locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
locking/static_keys: Make verify_keys() static
jump label, locking/static_keys: Update docs
locking/static_keys: Provide a selftest
jump_label: Provide a self-test
s390/uaccess, locking/static_keys: employ static_branch_likely()
x86, tsc, locking/static_keys: Employ static_branch_likely()
locking/static_keys: Add selftest
locking/static_keys: Add a new static_key interface
locking/static_keys: Rework update logic
locking/static_keys: Add static_key_{en,dis}able() helpers
...
Diffstat (limited to 'include/linux/atomic.h')
-rw-r--r-- | include/linux/atomic.h | 361 |
1 file changed, 348 insertions(+), 13 deletions(-)
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a8540ecf..00a5763e850e 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,329 @@
 #ifndef _LINUX_ATOMIC_H
 #define _LINUX_ATOMIC_H
 #include <asm/atomic.h>
+#include <asm/barrier.h>
+
+/*
+ * Relaxed variants of xchg, cmpxchg and some atomic operations.
+ *
+ * We support four variants:
+ *
+ * - Fully ordered: The default implementation, no suffix required.
+ * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
+ * - Release: Provides RELEASE semantics, _release suffix.
+ * - Relaxed: No ordering guarantees, _relaxed suffix.
+ *
+ * For compound atomics performing both a load and a store, ACQUIRE
+ * semantics apply only to the load and RELEASE semantics only to the
+ * store portion of the operation. Note that a failed cmpxchg_acquire
+ * does -not- imply any memory ordering constraints.
+ *
+ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
+ */
+
+#ifndef atomic_read_acquire
+#define atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic_set_release
+#define atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
+#endif
+
+/*
+ * The idea here is to build acquire/release variants by adding explicit
+ * barriers on top of the relaxed variant. In the case where the relaxed
+ * variant is already fully ordered, no additional barriers are needed.
+ */
+#define __atomic_op_acquire(op, args...) \
+({ \
+        typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
+        smp_mb__after_atomic(); \
+        __ret; \
+})
+
+#define __atomic_op_release(op, args...) \
+({ \
+        smp_mb__before_atomic(); \
+        op##_relaxed(args); \
+})
+
+#define __atomic_op_fence(op, args...) \
+({ \
+        typeof(op##_relaxed(args)) __ret; \
+        smp_mb__before_atomic(); \
+        __ret = op##_relaxed(args); \
+        smp_mb__after_atomic(); \
+        __ret; \
+})
+
+/* atomic_add_return_relaxed */
+#ifndef atomic_add_return_relaxed
+#define atomic_add_return_relaxed	atomic_add_return
+#define atomic_add_return_acquire	atomic_add_return
+#define atomic_add_return_release	atomic_add_return
+
+#else /* atomic_add_return_relaxed */
+
+#ifndef atomic_add_return_acquire
+#define atomic_add_return_acquire(...) \
+        __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return_release
+#define atomic_add_return_release(...) \
+        __atomic_op_release(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return
+#define atomic_add_return(...) \
+        __atomic_op_fence(atomic_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic_add_return_relaxed */
+
+/* atomic_sub_return_relaxed */
+#ifndef atomic_sub_return_relaxed
+#define atomic_sub_return_relaxed	atomic_sub_return
+#define atomic_sub_return_acquire	atomic_sub_return
+#define atomic_sub_return_release	atomic_sub_return
+
+#else /* atomic_sub_return_relaxed */
+
+#ifndef atomic_sub_return_acquire
+#define atomic_sub_return_acquire(...) \
+        __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return_release
+#define atomic_sub_return_release(...) \
+        __atomic_op_release(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return
+#define atomic_sub_return(...) \
+        __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic_sub_return_relaxed */
+
+/* atomic_xchg_relaxed */
+#ifndef atomic_xchg_relaxed
+#define atomic_xchg_relaxed		atomic_xchg
+#define atomic_xchg_acquire		atomic_xchg
+#define atomic_xchg_release		atomic_xchg
+
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+#define atomic_xchg_acquire(...) \
+        __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg_release
+#define atomic_xchg_release(...) \
+        __atomic_op_release(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg
+#define atomic_xchg(...) \
+        __atomic_op_fence(atomic_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic_xchg_relaxed */
+
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_relaxed		atomic_cmpxchg
+#define atomic_cmpxchg_acquire		atomic_cmpxchg
+#define atomic_cmpxchg_release		atomic_cmpxchg
+
+#else /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_cmpxchg_acquire
+#define atomic_cmpxchg_acquire(...) \
+        __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg_release
+#define atomic_cmpxchg_release(...) \
+        __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg
+#define atomic_cmpxchg(...) \
+        __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic64_read_acquire
+#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic64_set_release
+#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
+#endif
+
+/* atomic64_add_return_relaxed */
+#ifndef atomic64_add_return_relaxed
+#define atomic64_add_return_relaxed	atomic64_add_return
+#define atomic64_add_return_acquire	atomic64_add_return
+#define atomic64_add_return_release	atomic64_add_return
+
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+#define atomic64_add_return_acquire(...) \
+        __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return_release
+#define atomic64_add_return_release(...) \
+        __atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return
+#define atomic64_add_return(...) \
+        __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_add_return_relaxed */
+
+/* atomic64_sub_return_relaxed */
+#ifndef atomic64_sub_return_relaxed
+#define atomic64_sub_return_relaxed	atomic64_sub_return
+#define atomic64_sub_return_acquire	atomic64_sub_return
+#define atomic64_sub_return_release	atomic64_sub_return
+
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+#define atomic64_sub_return_acquire(...) \
+        __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return_release
+#define atomic64_sub_return_release(...) \
+        __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return
+#define atomic64_sub_return(...) \
+        __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_sub_return_relaxed */
+
+/* atomic64_xchg_relaxed */
+#ifndef atomic64_xchg_relaxed
+#define atomic64_xchg_relaxed		atomic64_xchg
+#define atomic64_xchg_acquire		atomic64_xchg
+#define atomic64_xchg_release		atomic64_xchg
+
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+#define atomic64_xchg_acquire(...) \
+        __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg_release
+#define atomic64_xchg_release(...) \
+        __atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg
+#define atomic64_xchg(...) \
+        __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_xchg_relaxed */
+
+/* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_cmpxchg_relaxed
+#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
+#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
+#define atomic64_cmpxchg_release	atomic64_cmpxchg
+
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+#define atomic64_cmpxchg_acquire(...) \
+        __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg_release
+#define atomic64_cmpxchg_release(...) \
+        __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg
+#define atomic64_cmpxchg(...) \
+        __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_cmpxchg_relaxed */
+
+/* cmpxchg_relaxed */
+#ifndef cmpxchg_relaxed
+#define cmpxchg_relaxed			cmpxchg
+#define cmpxchg_acquire			cmpxchg
+#define cmpxchg_release			cmpxchg
+
+#else /* cmpxchg_relaxed */
+
+#ifndef cmpxchg_acquire
+#define cmpxchg_acquire(...) \
+        __atomic_op_acquire(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg_release
+#define cmpxchg_release(...) \
+        __atomic_op_release(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg
+#define cmpxchg(...) \
+        __atomic_op_fence(cmpxchg, __VA_ARGS__)
+#endif
+#endif /* cmpxchg_relaxed */
+
+/* cmpxchg64_relaxed */
+#ifndef cmpxchg64_relaxed
+#define cmpxchg64_relaxed		cmpxchg64
+#define cmpxchg64_acquire		cmpxchg64
+#define cmpxchg64_release		cmpxchg64
+
+#else /* cmpxchg64_relaxed */
+
+#ifndef cmpxchg64_acquire
+#define cmpxchg64_acquire(...) \
+        __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64_release
+#define cmpxchg64_release(...) \
+        __atomic_op_release(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64
+#define cmpxchg64(...) \
+        __atomic_op_fence(cmpxchg64, __VA_ARGS__)
+#endif
+#endif /* cmpxchg64_relaxed */
+
+/* xchg_relaxed */
+#ifndef xchg_relaxed
+#define xchg_relaxed			xchg
+#define xchg_acquire			xchg
+#define xchg_release			xchg
+
+#else /* xchg_relaxed */
+
+#ifndef xchg_acquire
+#define xchg_acquire(...)	__atomic_op_acquire(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg_release
+#define xchg_release(...)	__atomic_op_release(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg
+#define xchg(...)		__atomic_op_fence(xchg, __VA_ARGS__)
+#endif
+#endif /* xchg_relaxed */
 
 /**
  * atomic_add_unless - add unless the number is already a given value
@@ -28,6 +351,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
 #endif
 
+#ifndef atomic_andnot
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+        atomic_and(~i, v);
+}
+#endif
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+        atomic_andnot(mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+        atomic_or(mask, v);
+}
+
 /**
  * atomic_inc_not_zero_hint - increment if not null
  * @v: pointer of type atomic_t
@@ -111,21 +451,16 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 }
 #endif
 
-#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
-static inline void atomic_or(int i, atomic_t *v)
-{
-        int old;
-        int new;
-
-        do {
-                old = atomic_read(v);
-                new = old | i;
-        } while (atomic_cmpxchg(v, old, new) != old);
-}
-#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
-
 #include <asm-generic/atomic-long.h>
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
 #endif
+
+#ifndef atomic64_andnot
+static inline void atomic64_andnot(long long i, atomic64_t *v)
+{
+        atomic64_and(~i, v);
+}
+#endif
+
 #endif /* _LINUX_ATOMIC_H */
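As a usage-level illustration of the _acquire/_release accessors added above, here is a minimal, hypothetical test-and-set lock. The struct and function names are invented for this sketch; only atomic_cmpxchg_acquire() and atomic_set_release() come from the header change, and cpu_relax() is the usual kernel busy-wait hint (assumed available via asm/processor.h).

```c
#include <linux/atomic.h>
#include <asm/processor.h>	/* cpu_relax() */

struct simple_lock {
	atomic_t locked;	/* 0 = free, 1 = held (illustrative only) */
};

static void simple_lock_acquire(struct simple_lock *l)
{
	/*
	 * ACQUIRE ordering is only needed when the lock is actually taken;
	 * as the header comment notes, a failed cmpxchg_acquire() implies
	 * no memory ordering at all.
	 */
	while (atomic_cmpxchg_acquire(&l->locked, 0, 1) != 0)
		cpu_relax();
}

static void simple_lock_release(struct simple_lock *l)
{
	/* RELEASE store: prior critical-section accesses cannot move past it. */
	atomic_set_release(&l->locked, 0);
}
```

On an architecture that provides only atomic_cmpxchg_relaxed(), the _acquire form used here is generated by the __atomic_op_acquire() wrapper above, i.e. the relaxed operation followed by smp_mb__after_atomic(); architectures whose atomics are already fully ordered simply map all four variants to the existing implementation. The same patch also deprecates atomic_{set,clear}_mask() in favour of atomic_or() and atomic_andnot().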