author     Peter Zijlstra <peterz@infradead.org>  2020-07-29 13:00:57 +0200
committer  Peter Zijlstra <peterz@infradead.org>  2020-08-26 12:42:02 +0200
commit     a435b9a14356587cf512ea6473368a579373c74c (patch)
tree       a32c367027487b27bed5d119d1ed24ba36b1c9c0 /include/linux/refcount.h
parent     6eb6d05958f3208b3dd2b5311f1a6e68abdbe5d5 (diff)
locking/refcount: Provide __refcount API to obtain the old value
David requested means to obtain the old/previous value from the
refcount API for tracing purposes.
Duplicate (most of) the API as __refcount*() with an additional
'int *' argument into which, if !NULL, the old value will be stored.
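
For example, a hypothetical caller that wants the previous count for a
tracepoint could use the new variants like so (sketch only; struct obj,
obj_get()/obj_put() and the trace_printk() messages are illustrative,
not part of this patch):

	struct obj {
		refcount_t ref;
	};

	static void obj_get(struct obj *o)
	{
		int old;

		/* Behaves exactly like refcount_inc(), but reports the old count. */
		__refcount_inc(&o->ref, &old);
		trace_printk("obj %p: ref %d -> %d\n", o, old, old + 1);
	}

	static bool obj_put(struct obj *o)
	{
		int old;
		bool is_zero = __refcount_dec_and_test(&o->ref, &old);

		trace_printk("obj %p: ref %d -> %d\n", o, old, old - 1);
		return is_zero;	/* true: last reference dropped */
	}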
Requested-by: David Howells <dhowells@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lkml.kernel.org/r/20200729111120.GA2638@hirez.programming.kicks-ass.net
Diffstat (limited to 'include/linux/refcount.h')
-rw-r--r--  include/linux/refcount.h  65
1 file changed, 57 insertions(+), 8 deletions(-)
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 0e3ee25eb156..7fabb1af18e0 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -165,7 +165,7 @@ static inline unsigned int refcount_read(const refcount_t *r)
  *
  * Return: false if the passed refcount is 0, true otherwise
  */
-static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
+static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
 {
 	int old = refcount_read(r);
 
@@ -174,12 +174,20 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
 			break;
 	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
 
+	if (oldp)
+		*oldp = old;
+
 	if (unlikely(old < 0 || old + i < 0))
 		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
 
 	return old;
 }
 
+static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
+{
+	return __refcount_add_not_zero(i, r, NULL);
+}
+
 /**
  * refcount_add - add a value to a refcount
  * @i: the value to add to the refcount
@@ -196,16 +204,24 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
  * cases, refcount_inc(), or one of its variants, should instead be used to
  * increment a reference count.
  */
-static inline void refcount_add(int i, refcount_t *r)
+static inline void __refcount_add(int i, refcount_t *r, int *oldp)
 {
 	int old = atomic_fetch_add_relaxed(i, &r->refs);
 
+	if (oldp)
+		*oldp = old;
+
 	if (unlikely(!old))
 		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
 	else if (unlikely(old < 0 || old + i < 0))
 		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
 }
 
+static inline void refcount_add(int i, refcount_t *r)
+{
+	__refcount_add(i, r, NULL);
+}
+
 /**
  * refcount_inc_not_zero - increment a refcount unless it is 0
  * @r: the refcount to increment
@@ -219,9 +235,14 @@ static inline void refcount_add(int i, refcount_t *r)
  *
  * Return: true if the increment was successful, false otherwise
  */
+static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
+{
+	return __refcount_add_not_zero(1, r, oldp);
+}
+
 static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
 {
-	return refcount_add_not_zero(1, r);
+	return __refcount_inc_not_zero(r, NULL);
 }
 
 /**
@@ -236,9 +257,14 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
  * Will WARN if the refcount is 0, as this represents a possible use-after-free
  * condition.
  */
+static inline void __refcount_inc(refcount_t *r, int *oldp)
+{
+	__refcount_add(1, r, oldp);
+}
+
 static inline void refcount_inc(refcount_t *r)
 {
-	refcount_add(1, r);
+	__refcount_inc(r, NULL);
 }
 
 /**
@@ -261,10 +287,13 @@ static inline void refcount_inc(refcount_t *r)
  *
  * Return: true if the resulting refcount is 0, false otherwise
  */
-static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
+static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
 {
 	int old = atomic_fetch_sub_release(i, &r->refs);
 
+	if (oldp)
+		*oldp = old;
+
 	if (old == i) {
 		smp_acquire__after_ctrl_dep();
 		return true;
@@ -276,6 +305,11 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
 	return false;
 }
 
+static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
+{
+	return __refcount_sub_and_test(i, r, NULL);
+}
+
 /**
  * refcount_dec_and_test - decrement a refcount and test if it is 0
  * @r: the refcount
@@ -289,9 +323,14 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
  *
  * Return: true if the resulting refcount is 0, false otherwise
  */
+static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
+{
+	return __refcount_sub_and_test(1, r, oldp);
+}
+
 static inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
-	return refcount_sub_and_test(1, r);
+	return __refcount_dec_and_test(r, NULL);
 }
 
 /**
@@ -304,12 +343,22 @@ static inline __must_check bool refcount_dec_and_test(refcount_t *r)
  * Provides release memory ordering, such that prior loads and stores are done
  * before.
  */
-static inline void refcount_dec(refcount_t *r)
+static inline void __refcount_dec(refcount_t *r, int *oldp)
 {
-	if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
+	int old = atomic_fetch_sub_release(1, &r->refs);
+
+	if (oldp)
+		*oldp = old;
+
+	if (unlikely(old <= 1))
 		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
 }
 
+static inline void refcount_dec(refcount_t *r)
+{
+	__refcount_dec(r, NULL);
+}
+
 extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
 extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
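
Note the convention the patch establishes throughout: the value stored
through *oldp is always the counter's value before the operation, as
returned by the underlying atomic_fetch_*() primitive, and each plain
refcount_*() helper becomes a trivial wrapper that passes NULL. A minimal
sketch of what a caller can derive from that (the helper name here is
hypothetical, not part of this patch):

	/* Count *after* the increment; saturation still WARNs as before. */
	static inline int obj_ref_inc_return(refcount_t *r)
	{
		int old;

		__refcount_inc(r, &old);
		return old + 1;
	}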