| author | Dave Airlie <airlied@redhat.com> | 2024-11-04 14:25:33 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2024-11-04 14:25:33 +1000 |
| commit | 30169bb64580bd7bce9290c1952bf0aa6cc37fe5 (patch) | |
| tree | 94c6ab9dec68f5648a055752aad32d816bd27e11 /include/linux/alloc_tag.h | |
| parent | bcfe43f0ea77c42c2154fb79b99b7d1d82ac3231 (diff) | |
| parent | 59b723cd2adbac2a34fc8e12c74ae26ae45bf230 (diff) | |
Backmerge v6.12-rc6 of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into drm-next
Backmerge Linus tree for some drm-fixes needed for msm and xe merges.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'include/linux/alloc_tag.h')
| -rw-r--r-- | include/linux/alloc_tag.h | 16 |
1 file changed, 10 insertions(+), 6 deletions(-)
```diff
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 1f0a9ff23a2c..941deffc590d 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -135,18 +135,21 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
 #endif
 
 /* Caller should verify both ref and tag to be valid */
-static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
 {
 	alloc_tag_add_check(ref, tag);
 	if (!ref || !tag)
-		return;
+		return false;
 
 	ref->ct = &tag->ct;
+	return true;
 }
 
-static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
 {
-	__alloc_tag_ref_set(ref, tag);
+	if (unlikely(!__alloc_tag_ref_set(ref, tag)))
+		return false;
+
 	/*
 	 * We need in increment the call counter every time we have a new
 	 * allocation or when we split a large allocation into smaller ones.
@@ -154,12 +157,13 @@ static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *t
 	 * counter because when we free each part the counter will be decremented.
 	 */
 	this_cpu_inc(tag->counters->calls);
+	return true;
 }
 
 static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
 {
-	alloc_tag_ref_set(ref, tag);
-	this_cpu_add(tag->counters->bytes, bytes);
+	if (likely(alloc_tag_ref_set(ref, tag)))
+		this_cpu_add(tag->counters->bytes, bytes);
 }
 
 static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
```
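The change above converts the two tag-ref setters from `void` to `bool`, so `alloc_tag_add()` only bumps the byte counter when the reference was actually installed. The standalone C sketch below illustrates that guard pattern in a simplified, userspace form; it is not the kernel code (the real helpers use per-CPU counters via `this_cpu_inc()`/`this_cpu_add()` and the codetag machinery), and the `demo_*` names and `struct tag_counters` fields are hypothetical stand-ins.

```c
/*
 * Minimal sketch of the pattern introduced by the diff above: the
 * low-level "set" helper reports success, and byte accounting only
 * happens when the reference was actually installed.  Names here are
 * illustrative stand-ins, not the kernel's alloc_tag implementation.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct tag_counters {
	unsigned long calls;
	unsigned long bytes;
};

struct demo_tag {
	struct tag_counters counters;
};

struct demo_ref {
	struct demo_tag *tag;
};

/* Returns false instead of silently doing nothing when either pointer is NULL. */
static bool demo_ref_set(struct demo_ref *ref, struct demo_tag *tag)
{
	if (!ref || !tag)
		return false;

	ref->tag = tag;
	tag->counters.calls++;	/* one call counted per successful set */
	return true;
}

/* Bytes are accounted only when the reference was actually set. */
static void demo_add(struct demo_ref *ref, struct demo_tag *tag, size_t bytes)
{
	if (demo_ref_set(ref, tag))
		tag->counters.bytes += bytes;
}

int main(void)
{
	struct demo_tag tag = { { 0, 0 } };
	struct demo_ref ref = { NULL };

	demo_add(&ref, &tag, 128);	/* counted: calls=1, bytes=128 */
	demo_add(NULL, &tag, 64);	/* rejected: counters stay unchanged */

	printf("calls=%lu bytes=%lu\n", tag.counters.calls, tag.counters.bytes);
	return 0;
}
```

The design point mirrored here is that before this change a failed set left the byte counter incremented against a reference that was never recorded; propagating success as a `bool` keeps the call and byte counters consistent.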
