author		Kalesh Singh <kaleshsingh@google.com>		2024-01-09 17:22:33 -0800
committer	Andrew Morton <akpm@linux-foundation.org>	2024-02-21 16:00:01 -0800
commit		51ae3f4ac5e94334ac6078145a02bc8f30e8528b (patch)
tree		6c7210cc29de716f6bd18330867a291583382648 /mm/cma.c
parent		2597c9947b0174fcc71bdd7ab6cb49c2b4291e95 (diff)
mm/cma: fix placement of trace_cma_alloc_start/finish
The current placement of trace_cma_alloc_start/finish misses the fail
cases: !cma || !cma->count || !cma->bitmap. trace_cma_alloc_finish is
also not emitted for the failure case where bitmap_count > bitmap_maxno.

Fix these missed cases by moving the start event before the failure
checks and moving the finish event to the out label.

Link: https://lkml.kernel.org/r/20240110012234.3793639-1-kaleshsingh@google.com
Fixes: 7bc1aec5e287 ("mm: cma: add trace events for CMA alloc perf testing")
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
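To make the intent of the fix concrete, here is a minimal, compilable userspace sketch of the pattern the patch applies; it is not the actual mm/cma.c code. The start event is emitted before any validity checks, and every exit path funnels through a single out label where the finish event is emitted, so the two events always pair up even when the allocation bails out early. The struct cma stand-in and the trace_start()/trace_finish() helpers are illustrative placeholders for the real cma structure and the trace_cma_alloc_start()/trace_cma_alloc_finish() tracepoints.

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

/* Illustrative stand-in for the kernel's struct cma. */
struct cma {
	const char *name;
	unsigned long count;
	void *bitmap;
};

/* Placeholder for trace_cma_alloc_start(). */
static void trace_start(const char *name, unsigned long count)
{
	printf("cma_alloc_start: name=%s count=%lu\n",
	       name ? name : "(null)", count);
}

/* Placeholder for trace_cma_alloc_finish(). */
static void trace_finish(const char *name, void *page,
			 unsigned long count, int ret)
{
	printf("cma_alloc_finish: name=%s page=%p count=%lu ret=%d\n",
	       name ? name : "(null)", page, count, ret);
}

static void *cma_alloc_sketch(struct cma *cma, unsigned long count)
{
	void *page = NULL;
	int ret = -ENOMEM;
	/* cma may be NULL here, so fetch the name defensively before tracing. */
	const char *name = cma ? cma->name : NULL;

	trace_start(name, count);	/* emitted unconditionally, before any checks */

	if (!cma || !cma->count || !cma->bitmap)
		goto out;		/* failure case the old placement missed */
	if (!count)
		goto out;

	/* ... bitmap search, pfn conversion and retry loop elided ... */
	page = cma->bitmap;		/* pretend the allocation succeeded */
	ret = 0;

out:
	trace_finish(name, page, count, ret);	/* fires on every exit path */
	return page;
}

int main(void)
{
	static char backing[64];
	struct cma area = { .name = "test_cma", .count = 16, .bitmap = backing };

	cma_alloc_sketch(NULL, 4);	/* early-failure path: both events still emitted */
	cma_alloc_sketch(&area, 4);	/* success path */
	return 0;
}

Running the sketch prints a matched start/finish pair for both calls, which mirrors the pairing the patch guarantees for the real tracepoints.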
Diffstat (limited to 'mm/cma.c')
-rw-r--r--	mm/cma.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/mm/cma.c b/mm/cma.c
index 7c09c47e530b..e12cf41d8354 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -436,6 +436,9 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 	unsigned long i;
 	struct page *page = NULL;
 	int ret = -ENOMEM;
+	const char *name = cma ? cma->name : NULL;
+
+	trace_cma_alloc_start(name, count, align);
 
 	if (!cma || !cma->count || !cma->bitmap)
 		goto out;
@@ -446,8 +449,6 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 	if (!count)
 		goto out;
 
-	trace_cma_alloc_start(cma->name, count, align);
-
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
@@ -496,8 +497,6 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 		start = bitmap_no + mask + 1;
 	}
 
-	trace_cma_alloc_finish(cma->name, pfn, page, count, align, ret);
-
 	/*
 	 * CMA can allocate multiple page blocks, which results in different
 	 * blocks being marked with different tags. Reset the tags to ignore
@@ -516,6 +515,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 
 	pr_debug("%s(): returned %p\n", __func__, page);
 out:
+	trace_cma_alloc_finish(name, pfn, page, count, align, ret);
 	if (page) {
 		count_vm_event(CMA_ALLOC_SUCCESS);
 		cma_sysfs_account_success_pages(cma, count);