From df9cd3cbf209b9357f2939ba63afa427026e954b Mon Sep 17 00:00:00 2001
From: Sergey Senozhatsky
Date: Sat, 24 Jun 2023 14:12:14 +0900
Subject: zsmalloc: do not scan for allocated objects in empty zspage

Patch series "zsmalloc: small compaction improvements", v2.

A tiny series that reduces the number of find_alloced_obj() invocations
(each of which performs a linear scan of a sub-page) during compaction.
Inspired by Alexey Romanov's findings.

This patch (of 3):

zspage migration can terminate as soon as it moves the last allocated
object from the source zspage.  Add a simple helper, zspage_empty(),
that tests the zspage ->inuse counter on each migration iteration.

Link: https://lkml.kernel.org/r/20230624053120.643409-2-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky
Suggested-by: Alexey Romanov
Reviewed-by: Alexey Romanov
Acked-by: Minchan Kim
Signed-off-by: Andrew Morton
---
 mm/zsmalloc.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 3f057970504e..5d60eaedc3b7 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1147,6 +1147,11 @@ static bool zspage_full(struct size_class *class, struct zspage *zspage)
 	return get_zspage_inuse(zspage) == class->objs_per_zspage;
 }
 
+static bool zspage_empty(struct zspage *zspage)
+{
+	return get_zspage_inuse(zspage) == 0;
+}
+
 /**
  * zs_lookup_class_index() - Returns index of the zsmalloc &size_class
  * that hold objects of the provided size.
@@ -1625,6 +1630,10 @@ static void migrate_zspage(struct zs_pool *pool, struct size_class *class,
 		obj_idx++;
 		record_obj(handle, free_obj);
 		obj_free(class->size, used_obj);
+
+		/* Stop if there are no more objects to migrate */
+		if (zspage_empty(get_zspage(s_page)))
+			break;
 	}
 
 	/* Remember last position in this iteration */
-- 
cgit v1.2.3-70-g09d2
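
For reference, the effect of the early exit can be sketched outside the
kernel.  The toy program below is a minimal illustration, not kernel code:
toy_zspage, toy_zspage_empty(), toy_migrate() and NR_SLOTS are hypothetical
stand-ins for struct zspage, zspage_empty(), the migrate_zspage() loop and
the per-zspage object capacity.  With an ->inuse-style counter check, the
scan stops right after the last allocated object instead of walking every
remaining free slot.

/*
 * Minimal userspace sketch (assumed names, not the kernel API):
 * once the source page's "inuse" counter drops to zero, the
 * migration loop breaks out instead of scanning the rest.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 8			/* hypothetical objects per zspage */

struct toy_zspage {
	bool allocated[NR_SLOTS];	/* slot occupancy */
	int inuse;			/* number of allocated objects */
};

static bool toy_zspage_empty(struct toy_zspage *zspage)
{
	/* mirrors zspage_empty(): nothing left to migrate */
	return zspage->inuse == 0;
}

/* Move every allocated object out of @src; return slots scanned. */
static int toy_migrate(struct toy_zspage *src)
{
	int scanned = 0;

	for (int i = 0; i < NR_SLOTS; i++) {
		scanned++;
		if (!src->allocated[i])
			continue;	/* linear scan over free slots */

		src->allocated[i] = false;	/* "migrate" the object */
		src->inuse--;

		/* Stop if there are no more objects to migrate */
		if (toy_zspage_empty(src))
			break;
	}
	return scanned;
}

int main(void)
{
	/* Only the first two slots hold objects; the rest are free. */
	struct toy_zspage src = { .allocated = { true, true }, .inuse = 2 };

	/* Prints "scanned 2 of 8 slots": the loop exits early. */
	printf("scanned %d of %d slots\n", toy_migrate(&src), NR_SLOTS);
	return 0;
}

Without the zspage_empty()-style check, the loop would visit all NR_SLOTS
entries even after the last object has moved, which corresponds to the
pointless trailing find_alloced_obj() scan the patch avoids.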