author    Linus Torvalds <torvalds@linux-foundation.org>  2024-11-25 16:51:24 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-11-25 16:51:24 -0800
commit    e06635e26cd8144eee17e9f256e8fde8aed3ba4f (patch)
tree      24bd832e3098ebf70999a1af6c16087ce4c5995e /lib
parent    f5f4745a7f057b58c9728ee4e2c5d6d79f382fe7 (diff)
parent    9008fe8fad8255edfdbecea32d7eb0485d939d0d (diff)
Merge tag 'slab-for-6.13-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:

 - Add new slab_strict_numa boot parameter to enforce per-object memory
   policies on top of slab folio policies, for systems where saving cost
   of remote accesses is more important than minimizing slab allocation
   overhead (Christoph Lameter)

 - Fix for freeptr_offset alignment check being too strict for m68k
   (Geert Uytterhoeven)

 - krealloc() fixes for not violating __GFP_ZERO guarantees on
   krealloc() when slub_debug (redzone and object tracking) is enabled
   (Feng Tang)

 - Fix a memory leak in case sysfs registration fails for a slab cache,
   and also no longer fail to create the cache in that case
   (Hyeonggon Yoo)

 - Fix handling of detected consistency problems (due to buggy slab
   user) with slub_debug enabled, so that it does not cause further
   list corruption bugs (yuan.gao)

 - Code cleanup and kerneldocs polishing (Zhen Lei, Vlastimil Babka)

* tag 'slab-for-6.13-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  slab: Fix too strict alignment check in create_cache()
  mm/slab: Allow cache creation to proceed even if sysfs registration fails
  mm/slub: Avoid list corruption when removing a slab from the full list
  mm/slub, kunit: Add testcase for krealloc redzone and zeroing
  mm/slub: Improve redzone check and zeroing for krealloc()
  mm/slub: Consider kfence case for get_orig_size()
  SLUB: Add support for per object memory policies
  mm, slab: add kerneldocs for common SLAB_ flags
  mm/slab: remove duplicate check in create_cache()
  mm/slub: Move krealloc() and related code to slub.c
  mm/kasan: Don't store metadata inside kmalloc object when slub_debug_orig_size is on
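The krealloc() items above all protect one contract: when a buffer is grown with krealloc(..., GFP_KERNEL | __GFP_ZERO), every byte past the old allocation size must read back as zero, even when slub_debug redzoning and original-size tracking are enabled. The following is a minimal, hedged sketch of that contract in kernel-style C; it is not code from this merge, and the function name and sizes are invented for illustration:

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: exercises the __GFP_ZERO guarantee the fixes restore. */
static int krealloc_zero_demo(void)
{
        u8 *p, *q;
        int i;

        p = kmalloc(48, GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        memset(p, 0xff, 48);            /* dirty the original 48 bytes */

        /*
         * Grow with __GFP_ZERO: bytes 48..111 must be zero afterwards,
         * regardless of slub_debug's redzone/object tracking state.
         */
        q = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
        if (!q) {
                kfree(p);               /* on failure, krealloc() leaves p valid */
                return -ENOMEM;
        }
        for (i = 48; i < 112; i++)
                WARN_ON(q[i] != 0);

        kfree(q);
        return 0;
}

Separately, the new slab_strict_numa switch is a boot parameter, so enabling it should amount to appending slab_strict_numa to the kernel command line; see Documentation/admin-guide/kernel-parameters.txt in the tree for the authoritative description.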
Diffstat (limited to 'lib')
-rw-r--r--  lib/slub_kunit.c  42
1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 33564f965958..f11691315c2f 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -192,6 +192,47 @@ static void test_leak_destroy(struct kunit *test)
         KUNIT_EXPECT_EQ(test, 2, slab_errors);
 }
 
+static void test_krealloc_redzone_zeroing(struct kunit *test)
+{
+        u8 *p;
+        int i;
+        struct kmem_cache *s = test_kmem_cache_create("TestSlub_krealloc", 64,
+                                SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
+
+        p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 48));
+        memset(p, 0xff, 48);
+
+        kasan_disable_current();
+        OPTIMIZER_HIDE_VAR(p);
+
+        /* Test shrink */
+        p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
+        for (i = 40; i < 64; i++)
+                KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
+
+        /* Test grow within the same 64B kmalloc object */
+        p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
+        for (i = 40; i < 56; i++)
+                KUNIT_EXPECT_EQ(test, p[i], 0);
+        for (i = 56; i < 64; i++)
+                KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
+
+        validate_slab_cache(s);
+        KUNIT_EXPECT_EQ(test, 0, slab_errors);
+
+        memset(p, 0xff, 56);
+        /* Test grow with allocating a bigger 128B object */
+        p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
+        for (i = 0; i < 56; i++)
+                KUNIT_EXPECT_EQ(test, p[i], 0xff);
+        for (i = 56; i < 112; i++)
+                KUNIT_EXPECT_EQ(test, p[i], 0);
+
+        kfree(p);
+        kasan_enable_current();
+        kmem_cache_destroy(s);
+}
+
 static int test_init(struct kunit *test)
 {
         slab_errors = 0;
@@ -214,6 +255,7 @@ static struct kunit_case test_cases[] = {
         KUNIT_CASE(test_kmalloc_redzone_access),
         KUNIT_CASE(test_kfree_rcu),
         KUNIT_CASE(test_leak_destroy),
+        KUNIT_CASE(test_krealloc_redzone_zeroing),
         {}
 };