author		Andrey Konovalov <andreyknvl@google.com>	2023-12-19 23:29:02 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-12-29 11:58:40 -0800
commit		86b15969831bde23c96de00db46687762a6e9e7d
tree		35c7b5b29e637366f5d5fc62ebf19730b2e62790
parent		0f18ea6ea44cde3d7660e52fa8729d420f97409a
kasan: reorder tests
Put closely related tests next to each other.
No functional changes.
Link: https://lkml.kernel.org/r/acf0ee309394dbb5764c400434753ff030dd3d6c.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/kasan/kasan_test.c | 418
1 file changed, 209 insertions(+), 209 deletions(-)
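KUnit runs these tests in the order they appear in the kasan_kunit_test_cases array at the bottom of kasan_test.c, so moving a test's definition together with its KUNIT_CASE() entry changes only run order and file layout, never behavior. A minimal sketch of that registration pattern (hypothetical suite and test names, not taken from this patch):

#include <kunit/test.h>

/* Hypothetical stand-ins for the real KASAN tests. */
static void alloc_oob(struct kunit *test) { KUNIT_EXPECT_TRUE(test, true); }
static void alloc_uaf(struct kunit *test) { KUNIT_EXPECT_TRUE(test, true); }

static struct kunit_case example_test_cases[] = {
	/* Entries run in array order; swapping these two lines reorders
	 * execution without changing what either test checks. */
	KUNIT_CASE(alloc_oob),
	KUNIT_CASE(alloc_uaf),
	{}	/* zero-filled sentinel terminating the array */
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};

kunit_test_suite(example_test_suite);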
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 3cd977d60569..aa994b62378b 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -214,6 +214,23 @@ static void kmalloc_node_oob_right(struct kunit *test)
 }
 
 /*
+ * Check that KASAN detects an out-of-bounds access for a big object allocated
+ * via kmalloc(). But not as big as to trigger the page_alloc fallback for SLUB.
+ */
+static void kmalloc_big_oob_right(struct kunit *test)
+{
+	char *ptr;
+	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
+
+	ptr = kmalloc(size, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+	OPTIMIZER_HIDE_VAR(ptr);
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+	kfree(ptr);
+}
+
+/*
  * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
  * that does not fit into the largest slab cache and therefore is allocated via
  * the page_alloc fallback for SLUB. SLAB has no such fallback, and thus these
@@ -299,23 +316,6 @@ static void page_alloc_uaf(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
 }
 
-/*
- * Check that KASAN detects an out-of-bounds access for a big object allocated
- * via kmalloc(). But not as big as to trigger the page_alloc fallback for SLUB.
- */
-static void kmalloc_big_oob_right(struct kunit *test)
-{
-	char *ptr;
-	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
-
-	ptr = kmalloc(size, GFP_KERNEL);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
-	OPTIMIZER_HIDE_VAR(ptr);
-	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
-	kfree(ptr);
-}
-
 static void krealloc_more_oob_helper(struct kunit *test,
 					size_t size1, size_t size2)
 {
@@ -710,6 +710,126 @@ static void kmalloc_uaf3(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
 }
 
+static void kmalloc_double_kzfree(struct kunit *test)
+{
+	char *ptr;
+	size_t size = 16;
+
+	ptr = kmalloc(size, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+	kfree_sensitive(ptr);
+	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
+}
+
+/* Check that ksize() does NOT unpoison whole object. */
+static void ksize_unpoisons_memory(struct kunit *test)
+{
+	char *ptr;
+	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
+	size_t real_size;
+
+	ptr = kmalloc(size, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+	real_size = ksize(ptr);
+	KUNIT_EXPECT_GT(test, real_size, size);
+
+	OPTIMIZER_HIDE_VAR(ptr);
+
+	/* These accesses shouldn't trigger a KASAN report. */
+	ptr[0] = 'x';
+	ptr[size - 1] = 'x';
+
+	/* These must trigger a KASAN report. */
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
+	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
+	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
+
+	kfree(ptr);
+}
+
+/*
+ * Check that a use-after-free is detected by ksize() and via normal accesses
+ * after it.
+ */
+static void ksize_uaf(struct kunit *test)
+{
+	char *ptr;
+	int size = 128 - KASAN_GRANULE_SIZE;
+
+	ptr = kmalloc(size, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+	kfree(ptr);
+
+	OPTIMIZER_HIDE_VAR(ptr);
+	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
+	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
+}
+
+/*
+ * The two tests below check that Generic KASAN prints auxiliary stack traces
+ * for RCU callbacks and workqueues. The reports need to be inspected manually.
+ *
+ * These tests are still enabled for other KASAN modes to make sure that all
+ * modes report bad accesses in tested scenarios.
+ */
+
+static struct kasan_rcu_info {
+	int i;
+	struct rcu_head rcu;
+} *global_rcu_ptr;
+
+static void rcu_uaf_reclaim(struct rcu_head *rp)
+{
+	struct kasan_rcu_info *fp =
+		container_of(rp, struct kasan_rcu_info, rcu);
+
+	kfree(fp);
+	((volatile struct kasan_rcu_info *)fp)->i;
+}
+
+static void rcu_uaf(struct kunit *test)
+{
+	struct kasan_rcu_info *ptr;
+
+	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+	global_rcu_ptr = rcu_dereference_protected(
+				(struct kasan_rcu_info __rcu *)ptr, NULL);
+
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
+		rcu_barrier());
+}
+
+static void workqueue_uaf_work(struct work_struct *work)
+{
+	kfree(work);
+}
+
+static void workqueue_uaf(struct kunit *test)
+{
+	struct workqueue_struct *workqueue;
+	struct work_struct *work;
+
+	workqueue = create_workqueue("kasan_workqueue_test");
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
+
+	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
+
+	INIT_WORK(work, workqueue_uaf_work);
+	queue_work(workqueue, work);
+	destroy_workqueue(workqueue);
+
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		((volatile struct work_struct *)work)->data);
+}
+
 static void kfree_via_page(struct kunit *test)
 {
 	char *ptr;
@@ -760,6 +880,69 @@ static void kmem_cache_oob(struct kunit *test)
 	kmem_cache_destroy(cache);
 }
 
+static void kmem_cache_double_free(struct kunit *test)
+{
+	char *p;
+	size_t size = 200;
+	struct kmem_cache *cache;
+
+	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+	p = kmem_cache_alloc(cache, GFP_KERNEL);
+	if (!p) {
+		kunit_err(test, "Allocation failed: %s\n", __func__);
+		kmem_cache_destroy(cache);
+		return;
+	}
+
+	kmem_cache_free(cache, p);
+	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
+	kmem_cache_destroy(cache);
+}
+
+static void kmem_cache_invalid_free(struct kunit *test)
+{
+	char *p;
+	size_t size = 200;
+	struct kmem_cache *cache;
+
+	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
+				  NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+	p = kmem_cache_alloc(cache, GFP_KERNEL);
+	if (!p) {
+		kunit_err(test, "Allocation failed: %s\n", __func__);
+		kmem_cache_destroy(cache);
+		return;
+	}
+
+	/* Trigger invalid free, the object doesn't get freed. */
+	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
+
+	/*
+	 * Properly free the object to prevent the "Objects remaining in
+	 * test_cache on __kmem_cache_shutdown" BUG failure.
+	 */
+	kmem_cache_free(cache, p);
+
+	kmem_cache_destroy(cache);
+}
+
+static void empty_cache_ctor(void *object) { }
+
+static void kmem_cache_double_destroy(struct kunit *test)
+{
+	struct kmem_cache *cache;
+
+	/* Provide a constructor to prevent cache merging. */
+	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+	kmem_cache_destroy(cache);
+	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
+}
+
 static void kmem_cache_accounted(struct kunit *test)
 {
 	int i;
@@ -1157,53 +1340,6 @@ static void kasan_global_oob_left(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
 
-/* Check that ksize() does NOT unpoison whole object. */
-static void ksize_unpoisons_memory(struct kunit *test)
-{
-	char *ptr;
-	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
-	size_t real_size;
-
-	ptr = kmalloc(size, GFP_KERNEL);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
-	real_size = ksize(ptr);
-	KUNIT_EXPECT_GT(test, real_size, size);
-
-	OPTIMIZER_HIDE_VAR(ptr);
-
-	/* These accesses shouldn't trigger a KASAN report. */
-	ptr[0] = 'x';
-	ptr[size - 1] = 'x';
-
-	/* These must trigger a KASAN report. */
-	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
-		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
-	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
-	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
-
-	kfree(ptr);
-}
-
-/*
- * Check that a use-after-free is detected by ksize() and via normal accesses
- * after it.
- */
-static void ksize_uaf(struct kunit *test)
-{
-	char *ptr;
-	int size = 128 - KASAN_GRANULE_SIZE;
-
-	ptr = kmalloc(size, GFP_KERNEL);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-	kfree(ptr);
-
-	OPTIMIZER_HIDE_VAR(ptr);
-	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
-	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
-	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
-}
-
 static void kasan_stack_oob(struct kunit *test)
 {
 	char stack_array[10];
@@ -1246,69 +1382,6 @@ static void kasan_alloca_oob_right(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
 
-static void kmem_cache_double_free(struct kunit *test)
-{
-	char *p;
-	size_t size = 200;
-	struct kmem_cache *cache;
-
-	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
-
-	p = kmem_cache_alloc(cache, GFP_KERNEL);
-	if (!p) {
-		kunit_err(test, "Allocation failed: %s\n", __func__);
-		kmem_cache_destroy(cache);
-		return;
-	}
-
-	kmem_cache_free(cache, p);
-	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
-	kmem_cache_destroy(cache);
-}
-
-static void kmem_cache_invalid_free(struct kunit *test)
-{
-	char *p;
-	size_t size = 200;
-	struct kmem_cache *cache;
-
-	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
-				  NULL);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
-
-	p = kmem_cache_alloc(cache, GFP_KERNEL);
-	if (!p) {
-		kunit_err(test, "Allocation failed: %s\n", __func__);
-		kmem_cache_destroy(cache);
-		return;
-	}
-
-	/* Trigger invalid free, the object doesn't get freed. */
-	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
-
-	/*
-	 * Properly free the object to prevent the "Objects remaining in
-	 * test_cache on __kmem_cache_shutdown" BUG failure.
-	 */
-	kmem_cache_free(cache, p);
-
-	kmem_cache_destroy(cache);
-}
-
-static void empty_cache_ctor(void *object) { }
-
-static void kmem_cache_double_destroy(struct kunit *test)
-{
-	struct kmem_cache *cache;
-
-	/* Provide a constructor to prevent cache merging. */
-	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
-	kmem_cache_destroy(cache);
-	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
-}
-
 static void kasan_memchr(struct kunit *test)
 {
 	char *ptr;
@@ -1470,79 +1543,6 @@ static void kasan_bitops_tags(struct kunit *test)
 	kfree(bits);
 }
 
-static void kmalloc_double_kzfree(struct kunit *test)
-{
-	char *ptr;
-	size_t size = 16;
-
-	ptr = kmalloc(size, GFP_KERNEL);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
-	kfree_sensitive(ptr);
-	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
-}
-
-/*
- * The two tests below check that Generic KASAN prints auxiliary stack traces
- * for RCU callbacks and workqueues. The reports need to be inspected manually.
- *
- * These tests are still enabled for other KASAN modes to make sure that all
- * modes report bad accesses in tested scenarios.
- */
-
-static struct kasan_rcu_info {
-	int i;
-	struct rcu_head rcu;
-} *global_rcu_ptr;
-
-static void rcu_uaf_reclaim(struct rcu_head *rp)
-{
-	struct kasan_rcu_info *fp =
-		container_of(rp, struct kasan_rcu_info, rcu);
-
-	kfree(fp);
-	((volatile struct kasan_rcu_info *)fp)->i;
-}
-
-static void rcu_uaf(struct kunit *test)
-{
-	struct kasan_rcu_info *ptr;
-
-	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
-	global_rcu_ptr = rcu_dereference_protected(
-				(struct kasan_rcu_info __rcu *)ptr, NULL);
-
-	KUNIT_EXPECT_KASAN_FAIL(test,
-		call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
-		rcu_barrier());
-}
-
-static void workqueue_uaf_work(struct work_struct *work)
-{
-	kfree(work);
-}
-
-static void workqueue_uaf(struct kunit *test)
-{
-	struct workqueue_struct *workqueue;
-	struct work_struct *work;
-
-	workqueue = create_workqueue("kasan_workqueue_test");
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
-
-	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
-
-	INIT_WORK(work, workqueue_uaf_work);
-	queue_work(workqueue, work);
-	destroy_workqueue(workqueue);
-
-	KUNIT_EXPECT_KASAN_FAIL(test,
-		((volatile struct work_struct *)work)->data);
-}
-
 static void vmalloc_helpers_tags(struct kunit *test)
 {
 	void *ptr;
@@ -1829,12 +1829,12 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmalloc_oob_right),
 	KUNIT_CASE(kmalloc_oob_left),
 	KUNIT_CASE(kmalloc_node_oob_right),
+	KUNIT_CASE(kmalloc_big_oob_right),
 	KUNIT_CASE(kmalloc_large_oob_right),
 	KUNIT_CASE(kmalloc_large_uaf),
 	KUNIT_CASE(kmalloc_large_invalid_free),
 	KUNIT_CASE(page_alloc_oob_right),
 	KUNIT_CASE(page_alloc_uaf),
-	KUNIT_CASE(kmalloc_big_oob_right),
 	KUNIT_CASE(krealloc_more_oob),
 	KUNIT_CASE(krealloc_less_oob),
 	KUNIT_CASE(krealloc_large_more_oob),
@@ -1853,9 +1853,17 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmalloc_uaf_memset),
 	KUNIT_CASE(kmalloc_uaf2),
 	KUNIT_CASE(kmalloc_uaf3),
+	KUNIT_CASE(kmalloc_double_kzfree),
+	KUNIT_CASE(ksize_unpoisons_memory),
+	KUNIT_CASE(ksize_uaf),
+	KUNIT_CASE(rcu_uaf),
+	KUNIT_CASE(workqueue_uaf),
 	KUNIT_CASE(kfree_via_page),
 	KUNIT_CASE(kfree_via_phys),
 	KUNIT_CASE(kmem_cache_oob),
+	KUNIT_CASE(kmem_cache_double_free),
+	KUNIT_CASE(kmem_cache_invalid_free),
+	KUNIT_CASE(kmem_cache_double_destroy),
 	KUNIT_CASE(kmem_cache_accounted),
 	KUNIT_CASE(kmem_cache_bulk),
 	KUNIT_CASE(mempool_kmalloc_oob_right),
@@ -1875,19 +1883,11 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kasan_stack_oob),
 	KUNIT_CASE(kasan_alloca_oob_left),
 	KUNIT_CASE(kasan_alloca_oob_right),
-	KUNIT_CASE(ksize_unpoisons_memory),
-	KUNIT_CASE(ksize_uaf),
-	KUNIT_CASE(kmem_cache_double_free),
-	KUNIT_CASE(kmem_cache_invalid_free),
-	KUNIT_CASE(kmem_cache_double_destroy),
 	KUNIT_CASE(kasan_memchr),
 	KUNIT_CASE(kasan_memcmp),
 	KUNIT_CASE(kasan_strings),
 	KUNIT_CASE(kasan_bitops_generic),
 	KUNIT_CASE(kasan_bitops_tags),
-	KUNIT_CASE(kmalloc_double_kzfree),
-	KUNIT_CASE(rcu_uaf),
-	KUNIT_CASE(workqueue_uaf),
 	KUNIT_CASE(vmalloc_helpers_tags),
 	KUNIT_CASE(vmalloc_oob),
 	KUNIT_CASE(vmap_tags),
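The tests above lean on KUNIT_EXPECT_KASAN_FAIL(), which evaluates an expression and fails the test unless KASAN reported a bad access while it ran. Its real definition lives earlier in mm/kasan/kasan_test.c and synchronizes with the per-mode reporting machinery; what follows is only a simplified, hypothetical sketch of that expect-a-report idea, with made-up helpers:

/* Hypothetical flag assumed to be set from a KASAN report hook; the
 * kernel's macro instead tracks reports through its test_status state. */
static bool report_seen;

#define EXPECT_KASAN_REPORT(test, expression) do {			\
	report_seen = false;	/* arm: no report observed yet */	\
	expression;		/* the access expected to be caught */	\
	KUNIT_EXPECT_TRUE_MSG(test, report_seen,			\
		"KASAN failure expected in \"" #expression "\"");	\
} while (0)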