From 72786c0a3dc5d4151469f512909049a0b17ada3d Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Mon, 2 Oct 2023 16:17:16 +0200
Subject: KASAN: remove code paths guarded by CONFIG_SLAB

With SLAB removed and SLUB the only remaining allocator, we can clean up
some code that was depending on the choice.

Reviewed-by: Kees Cook
Reviewed-by: Marco Elver
Reviewed-by: Andrey Konovalov
Acked-by: David Rientjes
Tested-by: David Rientjes
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
---
 mm/kasan/common.c     | 13 ++-----------
 mm/kasan/kasan.h      |  3 +--
 mm/kasan/quarantine.c |  7 -------
 3 files changed, 3 insertions(+), 20 deletions(-)

(limited to 'mm/kasan')

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 256930da578a..5d95219e69d7 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -153,10 +153,6 @@ void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
  * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
  *    accessed after being freed. We preassign tags for objects in these
  *    caches as well.
- * 3. For SLAB allocator we can't preassign tags randomly since the freelist
- *    is stored as an array of indexes instead of a linked list. Assign tags
- *    based on objects indexes, so that objects that are next to each other
- *    get different tags.
  */
 static inline u8 assign_tag(struct kmem_cache *cache,
 			    const void *object, bool init)
@@ -171,17 +167,12 @@ static inline u8 assign_tag(struct kmem_cache *cache,
 	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
 		return init ? KASAN_TAG_KERNEL : kasan_random_tag();
 
-	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
-#ifdef CONFIG_SLAB
-	/* For SLAB assign tags based on the object index in the freelist. */
-	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
-#else
 	/*
-	 * For SLUB assign a random tag during slab creation, otherwise reuse
+	 * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
+	 * assign a random tag during slab creation, otherwise reuse
 	 * the already assigned tag.
 	 */
 	return init ? kasan_random_tag() : get_tag(object);
-#endif
 }
 
 void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 8b06bab5c406..eef50233640a 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -373,8 +373,7 @@ void kasan_set_track(struct kasan_track *track, gfp_t flags);
 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
 void kasan_save_free_info(struct kmem_cache *cache, void *object);
 
-#if defined(CONFIG_KASAN_GENERIC) && \
-	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
+#ifdef CONFIG_KASAN_GENERIC
 bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
 void kasan_quarantine_reduce(void);
 void kasan_quarantine_remove_cache(struct kmem_cache *cache);
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index ca4529156735..138c57b836f2 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -144,10 +144,6 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
 {
 	void *object = qlink_to_object(qlink, cache);
 	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
-	unsigned long flags;
-
-	if (IS_ENABLED(CONFIG_SLAB))
-		local_irq_save(flags);
 
 	/*
 	 * If init_on_free is enabled and KASAN's free metadata is stored in
@@ -166,9 +162,6 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
 		*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
 
 	___cache_free(cache, object, _THIS_IP_);
-
-	if (IS_ENABLED(CONFIG_SLAB))
-		local_irq_restore(flags);
 }
 
 static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
--
cgit v1.2.3-70-g09d2
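With the CONFIG_SLAB branches gone, the tag assignment in mm/kasan/common.c
collapses into a single path. As a readability aid, this is roughly how
assign_tag() reads once the patch above is applied; the sketch is assembled
from the hunks themselves, lines the patch does not touch are elided, and the
indentation is approximate rather than copied from the file:

static inline u8 assign_tag(struct kmem_cache *cache,
			    const void *object, bool init)
{
	/* ... lines not modified by this patch elided ... */

	/*
	 * Caches without a constructor and without SLAB_TYPESAFE_BY_RCU get
	 * a fresh random tag on each allocation (init is true only during
	 * slab creation).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/*
	 * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
	 * assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
}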
From 6011be59910fb12b757f9d37793d21763268b4a1 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Tue, 3 Oct 2023 11:57:45 +0200
Subject: mm/slab: move pre/post-alloc hooks from slab.h to slub.c

We don't share the hooks between two slab implementations anymore so
they can be moved away from the header. As part of the move, also move
should_failslab() from slab_common.c as the pre_alloc hook uses it.

This means slab.h can stop including fault-inject.h and kmemleak.h.
Fix up some files that were depending on the includes transitively.

Reviewed-by: Kees Cook
Acked-by: David Rientjes
Tested-by: David Rientjes
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
---
 mm/kasan/report.c |  1 +
 mm/memcontrol.c   |  1 +
 mm/slab.h         | 72 ------------------------------------------------
 mm/slab_common.c  |  8 +-----
 mm/slub.c         | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 84 insertions(+), 79 deletions(-)

(limited to 'mm/kasan')

diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index e77facb62900..011f727bfaff 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 947fb50eba31..8a0603517065 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -64,6 +64,7 @@
 #include
 #include
 #include
+#include
 #include "internal.h"
 #include
 #include
diff --git a/mm/slab.h b/mm/slab.h
index 1ac3a2f8d4c0..65ebf86b3fe9 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -9,8 +9,6 @@
 #include
 #include
 #include
-#include
-#include
 #include
 #include
 
@@ -796,76 +794,6 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	return s->size;
 }
 
-static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
-						     struct list_lru *lru,
-						     struct obj_cgroup **objcgp,
-						     size_t size, gfp_t flags)
-{
-	flags &= gfp_allowed_mask;
-
-	might_alloc(flags);
-
-	if (should_failslab(s, flags))
-		return NULL;
-
-	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
-		return NULL;
-
-	return s;
-}
-
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
-					struct obj_cgroup *objcg, gfp_t flags,
-					size_t size, void **p, bool init,
-					unsigned int orig_size)
-{
-	unsigned int zero_size = s->object_size;
-	bool kasan_init = init;
-	size_t i;
-
-	flags &= gfp_allowed_mask;
-
-	/*
-	 * For kmalloc object, the allocated memory size(object_size) is likely
-	 * larger than the requested size(orig_size). If redzone check is
-	 * enabled for the extra space, don't zero it, as it will be redzoned
-	 * soon. The redzone operation for this extra space could be seen as a
-	 * replacement of current poisoning under certain debug option, and
-	 * won't break other sanity checks.
-	 */
-	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
-	    (s->flags & SLAB_KMALLOC))
-		zero_size = orig_size;
-
-	/*
-	 * When slub_debug is enabled, avoid memory initialization integrated
-	 * into KASAN and instead zero out the memory via the memset below with
-	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
-	 * cause false-positive reports. This does not lead to a performance
-	 * penalty on production builds, as slub_debug is not intended to be
-	 * enabled there.
-	 */
-	if (__slub_debug_enabled())
-		kasan_init = false;
-
-	/*
-	 * As memory initialization might be integrated into KASAN,
-	 * kasan_slab_alloc and initialization memset must be
-	 * kept together to avoid discrepancies in behavior.
-	 *
-	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
-	 */
-	for (i = 0; i < size; i++) {
-		p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
-		if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
-			memset(p[i], 0, zero_size);
-		kmemleak_alloc_recursive(p[i], s->object_size, 1,
-					 s->flags, flags);
-		kmsan_slab_alloc(s, p[i], flags);
-	}
-
-	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
-}
 
 /*
  * The slab lists for all objects.
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 63b8411db7ce..bbc2e3f061f1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1470,10 +1471,3 @@ EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
 EXPORT_TRACEPOINT_SYMBOL(kfree);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
 
-int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
-{
-	if (__should_failslab(s, gfpflags))
-		return -ENOMEM;
-	return 0;
-}
-ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
diff --git a/mm/slub.c b/mm/slub.c
index 979932d046fd..9eb6508152c2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -34,6 +34,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -3494,6 +3495,86 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
 			0, sizeof(void *));
 }
 
+noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
+{
+	if (__should_failslab(s, gfpflags))
+		return -ENOMEM;
+	return 0;
+}
+ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
+
+static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
+						     struct list_lru *lru,
+						     struct obj_cgroup **objcgp,
+						     size_t size, gfp_t flags)
+{
+	flags &= gfp_allowed_mask;
+
+	might_alloc(flags);
+
+	if (should_failslab(s, flags))
+		return NULL;
+
+	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
+		return NULL;
+
+	return s;
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					struct obj_cgroup *objcg, gfp_t flags,
+					size_t size, void **p, bool init,
+					unsigned int orig_size)
+{
+	unsigned int zero_size = s->object_size;
+	bool kasan_init = init;
+	size_t i;
+
+	flags &= gfp_allowed_mask;
+
+	/*
+	 * For kmalloc object, the allocated memory size(object_size) is likely
+	 * larger than the requested size(orig_size). If redzone check is
+	 * enabled for the extra space, don't zero it, as it will be redzoned
+	 * soon. The redzone operation for this extra space could be seen as a
+	 * replacement of current poisoning under certain debug option, and
+	 * won't break other sanity checks.
+	 */
+	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
+	    (s->flags & SLAB_KMALLOC))
+		zero_size = orig_size;
+
+	/*
+	 * When slub_debug is enabled, avoid memory initialization integrated
+	 * into KASAN and instead zero out the memory via the memset below with
+	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
+	 * cause false-positive reports. This does not lead to a performance
+	 * penalty on production builds, as slub_debug is not intended to be
+	 * enabled there.
+	 */
+	if (__slub_debug_enabled())
+		kasan_init = false;
+
+	/*
+	 * As memory initialization might be integrated into KASAN,
+	 * kasan_slab_alloc and initialization memset must be
+	 * kept together to avoid discrepancies in behavior.
+	 *
+	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
+	 */
+	for (i = 0; i < size; i++) {
+		p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
+		if (p[i] && init && (!kasan_init ||
+				     !kasan_has_integrated_init()))
+			memset(p[i], 0, zero_size);
+		kmemleak_alloc_recursive(p[i], s->object_size, 1,
+					 s->flags, flags);
+		kmsan_slab_alloc(s, p[i], flags);
+	}
+
+	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
+}
+
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
--
cgit v1.2.3-70-g09d2
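The two hooks moved into mm/slub.c above are meant to bracket every SLUB
allocation: slab_pre_alloc_hook() can veto it (fault injection via
should_failslab(), memcg pre-charging), while slab_post_alloc_hook() applies
the KASAN/KMSAN/kmemleak hooks, optional zeroing and memcg accounting to the
returned objects. The sketch below only illustrates that pairing;
example_alloc_one() and __example_alloc_object() are placeholder names for
this note, not functions from the patch or from mm/slub.c:

static void *example_alloc_one(struct kmem_cache *s, gfp_t gfpflags)
{
	struct obj_cgroup *objcg = NULL;
	void *object = NULL;
	bool init = false;

	/* May fail the allocation (should_failslab(), memcg charge). */
	s = slab_pre_alloc_hook(s, NULL, &objcg, 1, gfpflags);
	if (!s)
		return NULL;

	/* Placeholder for the real SLUB fastpath/slowpath object grab. */
	object = __example_alloc_object(s, gfpflags);
	init = slab_want_init_on_alloc(gfpflags, s);

	/*
	 * KASAN/KMSAN/kmemleak handling, optional zeroing and memcg
	 * accounting; s->object_size stands in for the requested size here.
	 */
	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init,
			     s->object_size);
	return object;
}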