Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c | 63
1 file changed, 33 insertions(+), 30 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index adbace4256ef..88e833986332 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -12,6 +12,7 @@
 #include <linux/memory.h>
 #include <linux/cache.h>
 #include <linux/compiler.h>
+#include <linux/kfence.h>
 #include <linux/module.h>
 #include <linux/cpu.h>
 #include <linux/uaccess.h>
@@ -197,7 +198,7 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 	size = ALIGN(size, sizeof(void *));
 	align = calculate_alignment(flags, align, size);
 	size = ALIGN(size, align);
-	flags = kmem_cache_flags(size, flags, name, NULL);
+	flags = kmem_cache_flags(size, flags, name);
 
 	if (flags & SLAB_NEVER_MERGE)
 		return NULL;
@@ -309,9 +310,6 @@ kmem_cache_create_usercopy(const char *name,
 	const char *cache_name;
 	int err;
 
-	get_online_cpus();
-	get_online_mems();
-
 	mutex_lock(&slab_mutex);
 
 	err = kmem_cache_sanity_check(name, size);
@@ -360,9 +358,6 @@ kmem_cache_create_usercopy(const char *name,
 out_unlock:
 	mutex_unlock(&slab_mutex);
 
-	put_online_mems();
-	put_online_cpus();
-
 	if (err) {
 		if (flags & SLAB_PANIC)
 			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
@@ -436,6 +431,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 	rcu_barrier();
 
 	list_for_each_entry_safe(s, s2, &to_destroy, list) {
+		kfence_shutdown_cache(s);
 #ifdef SLAB_SUPPORTS_SYSFS
 		sysfs_slab_release(s);
 #else
@@ -461,6 +457,7 @@ static int shutdown_cache(struct kmem_cache *s)
 		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
 		schedule_work(&slab_caches_to_rcu_destroy_work);
 	} else {
+		kfence_shutdown_cache(s);
 #ifdef SLAB_SUPPORTS_SYSFS
 		sysfs_slab_unlink(s);
 		sysfs_slab_release(s);
@@ -486,9 +483,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (unlikely(!s))
 		return;
 
-	get_online_cpus();
-	get_online_mems();
-
 	mutex_lock(&slab_mutex);
 
 	s->refcount--;
@@ -503,9 +497,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	}
 out_unlock:
 	mutex_unlock(&slab_mutex);
-
-	put_online_mems();
-	put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -522,12 +513,10 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	int ret;
 
-	get_online_cpus();
-	get_online_mems();
+
 	kasan_cache_shrink(cachep);
 	ret = __kmem_cache_shrink(cachep);
-	put_online_mems();
-	put_online_cpus();
+
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
@@ -654,6 +643,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
 		panic("Out of memory when creating slab %s\n", name);
 
 	create_boot_cache(s, name, size, flags, useroffset, usersize);
+	kasan_cache_create_kmalloc(s);
 	list_add(&s->list, &slab_caches);
 	s->refcount = 1;
 	return s;
@@ -912,8 +902,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	page = alloc_pages(flags, order);
 	if (likely(page)) {
 		ret = page_address(page);
-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
-				    PAGE_SIZE << order);
+		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+				      PAGE_SIZE << order);
 	}
 	ret = kasan_kmalloc_large(ret, size, flags);
 	/* As ret might get tagged, call kmemleak hook after KASAN. */
@@ -1146,16 +1136,27 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 	void *ret;
 	size_t ks;
 
-	ks = ksize(p);
+	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
+	if (likely(!ZERO_OR_NULL_PTR(p))) {
+		if (!kasan_check_byte(p))
+			return NULL;
+		ks = kfence_ksize(p) ?: __ksize(p);
+	} else
+		ks = 0;
 
+	/* If the object still fits, repoison it precisely. */
 	if (ks >= new_size) {
 		p = kasan_krealloc((void *)p, new_size, flags);
 		return (void *)p;
 	}
 
 	ret = kmalloc_track_caller(new_size, flags);
-	if (ret && p)
-		memcpy(ret, p, ks);
+	if (ret && p) {
+		/* Disable KASAN checks as the object's redzone is accessed. */
+		kasan_disable_current();
+		memcpy(ret, kasan_reset_tag(p), ks);
+		kasan_enable_current();
+	}
 
 	return ret;
 }
@@ -1232,22 +1233,24 @@ size_t ksize(const void *objp)
 	size_t size;
 
 	/*
-	 * We need to check that the pointed to object is valid, and only then
-	 * unpoison the shadow memory below. We use __kasan_check_read(), to
-	 * generate a more useful report at the time ksize() is called (rather
-	 * than later where behaviour is undefined due to potential
-	 * use-after-free or double-free).
+	 * We need to first check that the pointer to the object is valid, and
+	 * only then unpoison the memory. The report printed from ksize() is
+	 * more useful, then when it's printed later when the behaviour could
+	 * be undefined due to a potential use-after-free or double-free.
+	 *
+	 * We use kasan_check_byte(), which is supported for the hardware
+	 * tag-based KASAN mode, unlike kasan_check_read/write().
 	 *
-	 * If the pointed to memory is invalid we return 0, to avoid users of
+	 * If the pointed to memory is invalid, we return 0 to avoid users of
 	 * ksize() writing to and potentially corrupting the memory region.
 	 *
 	 * We want to perform the check before __ksize(), to avoid potentially
 	 * crashing in __ksize() due to accessing invalid metadata.
 	 */
-	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
+	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
 		return 0;
 
-	size = __ksize(objp);
+	size = kfence_ksize(objp) ?: __ksize(objp);
 	/*
 	 * We assume that ksize callers could use whole allocated area,
 	 * so we need to unpoison this area.
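For context on the __do_krealloc() hunk above: the patch keeps krealloc()'s rule of copying only the old object's usable size, while switching the size lookup to the uninstrumented kfence_ksize()/__ksize() helpers so KASAN can poison the new object precisely. A rough userspace analogue of that copy-only-the-old-usable-size flow is sketched below; demo_krealloc() is an invented name, glibc's malloc_usable_size() stands in for the kernel size helpers, and none of the KASAN poisoning is reproduced.

/* Userspace sketch only; not kernel code. */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *demo_krealloc(void *p, size_t new_size)
{
	size_t ks = p ? malloc_usable_size(p) : 0;
	void *ret;

	/* The current allocation is already large enough: reuse it
	 * (the kernel additionally re-poisons the unused tail here). */
	if (ks >= new_size)
		return p;

	ret = malloc(new_size);
	if (ret && p) {
		/* Copy only the old usable size, never the new, larger one. */
		memcpy(ret, p, ks);
		free(p);
	}
	return ret;
}

int main(void)
{
	char *buf = demo_krealloc(NULL, 8);

	if (!buf)
		return 1;
	strcpy(buf, "slab");
	buf = demo_krealloc(buf, 4096);	/* grows, old contents preserved */
	printf("%s\n", buf);
	free(buf);
	return 0;
}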
