diff options
Diffstat (limited to 'mm/slab.c')
| -rw-r--r-- | mm/slab.c | 59 | 
1 file changed, 39 insertions, 20 deletions
diff --git a/mm/slab.c b/mm/slab.c index dcc55e78f353..51fd424e0d6d 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -100,6 +100,7 @@  #include	<linux/seq_file.h>  #include	<linux/notifier.h>  #include	<linux/kallsyms.h> +#include	<linux/kfence.h>  #include	<linux/cpu.h>  #include	<linux/sysctl.h>  #include	<linux/module.h> @@ -272,7 +273,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)  #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)  #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)  #define	STATS_INC_GROWN(x)	((x)->grown++) -#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y)) +#define	STATS_ADD_REAPED(x, y)	((x)->reaped += (y))  #define	STATS_SET_HIGH(x)						\  	do {								\  		if ((x)->num_active > (x)->high_mark)			\ @@ -296,7 +297,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)  #define	STATS_DEC_ACTIVE(x)	do { } while (0)  #define	STATS_INC_ALLOCED(x)	do { } while (0)  #define	STATS_INC_GROWN(x)	do { } while (0) -#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0) +#define	STATS_ADD_REAPED(x, y)	do { (void)(y); } while (0)  #define	STATS_SET_HIGH(x)	do { } while (0)  #define	STATS_INC_ERR(x)	do { } while (0)  #define	STATS_INC_NODEALLOCS(x)	do { } while (0) @@ -332,7 +333,7 @@ static int obj_offset(struct kmem_cache *cachep)  static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)  {  	BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); -	return (unsigned long long*) (objp + obj_offset(cachep) - +	return (unsigned long long *) (objp + obj_offset(cachep) -  				      sizeof(unsigned long long));  } @@ -580,7 +581,7 @@ static int transfer_objects(struct array_cache *to,  	if (!nr)  		return 0; -	memcpy(to->entry + to->avail, from->entry + from->avail -nr, +	memcpy(to->entry + to->avail, from->entry + from->avail - nr,  			sizeof(void *) *nr);  	from->avail -= nr; @@ -1379,7 +1380,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,  		return NULL;  	} -	
account_slab_page(page, cachep->gfporder, cachep); +	account_slab_page(page, cachep->gfporder, cachep, flags);  	__SetPageSlab(page);  	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */  	if (sk_memalloc_socks() && page_is_pfmemalloc(page)) @@ -1790,8 +1791,7 @@ static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)  }  slab_flags_t kmem_cache_flags(unsigned int object_size, -	slab_flags_t flags, const char *name, -	void (*ctor)(void *)) +	slab_flags_t flags, const char *name)  {  	return flags;  } @@ -2738,7 +2738,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,  #else  #define kfree_debugcheck(x) do { } while(0) -#define cache_free_debugcheck(x,objp,z) (objp) +#define cache_free_debugcheck(x, objp, z) (objp)  #endif  static inline void fixup_objfreelist_debug(struct kmem_cache *cachep, @@ -3025,7 +3025,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,  	return objp;  }  #else -#define cache_alloc_debugcheck_after(a,b,objp,d) (objp) +#define cache_alloc_debugcheck_after(a, b, objp, d) (objp)  #endif  static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) @@ -3209,7 +3209,7 @@ must_grow:  }  static __always_inline void * -slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, +slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,  		   unsigned long caller)  {  	unsigned long save_flags; @@ -3222,6 +3222,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,  	if (unlikely(!cachep))  		return NULL; +	ptr = kfence_alloc(cachep, orig_size, flags); +	if (unlikely(ptr)) +		goto out_hooks; +  	cache_alloc_debugcheck_before(cachep, flags);  	local_irq_save(save_flags); @@ -3254,6 +3258,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,  	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)  		memset(ptr, 0, cachep->object_size); +out_hooks:  	slab_post_alloc_hook(cachep, 
objcg, flags, 1, &ptr);  	return ptr;  } @@ -3291,7 +3296,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)  #endif /* CONFIG_NUMA */  static __always_inline void * -slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) +slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)  {  	unsigned long save_flags;  	void *objp; @@ -3302,6 +3307,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)  	if (unlikely(!cachep))  		return NULL; +	objp = kfence_alloc(cachep, orig_size, flags); +	if (unlikely(objp)) +		goto out; +  	cache_alloc_debugcheck_before(cachep, flags);  	local_irq_save(save_flags);  	objp = __do_cache_alloc(cachep, flags); @@ -3312,6 +3321,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)  	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)  		memset(objp, 0, cachep->object_size); +out:  	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);  	return objp;  } @@ -3417,11 +3427,17 @@ free_done:  static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,  					 unsigned long caller)  { +	if (is_kfence_address(objp)) { +		kmemleak_free_recursive(objp, cachep->flags); +		__kfence_free(objp); +		return; +	} +  	if (unlikely(slab_want_init_on_free(cachep)))  		memset(objp, 0, cachep->object_size);  	/* Put the object into the quarantine, don't touch it for now. */ -	if (kasan_slab_free(cachep, objp, _RET_IP_)) +	if (kasan_slab_free(cachep, objp))  		return;  	/* Use KCSAN to help debug racy use-after-free. 
*/ @@ -3483,7 +3499,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,   */  void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)  { -	void *ret = slab_alloc(cachep, flags, _RET_IP_); +	void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);  	trace_kmem_cache_alloc(_RET_IP_, ret,  			       cachep->object_size, cachep->size, flags); @@ -3516,7 +3532,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,  	local_irq_disable();  	for (i = 0; i < size; i++) { -		void *objp = __do_cache_alloc(s, flags); +		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);  		if (unlikely(!objp))  			goto error; @@ -3549,7 +3565,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)  {  	void *ret; -	ret = slab_alloc(cachep, flags, _RET_IP_); +	ret = slab_alloc(cachep, flags, size, _RET_IP_);  	ret = kasan_kmalloc(cachep, ret, size, flags);  	trace_kmalloc(_RET_IP_, ret, @@ -3575,7 +3591,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);   */  void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)  { -	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); +	void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);  	trace_kmem_cache_alloc_node(_RET_IP_, ret,  				    cachep->object_size, cachep->size, @@ -3593,7 +3609,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,  {  	void *ret; -	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); +	ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);  	ret = kasan_kmalloc(cachep, ret, size, flags);  	trace_kmalloc_node(_RET_IP_, ret, @@ -3674,7 +3690,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,  	cachep = kmalloc_slab(size, flags);  	if (unlikely(ZERO_OR_NULL_PTR(cachep)))  		return cachep; -	ret = slab_alloc(cachep, flags, caller); +	ret = slab_alloc(cachep, flags, size, caller);  	ret = kasan_kmalloc(cachep, ret, size, 
flags);  	trace_kmalloc(caller, ret, @@ -3717,7 +3733,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)  	__cache_free(cachep, objp, _RET_IP_);  	local_irq_restore(flags); -	trace_kmem_cache_free(_RET_IP_, objp); +	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);  }  EXPORT_SYMBOL(kmem_cache_free); @@ -4173,7 +4189,10 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,  	BUG_ON(objnr >= cachep->num);  	/* Find offset within object. */ -	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); +	if (is_kfence_address(ptr)) +		offset = ptr - kfence_object_start(ptr); +	else +		offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);  	/* Allow address range falling entirely within usercopy region. */  	if (offset >= cachep->useroffset &&  | 
