author     Linus Torvalds <torvalds@linux-foundation.org>   2022-01-10 11:58:12 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-01-10 11:58:12 -0800
commit     ca1a46d6f5064c129f7ca6bcfd8f035d69da175c (patch)
tree       fb3da9324e2caa4ae650177ebd5598214e28c2b8 /mm/kasan/common.c
parent     d93aebbd76a07a8101d2f7393dc18be3e235f11b (diff)
parent     9d6c59c1c0d62a314a2b46839699b200cccd2d08 (diff)
Merge tag 'slab-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:
- Separate struct slab from struct page - an offshoot of the page folio
  work.

  Struct page fields used by the slab allocators are moved from struct
  page to a new struct slab that uses the same physical storage. Similar
  to struct folio, it always corresponds to a head page. This brings
  better type safety, separation of large kmalloc allocations from true
  slabs, and a cleanup of the related objcg code (see the conversion
  sketch after these summary points).
- A SLAB_MERGE_DEFAULT config optimization.
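
As a rough illustration of the new type (a sketch only, reusing just the
helpers that appear in the mm/kasan/common.c diff further down; the
function name below is made up for this example): struct slab shares the
physical storage of the slab's head struct page, so the helpers change
the static type of a pointer rather than where it points, and
virt_to_slab() returning NULL for non-slab pages is what separates large
kmalloc allocations from true slabs.

	/*
	 * Sketch only, not part of this series: look up the kmem_cache
	 * backing an object via the new struct slab type.
	 */
	static struct kmem_cache *sketch_lookup_cache(const void *object)
	{
		struct slab *slab = virt_to_slab(object);

		/* Not a slab page, e.g. a large kmalloc() allocation. */
		if (!slab)
			return NULL;

		/* slab_page(slab) would hand back the underlying head page. */
		return slab->slab_cache;
	}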
* tag 'slab-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab: (33 commits)
mm/slob: Remove unnecessary page_mapcount_reset() function call
bootmem: Use page->index instead of page->freelist
zsmalloc: Stop using slab fields in struct page
mm/slub: Define struct slab fields for CONFIG_SLUB_CPU_PARTIAL only when enabled
mm/slub: Simplify struct slab slabs field definition
mm/sl*b: Differentiate struct slab fields by sl*b implementations
mm/kfence: Convert kfence_guarded_alloc() to struct slab
mm/kasan: Convert to struct folio and struct slab
mm/slob: Convert SLOB to use struct slab and struct folio
mm/memcg: Convert slab objcgs from struct page to struct slab
mm: Convert struct page to struct slab in functions used by other subsystems
mm/slab: Finish struct page to struct slab conversion
mm/slab: Convert most struct page to struct slab by spatch
mm/slab: Convert kmem_getpages() and kmem_freepages() to struct slab
mm/slub: Finish struct page to struct slab conversion
mm/slub: Convert most struct page to struct slab by spatch
mm/slub: Convert pfmemalloc_match() to take a struct slab
mm/slub: Convert __free_slab() to use struct slab
mm/slub: Convert alloc_slab_page() to return a struct slab
mm/slub: Convert print_page_info() to print_slab_info()
...
Diffstat (limited to 'mm/kasan/common.c')
-rw-r--r--   mm/kasan/common.c   27
1 file changed, 15 insertions, 12 deletions
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 8428da2aaf17..7c06db78a76c 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -247,8 +247,9 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 }
 #endif
 
-void __kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab)
 {
+	struct page *page = slab_page(slab);
 	unsigned long i;
 
 	for (i = 0; i < compound_nr(page); i++)
@@ -298,7 +299,7 @@ static inline u8 assign_tag(struct kmem_cache *cache,
 	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
 #ifdef CONFIG_SLAB
 	/* For SLAB assign tags based on the object index in the freelist. */
-	return (u8)obj_to_index(cache, virt_to_head_page(object), (void *)object);
+	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
 #else
 	/*
 	 * For SLUB assign a random tag during slab creation, otherwise reuse
@@ -341,7 +342,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
 	if (is_kfence_address(object))
 		return false;
 
-	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
+	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
 	    object)) {
 		kasan_report_invalid_free(tagged_object, ip);
 		return true;
@@ -401,9 +402,9 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
 
 void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 {
-	struct page *page;
+	struct folio *folio;
 
-	page = virt_to_head_page(ptr);
+	folio = virt_to_folio(ptr);
 
 	/*
 	 * Even though this function is only called for kmem_cache_alloc and
@@ -411,12 +412,14 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 	 * !PageSlab() when the size provided to kmalloc is larger than
 	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
 	 */
-	if (unlikely(!PageSlab(page))) {
+	if (unlikely(!folio_test_slab(folio))) {
 		if (____kasan_kfree_large(ptr, ip))
 			return;
-		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
+		kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
 	} else {
-		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
+		struct slab *slab = folio_slab(folio);
+
+		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
 	}
 }
 
@@ -560,7 +563,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 
 void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
-	struct page *page;
+	struct slab *slab;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
@@ -572,13 +575,13 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	 */
 	kasan_unpoison(object, size, false);
 
-	page = virt_to_head_page(object);
+	slab = virt_to_slab(object);
 
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!slab))
 		return __kasan_kmalloc_large(object, size, flags);
 	else
-		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
+		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
 bool __kasan_check_byte(const void *address, unsigned long ip)
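
Taken together, the hunks above follow one pattern: resolve the address
to a folio first, and narrow it to a struct slab only after
folio_test_slab() has confirmed it really is one. A condensed sketch of
that caller-side pattern (not code from this series; the function name
is hypothetical, the folio helpers are the ones shown in the diff, and
the kmem_cache object_size field is assumed as the per-object size):

	/* Sketch: report how large the backing allocation of a pointer is. */
	static size_t sketch_backing_size(const void *ptr)
	{
		struct folio *folio = virt_to_folio(ptr);

		/* Large kmalloc()s are plain page-allocator folios. */
		if (!folio_test_slab(folio))
			return folio_size(folio);

		/* Narrowing to struct slab is only valid after the check above. */
		return folio_slab(folio)->slab_cache->object_size;
	}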