author    | Linus Torvalds <torvalds@linux-foundation.org> | 2024-05-22 17:32:04 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-05-22 17:32:04 -0700
commit    | 5c6f4d68e2aca67e425b7227369ec9fde8adfb6d (patch)
tree      | b60f38675b4572047bcb840a89cb07329a5f6c22
parent    | de7e71ef8bed222dd144d8878091ecb6d5dfd208 (diff)
parent    | 99b80ac45f7ec351c2d1c9fbfec702213dcae566 (diff)
Merge tag 'mm-stable-2024-05-22-17-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull more mm updates from Andrew Morton:
"A series from Dave Chinner which cleans up and fixes the handling of
nested allocations within stackdepot and page-owner"
* tag 'mm-stable-2024-05-22-17-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mm/page-owner: use gfp_nested_mask() instead of open coded masking
stackdepot: use gfp_nested_mask() instead of open coded masking
mm: lift gfp_kmemleak_mask() to gfp.h
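
For illustration only (not part of the commit itself), and assuming the standard flag definitions from include/linux/gfp_types.h, the effect of the new helper on a GFP_NOFS caller works out as follows:

	/*
	 * Illustrative flag algebra, assuming the usual definitions:
	 *   GFP_KERNEL == __GFP_RECLAIM | __GFP_IO | __GFP_FS
	 *   GFP_NOFS   == __GFP_RECLAIM | __GFP_IO
	 *
	 * gfp_nested_mask(GFP_NOFS)
	 *   == (GFP_NOFS & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP))
	 *      | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
	 *   == GFP_NOFS | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
	 *
	 * __GFP_FS is never reintroduced, so the nested allocation cannot
	 * recurse into filesystem reclaim, and the extra bits make it fail
	 * fast, silently, and without dipping into emergency reserves.
	 */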
-rw-r--r-- | include/linux/gfp.h | 25
-rw-r--r-- | lib/stackdepot.c    | 11
-rw-r--r-- | mm/kmemleak.c       | 12
-rw-r--r-- | mm/page_owner.c     |  7
4 files changed, 32 insertions, 23 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 450c2cbcf04b..7f9691d375f0 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -157,6 +157,31 @@ static inline int gfp_zonelist(gfp_t flags)
 }
 
 /*
+ * gfp flag masking for nested internal allocations.
+ *
+ * For code that needs to do allocations inside the public allocation API (e.g.
+ * memory allocation tracking code) the allocations need to obey the caller
+ * allocation context constrains to prevent allocation context mismatches (e.g.
+ * GFP_KERNEL allocations in GFP_NOFS contexts) from potential deadlock
+ * situations.
+ *
+ * It is also assumed that these nested allocations are for internal kernel
+ * object storage purposes only and are not going to be used for DMA, etc. Hence
+ * we strip out all the zone information and leave just the context information
+ * intact.
+ *
+ * Further, internal allocations must fail before the higher level allocation
+ * can fail, so we must make them fail faster and fail silently. We also don't
+ * want them to deplete emergency reserves. Hence nested allocations must be
+ * prepared for these allocations to fail.
+ */
+static inline gfp_t gfp_nested_mask(gfp_t flags)
+{
+	return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
+		(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
+}
+
+/*
  * We get the zone list from the current node and the gfp_mask.
  * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
  * There are two zonelists per node, one for all zones with memory and
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index cd8f23455285..5ed34cc963fc 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -624,15 +624,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 	 * we won't be able to do that under the lock.
 	 */
 	if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
-		/*
-		 * Zero out zone modifiers, as we don't have specific zone
-		 * requirements. Keep the flags related to allocation in atomic
-		 * contexts, I/O, nolockdep.
-		 */
-		alloc_flags &= ~GFP_ZONEMASK;
-		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
-		alloc_flags |= __GFP_NOWARN;
-		page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
+		page = alloc_pages(gfp_nested_mask(alloc_flags),
+				   DEPOT_POOL_ORDER);
 		if (page)
 			prealloc = page_address(page);
 	}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index fdcf01f62202..d5b6fba44fc9 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -114,12 +114,6 @@
 
 #define BYTES_PER_POINTER	sizeof(void *)
 
-/* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
-					   __GFP_NOLOCKDEP)) | \
-				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-				 __GFP_NOWARN)
-
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
 	struct hlist_node node;
@@ -463,7 +457,8 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
 
 	/* try the slab allocator first */
 	if (object_cache) {
-		object = kmem_cache_alloc_noprof(object_cache, gfp_kmemleak_mask(gfp));
+		object = kmem_cache_alloc_noprof(object_cache,
+						 gfp_nested_mask(gfp));
 		if (object)
 			return object;
 	}
@@ -947,7 +942,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
 
 	if (scan_area_cache)
-		area = kmem_cache_alloc_noprof(scan_area_cache, gfp_kmemleak_mask(gfp));
+		area = kmem_cache_alloc_noprof(scan_area_cache,
+					       gfp_nested_mask(gfp));
 
 	raw_spin_lock_irqsave(&object->lock, flags);
 	if (!area) {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 0a448d76e0af..2d6360eaccbb 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -168,13 +168,8 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
 	unsigned long flags;
 	struct stack *stack;
 
-	/* Filter gfp_mask the same way stackdepot does, for consistency */
-	gfp_mask &= ~GFP_ZONEMASK;
-	gfp_mask &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
-	gfp_mask |= __GFP_NOWARN;
-
 	set_current_in_page_owner();
-	stack = kmalloc(sizeof(*stack), gfp_mask);
+	stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
 	if (!stack) {
 		unset_current_in_page_owner();
 		return;
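
As a usage sketch of the new helper: a hypothetical tracking hook that allocates its own metadata from inside the allocator path would pass the caller's gfp context straight through gfp_nested_mask(). Everything below (the struct, the function, the facility it belongs to) is illustrative and not part of this series.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical per-allocation note kept by an imaginary tracking facility. */
struct alloc_note {
	unsigned long	ip;	/* caller's return address */
	size_t		size;	/* size of the tracked allocation */
};

static struct alloc_note *note_alloc(size_t size, gfp_t caller_gfp)
{
	struct alloc_note *note;

	/*
	 * Nested allocation: inherit the caller's context (GFP_NOFS,
	 * GFP_NOIO, GFP_ATOMIC, ...), drop zone modifiers, and fail
	 * fast and silently rather than stressing the allocator.
	 */
	note = kmalloc(sizeof(*note), gfp_nested_mask(caller_gfp));
	if (!note)
		return NULL;	/* tracking failure must be tolerated */

	note->ip = _RET_IP_;
	note->size = size;
	return note;
}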