From b8ca7ff7731f57b256fcc13a9b7d4913f5282e5c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 10 Apr 2019 12:28:05 +0200
Subject: mm/slub: Remove the ULONG_MAX stack trace hackery

No architecture terminates the stack trace with ULONG_MAX anymore. Remove
the cruft.

While at it remove the pointless loop of clearing the stack array
completely. It's sufficient to clear the last entry as the consumers break
out on the first zeroed entry anyway.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org
Cc: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Link: https://lkml.kernel.org/r/20190410103644.574058244@linutronix.de
---
 mm/slub.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

(limited to 'mm')

diff --git a/mm/slub.c b/mm/slub.c
index d30ede89f4a6..e2ccd12b6faa 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -553,7 +553,6 @@ static void set_track(struct kmem_cache *s, void *object,
         if (addr) {
 #ifdef CONFIG_STACKTRACE
                 struct stack_trace trace;
-                int i;

                 trace.nr_entries = 0;
                 trace.max_entries = TRACK_ADDRS_COUNT;
@@ -563,20 +562,16 @@ static void set_track(struct kmem_cache *s, void *object,
                 save_stack_trace(&trace);
                 metadata_access_disable();

-                /* See rant in lockdep.c */
-                if (trace.nr_entries != 0 &&
-                    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
-                        trace.nr_entries--;
-
-                for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
-                        p->addrs[i] = 0;
+                if (trace.nr_entries < TRACK_ADDRS_COUNT)
+                        p->addrs[trace.nr_entries] = 0;
 #endif
                 p->addr = addr;
                 p->cpu = smp_processor_id();
                 p->pid = current->pid;
                 p->when = jiffies;
-        } else
+        } else {
                 memset(p, 0, sizeof(struct track));
+        }
 }

 static void init_tracking(struct kmem_cache *s, void *object)
--
cgit v1.2.3-70-g09d2

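For context on the "clear only the last entry" simplification above: the
consumers of the slub track addresses walk p->addrs[] and stop at the first
zero slot, so zeroing the single entry behind the last saved address is
enough. A minimal sketch of such a consumer, assuming the slub-internal
struct track and TRACK_ADDRS_COUNT (simplified, not the verbatim
print_track() code from mm/slub.c):

        /* Simplified consumer sketch: the walk ends at the first zeroed
         * slot, which is why set_track() only needs to clear the entry
         * right after the last saved return address. */
        static void print_track_addrs(const struct track *t)
        {
                int i;

                for (i = 0; i < TRACK_ADDRS_COUNT; i++) {
                        if (!t->addrs[i])
                                break;  /* first zeroed entry terminates the trace */
                        pr_err("\t%pS\n", (void *)t->addrs[i]);
                }
        }
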
From 4621c9858f05ab08434221e3a15cc8098645ef2a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 10 Apr 2019 12:28:06 +0200
Subject: mm/page_owner: Remove the ULONG_MAX stack trace hackery

No architecture terminates the stack trace with ULONG_MAX anymore. Remove
the cruft.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: linux-mm@kvack.org
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lkml.kernel.org/r/20190410103644.661974663@linutronix.de
---
 mm/page_owner.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'mm')

diff --git a/mm/page_owner.c b/mm/page_owner.c
index 925b6f44a444..df277e6bc3c6 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -148,9 +148,6 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
         depot_stack_handle_t handle;

         save_stack_trace(&trace);
-        if (trace.nr_entries != 0 &&
-            trace.entries[trace.nr_entries-1] == ULONG_MAX)
-                trace.nr_entries--;

         /*
          * We need to check recursion here because our request to stackdepot
--
cgit v1.2.3-70-g09d2


From ead97a49ec3a3cb9b5133acbfed9a49b91ebf37c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 10 Apr 2019 12:28:07 +0200
Subject: mm/kasan: Remove the ULONG_MAX stack trace hackery

No architecture terminates the stack trace with ULONG_MAX anymore. Remove
the cruft.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: kasan-dev@googlegroups.com
Cc: linux-mm@kvack.org
Link: https://lkml.kernel.org/r/20190410103644.750219625@linutronix.de
---
 mm/kasan/common.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'mm')

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 80bbe62b16cd..38e5f20a775a 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -74,9 +74,6 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)

         save_stack_trace(&trace);
         filter_irq_stacks(&trace);
-        if (trace.nr_entries != 0 &&
-            trace.entries[trace.nr_entries-1] == ULONG_MAX)
-                trace.nr_entries--;

         return depot_save_stack(&trace, flags);
 }
--
cgit v1.2.3-70-g09d2

From 7971679994d3a239538ddcfea9f89468f0bd65e2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Apr 2019 11:45:00 +0200
Subject: mm/slub: Simplify stack trace retrieval

Replace the indirection through struct stack_trace with an invocation of
the storage array based interface.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org
Cc: David Rientjes <rientjes@google.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: kasan-dev@googlegroups.com
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: iommu@lists.linux-foundation.org
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: linux-btrfs@vger.kernel.org
Cc: dm-devel@redhat.com
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: dri-devel@lists.freedesktop.org
Cc: David Airlie <airlied@linux.ie>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tom Zanussi <tom.zanussi@linux.intel.com>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: linux-arch@vger.kernel.org
Link: https://lkml.kernel.org/r/20190425094801.771410441@linutronix.de
---
 mm/slub.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

(limited to 'mm')

diff --git a/mm/slub.c b/mm/slub.c
index e2ccd12b6faa..6b28cd2b5a58 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -552,18 +552,14 @@ static void set_track(struct kmem_cache *s, void *object,

         if (addr) {
 #ifdef CONFIG_STACKTRACE
-                struct stack_trace trace;
+                unsigned int nr_entries;

-                trace.nr_entries = 0;
-                trace.max_entries = TRACK_ADDRS_COUNT;
-                trace.entries = p->addrs;
-                trace.skip = 3;
                 metadata_access_enable();
-                save_stack_trace(&trace);
+                nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
                 metadata_access_disable();

-                if (trace.nr_entries < TRACK_ADDRS_COUNT)
-                        p->addrs[trace.nr_entries] = 0;
+                if (nr_entries < TRACK_ADDRS_COUNT)
+                        p->addrs[nr_entries] = 0;
 #endif
                 p->addr = addr;
                 p->cpu = smp_processor_id();
--
cgit v1.2.3-70-g09d2

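As a stand-alone illustration of the storage array based interface that
this and the following conversions move to (a sketch only; DEMO_STACK_DEPTH
and demo_save_stack() are invented names, not part of any patch):

        #include <linux/kernel.h>
        #include <linux/stacktrace.h>
        #include <linux/stackdepot.h>
        #include <linux/gfp.h>

        #define DEMO_STACK_DEPTH 16     /* arbitrary depth for the sketch */

        /* Save the current call chain into a plain unsigned long array and
         * hand it to stackdepot. No struct stack_trace is involved. */
        static depot_stack_handle_t demo_save_stack(gfp_t flags)
        {
                unsigned long entries[DEMO_STACK_DEPTH];
                unsigned int nr_entries;

                /* skip the two innermost frames: this helper and its caller */
                nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
                return stack_depot_save(entries, nr_entries, flags);
        }
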
From 07984aad1c7ed679596f849c0e7d5e029f76e0eb Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Apr 2019 11:45:01 +0200
Subject: mm/kmemleak: Simplify stacktrace handling

Replace the indirection through struct stack_trace by using the storage
array based interfaces.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: linux-mm@kvack.org
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: kasan-dev@googlegroups.com
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: iommu@lists.linux-foundation.org
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: linux-btrfs@vger.kernel.org
Cc: dm-devel@redhat.com
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: dri-devel@lists.freedesktop.org
Cc: David Airlie <airlied@linux.ie>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tom Zanussi <tom.zanussi@linux.intel.com>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: linux-arch@vger.kernel.org
Link: https://lkml.kernel.org/r/20190425094801.863716911@linutronix.de
---
 mm/kmemleak.c | 24 +++---------------------
 1 file changed, 3 insertions(+), 21 deletions(-)

(limited to 'mm')

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 6c318f5ac234..d12b35de1e7e 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -410,11 +410,6 @@ static void print_unreferenced(struct seq_file *seq,
  */
 static void dump_object_info(struct kmemleak_object *object)
 {
-        struct stack_trace trace;
-
-        trace.nr_entries = object->trace_len;
-        trace.entries = object->trace;
-
         pr_notice("Object 0x%08lx (size %zu):\n",
                   object->pointer, object->size);
         pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
@@ -424,7 +419,7 @@ static void dump_object_info(struct kmemleak_object *object)
         pr_notice("  flags = 0x%x\n", object->flags);
         pr_notice("  checksum = %u\n", object->checksum);
         pr_notice("  backtrace:\n");
-        print_stack_trace(&trace, 4);
+        stack_trace_print(object->trace, object->trace_len, 4);
 }

 /*
@@ -553,15 +548,7 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
  */
 static int __save_stack_trace(unsigned long *trace)
 {
-        struct stack_trace stack_trace;
-
-        stack_trace.max_entries = MAX_TRACE;
-        stack_trace.nr_entries = 0;
-        stack_trace.entries = trace;
-        stack_trace.skip = 2;
-        save_stack_trace(&stack_trace);
-
-        return stack_trace.nr_entries;
+        return stack_trace_save(trace, MAX_TRACE, 2);
 }

 /*
@@ -2019,13 +2006,8 @@ early_param("kmemleak", kmemleak_boot_config);

 static void __init print_log_trace(struct early_log *log)
 {
-        struct stack_trace trace;
-
-        trace.nr_entries = log->trace_len;
-        trace.entries = log->trace;
-
         pr_notice("Early log backtrace:\n");
-        print_stack_trace(&trace, 2);
+        stack_trace_print(log->trace, log->trace_len, 2);
 }

 /*
--
cgit v1.2.3-70-g09d2

From 880e049c9ce9020384ce305c71375aa1cb54addb Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Apr 2019 11:45:02 +0200
Subject: mm/kasan: Simplify stacktrace handling

Replace the indirection through struct stack_trace by using the storage
array based interfaces.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: kasan-dev@googlegroups.com
Cc: linux-mm@kvack.org
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: iommu@lists.linux-foundation.org
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: linux-btrfs@vger.kernel.org
Cc: dm-devel@redhat.com
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: dri-devel@lists.freedesktop.org
Cc: David Airlie <airlied@linux.ie>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tom Zanussi <tom.zanussi@linux.intel.com>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: linux-arch@vger.kernel.org
Link: https://lkml.kernel.org/r/20190425094801.963261479@linutronix.de
---
 mm/kasan/common.c | 30 ++++++++++++------------------
 mm/kasan/report.c |  7 ++++---
 2 files changed, 16 insertions(+), 21 deletions(-)

(limited to 'mm')

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 38e5f20a775a..303a7379d2a3 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -48,34 +48,28 @@ static inline int in_irqentry_text(unsigned long ptr)
                  ptr < (unsigned long)&__softirqentry_text_end);
 }

-static inline void filter_irq_stacks(struct stack_trace *trace)
+static inline unsigned int filter_irq_stacks(unsigned long *entries,
+                                             unsigned int nr_entries)
 {
-        int i;
+        unsigned int i;

-        if (!trace->nr_entries)
-                return;
-        for (i = 0; i < trace->nr_entries; i++)
-                if (in_irqentry_text(trace->entries[i])) {
+        for (i = 0; i < nr_entries; i++) {
+                if (in_irqentry_text(entries[i])) {
                         /* Include the irqentry function into the stack. */
-                        trace->nr_entries = i + 1;
-                        break;
+                        return i + 1;
                 }
+        }
+        return nr_entries;
 }

 static inline depot_stack_handle_t save_stack(gfp_t flags)
 {
         unsigned long entries[KASAN_STACK_DEPTH];
-        struct stack_trace trace = {
-                .nr_entries = 0,
-                .entries = entries,
-                .max_entries = KASAN_STACK_DEPTH,
-                .skip = 0
-        };
-
-        save_stack_trace(&trace);
-        filter_irq_stacks(&trace);
+        unsigned int nr_entries;

-        return depot_save_stack(&trace, flags);
+        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+        nr_entries = filter_irq_stacks(entries, nr_entries);
+        return stack_depot_save(entries, nr_entries, flags);
 }

 static inline void set_track(struct kasan_track *track, gfp_t flags)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index ca9418fe9232..882d77568e7e 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -100,10 +100,11 @@ static void print_track(struct kasan_track *track, const char *prefix)
 {
         pr_err("%s by task %u:\n", prefix, track->pid);
         if (track->stack) {
-                struct stack_trace trace;
+                unsigned long *entries;
+                unsigned int nr_entries;

-                depot_fetch_stack(track->stack, &trace);
-                print_stack_trace(&trace, 0);
+                nr_entries = stack_depot_fetch(track->stack, &entries);
+                stack_trace_print(entries, nr_entries, 0);
         } else {
                 pr_err("(stack is not available)\n");
         }
--
cgit v1.2.3-70-g09d2

From af52bf6b92f7d8783c1e712cad6ef7d37cd773b2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Apr 2019 11:45:03 +0200
Subject: mm/page_owner: Simplify stack trace handling

Replace the indirection through struct stack_trace by using the storage
array based interfaces.

The original code in all printing functions is really wrong. It allocates a
storage array on stack which is unused because depot_fetch_stack() does not
store anything in it. It overwrites the entries pointer in the stack_trace
struct so it points to the depot storage.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: linux-mm@kvack.org
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: kasan-dev@googlegroups.com
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: iommu@lists.linux-foundation.org
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: linux-btrfs@vger.kernel.org
Cc: dm-devel@redhat.com
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: dri-devel@lists.freedesktop.org
Cc: David Airlie <airlied@linux.ie>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tom Zanussi <tom.zanussi@linux.intel.com>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: linux-arch@vger.kernel.org
Link: https://lkml.kernel.org/r/20190425094802.067210525@linutronix.de
---
 mm/page_owner.c | 79 ++++++++++++++++++++-------------------------------------
 1 file changed, 28 insertions(+), 51 deletions(-)

(limited to 'mm')

diff --git a/mm/page_owner.c b/mm/page_owner.c
index df277e6bc3c6..addcbb2ae4e4 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -58,15 +58,10 @@ static bool need_page_owner(void)
 static __always_inline depot_stack_handle_t create_dummy_stack(void)
 {
         unsigned long entries[4];
-        struct stack_trace dummy;
+        unsigned int nr_entries;

-        dummy.nr_entries = 0;
-        dummy.max_entries = ARRAY_SIZE(entries);
-        dummy.entries = &entries[0];
-        dummy.skip = 0;
-
-        save_stack_trace(&dummy);
-        return depot_save_stack(&dummy, GFP_KERNEL);
+        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+        return stack_depot_save(entries, nr_entries, GFP_KERNEL);
 }

 static noinline void register_dummy_stack(void)
@@ -120,46 +115,39 @@ void __reset_page_owner(struct page *page, unsigned int order)
         }
 }

-static inline bool check_recursive_alloc(struct stack_trace *trace,
-                                        unsigned long ip)
+static inline bool check_recursive_alloc(unsigned long *entries,
+                                         unsigned int nr_entries,
+                                         unsigned long ip)
 {
-        int i;
-
-        if (!trace->nr_entries)
-                return false;
+        unsigned int i;

-        for (i = 0; i < trace->nr_entries; i++) {
-                if (trace->entries[i] == ip)
+        for (i = 0; i < nr_entries; i++) {
+                if (entries[i] == ip)
                         return true;
         }
-
         return false;
 }

 static noinline depot_stack_handle_t save_stack(gfp_t flags)
 {
         unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-        struct stack_trace trace = {
-                .nr_entries = 0,
-                .entries = entries,
-                .max_entries = PAGE_OWNER_STACK_DEPTH,
-                .skip = 2
-        };
         depot_stack_handle_t handle;
+        unsigned int nr_entries;

-        save_stack_trace(&trace);
+        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

         /*
-         * We need to check recursion here because our request to stackdepot
-         * could trigger memory allocation to save new entry. New memory
-         * allocation would reach here and call depot_save_stack() again
-         * if we don't catch it. There is still not enough memory in stackdepot
-         * so it would try to allocate memory again and loop forever.
+         * We need to check recursion here because our request to
+         * stackdepot could trigger memory allocation to save new
+         * entry. New memory allocation would reach here and call
+         * stack_depot_save_entries() again if we don't catch it. There is
+         * still not enough memory in stackdepot so it would try to
+         * allocate memory again and loop forever.
          */
-        if (check_recursive_alloc(&trace, _RET_IP_))
+        if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
                 return dummy_handle;

-        handle = depot_save_stack(&trace, flags);
+        handle = stack_depot_save(entries, nr_entries, flags);
         if (!handle)
                 handle = failure_handle;

@@ -337,16 +325,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                 struct page *page, struct page_owner *page_owner,
                 depot_stack_handle_t handle)
 {
-        int ret;
-        int pageblock_mt, page_mt;
+        int ret, pageblock_mt, page_mt;
+        unsigned long *entries;
+        unsigned int nr_entries;
         char *kbuf;
-        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-        struct stack_trace trace = {
-                .nr_entries = 0,
-                .entries = entries,
-                .max_entries = PAGE_OWNER_STACK_DEPTH,
-                .skip = 0
-        };

         count = min_t(size_t, count, PAGE_SIZE);
         kbuf = kmalloc(count, GFP_KERNEL);
@@ -375,8 +357,8 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
         if (ret >= count)
                 goto err;

-        depot_fetch_stack(handle, &trace);
-        ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
+        nr_entries = stack_depot_fetch(handle, &entries);
+        ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);

         if (ret >= count)
                 goto err;
@@ -407,14 +389,9 @@ void __dump_page_owner(struct page *page)
 {
         struct page_ext *page_ext = lookup_page_ext(page);
         struct page_owner *page_owner;
-        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-        struct stack_trace trace = {
-                .nr_entries = 0,
-                .entries = entries,
-                .max_entries = PAGE_OWNER_STACK_DEPTH,
-                .skip = 0
-        };
         depot_stack_handle_t handle;
+        unsigned long *entries;
+        unsigned int nr_entries;
         gfp_t gfp_mask;
         int mt;

@@ -438,10 +415,10 @@ void __dump_page_owner(struct page *page)
                 return;
         }

-        depot_fetch_stack(handle, &trace);
+        nr_entries = stack_depot_fetch(handle, &entries);
         pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
                  page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
-        print_stack_trace(&trace, 0);
+        stack_trace_print(entries, nr_entries, 0);

         if (page_owner->last_migrate_reason != -1)
                 pr_alert("page has been migrated, last migrate reason: %s\n",
--
cgit v1.2.3-70-g09d2
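
To round out the picture, the retrieval side pairs stack_depot_fetch() with
stack_trace_print(). The sketch below (demo_print_stack() is an invented
name, not taken from any of the patches) also illustrates the point made in
the page_owner changelog: the depot hands back a pointer into its own
storage, so the print path needs no on-stack entries[] array of its own:

        #include <linux/stacktrace.h>
        #include <linux/stackdepot.h>
        #include <linux/printk.h>

        /* Fetch a previously saved trace from stackdepot and print it.
         * stack_depot_fetch() returns the number of entries and points
         * 'entries' at the depot's internal storage; nothing is copied
         * into a caller-supplied buffer. */
        static void demo_print_stack(depot_stack_handle_t handle)
        {
                unsigned long *entries;
                unsigned int nr_entries;

                nr_entries = stack_depot_fetch(handle, &entries);
                stack_trace_print(entries, nr_entries, 0);
        }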