diff options
Diffstat (limited to 'arch/csky/kernel/stacktrace.c')
-rw-r--r--  arch/csky/kernel/stacktrace.c | 174 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 138 insertions(+), 36 deletions(-)
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c index fec777a643f1..92809e1da723 100644 --- a/arch/csky/kernel/stacktrace.c +++ b/arch/csky/kernel/stacktrace.c @@ -1,57 +1,159 @@  // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */  #include <linux/sched/debug.h>  #include <linux/sched/task_stack.h>  #include <linux/stacktrace.h>  #include <linux/ftrace.h> +#include <linux/ptrace.h> -void save_stack_trace(struct stack_trace *trace) +#ifdef CONFIG_FRAME_POINTER + +struct stackframe { +	unsigned long fp; +	unsigned long ra; +}; + +void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, +			     bool (*fn)(unsigned long, void *), void *arg)  { -	save_stack_trace_tsk(current, trace); +	unsigned long fp, sp, pc; + +	if (regs) { +		fp = frame_pointer(regs); +		sp = user_stack_pointer(regs); +		pc = instruction_pointer(regs); +	} else if (task == NULL || task == current) { +		const register unsigned long current_sp __asm__ ("sp"); +		const register unsigned long current_fp __asm__ ("r8"); +		fp = current_fp; +		sp = current_sp; +		pc = (unsigned long)walk_stackframe; +	} else { +		/* task blocked in __switch_to */ +		fp = thread_saved_fp(task); +		sp = thread_saved_sp(task); +		pc = thread_saved_lr(task); +	} + +	for (;;) { +		unsigned long low, high; +		struct stackframe *frame; + +		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg))) +			break; + +		/* Validate frame pointer */ +		low = sp; +		high = ALIGN(sp, THREAD_SIZE); +		if (unlikely(fp < low || fp > high || fp & 0x3)) +			break; +		/* Unwind stack frame */ +		frame = (struct stackframe *)fp; +		sp = fp; +		fp = frame->fp; +		pc = ftrace_graph_ret_addr(current, NULL, frame->ra, +					   (unsigned long *)(fp - 8)); +	}  } -EXPORT_SYMBOL_GPL(save_stack_trace); -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +#else /* !CONFIG_FRAME_POINTER */ + +static void notrace walk_stackframe(struct 
task_struct *task, +	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)  { -	unsigned long *fp, *stack_start, *stack_end; -	unsigned long addr; -	int skip = trace->skip; -	int savesched; -	int graph_idx = 0; +	unsigned long sp, pc; +	unsigned long *ksp; -	if (tsk == current) { -		asm volatile("mov %0, r8\n":"=r"(fp)); -		savesched = 1; +	if (regs) { +		sp = user_stack_pointer(regs); +		pc = instruction_pointer(regs); +	} else if (task == NULL || task == current) { +		const register unsigned long current_sp __asm__ ("sp"); +		sp = current_sp; +		pc = (unsigned long)walk_stackframe;  	} else { -		fp = (unsigned long *)thread_saved_fp(tsk); -		savesched = 0; +		/* task blocked in __switch_to */ +		sp = thread_saved_sp(task); +		pc = thread_saved_lr(task);  	} -	addr = (unsigned long) fp & THREAD_MASK; -	stack_start = (unsigned long *) addr; -	stack_end = (unsigned long *) (addr + THREAD_SIZE); +	if (unlikely(sp & 0x3)) +		return; -	while (fp > stack_start && fp < stack_end) { -		unsigned long lpp, fpp; - -		fpp = fp[0]; -		lpp = fp[1]; -		if (!__kernel_text_address(lpp)) +	ksp = (unsigned long *)sp; +	while (!kstack_end(ksp)) { +		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))  			break; -		else -			lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL); +		pc = (*ksp++) - 0x4; +	} +} +#endif /* CONFIG_FRAME_POINTER */ + +static bool print_trace_address(unsigned long pc, void *arg) +{ +	print_ip_sym(pc); +	return false; +} + +void show_stack(struct task_struct *task, unsigned long *sp) +{ +	pr_cont("Call Trace:\n"); +	walk_stackframe(task, NULL, print_trace_address, NULL); +} -		if (savesched || !in_sched_functions(lpp)) { -			if (skip) { -				skip--; -			} else { -				trace->entries[trace->nr_entries++] = lpp; -				if (trace->nr_entries >= trace->max_entries) -					break; -			} -		} -		fp = (unsigned long *)fpp; +static bool save_wchan(unsigned long pc, void *arg) +{ +	if (!in_sched_functions(pc)) { +		unsigned long *p = arg; +		*p = pc; +		
return true; +	} +	return false; +} + +unsigned long get_wchan(struct task_struct *task) +{ +	unsigned long pc = 0; + +	if (likely(task && task != current && task->state != TASK_RUNNING)) +		walk_stackframe(task, NULL, save_wchan, &pc); +	return pc; +} + +#ifdef CONFIG_STACKTRACE +static bool __save_trace(unsigned long pc, void *arg, bool nosched) +{ +	struct stack_trace *trace = arg; + +	if (unlikely(nosched && in_sched_functions(pc))) +		return false; +	if (unlikely(trace->skip > 0)) { +		trace->skip--; +		return false;  	} + +	trace->entries[trace->nr_entries++] = pc; +	return (trace->nr_entries >= trace->max_entries); +} + +static bool save_trace(unsigned long pc, void *arg) +{ +	return __save_trace(pc, arg, false); +} + +/* + * Save stack-backtrace addresses into a stack_trace buffer. + */ +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ +	walk_stackframe(tsk, NULL, save_trace, trace);  }  EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace(struct stack_trace *trace) +{ +	save_stack_trace_tsk(NULL, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +#endif /* CONFIG_STACKTRACE */  | 
