author | Ingo Molnar <mingo@elte.hu> | 2009-06-17 12:52:15 +0200
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2009-06-17 12:56:49 +0200
commit | eadb8a091b27a840de7450f84ecff5ef13476424 (patch) |
tree | 58c3782d40def63baa8167f3d31e3048cb4c7660 /arch/s390/kernel/ftrace.c |
parent | 73874005cd8800440be4299bd095387fff4b90ac (diff) |
parent | 65795efbd380a832ae508b04dba8f8e53f0b84d9 (diff) |
Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
arch/x86/Kconfig
arch/x86/kernel/traps.c
arch/x86/power/cpu.c
arch/x86/power/cpu_32.c
kernel/Makefile
Semantic conflict:
arch/x86/kernel/hw_breakpoint.c
Merge reason: Resolve the conflicts, move from put_cpu_no_sched() to
put_cpu() in arch/x86/kernel/hw_breakpoint.c.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
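For reference, the put_cpu_no_sched() to put_cpu() change mentioned in the merge reason concerns the standard pairing with get_cpu(): put_cpu() re-enables preemption (and may trigger a reschedule), which the removed put_cpu_no_sched() variant deliberately avoided. A minimal sketch of the pattern, with a hypothetical per-CPU counter; this is illustrative, not the body of hw_breakpoint.c:

#include <linux/smp.h>		/* get_cpu()/put_cpu() */
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, hypothetical_counter);

static void touch_this_cpu(void)
{
	int cpu = get_cpu();	/* disables preemption, returns this CPU's id */

	per_cpu(hypothetical_counter, cpu)++;
	put_cpu();		/* re-enables preemption; replaces put_cpu_no_sched() */
}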
Diffstat (limited to 'arch/s390/kernel/ftrace.c')
-rw-r--r-- | arch/s390/kernel/ftrace.c | 260
1 files changed, 260 insertions, 0 deletions
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
new file mode 100644
index 000000000000..82ddfd3a75af
--- /dev/null
+++ b/arch/s390/kernel/ftrace.c
@@ -0,0 +1,260 @@
+/*
+ * Dynamic function tracer architecture backend.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <linux/hardirq.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <trace/syscall.h>
+#include <asm/lowcore.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+void ftrace_disable_code(void);
+void ftrace_disable_return(void);
+void ftrace_call_code(void);
+void ftrace_nop_code(void);
+
+#define FTRACE_INSN_SIZE 4
+
+#ifdef CONFIG_64BIT
+
+asm(
+	"	.align	4\n"
+	"ftrace_disable_code:\n"
+	"	j	0f\n"
+	"	.word	0x0024\n"
+	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
+	"	basr	%r14,%r1\n"
+	"ftrace_disable_return:\n"
+	"	lg	%r14,8(15)\n"
+	"	lgr	%r0,%r0\n"
+	"0:\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_nop_code:\n"
+	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_call_code:\n"
+	"	stg	%r14,8(%r15)\n");
+
+#else /* CONFIG_64BIT */
+
+asm(
+	"	.align	4\n"
+	"ftrace_disable_code:\n"
+	"	j	0f\n"
+	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
+	"	basr	%r14,%r1\n"
+	"ftrace_disable_return:\n"
+	"	l	%r14,4(%r15)\n"
+	"	j	0f\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"0:\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_nop_code:\n"
+	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_call_code:\n"
+	"	st	%r14,4(%r15)\n");
+
+#endif /* CONFIG_64BIT */
+
+static int ftrace_modify_code(unsigned long ip,
+			      void *old_code, int old_size,
+			      void *new_code, int new_size)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE];
+
+	/*
+	 * Note: Due to modules code can disappear and change.
+	 * We need to protect against faulting as well as code
+	 * changing. We do this by using the probe_kernel_*
+	 * functions.
+	 * This however is just a simple sanity check.
+	 */
+	if (probe_kernel_read(replaced, (void *)ip, old_size))
+		return -EFAULT;
+	if (memcmp(replaced, old_code, old_size) != 0)
+		return -EINVAL;
+	if (probe_kernel_write((void *)ip, new_code, new_size))
+		return -EPERM;
+	return 0;
+}
+
+static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
+				   unsigned long addr)
+{
+	return ftrace_modify_code(rec->ip,
+				  ftrace_call_code, FTRACE_INSN_SIZE,
+				  ftrace_disable_code, MCOUNT_INSN_SIZE);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+		    unsigned long addr)
+{
+	if (addr == MCOUNT_ADDR)
+		return ftrace_make_initial_nop(mod, rec, addr);
+	return ftrace_modify_code(rec->ip,
+				  ftrace_call_code, FTRACE_INSN_SIZE,
+				  ftrace_nop_code, FTRACE_INSN_SIZE);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	return ftrace_modify_code(rec->ip,
+				  ftrace_nop_code, FTRACE_INSN_SIZE,
+				  ftrace_call_code, FTRACE_INSN_SIZE);
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	ftrace_dyn_func = (unsigned long)func;
+	return 0;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+	*(unsigned long *)data = 0;
+	return 0;
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Patch the kernel code at ftrace_graph_caller location:
+ * The instruction there is branch relative on condition. The condition mask
+ * is either all ones (always branch aka disable ftrace_graph_caller) or all
+ * zeroes (nop aka enable ftrace_graph_caller).
+ * Instruction format for brc is a7m4xxxx where m is the condition mask.
+ */
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned short opcode = 0xa704;
+
+	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned short opcode = 0xa7f4;
+
+	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
+}
+
+static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
+{
+	return addr - (ftrace_disable_return - ftrace_disable_code);
+}
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
+{
+	return addr - MCOUNT_OFFSET_RET;
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addresses
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
+{
+	struct ftrace_graph_ent trace;
+
+	/* Nmi's are currently unsupported. */
+	if (unlikely(in_nmi()))
+		goto out;
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		goto out;
+	if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
+		goto out;
+	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
+	/* Only trace if the calling function expects to. */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		goto out;
+	}
+	parent = (unsigned long)return_to_handler;
+out:
+	return parent;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned long __start_syscalls_metadata[];
+extern unsigned long __stop_syscalls_metadata[];
+extern unsigned int sys_call_table[];
+
+static struct syscall_metadata **syscalls_metadata;
+
+struct syscall_metadata *syscall_nr_to_meta(int nr)
+{
+	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
+		return NULL;
+
+	return syscalls_metadata[nr];
+}
+
+static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
+{
+	struct syscall_metadata *start;
+	struct syscall_metadata *stop;
+	char str[KSYM_SYMBOL_LEN];
+
+	start = (struct syscall_metadata *)__start_syscalls_metadata;
+	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
+
+	for ( ; start < stop; start++) {
+		if (start->name && !strcmp(start->name + 3, str + 3))
+			return start;
+	}
+	return NULL;
+}
+
+void arch_init_ftrace_syscalls(void)
+{
+	struct syscall_metadata *meta;
+	int i;
+	static atomic_t refs;
+
+	if (atomic_inc_return(&refs) != 1)
+		goto out;
+	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
+				    GFP_KERNEL);
+	if (!syscalls_metadata)
+		goto out;
+	for (i = 0; i < NR_syscalls; i++) {
+		meta = find_syscall_meta((unsigned long)sys_call_table[i]);
+		syscalls_metadata[i] = meta;
+	}
+	return;
+out:
+	atomic_dec(&refs);
+}
+#endif
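The core of the patching path above is ftrace_modify_code(): read back the bytes currently at the call site, verify they match the expected old instruction, and only then write the new one, with probe_kernel_read()/probe_kernel_write() absorbing faults from code that has disappeared (e.g. an unloaded module). A userspace sketch of the same compare-before-write discipline, in plain C; the names are illustrative and memcpy stands in for the fault-safe probe_kernel_* helpers:

#include <errno.h>
#include <string.h>

/* Userspace analog of ftrace_modify_code(). */
static int modify_code(unsigned char *ip,
		       const void *old_code, size_t old_size,
		       const void *new_code, size_t new_size)
{
	unsigned char replaced[16];

	memcpy(replaced, ip, old_size);		/* read back current bytes */
	if (memcmp(replaced, old_code, old_size) != 0)
		return -EINVAL;			/* site not in expected state */
	memcpy(ip, new_code, new_size);		/* only now patch it */
	return 0;
}

int main(void)
{
	unsigned char site[4] = { 0xa7, 0xf4, 0x00, 0x02 };		 /* brc 15,... */
	static const unsigned char oldc[4] = { 0xa7, 0xf4, 0x00, 0x02 };
	static const unsigned char newc[4] = { 0xa7, 0x04, 0x00, 0x02 }; /* brc 0,... */

	return modify_code(site, oldc, sizeof(oldc), newc, sizeof(newc));
}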
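The graph-caller toggling relies on the brc encoding described in the comment: the first halfword is a7m4, where the nibble m is the condition-code mask, so flipping between mask 0xf (always branch, ftrace_graph_caller skipped, tracer disabled) and mask 0x0 (branch never taken, effectively a nop, tracer enabled) only requires rewriting that one halfword. A quick self-check of the arithmetic in plain C, illustrative only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Build the first halfword of "brc m,target": nibbles a 7 m 4. */
static uint16_t brc_halfword(unsigned mask)
{
	return (uint16_t)(0xa704 | ((mask & 0xf) << 4));
}

int main(void)
{
	assert(brc_halfword(0x0) == 0xa704);	/* never taken: nop, tracer enabled */
	assert(brc_halfword(0xf) == 0xa7f4);	/* always taken: tracer disabled */
	printf("brc encoding ok\n");
	return 0;
}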
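One subtlety in the syscall section: find_syscall_meta() compares start->name + 3 against str + 3, skipping the first three characters of both names. This presumably tolerates differing spellings of the prefix, since kallsyms can report a wrapped syscall symbol as "SyS_..." while the metadata records "sys_...". A small check of that comparison, again in plain C with illustrative names:

#include <stdio.h>
#include <string.h>

/* Match syscall names while ignoring the "sys"/"SyS" prefix spelling,
 * mirroring the start->name + 3 vs. str + 3 comparison above. */
static int syscall_name_match(const char *meta_name, const char *kallsyms_name)
{
	return strcmp(meta_name + 3, kallsyms_name + 3) == 0;
}

int main(void)
{
	printf("%d\n", syscall_name_match("sys_read", "SyS_read"));	/* prints 1 */
	printf("%d\n", syscall_name_match("sys_read", "sys_write"));	/* prints 0 */
	return 0;
}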