author	Steven Rostedt <rostedt@goodmis.org>	2008-10-24 09:12:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-27 16:52:01 +0100
commit	8115f3f0c939c5db0fe3c6c6c58911fd3a205b1e (patch)
tree	576fef732087f789999c1fbab97a09c12ea167e7 /arch/x86
parent	ea31e72d753e5817a97de552f152d0cb55c7defc (diff)
ftrace: use a real variable for ftrace_nop in x86
Impact: avoid section mismatch warning, clean up

Dynamic ftrace determines which nop is safe to use at startup. When it finds a safe nop for patching, it sets a pointer called ftrace_nop to point to that code. All call sites are then patched to this nop.

Later, when tracing is turned on, the ftrace_nop variable is used again to compare against the call site, to make sure it still holds the nop before we update it to an mcount call. If this check fails even once, a warning is printed and ftrace is disabled.

Rakib Mullick noted that the code that sets up the nop is in a .init section, whereas the nop itself is in the .text section. This is needed because the nop is used later on, after boot up. The problem is that the test of the nop jumps back into the setup code and causes a "section mismatch" warning.

Rakib first recommended converting the nop to .init.text, but as stated above, this would fail since that text is used later. The real solution is to extend Rakib's patch and make ftrace_nop an array, saving the code from the assembly into this array. Now the section can stay an init section, and we still have a nop to use later on.

Reported-by: Rakib Mullick <rakib.mullick@gmail.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
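A minimal, self-contained C sketch of the idea behind the patch (this is not the kernel source; pick_nop() and the probe bytes are illustrative stand-ins): copying the detected nop into a static array keeps it usable after the __init probe code is discarded, instead of leaving a pointer aimed back into init text.

#include <string.h>

#define MCOUNT_INSN_SIZE 5

/* Lives in ordinary data, so it remains valid after init memory is freed. */
static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

/* Stand-in for the __init probe that picks whichever nop did not fault. */
static const unsigned char *pick_nop(void)
{
	/* P6-style 5-byte nop: 0f 1f 44 00 00 */
	static const unsigned char p6nop[MCOUNT_INSN_SIZE] =
		{ 0x0f, 0x1f, 0x44, 0x00, 0x00 };
	return p6nop;
}

/* Before the patch, ftrace_nop was a pointer into the probe's (init) text;
 * after the patch, the bytes are copied out of it instead. */
void ftrace_pick_and_save_nop(void)
{
	memcpy(ftrace_nop, pick_nop(), MCOUNT_INSN_SIZE);
}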
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/ftrace.c	16
1 file changed, 5 insertions, 11 deletions
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index b1e5e2244eca..50ea0ac8c9bf 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -21,8 +21,7 @@
#include <asm/nops.h>
-/* Long is fine, even if it is only 4 bytes ;-) */
-static unsigned long *ftrace_nop;
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
union ftrace_code_union {
char code[MCOUNT_INSN_SIZE];
@@ -40,7 +39,7 @@ static int ftrace_calc_offset(long ip, long addr)
unsigned char *ftrace_nop_replace(void)
{
- return (char *)ftrace_nop;
+ return ftrace_nop;
}
unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
@@ -125,9 +124,6 @@ int __init ftrace_dyn_arch_init(void *data)
* TODO: check the cpuid to determine the best nop.
*/
asm volatile (
- "jmp ftrace_test_jmp\n"
- /* This code needs to stay around */
- ".section .text, \"ax\"\n"
"ftrace_test_jmp:"
"jmp ftrace_test_p6nop\n"
"nop\n"
@@ -138,8 +134,6 @@ int __init ftrace_dyn_arch_init(void *data)
"jmp 1f\n"
"ftrace_test_nop5:"
".byte 0x66,0x66,0x66,0x66,0x90\n"
- "jmp 1f\n"
- ".previous\n"
"1:"
".section .fixup, \"ax\"\n"
"2: movl $1, %0\n"
@@ -154,15 +148,15 @@ int __init ftrace_dyn_arch_init(void *data)
switch (faulted) {
case 0:
pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
- ftrace_nop = (unsigned long *)ftrace_test_p6nop;
+ memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
break;
case 1:
pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
- ftrace_nop = (unsigned long *)ftrace_test_nop5;
+ memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
break;
case 2:
pr_info("ftrace: converting mcount calls to jmp . + 5\n");
- ftrace_nop = (unsigned long *)ftrace_test_jmp;
+ memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
break;
}
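For context, a hedged sketch of how the saved array is consumed later on: before a call site is rewritten into an mcount call, its current bytes are compared against ftrace_nop. The corresponding check in the kernel is done when patching call sites in this same file; the helper name below is a simplified stand-in.

#include <string.h>

#define MCOUNT_INSN_SIZE 5

extern unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

/* Return 0 if the MCOUNT_INSN_SIZE bytes at ip still hold the expected nop
 * and are therefore safe to patch; -1 otherwise (the kernel would print a
 * warning and disable ftrace in that case). */
static int nop_still_in_place(const unsigned char *ip)
{
	return memcmp(ip, ftrace_nop, MCOUNT_INSN_SIZE) == 0 ? 0 : -1;
}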