From 68c76ad4a9571a2b603665c85cf8229bcf04982a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 27 Oct 2022 17:59:06 +0200 Subject: arm64: unwind: add asynchronous unwind tables to kernel and modules Enable asynchronous unwind table generation for both the core kernel as well as modules, and emit the resulting .eh_frame sections as init code so we can use the unwind directives for code patching at boot or module load time. This will be used by dynamic shadow call stack support, which will rely on code patching rather than compiler codegen to emit the shadow call stack push and pop instructions. Signed-off-by: Ard Biesheuvel Reviewed-by: Nick Desaulniers Reviewed-by: Sami Tolvanen Tested-by: Sami Tolvanen Link: https://lore.kernel.org/r/20221027155908.1940624-2-ardb@kernel.org Signed-off-by: Will Deacon --- arch/arm64/Makefile | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm64/Makefile') diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 5e56d26a2239..7868a176993f 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -45,8 +45,13 @@ KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) # Avoid generating .eh_frame* sections. +ifneq ($(CONFIG_UNWIND_TABLES),y) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables +else +KBUILD_CFLAGS += -fasynchronous-unwind-tables +KBUILD_AFLAGS += -fasynchronous-unwind-tables +endif ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y) prepare: stack_protector_prepare -- cgit v1.2.3-70-g09d2 From 3b619e22c4601b444ed2d6a5458271f72625ac89 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 27 Oct 2022 17:59:08 +0200 Subject: arm64: implement dynamic shadow call stack for Clang Implement dynamic shadow call stack support on Clang, by parsing the unwind tables at init time to locate all occurrences of PACIASP/AUTIASP instructions, and replacing them with the shadow call stack push and pop instructions, respectively. This is useful because the overhead of the shadow call stack is difficult to justify on hardware that implements pointer authentication (PAC), and given that the PAC instructions are executed as NOPs on hardware that doesn't, we can just replace them without breaking anything. As PACIASP/AUTIASP are guaranteed to be paired with respect to manipulations of the return address, replacing them 1:1 with shadow call stack pushes and pops is guaranteed to result in the desired behavior. Signed-off-by: Ard Biesheuvel Reviewed-by: Sami Tolvanen Tested-by: Sami Tolvanen Link: https://lore.kernel.org/r/20221027155908.1940624-4-ardb@kernel.org Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 9 ++ arch/arm64/Makefile | 10 +- arch/arm64/include/asm/scs.h | 49 ++++++++ arch/arm64/kernel/Makefile | 2 + arch/arm64/kernel/head.S | 3 + arch/arm64/kernel/irq.c | 2 +- arch/arm64/kernel/module.c | 8 ++ arch/arm64/kernel/patch-scs.c | 257 ++++++++++++++++++++++++++++++++++++++++++ arch/arm64/kernel/sdei.c | 2 +- arch/arm64/kernel/setup.c | 4 + 10 files changed, 342 insertions(+), 4 deletions(-) create mode 100644 arch/arm64/kernel/patch-scs.c (limited to 'arch/arm64/Makefile') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7e3a9cf2193d..170832f31eff 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2160,6 +2160,15 @@ config ARCH_NR_GPIO If unsure, leave the default value. 
+config UNWIND_PATCH_PAC_INTO_SCS + bool "Enable shadow call stack dynamically using code patching" + # needs Clang with https://reviews.llvm.org/D111780 incorporated + depends on CC_IS_CLANG && CLANG_VERSION >= 150000 + depends on ARM64_PTR_AUTH_KERNEL && CC_HAS_BRANCH_PROT_PAC_RET + depends on SHADOW_CALL_STACK + select UNWIND_TABLES + select DYNAMIC_SCS + endmenu # "Kernel Features" menu "Boot options" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 7868a176993f..4d272ad1df1f 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -77,10 +77,16 @@ branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address= # We enable additional protection for leaf functions as there is some # narrow potential for ROP protection benefits and no substantial # performance impact has been observed. +PACRET-y := pac-ret+leaf + +# Using a shadow call stack in leaf functions is too costly, so avoid PAC there +# as well when we may be patching PAC into SCS +PACRET-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) := pac-ret + ifeq ($(CONFIG_ARM64_BTI_KERNEL),y) -branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=pac-ret+leaf+bti +branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=$(PACRET-y)+bti else -branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf +branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=$(PACRET-y) endif # -march=armv8.3-a enables the non-nops instructions for PAC, to avoid the # compiler to generate them and consequently to break the single image contract diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h index 8297bccf0784..ff7da1268a52 100644 --- a/arch/arm64/include/asm/scs.h +++ b/arch/arm64/include/asm/scs.h @@ -5,6 +5,7 @@ #ifdef __ASSEMBLY__ #include +#include #ifdef CONFIG_SHADOW_CALL_STACK scs_sp .req x18 @@ -24,6 +25,54 @@ .endm #endif /* CONFIG_SHADOW_CALL_STACK */ + +#else + +#include +#include + +#ifdef CONFIG_UNWIND_PATCH_PAC_INTO_SCS +static inline bool should_patch_pac_into_scs(void) +{ + u64 reg; + + /* + * We only enable the shadow call stack dynamically if we are running + * on a system that does not implement PAC or BTI. PAC and SCS provide + * roughly the same level of protection, and BTI relies on the PACIASP + * instructions serving as landing pads, preventing us from patching + * those instructions into something else. 
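+ * (On BTI hardware, PACIASP at function entry also acts as the BTI landing pad; replacing it with the SCS push instruction would cause indirect calls into that function to raise a Branch Target exception, so such systems are left alone.)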
+ */ + reg = read_sysreg_s(SYS_ID_AA64ISAR1_EL1); + if (SYS_FIELD_GET(ID_AA64ISAR1_EL1, APA, reg) | + SYS_FIELD_GET(ID_AA64ISAR1_EL1, API, reg)) + return false; + + reg = read_sysreg_s(SYS_ID_AA64ISAR2_EL1); + if (SYS_FIELD_GET(ID_AA64ISAR2_EL1, APA3, reg)) + return false; + + if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) { + reg = read_sysreg_s(SYS_ID_AA64PFR1_EL1); + if (reg & (0xf << ID_AA64PFR1_EL1_BT_SHIFT)) + return false; + } + return true; +} + +static inline void dynamic_scs_init(void) +{ + if (should_patch_pac_into_scs()) { + pr_info("Enabling dynamic shadow call stack\n"); + static_branch_enable(&dynamic_scs_enabled); + } +} +#else +static inline void dynamic_scs_init(void) {} +#endif + +int scs_patch(const u8 eh_frame[], int size); + #endif /* __ASSEMBLY __ */ #endif /* _ASM_SCS_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 2f361a883d8c..8dd925f4a4c6 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -80,6 +80,8 @@ obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o obj-$(CONFIG_ARM64_MTE) += mte.o obj-y += vdso-wrap.o obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o +obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o +CFLAGS_patch-scs.o += -mbranch-protection=none # Force dependency (vdso*-wrap.S includes vdso.so through incbin) $(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 2196aad7b55b..952e17bd1c0b 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -462,6 +462,9 @@ SYM_FUNC_START_LOCAL(__primary_switched) bl early_fdt_map // Try mapping the FDT early mov x0, x20 // pass the full boot status bl init_feature_override // Parse cpu feature overrides +#ifdef CONFIG_UNWIND_PATCH_PAC_INTO_SCS + bl scs_patch_vmlinux +#endif mov x0, x20 bl finalise_el2 // Prefer VHE if possible ldp x29, x30, [sp], #16 diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 38dbd3828f13..9d8eaab742a4 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -41,7 +41,7 @@ static void init_irq_scs(void) { int cpu; - if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) + if (!scs_is_enabled()) return; for_each_possible_cpu(cpu) diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 76b41e4ca9fa..fa7b3228944b 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -15,9 +15,11 @@ #include #include #include +#include #include #include #include +#include #include void *module_alloc(unsigned long size) @@ -514,5 +516,11 @@ int module_finalize(const Elf_Ehdr *hdr, if (s) apply_alternatives_module((void *)s->sh_addr, s->sh_size); + if (scs_is_dynamic()) { + s = find_section(hdr, sechdrs, ".init.eh_frame"); + if (s) + scs_patch((void *)s->sh_addr, s->sh_size); + } + return module_init_ftrace_plt(hdr, sechdrs, me); } diff --git a/arch/arm64/kernel/patch-scs.c b/arch/arm64/kernel/patch-scs.c new file mode 100644 index 000000000000..1b3da02d5b74 --- /dev/null +++ b/arch/arm64/kernel/patch-scs.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 - Google LLC + * Author: Ard Biesheuvel + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +// +// This minimal DWARF CFI parser is partially based on the code in +// arch/arc/kernel/unwind.c, and on the document below: +// https://refspecs.linuxbase.org/LSB_4.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html +// + +#define DW_CFA_nop 0x00 +#define DW_CFA_set_loc 0x01 +#define DW_CFA_advance_loc1 0x02 
+#define DW_CFA_advance_loc2 0x03 +#define DW_CFA_advance_loc4 0x04 +#define DW_CFA_offset_extended 0x05 +#define DW_CFA_restore_extended 0x06 +#define DW_CFA_undefined 0x07 +#define DW_CFA_same_value 0x08 +#define DW_CFA_register 0x09 +#define DW_CFA_remember_state 0x0a +#define DW_CFA_restore_state 0x0b +#define DW_CFA_def_cfa 0x0c +#define DW_CFA_def_cfa_register 0x0d +#define DW_CFA_def_cfa_offset 0x0e +#define DW_CFA_def_cfa_expression 0x0f +#define DW_CFA_expression 0x10 +#define DW_CFA_offset_extended_sf 0x11 +#define DW_CFA_def_cfa_sf 0x12 +#define DW_CFA_def_cfa_offset_sf 0x13 +#define DW_CFA_val_offset 0x14 +#define DW_CFA_val_offset_sf 0x15 +#define DW_CFA_val_expression 0x16 +#define DW_CFA_lo_user 0x1c +#define DW_CFA_negate_ra_state 0x2d +#define DW_CFA_GNU_args_size 0x2e +#define DW_CFA_GNU_negative_offset_extended 0x2f +#define DW_CFA_hi_user 0x3f + +extern const u8 __eh_frame_start[], __eh_frame_end[]; + +enum { + PACIASP = 0xd503233f, + AUTIASP = 0xd50323bf, + SCS_PUSH = 0xf800865e, + SCS_POP = 0xf85f8e5e, +}; + +static void __always_inline scs_patch_loc(u64 loc) +{ + u32 insn = le32_to_cpup((void *)loc); + + switch (insn) { + case PACIASP: + *(u32 *)loc = cpu_to_le32(SCS_PUSH); + break; + case AUTIASP: + *(u32 *)loc = cpu_to_le32(SCS_POP); + break; + default: + /* + * While the DW_CFA_negate_ra_state directive is guaranteed to + * appear right after a PACIASP/AUTIASP instruction, it may + * also appear after a DW_CFA_restore_state directive that + * restores a state that is only partially accurate, and is + * followed by DW_CFA_negate_ra_state directive to toggle the + * PAC bit again. So we permit other instructions here, and ignore + * them. + */ + return; + } + dcache_clean_pou(loc, loc + sizeof(u32)); +} + +/* + * Skip one uleb128/sleb128 encoded quantity from the opcode stream. All bytes + * except the last one have bit #7 set. + */ +static int __always_inline skip_xleb128(const u8 **opcode, int size) +{ + u8 c; + + do { + c = *(*opcode)++; + size--; + } while (c & BIT(7)); + + return size; +} + +struct eh_frame { + /* + * The size of this frame if 0 < size < U32_MAX, 0 terminates the list. + */ + u32 size; + + /* + * The first frame is a Common Information Entry (CIE) frame, followed + * by one or more Frame Description Entry (FDE) frames. In the former + * case, this field is 0, otherwise it is the negated offset relative + * to the associated CIE frame. + */ + u32 cie_id_or_pointer; + + union { + struct { // CIE + u8 version; + u8 augmentation_string[]; + }; + + struct { // FDE + s32 initial_loc; + s32 range; + u8 opcodes[]; + }; + }; +}; + +static int noinstr scs_handle_fde_frame(const struct eh_frame *frame, + bool fde_has_augmentation_data, + int code_alignment_factor) +{ + int size = frame->size - offsetof(struct eh_frame, opcodes) + 4; + u64 loc = (u64)offset_to_ptr(&frame->initial_loc); + const u8 *opcode = frame->opcodes; + + if (fde_has_augmentation_data) { + int l; + + // assume single byte uleb128_t + if (WARN_ON(*opcode & BIT(7))) + return -ENOEXEC; + + l = *opcode++; + opcode += l; + size -= l + 1; + } + + /* + * Starting from 'loc', apply the CFA opcodes that advance the location + * pointer, and identify the locations of the PAC instructions. 
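+ * A DW_CFA_negate_ra_state opcode records that the return address signing state toggled at the current location, i.e. the instruction at loc - 4 is the PACIASP or AUTIASP we want to patch; all other opcodes are decoded only far enough to keep 'loc' and the opcode stream position in sync.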
+ */ + while (size-- > 0) { + switch (*opcode++) { + case DW_CFA_nop: + case DW_CFA_remember_state: + case DW_CFA_restore_state: + break; + + case DW_CFA_advance_loc1: + loc += *opcode++ * code_alignment_factor; + size--; + break; + + case DW_CFA_advance_loc2: + loc += *opcode++ * code_alignment_factor; + loc += (*opcode++ << 8) * code_alignment_factor; + size -= 2; + break; + + case DW_CFA_def_cfa: + case DW_CFA_offset_extended: + size = skip_xleb128(&opcode, size); + fallthrough; + case DW_CFA_def_cfa_offset: + case DW_CFA_def_cfa_offset_sf: + case DW_CFA_def_cfa_register: + case DW_CFA_same_value: + case DW_CFA_restore_extended: + case 0x80 ... 0xbf: + size = skip_xleb128(&opcode, size); + break; + + case DW_CFA_negate_ra_state: + scs_patch_loc(loc - 4); + break; + + case 0x40 ... 0x7f: + // advance loc + loc += (opcode[-1] & 0x3f) * code_alignment_factor; + break; + + case 0xc0 ... 0xff: + break; + + default: + pr_err("unhandled opcode: %02x in FDE frame %lx\n", opcode[-1], (uintptr_t)frame); + return -ENOEXEC; + } + } + return 0; +} + +int noinstr scs_patch(const u8 eh_frame[], int size) +{ + const u8 *p = eh_frame; + + while (size > 4) { + const struct eh_frame *frame = (const void *)p; + bool fde_has_augmentation_data = true; + int code_alignment_factor = 1; + int ret; + + if (frame->size == 0 || + frame->size == U32_MAX || + frame->size > size) + break; + + if (frame->cie_id_or_pointer == 0) { + const u8 *p = frame->augmentation_string; + + /* a 'z' in the augmentation string must come first */ + fde_has_augmentation_data = *p == 'z'; + + /* + * The code alignment factor is a uleb128 encoded field + * but given that the only sensible values are 1 or 4, + * there is no point in decoding the whole thing. + */ + p += strlen(p) + 1; + if (!WARN_ON(*p & BIT(7))) + code_alignment_factor = *p; + } else { + ret = scs_handle_fde_frame(frame, + fde_has_augmentation_data, + code_alignment_factor); + if (ret) + return ret; + } + + p += sizeof(frame->size) + frame->size; + size -= sizeof(frame->size) + frame->size; + } + return 0; +} + +asmlinkage void __init scs_patch_vmlinux(void) +{ + if (!should_patch_pac_into_scs()) + return; + + WARN_ON(scs_patch(__eh_frame_start, __eh_frame_end - __eh_frame_start)); + icache_inval_all_pou(); + isb(); +} diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index d56e170e1ca7..830be01af32d 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -144,7 +144,7 @@ static int init_sdei_scs(void) int cpu; int err = 0; - if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) + if (!scs_is_enabled()) return 0; for_each_possible_cpu(cpu) { diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index fea3223704b6..12cfe9d0d3fa 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -42,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -312,6 +314,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) jump_label_init(); parse_early_param(); + dynamic_scs_init(); + /* * Unmask asynchronous aborts and fiq after bringing up possible * earlycon. 
(Report possible System Errors once we can report this -- cgit v1.2.3-70-g09d2 From 26299b3f6ba26bfc234b73126d14bdf4dec5275a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 3 Nov 2022 17:05:20 +0000 Subject: ftrace: arm64: move from REGS to ARGS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit replaces arm64's support for FTRACE_WITH_REGS with support for FTRACE_WITH_ARGS. This removes some overhead and complexity, and removes some latent issues with inconsistent presentation of struct pt_regs (which can only be reliably saved/restored at exception boundaries). FTRACE_WITH_REGS has been supported on arm64 since commit: 3b23e4991fb66f6d ("arm64: implement ftrace with regs") As noted in the commit message, the major reasons for implementing FTRACE_WITH_REGS were: (1) To make it possible to use the ftrace graph tracer with pointer authentication, where it's necessary to snapshot/manipulate the LR before it is signed by the instrumented function. (2) To make it possible to implement LIVEPATCH in future, where we need to hook function entry before an instrumented function manipulates the stack or argument registers. Practically speaking, we need to preserve the argument/return registers, PC, LR, and SP. Neither of these needs a struct pt_regs, and both only require the set of registers which are live at function call/return boundaries. Our calling convention is defined by "Procedure Call Standard for the Arm® 64-bit Architecture (AArch64)" (AKA "AAPCS64"), which can currently be found at: https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst Per AAPCS64, all function call argument and return values are held in the following GPRs: * X0 - X7 : parameter / result registers * X8 : indirect result location register * SP : stack pointer (AKA SP) Additionally, at function call boundaries, the following GPRs hold context/return information: * X29 : frame pointer (AKA FP) * X30 : link register (AKA LR) ... and for ftrace we need to capture the instrumented address: * PC : program counter No other GPRs are relevant, as none of the other registers hold parameters or return values: * X9 - X17 : temporaries, may be clobbered * X18 : shadow call stack pointer (or temporary) * X19 - X28 : callee saved This patch implements FTRACE_WITH_ARGS for arm64, only saving/restoring the minimal set of registers necessary. This is always sufficient to manipulate control flow (e.g. for live-patching) or to manipulate function arguments and return values. This reduces the necessary stack usage from 336 bytes for pt_regs down to 112 bytes for ftrace_regs + 32 bytes for two frame records, freeing up 188 bytes. This could be reduced further with changes to the unwinder. As there is no longer a need to save different sets of registers for different features, we no longer need distinct `ftrace_caller` and `ftrace_regs_caller` trampolines. This allows the trampoline assembly to be simpler, and simplifies code which previously had to handle the two trampolines. I've tested this with the ftrace selftests, where there are no unexpected failures. 
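As an aside, a minimal sketch of how a tracer consumes the new interface; the my_trace_func/my_ops names are made up for illustration, and only the ftrace_regs accessors introduced above plus register_ftrace_function() are assumed:

  #include <linux/ftrace.h>
  #include <linux/printk.h>

  /* Illustrative only: read the traced function's first argument. */
  static void my_trace_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct ftrace_regs *fregs)
  {
          /* x0 of the instrumented function, as saved by ftrace_caller */
          unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);

          pr_debug("%ps called from %ps, arg0=0x%lx\n",
                   (void *)ip, (void *)parent_ip, arg0);
  }

  static struct ftrace_ops my_ops = {
          .func = my_trace_func,
  };

  /* register_ftrace_function(&my_ops) would enable the callback. */

Such a callback only ever sees the argument/return registers, FP, LR, SP and PC listed above, which is exactly the set the single ftrace_caller trampoline now saves.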
Co-developed-by: Florent Revest Signed-off-by: Mark Rutland Signed-off-by: Florent Revest Cc: Catalin Marinas Cc: Masami Hiramatsu Cc: Steven Rostedt Cc: Will Deacon Reviewed-by: Masami Hiramatsu (Google) Reviewed-by: Steven Rostedt (Google) Link: https://lore.kernel.org/r/20221103170520.931305-5-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 18 +++--- arch/arm64/Makefile | 2 +- arch/arm64/include/asm/ftrace.h | 72 ++++++++++++++++++++++-- arch/arm64/kernel/asm-offsets.c | 13 +++++ arch/arm64/kernel/entry-ftrace.S | 117 +++++++++++++++------------------------ arch/arm64/kernel/ftrace.c | 82 +++++++++++++++------------ arch/arm64/kernel/module.c | 3 - 7 files changed, 184 insertions(+), 123 deletions(-) (limited to 'arch/arm64/Makefile') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 505c8a1ccbe0..b6b3305ba701 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -181,8 +181,10 @@ config ARM64 select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_ARGS \ + if $(cc-option,-fpatchable-function-entry=2) select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \ - if DYNAMIC_FTRACE_WITH_REGS + if DYNAMIC_FTRACE_WITH_ARGS select HAVE_EFFICIENT_UNALIGNED_ACCESS select HAVE_FAST_GUP select HAVE_FTRACE_MCOUNT_RECORD @@ -233,16 +235,16 @@ config ARM64 help ARM 64-bit (AArch64) Linux support. -config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS +config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS def_bool CC_IS_CLANG # https://github.com/ClangBuiltLinux/linux/issues/1507 depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600)) - select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_DYNAMIC_FTRACE_WITH_ARGS -config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS +config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS def_bool CC_IS_GCC depends on $(cc-option,-fpatchable-function-entry=2) - select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_DYNAMIC_FTRACE_WITH_ARGS config 64BIT def_bool y @@ -1816,7 +1818,7 @@ config ARM64_PTR_AUTH_KERNEL # which is only understood by binutils starting with version 2.33.1. depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100) depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE - depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) + depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_ARGS) help If the compiler supports the -mbranch-protection or -msign-return-address flag (e.g. GCC 7 or later), then this option @@ -1826,7 +1828,7 @@ config ARM64_PTR_AUTH_KERNEL disabled with minimal loss of protection. This feature works with FUNCTION_GRAPH_TRACER option only if - DYNAMIC_FTRACE_WITH_REGS is enabled. + DYNAMIC_FTRACE_WITH_ARGS is enabled. config CC_HAS_BRANCH_PROT_PAC_RET # GCC 9 or later, clang 8 or later @@ -1924,7 +1926,7 @@ config ARM64_BTI_KERNEL depends on !CC_IS_GCC # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9 depends on !CC_IS_CLANG || CLANG_VERSION >= 120000 - depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) + depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_ARGS) help Build the kernel with Branch Target Identification annotations and enable enforcement of this for kernel code. 
When this option diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 5e56d26a2239..b1202fa84bab 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -128,7 +128,7 @@ endif CHECKFLAGS += -D__aarch64__ -ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y) +ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y) KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY CC_FLAGS_FTRACE := -fpatchable-function-entry=2 endif diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index 329dbbd4d50b..5664729800ae 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -23,7 +23,7 @@ */ #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS #define ARCH_SUPPORTS_FTRACE_OPS 1 #else #define MCOUNT_ADDR ((unsigned long)_mcount) @@ -33,8 +33,7 @@ #define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE #define FTRACE_PLT_IDX 0 -#define FTRACE_REGS_PLT_IDX 1 -#define NR_FTRACE_PLTS 2 +#define NR_FTRACE_PLTS 1 /* * Currently, gcc tends to save the link register after the local variables @@ -69,7 +68,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) * Adjust addr to point at the BL in the callsite. * See ftrace_init_nop() for the callsite sequence. */ - if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS)) return addr + AARCH64_INSN_SIZE; /* * addr is the address of the mcount call instruction. @@ -78,10 +77,71 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) return addr; } -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS struct dyn_ftrace; struct ftrace_ops; -struct ftrace_regs; + +#define arch_ftrace_get_regs(regs) NULL + +struct ftrace_regs { + /* x0 - x8 */ + unsigned long regs[9]; + unsigned long __unused; + + unsigned long fp; + unsigned long lr; + + unsigned long sp; + unsigned long pc; +}; + +static __always_inline unsigned long +ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs) +{ + return fregs->pc; +} + +static __always_inline void +ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, + unsigned long pc) +{ + fregs->pc = pc; +} + +static __always_inline unsigned long +ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs) +{ + return fregs->sp; +} + +static __always_inline unsigned long +ftrace_regs_get_argument(struct ftrace_regs *fregs, unsigned int n) +{ + if (n < 8) + return fregs->regs[n]; + return 0; +} + +static __always_inline unsigned long +ftrace_regs_get_return_value(const struct ftrace_regs *fregs) +{ + return fregs->regs[0]; +} + +static __always_inline void +ftrace_regs_set_return_value(struct ftrace_regs *fregs, + unsigned long ret) +{ + fregs->regs[0] = ret; +} + +static __always_inline void +ftrace_override_function_with_return(struct ftrace_regs *fregs) +{ + fregs->pc = fregs->lr; +} + +int ftrace_regs_query_register_offset(const char *name); int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); #define ftrace_init_nop ftrace_init_nop diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 1197e7679882..2234624536d9 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -82,6 +82,19 @@ int main(void) DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe)); DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); BLANK(); +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS + DEFINE(FREGS_X0, offsetof(struct ftrace_regs, regs[0])); + DEFINE(FREGS_X2, offsetof(struct 
ftrace_regs, regs[2])); + DEFINE(FREGS_X4, offsetof(struct ftrace_regs, regs[4])); + DEFINE(FREGS_X6, offsetof(struct ftrace_regs, regs[6])); + DEFINE(FREGS_X8, offsetof(struct ftrace_regs, regs[8])); + DEFINE(FREGS_FP, offsetof(struct ftrace_regs, fp)); + DEFINE(FREGS_LR, offsetof(struct ftrace_regs, lr)); + DEFINE(FREGS_SP, offsetof(struct ftrace_regs, sp)); + DEFINE(FREGS_PC, offsetof(struct ftrace_regs, pc)); + DEFINE(FREGS_SIZE, sizeof(struct ftrace_regs)); + BLANK(); +#endif #ifdef CONFIG_COMPAT DEFINE(COMPAT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0)); DEFINE(COMPAT_RT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_rt_sigframe, sig.uc.uc_mcontext.arm_r0)); diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 795344ab4ec4..4d3050549aa6 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -13,83 +13,58 @@ #include #include -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS /* * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before * the regular function prologue. For an enabled callsite, ftrace_init_nop() and * ftrace_make_call() have patched those NOPs to: * * MOV X9, LR - * BL - * - * ... where is either ftrace_caller or ftrace_regs_caller. + * BL ftrace_caller * * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to * clobber. * - * We save the callsite's context into a pt_regs before invoking any ftrace - * callbacks. So that we can get a sensible backtrace, we create a stack record - * for the callsite and the ftrace entry assembly. This is not sufficient for - * reliable stacktrace: until we create the callsite stack record, its caller - * is missing from the LR and existing chain of frame records. + * We save the callsite's context into a struct ftrace_regs before invoking any + * ftrace callbacks. So that we can get a sensible backtrace, we create frame + * records for the callsite and the ftrace entry assembly. This is not + * sufficient for reliable stacktrace: until we create the callsite stack + * record, its caller is missing from the LR and existing chain of frame + * records. */ - .macro ftrace_regs_entry, allregs=0 - /* Make room for pt_regs, plus a callee frame */ - sub sp, sp, #(PT_REGS_SIZE + 16) - - /* Save function arguments (and x9 for simplicity) */ - stp x0, x1, [sp, #S_X0] - stp x2, x3, [sp, #S_X2] - stp x4, x5, [sp, #S_X4] - stp x6, x7, [sp, #S_X6] - stp x8, x9, [sp, #S_X8] - - /* Optionally save the callee-saved registers, always save the FP */ - .if \allregs == 1 - stp x10, x11, [sp, #S_X10] - stp x12, x13, [sp, #S_X12] - stp x14, x15, [sp, #S_X14] - stp x16, x17, [sp, #S_X16] - stp x18, x19, [sp, #S_X18] - stp x20, x21, [sp, #S_X20] - stp x22, x23, [sp, #S_X22] - stp x24, x25, [sp, #S_X24] - stp x26, x27, [sp, #S_X26] - stp x28, x29, [sp, #S_X28] - .else - str x29, [sp, #S_FP] - .endif - - /* Save the callsite's SP and LR */ - add x10, sp, #(PT_REGS_SIZE + 16) - stp x9, x10, [sp, #S_LR] +SYM_CODE_START(ftrace_caller) + bti c - /* Save the PC after the ftrace callsite */ - str x30, [sp, #S_PC] + /* Save original SP */ + mov x10, sp - /* Create a frame record for the callsite above pt_regs */ - stp x29, x9, [sp, #PT_REGS_SIZE] - add x29, sp, #PT_REGS_SIZE + /* Make room for ftrace regs, plus two frame records */ + sub sp, sp, #(FREGS_SIZE + 32) - /* Create our frame record within pt_regs. 
*/ - stp x29, x30, [sp, #S_STACKFRAME] - add x29, sp, #S_STACKFRAME - .endm + /* Save function arguments */ + stp x0, x1, [sp, #FREGS_X0] + stp x2, x3, [sp, #FREGS_X2] + stp x4, x5, [sp, #FREGS_X4] + stp x6, x7, [sp, #FREGS_X6] + str x8, [sp, #FREGS_X8] -SYM_CODE_START(ftrace_regs_caller) - bti c - ftrace_regs_entry 1 - b ftrace_common -SYM_CODE_END(ftrace_regs_caller) + /* Save the callsite's FP, LR, SP */ + str x29, [sp, #FREGS_FP] + str x9, [sp, #FREGS_LR] + str x10, [sp, #FREGS_SP] -SYM_CODE_START(ftrace_caller) - bti c - ftrace_regs_entry 0 - b ftrace_common -SYM_CODE_END(ftrace_caller) + /* Save the PC after the ftrace callsite */ + str x30, [sp, #FREGS_PC] + + /* Create a frame record for the callsite above the ftrace regs */ + stp x29, x9, [sp, #FREGS_SIZE + 16] + add x29, sp, #FREGS_SIZE + 16 + + /* Create our frame record above the ftrace regs */ + stp x29, x30, [sp, #FREGS_SIZE] + add x29, sp, #FREGS_SIZE -SYM_CODE_START(ftrace_common) sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn) mov x1, x9 // parent_ip (callsite's LR) ldr_l x2, function_trace_op // op @@ -104,24 +79,24 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) * to restore x0-x8, x29, and x30. */ /* Restore function arguments */ - ldp x0, x1, [sp] - ldp x2, x3, [sp, #S_X2] - ldp x4, x5, [sp, #S_X4] - ldp x6, x7, [sp, #S_X6] - ldr x8, [sp, #S_X8] + ldp x0, x1, [sp, #FREGS_X0] + ldp x2, x3, [sp, #FREGS_X2] + ldp x4, x5, [sp, #FREGS_X4] + ldp x6, x7, [sp, #FREGS_X6] + ldr x8, [sp, #FREGS_X8] /* Restore the callsite's FP, LR, PC */ - ldr x29, [sp, #S_FP] - ldr x30, [sp, #S_LR] - ldr x9, [sp, #S_PC] + ldr x29, [sp, #FREGS_FP] + ldr x30, [sp, #FREGS_LR] + ldr x9, [sp, #FREGS_PC] /* Restore the callsite's SP */ - add sp, sp, #PT_REGS_SIZE + 16 + add sp, sp, #FREGS_SIZE + 32 ret x9 -SYM_CODE_END(ftrace_common) +SYM_CODE_END(ftrace_caller) -#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ +#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ /* * Gcc with -pg will put the following code in the beginning of each function: @@ -293,7 +268,7 @@ SYM_FUNC_START(ftrace_graph_caller) mcount_exit SYM_FUNC_END(ftrace_graph_caller) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ SYM_TYPED_FUNC_START(ftrace_stub) ret diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 8745175f4a75..5cf990d052ba 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -17,6 +17,49 @@ #include #include +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS +struct fregs_offset { + const char *name; + int offset; +}; + +#define FREGS_OFFSET(n, field) \ +{ \ + .name = n, \ + .offset = offsetof(struct ftrace_regs, field), \ +} + +static const struct fregs_offset fregs_offsets[] = { + FREGS_OFFSET("x0", regs[0]), + FREGS_OFFSET("x1", regs[1]), + FREGS_OFFSET("x2", regs[2]), + FREGS_OFFSET("x3", regs[3]), + FREGS_OFFSET("x4", regs[4]), + FREGS_OFFSET("x5", regs[5]), + FREGS_OFFSET("x6", regs[6]), + FREGS_OFFSET("x7", regs[7]), + FREGS_OFFSET("x8", regs[8]), + + FREGS_OFFSET("x29", fp), + FREGS_OFFSET("x30", lr), + FREGS_OFFSET("lr", lr), + + FREGS_OFFSET("sp", sp), + FREGS_OFFSET("pc", pc), +}; + +int ftrace_regs_query_register_offset(const char *name) +{ + for (int i = 0; i < ARRAY_SIZE(fregs_offsets); i++) { + const struct fregs_offset *roff = &fregs_offsets[i]; + if (!strcmp(roff->name, name)) + return roff->offset; + } + + return -EINVAL; +} +#endif + #ifdef CONFIG_DYNAMIC_FTRACE /* * Replace a single instruction, which may be a branch or NOP. 
@@ -70,9 +113,6 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr) if (addr == FTRACE_ADDR) return &plt[FTRACE_PLT_IDX]; - if (addr == FTRACE_REGS_ADDR && - IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) - return &plt[FTRACE_REGS_PLT_IDX]; #endif return NULL; } @@ -154,25 +194,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return ftrace_modify_code(pc, old, new, true); } -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS -int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, - unsigned long addr) -{ - unsigned long pc = rec->ip; - u32 old, new; - - if (!ftrace_find_callable_addr(rec, NULL, &old_addr)) - return -EINVAL; - if (!ftrace_find_callable_addr(rec, NULL, &addr)) - return -EINVAL; - - old = aarch64_insn_gen_branch_imm(pc, old_addr, - AARCH64_INSN_BRANCH_LINK); - new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); - - return ftrace_modify_code(pc, old, new, true); -} - +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS /* * The compiler has inserted two NOPs before the regular function prologue. * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live, @@ -228,7 +250,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, * * Note: 'mod' is only set at module load time. */ - if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) { return aarch64_insn_patch_text_nosync((void *)pc, new); } @@ -279,19 +301,11 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, #ifdef CONFIG_DYNAMIC_FTRACE -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs) { - /* - * When DYNAMIC_FTRACE_WITH_REGS is selected, `fregs` can never be NULL - * and arch_ftrace_get_regs(fregs) will always give a non-NULL pt_regs - * in which we can safely modify the LR. - */ - struct pt_regs *regs = arch_ftrace_get_regs(fregs); - unsigned long *parent = (unsigned long *)&procedure_link_pointer(regs); - - prepare_ftrace_return(ip, parent, frame_pointer(regs)); + prepare_ftrace_return(ip, &fregs->lr, fregs->fp); } #else /* @@ -323,6 +337,6 @@ int ftrace_disable_ftrace_graph_caller(void) { return ftrace_modify_graph_caller(false); } -#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 76b41e4ca9fa..acd0d883e9ca 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -497,9 +497,6 @@ static int module_init_ftrace_plt(const Elf_Ehdr *hdr, __init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR); - if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) - __init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR); - mod->arch.ftrace_trampolines = plts; #endif return 0; -- cgit v1.2.3-70-g09d2