-rw-r--r--   Documentation/arm64/booting.rst            |  2
-rw-r--r--   arch/arm64/Kconfig                         |  1
-rw-r--r--   arch/arm64/include/asm/assembler.h         | 25
-rw-r--r--   arch/arm64/include/asm/kernel-pgtable.h    | 11
-rw-r--r--   arch/arm64/include/asm/lse.h               |  1
-rw-r--r--   arch/arm64/include/asm/mmu_context.h       | 10
-rw-r--r--   arch/arm64/include/asm/pgtable.h           |  1
-rw-r--r--   arch/arm64/include/asm/processor.h         |  4
-rw-r--r--   arch/arm64/include/uapi/asm/sigcontext.h   |  4
-rw-r--r--   arch/arm64/kernel/alternative.c            |  6
-rw-r--r--   arch/arm64/kernel/entry-common.c           | 16
-rw-r--r--   arch/arm64/kernel/paravirt.c               |  4
-rw-r--r--   arch/arm64/mm/mmu.c                        |  2
-rw-r--r--   arch/arm64/mm/proc.S                       |  4
14 files changed, 32 insertions(+), 59 deletions(-)
diff --git a/Documentation/arm64/booting.rst b/Documentation/arm64/booting.rst
index 5a764fabfea8..96fe10ec6c24 100644
--- a/Documentation/arm64/booting.rst
+++ b/Documentation/arm64/booting.rst
@@ -349,7 +349,7 @@ Before jumping into the kernel, the following conditions must be met:
 
   - HWFGWTR_EL2.nSMPRI_EL1 (bit 54) must be initialised to 0b01.
 
-  For CPUs with the Scalable Matrix Extension FA64 feature (FEAT_SME_FA64)
+  For CPUs with the Scalable Matrix Extension FA64 feature (FEAT_SME_FA64):
 
   - If EL3 is present:
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 957ec4d02428..c6d15dd6cae3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1737,7 +1737,6 @@ config ARM64_LSE_ATOMICS
 
 config ARM64_USE_LSE_ATOMICS
 	bool "Atomic instructions"
-	depends on JUMP_LABEL
 	default y
 	help
 	  As part of the Large System Extensions, ARMv8.1 introduces new
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 89038067ef34..376a980f2bad 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -34,11 +34,6 @@
 	wx\n	.req	w\n
 	.endr
 
-	.macro save_and_disable_daif, flags
-	mrs	\flags, daif
-	msr	daifset, #0xf
-	.endm
-
 	.macro disable_daif
 	msr	daifset, #0xf
 	.endm
@@ -47,15 +42,6 @@
 	msr	daifclr, #0xf
 	.endm
 
-	.macro restore_daif, flags:req
-	msr	daif, \flags
-	.endm
-
-	/* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */
-	.macro enable_da
-	msr	daifclr, #(8 | 4)
-	.endm
-
 	/*
 	 * Save/restore interrupts.
 	 */
@@ -620,17 +606,6 @@ alternative_endif
 	.endm
 
 /*
- * Perform the reverse of offset_ttbr1.
- * bic is used as it can cover the immediate value and, in future, won't need
- * to be nop'ed out when dealing with 52-bit kernel VAs.
- */
-	.macro restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_VA_BITS_52
-	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
-#endif
-	.endm
-
-/*
  * Arrange a physical address in a TTBR register, taking care of 52-bit
  * addresses.
  *
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 32d14f481f0c..fcd14197756f 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -18,11 +18,6 @@
  * with 4K (section size = 2M) but not with 16K (section size = 32M) or
  * 64K (section size = 512M).
  */
-#ifdef CONFIG_ARM64_4K_PAGES
-#define ARM64_KERNEL_USES_PMD_MAPS 1
-#else
-#define ARM64_KERNEL_USES_PMD_MAPS 0
-#endif
 
 /*
  * The idmap and swapper page tables need some space reserved in the kernel
@@ -34,7 +29,7 @@
  * VA range, so pages required to map highest possible PA are reserved in all
  * cases.
  */
-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
 #else
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
@@ -96,7 +91,7 @@
 #define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
 
 /* Initial memory map size */
-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
 #define SWAPPER_BLOCK_SIZE	PMD_SIZE
 #define SWAPPER_TABLE_SHIFT	PUD_SHIFT
@@ -112,7 +107,7 @@
 #define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_RW_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
 #define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
 #else
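Editor's note: the kernel-pgtable.h hunks above drop the ARM64_KERNEL_USES_PMD_MAPS indirection in favour of testing CONFIG_ARM64_4K_PAGES directly; the mmu.c hunk further down does the same from C via IS_ENABLED(). A minimal sketch of that idiom, with a made-up helper name:

	/*
	 * IS_ENABLED(CONFIG_FOO) expands to 1 when CONFIG_FOO is y or m and
	 * to 0 otherwise (defined in <linux/kconfig.h>, which the kernel
	 * build pulls in automatically), so config checks can live in
	 * ordinary C conditions and the dead branch is still type-checked
	 * before being discarded.
	 */
	static bool kernel_uses_pmd_maps(void)	/* illustrative name only */
	{
		return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
	}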
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index c503db8e73b0..f99d74826a7e 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -10,7 +10,6 @@
 
 #include <linux/compiler_types.h>
 #include <linux/export.h>
-#include <linux/jump_label.h>
 #include <linux/stringify.h>
 #include <asm/alternative.h>
 #include <asm/alternative-macros.h>
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index d3f8b5df0c1f..72dbd6400549 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
+#include <asm/daifflags.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
@@ -152,6 +153,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
 	ttbr_replace_func *replace_phys;
+	unsigned long daif;
 
 	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
 	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
@@ -171,7 +173,15 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
 	__cpu_install_idmap(idmap);
+
+	/*
+	 * We really don't want to take *any* exceptions while TTBR1 is
+	 * in the process of being replaced so mask everything.
+	 */
+	daif = local_daif_save();
 	replace_phys(ttbr1);
+	local_daif_restore(daif);
+
 	cpu_uninstall_idmap();
 }
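Editor's note: with save_and_disable_daif/restore_daif removed from assembler.h (and from idmap_cpu_replace_ttbr1 in the proc.S hunk at the end), the masking moves into the C caller above. A minimal sketch of the save/mask/restore pattern, assuming only the <asm/daifflags.h> helpers; the callback is a placeholder, not a real API:

	#include <asm/daifflags.h>

	static void run_with_exceptions_masked(void (*fragile_fn)(void))
	{
		unsigned long flags;

		flags = local_daif_save();	/* mask D, A, I and F; return old PSTATE.DAIF */
		fragile_fn();			/* must not fault or take any exception */
		local_daif_restore(flags);	/* put the saved mask back */
	}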
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 8ef2a03ee553..0dd3cd38f54d 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -609,7 +609,6 @@ extern pgd_t init_pg_dir[PTRS_PER_PGD];
 extern pgd_t init_pg_end[];
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-extern pgd_t idmap_pg_end[];
 extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
 extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 0dcb1f7f485d..d51b32a69309 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -315,13 +315,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 }
 #endif
 
-static inline bool is_ttbr0_addr(unsigned long addr)
+static __always_inline bool is_ttbr0_addr(unsigned long addr)
 {
 	/* entry assembly clears tags for TTBR0 addrs */
 	return addr < TASK_SIZE;
 }
 
-static inline bool is_ttbr1_addr(unsigned long addr)
+static __always_inline bool is_ttbr1_addr(unsigned long addr)
 {
 	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
 	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index 4aaf31e3bf16..9525041e4a14 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -62,6 +62,10 @@ struct sigcontext {
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
+ *
+ * Note that the values allocated for use as magic should be chosen to
+ * be meaningful in ASCII to aid manual parsing; ZA doesn't follow this
+ * convention due to an oversight but it should be observed for future additions.
 */
struct _aarch64_ctx {
	__u32 magic;
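Editor's note: the new comment above asks that future context-record magics be readable as ASCII; existing records such as ESR_MAGIC (0x45535201, i.e. 'E' 'S' 'R' 0x01) already follow that convention. A hypothetical example for a new record, not a real kernel constant:

	/* Bytes spell "CTX1", so the record is easy to spot in a raw hex dump. */
	#define HYPOTHETICAL_CTX_MAGIC	0x43545831	/* 'C' 'T' 'X' '1' */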
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 91263d09ea65..d32d4ed5519b 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -196,7 +196,7 @@ static void __apply_alternatives(const struct alt_region *region,
 	}
 }
 
-void apply_alternatives_vdso(void)
+static void __init apply_alternatives_vdso(void)
 {
 	struct alt_region region;
 	const struct elf64_hdr *hdr;
@@ -220,7 +220,7 @@ void apply_alternatives_vdso(void)
 	__apply_alternatives(&region, false, &all_capabilities[0]);
 }
 
-static const struct alt_region kernel_alternatives = {
+static const struct alt_region kernel_alternatives __initconst = {
 	.begin	= (struct alt_instr *)__alt_instructions,
 	.end	= (struct alt_instr *)__alt_instructions_end,
 };
@@ -229,7 +229,7 @@ static const struct alt_region kernel_alternatives = {
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
-static int __apply_alternatives_multi_stop(void *unused)
+static int __init __apply_alternatives_multi_stop(void *unused)
 {
 	/* We always have a CPU 0 at this point (__init) */
 	if (smp_processor_id()) {
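Editor's note: the alternative.c hunks tag boot-only code and data with __init/__initconst so they are reclaimed once boot completes. A generic sketch of what those annotations buy; all names here are invented for illustration:

	#include <linux/init.h>
	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	/* Placed in .init.rodata and freed after boot; never reference it later. */
	static const int boot_lut[] __initconst = { 1, 2, 4, 8 };

	static int __init sum_boot_lut(void)	/* .init.text: boot-time only */
	{
		int i, sum = 0;

		for (i = 0; i < ARRAY_SIZE(boot_lut); i++)
			sum += boot_lut[i];
		return sum;
	}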
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 27369fa1c032..9a553b185e63 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -30,7 +30,7 @@
 /*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
@@ -63,7 +63,7 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
 /*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
@@ -97,7 +97,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
 /*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
 */
 static __always_inline void __enter_from_user_mode(void)
 {
@@ -116,7 +116,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
 /*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
 */
 static __always_inline void __exit_to_user_mode(void)
 {
@@ -152,7 +152,7 @@ asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
 /*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
- * code, intrumentable code, or any code which may trigger an exception.
+ * code, instrumentable code, or any code which may trigger an exception.
 */
 static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 {
@@ -170,7 +170,7 @@ static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 /*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
- * code, intrumentable code, or any code which may trigger an exception.
+ * code, instrumentable code, or any code which may trigger an exception.
 */
 static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 {
@@ -192,7 +192,7 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 /*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
- * kernel code, intrumentable code, or any code which may trigger an exception.
+ * kernel code, instrumentable code, or any code which may trigger an exception.
 */
 static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
 {
@@ -207,7 +207,7 @@ static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
 /*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
- * kernel code, intrumentable code, or any code which may trigger an exception.
+ * kernel code, instrumentable code, or any code which may trigger an exception.
 */
 static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
 {
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 57c7c211f8c7..aa718d6a9274 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -141,10 +141,6 @@ static bool __init has_pv_steal_clock(void)
 {
 	struct arm_smccc_res res;
 
-	/* To detect the presence of PV time support we require SMCCC 1.1+ */
-	if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE)
-		return false;
-
 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 			     ARM_SMCCC_HV_PV_TIME_FEATURES, &res);
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5a19950e7289..2368e4daa23d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1196,7 +1196,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
 
-	if (!ARM64_KERNEL_USES_PMD_MAPS)
+	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
 		return vmemmap_populate_basepages(start, end, node, altmap);
 
 	do {
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index b9ecbbae1e1a..066fa60b93d2 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -189,16 +189,12 @@ SYM_FUNC_END(cpu_do_resume)
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
 SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
-	save_and_disable_daif flags=x2
-
 	__idmap_cpu_set_reserved_ttbr1 x1, x3
 
 	offset_ttbr1 x0, x3
 	msr	ttbr1_el1, x0
 	isb
 
-	restore_daif x2
-
 	ret
 SYM_FUNC_END(idmap_cpu_replace_ttbr1)
 
 	.popsection
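Editor's note: the paravirt.c hunk can drop the explicit conduit check because arm_smccc_1_1_invoke() already degrades gracefully: when no SMCCC conduit is registered it reports SMCCC_RET_NOT_SUPPORTED in res.a0, so the caller's existing result check covers that case. A sketch of the resulting probe pattern, with an invented function name:

	#include <linux/arm-smccc.h>

	static bool probe_pv_time_features(void)
	{
		struct arm_smccc_res res;

		/* Fails cleanly (a0 == SMCCC_RET_NOT_SUPPORTED) without a conduit. */
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				     ARM_SMCCC_HV_PV_TIME_FEATURES, &res);

		return res.a0 == SMCCC_RET_SUCCESS;
	}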