author	Linus Torvalds <torvalds@linux-foundation.org>	2022-01-10 08:49:37 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-01-10 08:49:37 -0800
commit	9b9e211360044c12d7738973c944d6f300134881 (patch)
tree	f994a379135e70397df3c653e6578ad7e5d295fe /arch/arm64/include
parent	a7ac314061375c7805e0d3a26aad6eb0c41100df (diff)
parent	945409a6ef442cfe5f2f14e5626d4306d53100f0 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:

 - KCSAN enabled for arm64.

 - Additional kselftests to exercise the syscall ABI w.r.t. SVE/FPSIMD.

 - Some more SVE clean-ups and refactoring in preparation for SME
   support (scalable matrix extensions).

 - BTI clean-ups (SYM_FUNC macros etc.)

 - arm64 atomics clean-up and codegen improvements.

 - HWCAPs for FEAT_AFP (alternate floating point behaviour) and
   FEAT_RPRES (increased precision of reciprocal estimate and
   reciprocal square root estimate).

 - Use SHA3 instructions to speed up XOR.

 - arm64 unwind code refactoring/unification.

 - Avoid DC (data cache maintenance) instructions when DCZID_EL0.DZP ==
   1 (potentially set by a hypervisor; user-space already does this).

 - Perf updates for arm64: support for CI-700, HiSilicon PCIe PMU,
   Marvell CN10K LLC-TAD PMU, miscellaneous clean-ups.

 - Other fixes and clean-ups; highlights: fix the handling of erratum
   1418040, correct the calculation of the nomap region boundaries,
   introduce io_stop_wc() mapped to the new DGH instruction (data
   gathering hint).

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (81 commits)
  arm64: Use correct method to calculate nomap region boundaries
  arm64: Drop outdated links in comments
  arm64: perf: Don't register user access sysctl handler multiple times
  drivers: perf: marvell_cn10k: fix an IS_ERR() vs NULL check
  perf/smmuv3: Fix unused variable warning when CONFIG_OF=n
  arm64: errata: Fix exec handling in erratum 1418040 workaround
  arm64: Unhash early pointer print plus improve comment
  asm-generic: introduce io_stop_wc() and add implementation for ARM64
  arm64: Ensure that the 'bti' macro is defined where linkage.h is included
  arm64: remove __dma_*_area() aliases
  docs/arm64: delete a space from tagged-address-abi
  arm64: Enable KCSAN
  kselftest/arm64: Add pidbench for floating point syscall cases
  arm64/fp: Add comments documenting the usage of state restore functions
  kselftest/arm64: Add a test program to exercise the syscall ABI
  kselftest/arm64: Allow signal tests to trigger from a function
  kselftest/arm64: Parameterise ptrace vector length information
  arm64/sve: Minor clarification of ABI documentation
  arm64/sve: Generalise vector length configuration prctl() for SME
  arm64/sve: Make sysctl interface for SVE reusable by SME
  ...
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--	arch/arm64/include/asm/assembler.h	10
-rw-r--r--	arch/arm64/include/asm/atomic_ll_sc.h	86
-rw-r--r--	arch/arm64/include/asm/atomic_lse.h	270
-rw-r--r--	arch/arm64/include/asm/barrier.h	9
-rw-r--r--	arch/arm64/include/asm/cpu.h	1
-rw-r--r--	arch/arm64/include/asm/fpsimd.h	6
-rw-r--r--	arch/arm64/include/asm/hwcap.h	2
-rw-r--r--	arch/arm64/include/asm/linkage.h	35
-rw-r--r--	arch/arm64/include/asm/mte-kasan.h	8
-rw-r--r--	arch/arm64/include/asm/stacktrace.h	10
-rw-r--r--	arch/arm64/include/asm/sysreg.h	16
-rw-r--r--	arch/arm64/include/uapi/asm/hwcap.h	2
12 files changed, 193 insertions, 262 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 136d13f3d6e9..e8bd0af0141c 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -791,6 +791,16 @@ alternative_endif
.endm
/*
+ * Branch Target Identifier (BTI)
+ */
+ .macro bti, targets
+ .equ .L__bti_targets_c, 34
+ .equ .L__bti_targets_j, 36
+ .equ .L__bti_targets_jc, 38
+ hint #.L__bti_targets_\targets
+ .endm
+
+/*
* This macro emits a program property note section identifying
* architecture features which require special handling, mainly for
* use in assembly files included in the VDSO.
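
[Editor's note: the bti macro above uses the hint encoding because BTI
landing pads live in the hint space, so older assemblers and pre-BTI CPUs
treat them as NOPs. A minimal sketch of the correspondence (illustrative,
not kernel source):

	/* "bti c" == "hint #34", "bti j" == #36, "bti jc" == #38;
	 * on cores without FEAT_BTI the hint executes as a NOP. */
	static inline void bti_c_landing_pad(void)
	{
		asm volatile("hint #34");	/* equivalent to: bti c */
	}
]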
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 13869b76b58c..fe0db8d416fb 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -44,11 +44,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \
\
asm volatile("// atomic_" #op "\n" \
__LL_SC_FALLBACK( \
-" prfm pstl1strm, %2\n" \
-"1: ldxr %w0, %2\n" \
-" " #asm_op " %w0, %w0, %w3\n" \
-" stxr %w1, %w0, %2\n" \
-" cbnz %w1, 1b\n") \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxr %w0, %2\n" \
+ " " #asm_op " %w0, %w0, %w3\n" \
+ " stxr %w1, %w0, %2\n" \
+ " cbnz %w1, 1b\n") \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i)); \
}
@@ -62,12 +62,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
\
asm volatile("// atomic_" #op "_return" #name "\n" \
__LL_SC_FALLBACK( \
-" prfm pstl1strm, %2\n" \
-"1: ld" #acq "xr %w0, %2\n" \
-" " #asm_op " %w0, %w0, %w3\n" \
-" st" #rel "xr %w1, %w0, %2\n" \
-" cbnz %w1, 1b\n" \
-" " #mb ) \
+ " prfm pstl1strm, %2\n" \
+ "1: ld" #acq "xr %w0, %2\n" \
+ " " #asm_op " %w0, %w0, %w3\n" \
+ " st" #rel "xr %w1, %w0, %2\n" \
+ " cbnz %w1, 1b\n" \
+ " " #mb ) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@@ -84,12 +84,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
\
asm volatile("// atomic_fetch_" #op #name "\n" \
__LL_SC_FALLBACK( \
-" prfm pstl1strm, %3\n" \
-"1: ld" #acq "xr %w0, %3\n" \
-" " #asm_op " %w1, %w0, %w4\n" \
-" st" #rel "xr %w2, %w1, %3\n" \
-" cbnz %w2, 1b\n" \
-" " #mb ) \
+ " prfm pstl1strm, %3\n" \
+ "1: ld" #acq "xr %w0, %3\n" \
+ " " #asm_op " %w1, %w0, %w4\n" \
+ " st" #rel "xr %w2, %w1, %3\n" \
+ " cbnz %w2, 1b\n" \
+ " " #mb ) \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@@ -143,11 +143,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
\
asm volatile("// atomic64_" #op "\n" \
__LL_SC_FALLBACK( \
-" prfm pstl1strm, %2\n" \
-"1: ldxr %0, %2\n" \
-" " #asm_op " %0, %0, %3\n" \
-" stxr %w1, %0, %2\n" \
-" cbnz %w1, 1b") \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxr %0, %2\n" \
+ " " #asm_op " %0, %0, %3\n" \
+ " stxr %w1, %0, %2\n" \
+ " cbnz %w1, 1b") \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i)); \
}
@@ -161,12 +161,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
\
asm volatile("// atomic64_" #op "_return" #name "\n" \
__LL_SC_FALLBACK( \
-" prfm pstl1strm, %2\n" \
-"1: ld" #acq "xr %0, %2\n" \
-" " #asm_op " %0, %0, %3\n" \
-" st" #rel "xr %w1, %0, %2\n" \
-" cbnz %w1, 1b\n" \
-" " #mb ) \
+ " prfm pstl1strm, %2\n" \
+ "1: ld" #acq "xr %0, %2\n" \
+ " " #asm_op " %0, %0, %3\n" \
+ " st" #rel "xr %w1, %0, %2\n" \
+ " cbnz %w1, 1b\n" \
+ " " #mb ) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@@ -176,19 +176,19 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long \
-__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
+__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \
s64 result, val; \
unsigned long tmp; \
\
asm volatile("// atomic64_fetch_" #op #name "\n" \
__LL_SC_FALLBACK( \
-" prfm pstl1strm, %3\n" \
-"1: ld" #acq "xr %0, %3\n" \
-" " #asm_op " %1, %0, %4\n" \
-" st" #rel "xr %w2, %1, %3\n" \
-" cbnz %w2, 1b\n" \
-" " #mb ) \
+ " prfm pstl1strm, %3\n" \
+ "1: ld" #acq "xr %0, %3\n" \
+ " " #asm_op " %1, %0, %4\n" \
+ " st" #rel "xr %w2, %1, %3\n" \
+ " cbnz %w2, 1b\n" \
+ " " #mb ) \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@@ -241,14 +241,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
asm volatile("// atomic64_dec_if_positive\n"
__LL_SC_FALLBACK(
-" prfm pstl1strm, %2\n"
-"1: ldxr %0, %2\n"
-" subs %0, %0, #1\n"
-" b.lt 2f\n"
-" stlxr %w1, %0, %2\n"
-" cbnz %w1, 1b\n"
-" dmb ish\n"
-"2:")
+ " prfm pstl1strm, %2\n"
+ "1: ldxr %0, %2\n"
+ " subs %0, %0, #1\n"
+ " b.lt 2f\n"
+ " stlxr %w1, %0, %2\n"
+ " cbnz %w1, 1b\n"
+ " dmb ish\n"
+ "2:")
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
:
: "cc", "memory");
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index da3280f639cd..d955ade5df7c 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -11,13 +11,13 @@
#define __ASM_ATOMIC_LSE_H
#define ATOMIC_OP(op, asm_op) \
-static inline void __lse_atomic_##op(int i, atomic_t *v) \
+static inline void __lse_atomic_##op(int i, atomic_t *v) \
{ \
asm volatile( \
__LSE_PREAMBLE \
-" " #asm_op " %w[i], %[v]\n" \
- : [i] "+r" (i), [v] "+Q" (v->counter) \
- : "r" (v)); \
+ " " #asm_op " %w[i], %[v]\n" \
+ : [v] "+Q" (v->counter) \
+ : [i] "r" (i)); \
}
ATOMIC_OP(andnot, stclr)
@@ -25,19 +25,27 @@ ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)
+static inline void __lse_atomic_sub(int i, atomic_t *v)
+{
+ __lse_atomic_add(-i, v);
+}
+
#undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
+ int old; \
+ \
asm volatile( \
__LSE_PREAMBLE \
-" " #asm_op #mb " %w[i], %w[i], %[v]" \
- : [i] "+r" (i), [v] "+Q" (v->counter) \
- : "r" (v) \
+ " " #asm_op #mb " %w[i], %w[old], %[v]" \
+ : [v] "+Q" (v->counter), \
+ [old] "=r" (old) \
+ : [i] "r" (i) \
: cl); \
\
- return i; \
+ return old; \
}
#define ATOMIC_FETCH_OPS(op, asm_op) \
@@ -54,51 +62,46 @@ ATOMIC_FETCH_OPS(add, ldadd)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS
-#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
+#define ATOMIC_FETCH_OP_SUB(name) \
+static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
+{ \
+ return __lse_atomic_fetch_add##name(-i, v); \
+}
+
+ATOMIC_FETCH_OP_SUB(_relaxed)
+ATOMIC_FETCH_OP_SUB(_acquire)
+ATOMIC_FETCH_OP_SUB(_release)
+ATOMIC_FETCH_OP_SUB( )
+
+#undef ATOMIC_FETCH_OP_SUB
+
+#define ATOMIC_OP_ADD_SUB_RETURN(name) \
static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
{ \
- u32 tmp; \
- \
- asm volatile( \
- __LSE_PREAMBLE \
- " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
- " add %w[i], %w[i], %w[tmp]" \
- : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
- : "r" (v) \
- : cl); \
+ return __lse_atomic_fetch_add##name(i, v) + i; \
+} \
\
- return i; \
+static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
+{ \
+ return __lse_atomic_fetch_sub##name(i, v) - i; \
}
-ATOMIC_OP_ADD_RETURN(_relaxed, )
-ATOMIC_OP_ADD_RETURN(_acquire, a, "memory")
-ATOMIC_OP_ADD_RETURN(_release, l, "memory")
-ATOMIC_OP_ADD_RETURN( , al, "memory")
+ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
+ATOMIC_OP_ADD_SUB_RETURN(_acquire)
+ATOMIC_OP_ADD_SUB_RETURN(_release)
+ATOMIC_OP_ADD_SUB_RETURN( )
-#undef ATOMIC_OP_ADD_RETURN
+#undef ATOMIC_OP_ADD_SUB_RETURN
static inline void __lse_atomic_and(int i, atomic_t *v)
{
- asm volatile(
- __LSE_PREAMBLE
- " mvn %w[i], %w[i]\n"
- " stclr %w[i], %[v]"
- : [i] "+&r" (i), [v] "+Q" (v->counter)
- : "r" (v));
+ return __lse_atomic_andnot(~i, v);
}
#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
- asm volatile( \
- __LSE_PREAMBLE \
- " mvn %w[i], %w[i]\n" \
- " ldclr" #mb " %w[i], %w[i], %[v]" \
- : [i] "+&r" (i), [v] "+Q" (v->counter) \
- : "r" (v) \
- : cl); \
- \
- return i; \
+ return __lse_atomic_fetch_andnot##name(~i, v); \
}
ATOMIC_FETCH_OP_AND(_relaxed, )
@@ -108,69 +111,14 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
#undef ATOMIC_FETCH_OP_AND
-static inline void __lse_atomic_sub(int i, atomic_t *v)
-{
- asm volatile(
- __LSE_PREAMBLE
- " neg %w[i], %w[i]\n"
- " stadd %w[i], %[v]"
- : [i] "+&r" (i), [v] "+Q" (v->counter)
- : "r" (v));
-}
-
-#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
-static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
-{ \
- u32 tmp; \
- \
- asm volatile( \
- __LSE_PREAMBLE \
- " neg %w[i], %w[i]\n" \
- " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
- " add %w[i], %w[i], %w[tmp]" \
- : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
- : "r" (v) \
- : cl); \
- \
- return i; \
-}
-
-ATOMIC_OP_SUB_RETURN(_relaxed, )
-ATOMIC_OP_SUB_RETURN(_acquire, a, "memory")
-ATOMIC_OP_SUB_RETURN(_release, l, "memory")
-ATOMIC_OP_SUB_RETURN( , al, "memory")
-
-#undef ATOMIC_OP_SUB_RETURN
-
-#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
-static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
-{ \
- asm volatile( \
- __LSE_PREAMBLE \
- " neg %w[i], %w[i]\n" \
- " ldadd" #mb " %w[i], %w[i], %[v]" \
- : [i] "+&r" (i), [v] "+Q" (v->counter) \
- : "r" (v) \
- : cl); \
- \
- return i; \
-}
-
-ATOMIC_FETCH_OP_SUB(_relaxed, )
-ATOMIC_FETCH_OP_SUB(_acquire, a, "memory")
-ATOMIC_FETCH_OP_SUB(_release, l, "memory")
-ATOMIC_FETCH_OP_SUB( , al, "memory")
-
-#undef ATOMIC_FETCH_OP_SUB
-
#define ATOMIC64_OP(op, asm_op) \
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
{ \
asm volatile( \
__LSE_PREAMBLE \
-" " #asm_op " %[i], %[v]\n" \
- : [i] "+r" (i), [v] "+Q" (v->counter) \
- : "r" (v)); \
+ " " #asm_op " %[i], %[v]\n" \
+ : [v] "+Q" (v->counter) \
+ : [i] "r" (i)); \
}
ATOMIC64_OP(andnot, stclr)
@@ -178,19 +126,27 @@ ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)
+static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
+{
+ __lse_atomic64_add(-i, v);
+}
+
#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{ \
+ s64 old; \
+ \
asm volatile( \
__LSE_PREAMBLE \
-" " #asm_op #mb " %[i], %[i], %[v]" \
- : [i] "+r" (i), [v] "+Q" (v->counter) \
- : "r" (v) \
+ " " #asm_op #mb " %[i], %[old], %[v]" \
+ : [v] "+Q" (v->counter), \
+ [old] "=r" (old) \
+ : [i] "r" (i) \
: cl); \
\
- return i; \
+ return old; \
}
#define ATOMIC64_FETCH_OPS(op, asm_op) \
@@ -207,51 +163,46 @@ ATOMIC64_FETCH_OPS(add, ldadd)
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS
-#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
+#define ATOMIC64_FETCH_OP_SUB(name) \
+static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
+{ \
+ return __lse_atomic64_fetch_add##name(-i, v); \
+}
+
+ATOMIC64_FETCH_OP_SUB(_relaxed)
+ATOMIC64_FETCH_OP_SUB(_acquire)
+ATOMIC64_FETCH_OP_SUB(_release)
+ATOMIC64_FETCH_OP_SUB( )
+
+#undef ATOMIC64_FETCH_OP_SUB
+
+#define ATOMIC64_OP_ADD_SUB_RETURN(name) \
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{ \
- unsigned long tmp; \
- \
- asm volatile( \
- __LSE_PREAMBLE \
- " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
- " add %[i], %[i], %x[tmp]" \
- : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
- : "r" (v) \
- : cl); \
+ return __lse_atomic64_fetch_add##name(i, v) + i; \
+} \
\
- return i; \
+static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
+{ \
+ return __lse_atomic64_fetch_sub##name(i, v) - i; \
}
-ATOMIC64_OP_ADD_RETURN(_relaxed, )
-ATOMIC64_OP_ADD_RETURN(_acquire, a, "memory")
-ATOMIC64_OP_ADD_RETURN(_release, l, "memory")
-ATOMIC64_OP_ADD_RETURN( , al, "memory")
+ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
+ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
+ATOMIC64_OP_ADD_SUB_RETURN(_release)
+ATOMIC64_OP_ADD_SUB_RETURN( )
-#undef ATOMIC64_OP_ADD_RETURN
+#undef ATOMIC64_OP_ADD_SUB_RETURN
static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
- asm volatile(
- __LSE_PREAMBLE
- " mvn %[i], %[i]\n"
- " stclr %[i], %[v]"
- : [i] "+&r" (i), [v] "+Q" (v->counter)
- : "r" (v));
+ return __lse_atomic64_andnot(~i, v);
}
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \
- asm volatile( \
- __LSE_PREAMBLE \
- " mvn %[i], %[i]\n" \
- " ldclr" #mb " %[i], %[i], %[v]" \
- : [i] "+&r" (i), [v] "+Q" (v->counter) \
- : "r" (v) \
- : cl); \
- \
- return i; \
+ return __lse_atomic64_fetch_andnot##name(~i, v); \
}
ATOMIC64_FETCH_OP_AND(_relaxed, )
@@ -261,61 +212,6 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
#undef ATOMIC64_FETCH_OP_AND
-static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
-{
- asm volatile(
- __LSE_PREAMBLE
- " neg %[i], %[i]\n"
- " stadd %[i], %[v]"
- : [i] "+&r" (i), [v] "+Q" (v->counter)
- : "r" (v));
-}
-
-#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
-{ \
- unsigned long tmp; \
- \
- asm volatile( \
- __LSE_PREAMBLE \
- " neg %[i], %[i]\n" \
- " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
- " add %[i], %[i], %x[tmp]" \
- : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
- : "r" (v) \
- : cl); \
- \
- return i; \
-}
-
-ATOMIC64_OP_SUB_RETURN(_relaxed, )
-ATOMIC64_OP_SUB_RETURN(_acquire, a, "memory")
-ATOMIC64_OP_SUB_RETURN(_release, l, "memory")
-ATOMIC64_OP_SUB_RETURN( , al, "memory")
-
-#undef ATOMIC64_OP_SUB_RETURN
-
-#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
-static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
-{ \
- asm volatile( \
- __LSE_PREAMBLE \
- " neg %[i], %[i]\n" \
- " ldadd" #mb " %[i], %[i], %[v]" \
- : [i] "+&r" (i), [v] "+Q" (v->counter) \
- : "r" (v) \
- : cl); \
- \
- return i; \
-}
-
-ATOMIC64_FETCH_OP_SUB(_relaxed, )
-ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory")
-ATOMIC64_FETCH_OP_SUB(_release, l, "memory")
-ATOMIC64_FETCH_OP_SUB( , al, "memory")
-
-#undef ATOMIC64_FETCH_OP_SUB
-
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long tmp;
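
[Editor's note: the rework above leans on two identities: fetch_sub(i) ==
fetch_add(-i), and sub_return(i) == fetch_sub(i) - i. A trivial non-atomic
C sketch of the algebra, purely for illustration:

	static int fetch_add(int i, int *v) { int old = *v; *v += i; return old; }
	static int fetch_sub(int i, int *v) { return fetch_add(-i, v); }
	static int sub_return(int i, int *v) { return fetch_sub(i, v) - i; }

The constraint change (a separate [old] output instead of clobbering [i])
also keeps i available after the instruction, which is what allows these
thin wrappers to compute the return value without an extra temporary.]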
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 1c5a00598458..62217be36217 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -26,6 +26,14 @@
#define __tsb_csync() asm volatile("hint #18" : : : "memory")
#define csdb() asm volatile("hint #20" : : : "memory")
+/*
+ * Data Gathering Hint:
+ * This instruction prevents merging memory accesses with Normal-NC or
+ * Device-GRE attributes before the hint instruction with any memory accesses
+ * appearing after the hint instruction.
+ */
+#define dgh() asm volatile("hint #6" : : : "memory")
+
#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync() \
do { \
@@ -46,6 +54,7 @@
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
+#define io_stop_wc() dgh()
#define tsb_csync() \
do { \
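
[Editor's note: a hedged usage sketch for the new barrier — hypothetical
driver code, not part of this diff. A driver streaming descriptors through
a write-combining mapping could use io_stop_wc() to keep earlier buffered
writes from being merged with later, unrelated ones:

	static void push_descriptor(void __iomem *wc_ring, u64 desc)
	{
		writeq(desc, wc_ring);	/* write to a Normal-NC mapping */
		io_stop_wc();		/* DGH: stop gathering past this point */
	}
]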
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 0f6d16faa540..a58e366f0b07 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -51,6 +51,7 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64dfr1;
u64 reg_id_aa64isar0;
u64 reg_id_aa64isar1;
+ u64 reg_id_aa64isar2;
u64 reg_id_aa64mmfr0;
u64 reg_id_aa64mmfr1;
u64 reg_id_aa64mmfr2;
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index dbb4b30a5648..cb24385e3632 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -51,8 +51,8 @@ extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);
-/* Maximum VL that SVE VL-agnostic software can transparently support */
-#define SVE_VL_ARCH_MAX 0x100
+/* Maximum VL that SVE/SME VL-agnostic software can transparently support */
+#define VL_ARCH_MAX 0x100
/* Offset of FFR in the SVE register dump */
static inline size_t sve_ffr_offset(int vl)
@@ -122,7 +122,7 @@ extern void fpsimd_sync_to_sve(struct task_struct *task);
extern void sve_sync_to_fpsimd(struct task_struct *task);
extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);
-extern int sve_set_vector_length(struct task_struct *task,
+extern int vec_set_vector_length(struct task_struct *task, enum vec_type type,
unsigned long vl, unsigned long flags);
extern int sve_set_current_vl(unsigned long arg);
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index b100e0055eab..f68fbb207473 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -106,6 +106,8 @@
#define KERNEL_HWCAP_BTI __khwcap2_feature(BTI)
#define KERNEL_HWCAP_MTE __khwcap2_feature(MTE)
#define KERNEL_HWCAP_ECV __khwcap2_feature(ECV)
+#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP)
+#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index 9906541a6861..b77e9b3f5371 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -1,48 +1,43 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
+#ifdef __ASSEMBLY__
+#include <asm/assembler.h>
+#endif
+
#define __ALIGN .align 2
#define __ALIGN_STR ".align 2"
-#if defined(CONFIG_ARM64_BTI_KERNEL) && defined(__aarch64__)
-
-/*
- * Since current versions of gas reject the BTI instruction unless we
- * set the architecture version to v8.5 we use the hint instruction
- * instead.
- */
-#define BTI_C hint 34 ;
-
/*
- * When using in-kernel BTI we need to ensure that PCS-conformant assembly
- * functions have suitable annotations. Override SYM_FUNC_START to insert
- * a BTI landing pad at the start of everything.
+ * When using in-kernel BTI we need to ensure that PCS-conformant
+ * assembly functions have suitable annotations. Override
+ * SYM_FUNC_START to insert a BTI landing pad at the start of
+ * everything, the override is done unconditionally so we're more
+ * likely to notice any drift from the overridden definitions.
*/
#define SYM_FUNC_START(name) \
SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \
- BTI_C
+ bti c ;
#define SYM_FUNC_START_NOALIGN(name) \
SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) \
- BTI_C
+ bti c ;
#define SYM_FUNC_START_LOCAL(name) \
SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) \
- BTI_C
+ bti c ;
#define SYM_FUNC_START_LOCAL_NOALIGN(name) \
SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) \
- BTI_C
+ bti c ;
#define SYM_FUNC_START_WEAK(name) \
SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) \
- BTI_C
+ bti c ;
#define SYM_FUNC_START_WEAK_NOALIGN(name) \
SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \
- BTI_C
-
-#endif
+ bti c ;
/*
* Annotate a function as position independent, i.e., safe to be called before
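
[Editor's note: to make the effect of the SYM_FUNC_START overrides
concrete — every assembly function opened through these macros now begins
with a "bti c" landing pad (via the assembler.h macro, which emits
hint #34), so indirect calls to it are valid under in-kernel BTI. A
hypothetical expansion, shown as file-scope asm for illustration only:

	asm(
	"	.globl	example_func\n"
	"	.align	2\n"
	"example_func:\n"
	"	bti	c\n"	/* landing pad planted by SYM_FUNC_START */
	"	ret\n"
	);
]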
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 478b9bcf69ad..e4704a403237 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -84,10 +84,12 @@ static inline void __dc_gzva(u64 p)
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
bool init)
{
- u64 curr, mask, dczid_bs, end1, end2, end3;
+ u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3;
/* Read DC G(Z)VA block size from the system register. */
- dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf);
+ dczid = read_cpuid(DCZID_EL0);
+ dczid_bs = 4ul << (dczid & 0xf);
+ dczid_dzp = (dczid >> 4) & 1;
curr = (u64)__tag_set(addr, tag);
mask = dczid_bs - 1;
@@ -106,7 +108,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
*/
#define SET_MEMTAG_RANGE(stg_post, dc_gva) \
do { \
- if (size >= 2 * dczid_bs) { \
+ if (!dczid_dzp && size >= 2 * dczid_bs) {\
do { \
curr = stg_post(curr); \
} while (curr < end1); \
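
[Editor's note: for reference, the DCZID_EL0 fields consumed above: BS
(bits [3:0]) gives the DC ZVA/GVA block size as log2 of the number of
4-byte words, and DZP (bit 4), when set, prohibits the DC zeroing
instructions — hence the new !dczid_dzp guard. A small decoding sketch,
illustrative only:

	static inline u64 dczid_block_bytes(u64 dczid)
	{
		return 4UL << (dczid & 0xf);	/* BS: words -> bytes */
	}

	static inline bool dczid_dc_prohibited(u64 dczid)
	{
		return (dczid >> 4) & 1;	/* DZP bit */
	}
]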
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 6564a01cc085..e77cdef9ca29 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -47,6 +47,10 @@ struct stack_info {
* @prev_type: The type of stack this frame record was on, or a synthetic
* value of STACK_TYPE_UNKNOWN. This is used to detect a
* transition from one stack to another.
+ *
+ * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
+ * associated with the most recently encountered replacement lr
+ * value.
*/
struct stackframe {
unsigned long fp;
@@ -59,9 +63,6 @@ struct stackframe {
#endif
};
-extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
-extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
- bool (*fn)(void *, unsigned long), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
const char *loglvl);
@@ -146,7 +147,4 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
return false;
}
-void start_backtrace(struct stackframe *frame, unsigned long fp,
- unsigned long pc);
-
#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 16b3f1a1d468..4704f5893458 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -182,6 +182,7 @@
#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
+#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2)
#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
@@ -771,6 +772,20 @@
#define ID_AA64ISAR1_GPI_NI 0x0
#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
+/* id_aa64isar2 */
+#define ID_AA64ISAR2_RPRES_SHIFT 4
+#define ID_AA64ISAR2_WFXT_SHIFT 0
+
+#define ID_AA64ISAR2_RPRES_8BIT 0x0
+#define ID_AA64ISAR2_RPRES_12BIT 0x1
+/*
+ * Value 0x1 has been removed from the architecture, and is
+ * reserved, but has not yet been removed from the ARM ARM
+ * as of ARM DDI 0487G.b.
+ */
+#define ID_AA64ISAR2_WFXT_NI 0x0
+#define ID_AA64ISAR2_WFXT_SUPPORTED 0x2
+
/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60
#define ID_AA64PFR0_CSV2_SHIFT 56
@@ -889,6 +904,7 @@
#endif
/* id_aa64mmfr1 */
+#define ID_AA64MMFR1_AFP_SHIFT 44
#define ID_AA64MMFR1_ETS_SHIFT 36
#define ID_AA64MMFR1_TWED_SHIFT 32
#define ID_AA64MMFR1_XNX_SHIFT 28
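
[Editor's note: a hedged sketch of how the new ID_AA64ISAR2 definitions
would typically be consumed, assuming the usual cpufeature helpers; this
is not part of the diff:

	static bool cpu_has_rpres(void)
	{
		u64 isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);

		return cpuid_feature_extract_unsigned_field(isar2,
				ID_AA64ISAR2_RPRES_SHIFT) == ID_AA64ISAR2_RPRES_12BIT;
	}
]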
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 7b23b16f21ce..f03731847d9d 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -76,5 +76,7 @@
#define HWCAP2_BTI (1 << 17)
#define HWCAP2_MTE (1 << 18)
#define HWCAP2_ECV (1 << 19)
+#define HWCAP2_AFP (1 << 20)
+#define HWCAP2_RPRES (1 << 21)
#endif /* _UAPI__ASM_HWCAP_H */
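
[Editor's note: from userspace, the two new bits are visible through the
auxiliary vector. A minimal detection sketch, assuming standard
glibc/kernel headers:

	#include <stdio.h>
	#include <sys/auxv.h>
	#include <asm/hwcap.h>

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		printf("FEAT_AFP:   %s\n", (hwcap2 & HWCAP2_AFP) ? "yes" : "no");
		printf("FEAT_RPRES: %s\n", (hwcap2 & HWCAP2_RPRES) ? "yes" : "no");
		return 0;
	}
]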