Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/acpi.h          12
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h    15
-rw-r--r--  arch/arm64/include/asm/arch_timer.h     2
-rw-r--r--  arch/arm64/include/asm/arm_pmuv3.h      2
-rw-r--r--  arch/arm64/include/asm/cpucaps.h        2
-rw-r--r--  arch/arm64/include/asm/cpufeature.h     4
-rw-r--r--  arch/arm64/include/asm/cputype.h        6
-rw-r--r--  arch/arm64/include/asm/esr.h           33
-rw-r--r--  arch/arm64/include/asm/mmu_context.h    4
-rw-r--r--  arch/arm64/include/asm/mte.h            4
-rw-r--r--  arch/arm64/include/asm/ptrace.h        35
-rw-r--r--  arch/arm64/include/asm/smp.h           13
-rw-r--r--  arch/arm64/include/asm/sysreg.h         4
13 files changed, 61 insertions(+), 75 deletions(-)
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 6792a1f83f2a..a407f9cd549e 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -119,6 +119,18 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
return acpi_cpu_get_madt_gicc(cpu)->uid;
}
+static inline int get_cpu_for_acpi_id(u32 uid)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (acpi_cpu_get_madt_gicc(cpu) &&
+ uid == get_acpi_id_for_cpu(cpu))
+ return cpu;
+
+ return -EINVAL;
+}
+
static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
int apei_claim_sea(struct pt_regs *regs);
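Note: the new get_cpu_for_acpi_id() is the inverse of get_acpi_id_for_cpu(), a linear scan from a MADT GICC UID back to a logical CPU, returning -EINVAL when no CPU carries the UID. A minimal usage sketch (the caller context and madt_uid are illustrative, not part of this diff):

	int cpu = get_cpu_for_acpi_id(madt_uid);

	if (cpu < 0)
		return cpu;	/* -EINVAL: no CPU with this UID */
	pr_info("ACPI UID %u is logical CPU %d\n", madt_uid, cpu);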
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 5f172611654b..9e96f024b2f1 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -175,21 +175,6 @@ static inline bool gic_prio_masking_enabled(void)
static inline void gic_pmr_mask_irqs(void)
{
- BUILD_BUG_ON(GICD_INT_DEF_PRI < (__GIC_PRIO_IRQOFF |
- GIC_PRIO_PSR_I_SET));
- BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
- /*
- * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
- * and non-secure PMR accesses are not subject to the shifts that
- * are applied to IRQ priorities
- */
- BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
- /*
- * Same situation as above, but now we make sure that we can mask
- * regular interrupts.
- */
- BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) < (__GIC_PRIO_IRQOFF_NS |
- GIC_PRIO_PSR_I_SET));
gic_write_pmr(GIC_PRIO_IRQOFF);
}
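Note: the deleted BUILD_BUG_ON()s pinned compile-time orderings between GICD_INT_DEF_PRI and the PMR mask values; with priorities now selected at boot (see the ptrace.h hunk below), the constants they compared no longer exist. A sketch of the idiom itself, with made-up values:

	#include <linux/build_bug.h>

	#define EXAMPLE_PRIO_UNMASKED	0xe0	/* illustrative values only */
	#define EXAMPLE_PRIO_MASKED	0x60

	static inline void example_check(void)
	{
		/* Build fails if the masked value would not mask. */
		BUILD_BUG_ON(EXAMPLE_PRIO_MASKED >= EXAMPLE_PRIO_UNMASKED);
	}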
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 934c658ee947..f5794d50f51d 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -15,7 +15,7 @@
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/jump_label.h>
-#include <linux/smp.h>
+#include <linux/percpu.h>
#include <linux/types.h>
#include <clocksource/arm_arch_timer.h>
diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
index c27404fa4418..a4697a0b6835 100644
--- a/arch/arm64/include/asm/arm_pmuv3.h
+++ b/arch/arm64/include/asm/arm_pmuv3.h
@@ -6,7 +6,7 @@
#ifndef __ASM_PMUV3_H
#define __ASM_PMUV3_H
-#include <linux/kvm_host.h>
+#include <asm/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 7529c0263933..a6e5b07b64fd 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -59,7 +59,7 @@ cpucap_is_possible(const unsigned int cap)
case ARM64_WORKAROUND_REPEAT_TLBI:
return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
case ARM64_WORKAROUND_SPECULATIVE_SSBS:
- return IS_ENABLED(CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS);
+ return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386);
}
return true;
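Note: cpucap_is_possible() exists so capability checks constant-fold to false when the backing Kconfig option is off; the fix points ARM64_WORKAROUND_SPECULATIVE_SSBS at the option that actually gates it, CONFIG_ARM64_ERRATUM_3194386. A sketch of the pattern, assuming a hypothetical caller:

	static void example_apply_mitigation(void)
	{
		/*
		 * cpus_have_final_cap() consults cpucap_is_possible(), so
		 * with CONFIG_ARM64_ERRATUM_3194386=n this branch compiles out.
		 */
		if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_SSBS))
			pr_warn("applying SSBS speculation workaround\n");
	}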
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8b904a757bd3..558434267271 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -588,14 +588,14 @@ static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);
- return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
+ return val == ID_AA64PFR0_EL1_EL1_AARCH32;
}
static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);
- return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
+ return val == ID_AA64PFR0_EL1_EL0_AARCH32;
}
static inline bool id_aa64pfr0_sve(u64 pfr0)
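Note: the renamed constants are per-EL (ID_AA64PFR0_EL1_EL0_AARCH32 and friends) instead of the shared ELx values deleted from sysreg.h below. A sketch of the extraction these helpers perform, as standalone code:

	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
	u32 el0  = cpuid_feature_extract_unsigned_field(pfr0,
						ID_AA64PFR0_EL1_EL0_SHIFT);

	/* 4-bit field: 0x1 = AArch64 only, 0x2 = AArch32 supported too */
	bool compat = (el0 == ID_AA64PFR0_EL1_EL0_AARCH32);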
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 7b32b99023a2..1cb0704c6163 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -86,9 +86,12 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
+#define ARM_CPU_PART_CORTEX_X3 0xD4E
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
+#define ARM_CPU_PART_CORTEX_A720 0xD81
#define ARM_CPU_PART_CORTEX_X4 0xD82
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
+#define ARM_CPU_PART_CORTEX_X925 0xD85
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@@ -162,9 +165,12 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
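Note: the new part numbers (Cortex-X3, Cortex-A720, Cortex-X925) feed the usual MIDR-based erratum matching. A sketch, assuming an illustrative revision range:

	static bool example_affected(void)
	{
		/* Cortex-X925 r0p0..r0p1; the bounds here are made up. */
		return midr_is_cpu_model_range(read_cpuid_id(),
					       MIDR_CORTEX_X925,
					       MIDR_CPU_VAR_REV(0, 0),
					       MIDR_CPU_VAR_REV(0, 1));
	}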
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 7abf09df7033..3f482500f71f 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -121,6 +121,14 @@
#define ESR_ELx_FSC_SECC (0x18)
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
+/* Status codes for individual page table levels */
+#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + (n))
+#define ESR_ELx_FSC_PERM_L(n) (ESR_ELx_FSC_PERM + (n))
+
+#define ESR_ELx_FSC_FAULT_nL (0x2C)
+#define ESR_ELx_FSC_FAULT_L(n) (((n) < 0 ? ESR_ELx_FSC_FAULT_nL : \
+ ESR_ELx_FSC_FAULT) + (n))
+
/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT (24)
#define ESR_ELx_ISV (UL(1) << ESR_ELx_ISV_SHIFT)
@@ -388,20 +396,33 @@ static inline bool esr_is_data_abort(unsigned long esr)
static inline bool esr_fsc_is_translation_fault(unsigned long esr)
{
- /* Translation fault, level -1 */
- if ((esr & ESR_ELx_FSC) == 0b101011)
- return true;
- return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_FAULT_L(3)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(2)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(1)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(0)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(-1));
}
static inline bool esr_fsc_is_permission_fault(unsigned long esr)
{
- return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM;
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_PERM_L(3)) ||
+ (esr == ESR_ELx_FSC_PERM_L(2)) ||
+ (esr == ESR_ELx_FSC_PERM_L(1)) ||
+ (esr == ESR_ELx_FSC_PERM_L(0));
}
static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
{
- return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS;
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_ACCESS_L(3)) ||
+ (esr == ESR_ELx_FSC_ACCESS_L(2)) ||
+ (esr == ESR_ELx_FSC_ACCESS_L(1)) ||
+ (esr == ESR_ELx_FSC_ACCESS_L(0));
}
/* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */
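Note: worked values behind the new level-aware helpers, derived from the FSC encodings this header already defines:

	/*
	 * ESR_ELx_FSC_FAULT  = 0x04  ->  FAULT_L(0..3)  = 0x04..0x07
	 * ESR_ELx_FSC_ACCESS = 0x08  ->  ACCESS_L(0..3) = 0x08..0x0b
	 * ESR_ELx_FSC_PERM   = 0x0c  ->  PERM_L(0..3)   = 0x0c..0x0f
	 * FAULT_L(-1) = ESR_ELx_FSC_FAULT_nL + (-1) = 0x2b, i.e. the
	 * level -1 fault the old open-coded 0b101011 check matched.
	 */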
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c768d16b81a4..bd19f4c758b7 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -72,11 +72,11 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
unsigned long tcr = read_sysreg(tcr_el1);
- if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
+ if ((tcr & TCR_T0SZ_MASK) == t0sz)
return;
tcr &= ~TCR_T0SZ_MASK;
- tcr |= t0sz << TCR_T0SZ_OFFSET;
+ tcr |= t0sz;
write_sysreg(tcr, tcr_el1);
isb();
}
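Note: __cpu_set_tcr_t0sz() now expects the T0SZ field already in register position, so both the compare and the OR drop the shift. A minimal sketch of a call under that convention (the VA width is illustrative):

	unsigned long t0sz = TCR_T0SZ(48);	/* 48-bit VA, illustrative */

	__cpu_set_tcr_t0sz(t0sz);		/* field passed pre-shifted */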
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 91fbd5c8a391..0f84518632b4 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -182,7 +182,7 @@ void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void)
{
- if (!system_supports_mte())
+ if (!kasan_hw_tags_enabled())
return;
mte_check_tfsr_el1();
@@ -190,7 +190,7 @@ static inline void mte_check_tfsr_entry(void)
static inline void mte_check_tfsr_exit(void)
{
- if (!system_supports_mte())
+ if (!kasan_hw_tags_enabled())
return;
/*
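Note: the entry/exit TFSR scans now key off kasan_hw_tags_enabled() rather than system_supports_mte(); the assumption here (not spelled out in the diff) is that TFSR_EL1 only needs checking when in-kernel MTE, i.e. KASAN_HW_TAGS, is actually live:

	/*
	 * system_supports_mte()    - CPU has MTE (kernel or user use)
	 * kasan_hw_tags_enabled()  - in-kernel tag checking is enabled,
	 *                            the only case with kernel tag faults
	 *                            to find in TFSR_EL1.
	 */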
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 47ec58031f11..0abe975d68a8 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -21,35 +21,12 @@
#define INIT_PSTATE_EL2 \
(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h)
-/*
- * PMR values used to mask/unmask interrupts.
- *
- * GIC priority masking works as follows: if an IRQ's priority is a higher value
- * than the value held in PMR, that IRQ is masked. Lowering the value of PMR
- * means masking more IRQs (or at least that the same IRQs remain masked).
- *
- * To mask interrupts, we clear the most significant bit of PMR.
- *
- * Some code sections either automatically switch back to PSR.I or explicitly
- * require to not use priority masking. If bit GIC_PRIO_PSR_I_SET is included
- * in the priority mask, it indicates that PSR.I should be set and
- * interrupt disabling temporarily does not rely on IRQ priorities.
- */
-#define GIC_PRIO_IRQON 0xe0
-#define __GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
-#define __GIC_PRIO_IRQOFF_NS 0xa0
-#define GIC_PRIO_PSR_I_SET (1 << 4)
-
-#define GIC_PRIO_IRQOFF \
- ({ \
- extern struct static_key_false gic_nonsecure_priorities;\
- u8 __prio = __GIC_PRIO_IRQOFF; \
- \
- if (static_branch_unlikely(&gic_nonsecure_priorities)) \
- __prio = __GIC_PRIO_IRQOFF_NS; \
- \
- __prio; \
- })
+#include <linux/irqchip/arm-gic-v3-prio.h>
+
+#define GIC_PRIO_IRQON GICV3_PRIO_UNMASKED
+#define GIC_PRIO_IRQOFF GICV3_PRIO_IRQ
+
+#define GIC_PRIO_PSR_I_SET GICV3_PRIO_PSR_I_SET
/* Additional SPSR bits not exposed in the UABI */
#define PSR_MODE_THREAD_BIT (1 << 0)
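Note: the PMR constants now come from the shared linux/irqchip/arm-gic-v3-prio.h instead of the static-key selection above. The masking rule from the removed comment still applies: an IRQ whose priority is a higher value than PMR is masked, so lowering PMR masks more IRQs:

	gic_write_pmr(GIC_PRIO_IRQOFF);	/* lower value: mask IRQs */
	gic_write_pmr(GIC_PRIO_IRQON);	/* higher value: unmask */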
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index efb13112b408..2510eec026f7 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -25,22 +25,11 @@
#ifndef __ASSEMBLY__
-#include <asm/percpu.h>
-
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
-
-/*
- * We don't use this_cpu_read(cpu_number) as that has implicit writes to
- * preempt_count, and associated (compiler) barriers, that we'd like to avoid
- * the expense of. If we're preemptible, the value can be stale at use anyway.
- * And we can't use this_cpu_ptr() either, as that winds up recursing back
- * here under CONFIG_DEBUG_PREEMPT=y.
- */
-#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
+#define raw_smp_processor_id() (current_thread_info()->cpu)
/*
* Logical CPU mapping.
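Note: raw_smp_processor_id() is now a plain load of the CPU number cached in thread_info, so none of the per-CPU machinery (and the preempt_count traffic the removed comment worried about) is involved:

	/* Equivalent to current_thread_info()->cpu, a load off 'current'. */
	int cpu = raw_smp_processor_id();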
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index af3b206fa423..1b6e436dbb55 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -872,10 +872,6 @@
/* Position the attr at the correct index */
#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
-/* id_aa64pfr0 */
-#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1
-#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2
-
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_EL1_TGRAN4_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT