author	Linus Torvalds <torvalds@linux-foundation.org>	2024-09-24 10:59:17 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-09-24 10:59:17 -0700
commit	97d8894b6f4c44762fd48f5d29e73358d6181dbb (patch)
tree	c0d8d1f342d746c6b71395e292270c8c29173375 /arch/riscv
parent	7108fff8848bc3bd958789f4db6beff94bdae521 (diff)
parent	b3f835cd7339919561866252a11831ead72e7073 (diff)
Merge tag 'riscv-for-linus-6.12-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Palmer Dabbelt:

 - Support using Zkr to seed KASLR

 - Support IPI-triggered CPU backtracing

 - Support for generic CPU vulnerabilities reporting to userspace

 - A few cleanups for missing licenses

 - The size limit on the XIP kernel has been removed

 - Support for tracing userspace stacks

 - Support for the Svvptc extension

 - Various cleanups and fixes throughout the tree

* tag 'riscv-for-linus-6.12-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (47 commits)
  crash: Fix riscv64 crash memory reserve dead loop
  perf/riscv-sbi: Add platform specific firmware event handling
  tools: Optimize ring buffer for riscv
  tools: Add riscv barrier implementation
  RISC-V: Don't have MAX_PHYSMEM_BITS exceed phys_addr_t
  ACPI: NUMA: initialize all values of acpi_early_node_map to NUMA_NO_NODE
  riscv: Enable bitops instrumentation
  riscv: Omit optimized string routines when using KASAN
  ACPI: RISCV: Make acpi_numa_get_nid() to be static
  riscv: Randomize lower bits of stack address
  selftests: riscv: Allow mmap test to compile on 32-bit
  riscv: Make riscv_isa_vendor_ext_andes array static
  riscv: Use LIST_HEAD() to simplify code
  riscv: defconfig: Disable RZ/Five peripheral support
  RISC-V: Implement kgdb_roundup_cpus() to enable future NMI Roundup
  riscv: avoid Imbalance in RAS
  riscv: cacheinfo: Add back init_cache_level() function
  riscv: Remove unused _TIF_WORK_MASK
  drivers/perf: riscv: Remove redundant macro check
  riscv: define ILLEGAL_POINTER_VALUE for 64bit
  ...
Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kconfig                           |   8
-rw-r--r--  arch/riscv/configs/defconfig                 |  12
-rw-r--r--  arch/riscv/errata/sifive/errata_cip_453.S    |   8
-rw-r--r--  arch/riscv/include/asm/acpi.h                |   2
-rw-r--r--  arch/riscv/include/asm/bitops.h              |  43
-rw-r--r--  arch/riscv/include/asm/cacheflush.h          |  18
-rw-r--r--  arch/riscv/include/asm/exec.h                |   8
-rw-r--r--  arch/riscv/include/asm/fence.h               |   1
-rw-r--r--  arch/riscv/include/asm/hwcap.h               |   1
-rw-r--r--  arch/riscv/include/asm/irq.h                 |   5
-rw-r--r--  arch/riscv/include/asm/page.h                |  29
-rw-r--r--  arch/riscv/include/asm/pgtable.h             |  28
-rw-r--r--  arch/riscv/include/asm/sbi.h                 |   1
-rw-r--r--  arch/riscv/include/asm/set_memory.h          |   2
-rw-r--r--  arch/riscv/include/asm/sparsemem.h           |   2
-rw-r--r--  arch/riscv/include/asm/string.h              |   2
-rw-r--r--  arch/riscv/include/asm/thread_info.h         |  11
-rw-r--r--  arch/riscv/include/asm/vmalloc.h             |   1
-rw-r--r--  arch/riscv/include/asm/xip_fixup.h           |  30
-rw-r--r--  arch/riscv/kernel/acpi_numa.c                |   2
-rw-r--r--  arch/riscv/kernel/asm-offsets.c              |   7
-rw-r--r--  arch/riscv/kernel/cacheinfo.c                |   5
-rw-r--r--  arch/riscv/kernel/cpufeature.c               |   1
-rw-r--r--  arch/riscv/kernel/elf_kexec.c                |   6
-rw-r--r--  arch/riscv/kernel/entry.S                    |  91
-rw-r--r--  arch/riscv/kernel/module.c                   |   4
-rw-r--r--  arch/riscv/kernel/perf_callchain.c           |  46
-rw-r--r--  arch/riscv/kernel/pi/Makefile                |   4
-rw-r--r--  arch/riscv/kernel/pi/archrandom_early.c      |  30
-rw-r--r--  arch/riscv/kernel/pi/cmdline_early.c         |  10
-rw-r--r--  arch/riscv/kernel/pi/fdt_early.c             | 167
-rw-r--r--  arch/riscv/kernel/pi/pi.h                    |  20
-rw-r--r--  arch/riscv/kernel/process.c                  |   9
-rw-r--r--  arch/riscv/kernel/riscv_ksyms.c              |   3
-rw-r--r--  arch/riscv/kernel/smp.c                      |  43
-rw-r--r--  arch/riscv/kernel/stacktrace.c               |  43
-rw-r--r--  arch/riscv/kernel/vdso/Makefile              |   2
-rw-r--r--  arch/riscv/kernel/vendor_extensions/andes.c  |   2
-rw-r--r--  arch/riscv/kernel/vmcore_info.c              |   7
-rw-r--r--  arch/riscv/kernel/vmlinux-xip.lds.S          |   5
-rw-r--r--  arch/riscv/lib/Makefile                      |   2
-rw-r--r--  arch/riscv/lib/memset.S                      |   2
-rw-r--r--  arch/riscv/lib/strcmp.S                      |   2
-rw-r--r--  arch/riscv/lib/strlen.S                      |   1
-rw-r--r--  arch/riscv/lib/strncmp.S                     |   2
-rw-r--r--  arch/riscv/mm/init.c                         |  28
-rw-r--r--  arch/riscv/mm/pgtable.c                      |  13
-rw-r--r--  arch/riscv/purgatory/Makefile                |   2
48 files changed, 616 insertions, 155 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 91fe3940ac71..b6d515db869b 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -70,6 +70,7 @@ config RISCV
select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_SYM_ANNOTATIONS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if MMU
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
@@ -94,6 +95,7 @@ config RISCV
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CPU_DEVICES
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
@@ -204,6 +206,7 @@ config RISCV
select THREAD_INFO_IN_TASK
select TRACE_IRQFLAGS_SUPPORT
select UACCESS_MEMCPY if !MMU
+ select USER_STACKTRACE_SUPPORT
select ZONE_DMA32 if 64BIT
config CLANG_SUPPORTS_DYNAMIC_FTRACE
@@ -323,6 +326,11 @@ config GENERIC_HWEIGHT
config FIX_EARLYCON_MEM
def_bool MMU
+config ILLEGAL_POINTER_VALUE
+ hex
+ default 0 if 32BIT
+ default 0xdead000000000000 if 64BIT
+
config PGTABLE_LEVELS
int
default 5 if 64BIT
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 572eac58f33b..2341393cfac1 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -137,12 +137,10 @@ CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
-CONFIG_RAVB=y
CONFIG_STMMAC_ETH=m
CONFIG_MICREL_PHY=y
CONFIG_MICROSEMI_PHY=y
CONFIG_MOTORCOMM_PHY=y
-CONFIG_CAN_RCAR_CANFD=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_KEYBOARD_SUN4I_LRADC=m
CONFIG_SERIAL_8250=y
@@ -150,7 +148,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_SERIAL_SH_SCI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
@@ -160,11 +157,9 @@ CONFIG_I2C_CHARDEV=m
CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_MV64XXX=m
-CONFIG_I2C_RIIC=y
CONFIG_SPI=y
CONFIG_SPI_CADENCE_QUADSPI=m
CONFIG_SPI_PL022=m
-CONFIG_SPI_RSPI=m
CONFIG_SPI_SIFIVE=y
CONFIG_SPI_SUN6I=y
# CONFIG_PTP_1588_CLOCK is not set
@@ -177,7 +172,6 @@ CONFIG_POWER_RESET_GPIO_RESTART=y
CONFIG_SENSORS_SFCTEMP=m
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
-CONFIG_RZG2L_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
CONFIG_MFD_AXP20X_I2C=y
@@ -206,11 +200,11 @@ CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USB_XHCI_RCAR is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
-CONFIG_USB_RENESAS_USBHS=m
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_USB_CDNS_SUPPORT=m
@@ -222,7 +216,6 @@ CONFIG_USB_MUSB_HDRC=m
CONFIG_USB_MUSB_SUNXI=m
CONFIG_NOP_USB_XCEIV=m
CONFIG_USB_GADGET=y
-CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_CONFIGFS=m
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_ACM=y
@@ -240,7 +233,6 @@ CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_DWCMSHC=y
CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC_SPI=y
-CONFIG_MMC_SDHI=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_STARFIVE=y
CONFIG_MMC_SUNXI=y
@@ -258,7 +250,6 @@ CONFIG_CLK_SOPHGO_SG2042_PLL=y
CONFIG_CLK_SOPHGO_SG2042_CLKGEN=y
CONFIG_CLK_SOPHGO_SG2042_RPGATE=y
CONFIG_SUN8I_DE2_CCU=m
-CONFIG_RENESAS_OSTM=y
CONFIG_SUN50I_IOMMU=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_CTRL=y
@@ -266,7 +257,6 @@ CONFIG_RPMSG_VIRTIO=y
CONFIG_PM_DEVFREQ=y
CONFIG_IIO=y
CONFIG_PHY_SUN4I_USB=m
-CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_PHY_STARFIVE_JH7110_DPHY_RX=m
CONFIG_PHY_STARFIVE_JH7110_PCIE=m
CONFIG_PHY_STARFIVE_JH7110_USB=m
diff --git a/arch/riscv/errata/sifive/errata_cip_453.S b/arch/riscv/errata/sifive/errata_cip_453.S
index f1b9623fe1de..b1f7b636fe9a 100644
--- a/arch/riscv/errata/sifive/errata_cip_453.S
+++ b/arch/riscv/errata/sifive/errata_cip_453.S
@@ -21,7 +21,7 @@
1:
.endm
-ENTRY(sifive_cip_453_page_fault_trp)
+SYM_FUNC_START(sifive_cip_453_page_fault_trp)
ADD_SIGN_EXT a0, t0, t1
#ifdef CONFIG_MMU
la t0, do_page_fault
@@ -29,10 +29,10 @@ ENTRY(sifive_cip_453_page_fault_trp)
la t0, do_trap_unknown
#endif
jr t0
-END(sifive_cip_453_page_fault_trp)
+SYM_FUNC_END(sifive_cip_453_page_fault_trp)
-ENTRY(sifive_cip_453_insn_fault_trp)
+SYM_FUNC_START(sifive_cip_453_insn_fault_trp)
ADD_SIGN_EXT a0, t0, t1
la t0, do_trap_insn_fault
jr t0
-END(sifive_cip_453_insn_fault_trp)
+SYM_FUNC_END(sifive_cip_453_insn_fault_trp)
diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h
index e0a1f84404f3..6e13695120bc 100644
--- a/arch/riscv/include/asm/acpi.h
+++ b/arch/riscv/include/asm/acpi.h
@@ -91,10 +91,8 @@ static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI_NUMA
-int acpi_numa_get_nid(unsigned int cpu);
void acpi_map_cpus_to_nodes(void);
#else
-static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
static inline void acpi_map_cpus_to_nodes(void) { }
#endif /* CONFIG_ACPI_NUMA */
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 71af9ecfcfcb..fae152ea0508 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -222,44 +222,44 @@ legacy:
#define __NOT(x) (~(x))
/**
- * test_and_set_bit - Set a bit and return its old value
+ * arch_test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation may be reordered on other architectures than x86.
*/
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(or, __NOP, nr, addr);
}
/**
- * test_and_clear_bit - Clear a bit and return its old value
+ * arch_test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation can be reordered on other architectures other than x86.
*/
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(and, __NOT, nr, addr);
}
/**
- * test_and_change_bit - Change a bit and return its old value
+ * arch_test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(xor, __NOP, nr, addr);
}
/**
- * set_bit - Atomically set a bit in memory
+ * arch_set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -270,13 +270,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void set_bit(int nr, volatile unsigned long *addr)
+static inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
__op_bit(or, __NOP, nr, addr);
}
/**
- * clear_bit - Clears a bit in memory
+ * arch_clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
@@ -284,13 +284,13 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
* on non x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*/
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
{
__op_bit(and, __NOT, nr, addr);
}
/**
- * change_bit - Toggle a bit in memory
+ * arch_change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
@@ -298,40 +298,40 @@ static inline void clear_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void arch_change_bit(int nr, volatile unsigned long *addr)
{
__op_bit(xor, __NOP, nr, addr);
}
/**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and provides acquire barrier semantics.
* It can be used to implement bit locks.
*/
-static inline int test_and_set_bit_lock(
+static inline int arch_test_and_set_bit_lock(
unsigned long nr, volatile unsigned long *addr)
{
return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}
/**
- * clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
-static inline void clear_bit_unlock(
+static inline void arch_clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
__op_bit_ord(and, __NOT, nr, addr, .rl);
}
/**
- * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -345,13 +345,13 @@ static inline void clear_bit_unlock(
* non-atomic property here: it's a lot more instructions and we still have to
* provide release semantics anyway.
*/
-static inline void __clear_bit_unlock(
+static inline void arch___clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
- clear_bit_unlock(nr, addr);
+ arch_clear_bit_unlock(nr, addr);
}
-static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
volatile unsigned long *addr)
{
unsigned long res;
@@ -369,6 +369,9 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
#undef __NOT
#undef __AMO
+#include <asm-generic/bitops/instrumented-atomic.h>
+#include <asm-generic/bitops/instrumented-lock.h>
+
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
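[Note] The arch_ rename above is what makes the instrumentation headers work: each generic wrapper reports the memory access to KASAN/KCSAN and then falls through to the arch_ primitive defined in this file. A minimal sketch of that wrapper pattern, paraphrased from include/asm-generic/bitops/instrumented-atomic.h:

	static __always_inline void set_bit(long nr, volatile unsigned long *addr)
	{
		/* tell the sanitizers about the atomic RMW access ... */
		instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
		/* ... then perform it with the uninstrumented arch primitive */
		arch_set_bit(nr, addr);
	}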
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index ce79c558a4c8..8de73f91bfa3 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -46,7 +46,23 @@ do { \
} while (0)
#ifdef CONFIG_64BIT
-#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
+extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
+extern char _end[];
+#define flush_cache_vmap flush_cache_vmap
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ if (is_vmalloc_or_module_addr((void *)start)) {
+ int i;
+
+ /*
+ * We don't care if concurrently a cpu resets this value since
+ * the only place this can happen is in handle_exception() where
+ * an sfence.vma is emitted.
+ */
+ for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i)
+ new_vmalloc[i] = -1ULL;
+ }
+}
#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end)
#endif
diff --git a/arch/riscv/include/asm/exec.h b/arch/riscv/include/asm/exec.h
new file mode 100644
index 000000000000..07d9942682e0
--- /dev/null
+++ b/arch/riscv/include/asm/exec.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_EXEC_H
+#define __ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* __ASM_EXEC_H */
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 6bcd80325dfc..182db7930edc 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 5a0bd27fd11a..46d9de54179e 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -92,6 +92,7 @@
#define RISCV_ISA_EXT_ZCF 83
#define RISCV_ISA_EXT_ZCMOP 84
#define RISCV_ISA_EXT_ZAWRS 85
+#define RISCV_ISA_EXT_SVVPTC 86
#define RISCV_ISA_EXT_XLINUXENVCFG 127
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 7e9a84a005ed..7b038f3b7cb0 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -14,6 +14,11 @@
#define INVALID_CONTEXT UINT_MAX
+#ifdef CONFIG_SMP
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+#endif
+
void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void));
struct fwnode_handle *riscv_get_intc_hwnode(void);
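[Note] The generic backtrace plumbing consumes the hook declared above roughly as follows, a sketch based on include/linux/nmi.h (an exclude_cpu of -1 means "exclude nobody"):

	static inline bool trigger_all_cpu_backtrace(void)
	{
		/* fan out to every online CPU via the arch IPI hook */
		arch_trigger_cpumask_backtrace(cpu_online_mask, -1);
		return true;
	}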
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 7ede2111c591..32d308a3355f 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -112,11 +112,13 @@ struct kernel_mapping {
/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
/* Offset between kernel mapping virtual address and kernel load address */
- unsigned long va_kernel_pa_offset;
- unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
+ unsigned long va_kernel_xip_text_pa_offset;
+ unsigned long va_kernel_xip_data_pa_offset;
uintptr_t xiprom;
uintptr_t xiprom_sz;
+#else
+ unsigned long va_kernel_pa_offset;
#endif
};
@@ -134,12 +136,18 @@ extern phys_addr_t phys_ram_base;
#else
void *linear_mapping_pa_to_va(unsigned long x);
#endif
+
+#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y) ({ \
unsigned long _y = (unsigned long)(y); \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
- (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \
- (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
+ (_y < phys_ram_base) ? \
+ (void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) : \
+ (void *)(_y + kernel_map.va_kernel_xip_data_pa_offset); \
})
+#else
+#define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
+#endif
+
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
#ifndef CONFIG_DEBUG_VIRTUAL
@@ -147,12 +155,17 @@ void *linear_mapping_pa_to_va(unsigned long x);
#else
phys_addr_t linear_mapping_va_to_pa(unsigned long x);
#endif
+
+#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y) ({ \
unsigned long _y = (unsigned long)(y); \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
- (_y - kernel_map.va_kernel_xip_pa_offset) : \
- (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
+ (_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ? \
+ (_y - kernel_map.va_kernel_xip_text_pa_offset) : \
+ (_y - kernel_map.va_kernel_xip_data_pa_offset); \
})
+#else
+#define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
+#endif
#define __va_to_pa_nodebug(x) ({ \
unsigned long _x = x; \
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 089f3c9f56a3..e79f15293492 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -107,13 +107,6 @@
#endif
-#ifdef CONFIG_XIP_KERNEL
-#define XIP_OFFSET SZ_32M
-#define XIP_OFFSET_MASK (SZ_32M - 1)
-#else
-#define XIP_OFFSET 0
-#endif
-
#ifndef __ASSEMBLY__
#include <asm/page.h>
@@ -142,11 +135,14 @@
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({ \
+ extern char _sdata[], _start[], _end[]; \
+ uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR \
+ + (uintptr_t)&_sdata - (uintptr_t)&_start; \
+ uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR \
+ + (uintptr_t)&_end - (uintptr_t)&_start; \
uintptr_t __a = (uintptr_t)(addr); \
- (__a >= CONFIG_XIP_PHYS_ADDR && \
- __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
- __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
- __a; \
+ (__a >= __rom_start_data && __a < __rom_end_data) ? \
+ __a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a; \
})
#else
#define XIP_FIXUP(addr) (addr)
@@ -501,6 +497,9 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, unsigned int nr)
{
+ asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
+ : : : : svvptc);
+
/*
* The kernel assumes that TLBs don't cache invalid entries, but
* in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
@@ -510,6 +509,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
*/
while (nr--)
local_flush_tlb_page(address + nr * PAGE_SIZE);
+
+svvptc:;
+ /*
+ * Svvptc guarantees that the new valid pte will be visible within
+ * a bounded timeframe, so when the uarch does not cache invalid
+ * entries, we don't have to do anything.
+ */
}
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
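[Note] The asm goto above relies on boot-time code patching: if the CPU reports Svvptc, the nop emitted at that site is rewritten into a jump to the svvptc label, so the flush loop costs a single nop on such hardware. A minimal sketch of the pattern (maybe_flush is a hypothetical name; ALTERNATIVE and the extension ID come from asm/alternative-macros.h and asm/hwcap.h):

	static inline void maybe_flush(unsigned long address)
	{
		asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
			 : : : : svvptc);

		local_flush_tlb_page(address);	/* legacy uarch: flush eagerly */
	svvptc:
		return;				/* Svvptc: nothing to do */
	}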
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 7bd3746028c9..98f631b051db 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -159,6 +159,7 @@ struct riscv_pmu_snapshot_data {
#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
+#define RISCV_PLAT_FW_EVENT 0xFFFF
/** General pmu event codes specified in SBI PMU extension */
enum sbi_pmu_hw_generic_events_t {
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index ec11001c3fe0..ab92fc84e1fc 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -46,7 +46,7 @@ bool kernel_page_present(struct page *page);
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_STRICT_KERNEL_RWX
+#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL)
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h
index 63acaecc3374..2f901a410586 100644
--- a/arch/riscv/include/asm/sparsemem.h
+++ b/arch/riscv/include/asm/sparsemem.h
@@ -7,7 +7,7 @@
#ifdef CONFIG_64BIT
#define MAX_PHYSMEM_BITS 56
#else
-#define MAX_PHYSMEM_BITS 34
+#define MAX_PHYSMEM_BITS 32
#endif /* CONFIG_64BIT */
#define SECTION_SIZE_BITS 27
#endif /* CONFIG_SPARSEMEM */
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
index a96b1fea24fe..5ba77f60bf0b 100644
--- a/arch/riscv/include/asm/string.h
+++ b/arch/riscv/include/asm/string.h
@@ -19,6 +19,7 @@ extern asmlinkage void *__memcpy(void *, const void *, size_t);
extern asmlinkage void *memmove(void *, const void *, size_t);
extern asmlinkage void *__memmove(void *, const void *, size_t);
+#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
#define __HAVE_ARCH_STRCMP
extern asmlinkage int strcmp(const char *cs, const char *ct);
@@ -27,6 +28,7 @@ extern asmlinkage __kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNCMP
extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count);
+#endif
/* For those files which don't want to check by kasan. */
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index fca5c6be2b81..ebe52f96da34 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -61,6 +61,13 @@ struct thread_info {
void *scs_base;
void *scs_sp;
#endif
+#ifdef CONFIG_64BIT
+ /*
+ * Used in handle_exception() to save a0, a1 and a2 before knowing if we
+ * can access the kernel stack.
+ */
+ unsigned long a0, a1, a2;
+#endif
};
#ifdef CONFIG_SHADOW_CALL_STACK
@@ -112,8 +119,4 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
-#define _TIF_WORK_MASK \
- (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
-
#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index 51f6dfe19745..fefe94dc98e2 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
index b65bf6306f69..f3d56299bc22 100644
--- a/arch/riscv/include/asm/xip_fixup.h
+++ b/arch/riscv/include/asm/xip_fixup.h
@@ -9,18 +9,36 @@
#ifdef CONFIG_XIP_KERNEL
.macro XIP_FIXUP_OFFSET reg
- REG_L t0, _xip_fixup
+ /* Fix-up address in Flash into address in RAM early during boot before
+ * MMU is up. Because generated code "thinks" data is in Flash, but it
+ * is actually in RAM (actually data is also in Flash, but Flash is
+ * read-only, thus we need to use the data residing in RAM).
+ *
+ * The start of data in Flash is _sdata and the start of data in RAM is
+ * CONFIG_PHYS_RAM_BASE. So this fix-up essentially does this:
+ * reg += CONFIG_PHYS_RAM_BASE - _sdata
+ */
+ li t0, CONFIG_PHYS_RAM_BASE
add \reg, \reg, t0
+ la t0, _sdata
+ sub \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
+ /* In linker script, at the transition from read-only section to
+ * writable section, the VMA is increased while LMA remains the same.
+ * (See in linker script how _sdata, __data_loc and LOAD_OFFSET is
+ * changed)
+ *
+ * Consequently, early during boot before MMU is up, the generated code
+ * reads the "writable" section at wrong addresses, because VMA is used
+ * by compiler to generate code, but the data is located in Flash using
+ * LMA.
+ */
+ la t0, _sdata
+ sub \reg, \reg, t0
la t0, __data_loc
- REG_L t1, _xip_phys_offset
- sub \reg, \reg, t1
add \reg, \reg, t0
.endm
-
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
diff --git a/arch/riscv/kernel/acpi_numa.c b/arch/riscv/kernel/acpi_numa.c
index ff95aeebee3e..130769e3a99c 100644
--- a/arch/riscv/kernel/acpi_numa.c
+++ b/arch/riscv/kernel/acpi_numa.c
@@ -30,7 +30,7 @@
static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
-int __init acpi_numa_get_nid(unsigned int cpu)
+static int __init acpi_numa_get_nid(unsigned int cpu)
{
return acpi_early_node_map[cpu];
}
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index b09ca5f944f7..e94180ba432f 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -36,6 +36,8 @@ void asm_offsets(void)
OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
+
+ OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
@@ -43,6 +45,11 @@ void asm_offsets(void)
#ifdef CONFIG_SHADOW_CALL_STACK
OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp);
#endif
+#ifdef CONFIG_64BIT
+ OFFSET(TASK_TI_A0, task_struct, thread_info.a0);
+ OFFSET(TASK_TI_A1, task_struct, thread_info.a1);
+ OFFSET(TASK_TI_A2, task_struct, thread_info.a2);
+#endif
OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index d6c108c50cba..b320b1d9aa01 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -71,6 +71,11 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
this_leaf->type = type;
}
+int init_cache_level(unsigned int cpu)
+{
+ return init_of_cache_level(cpu);
+}
+
int populate_cache_leaves(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index b427188b28fc..3a8eeaa9310c 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -381,6 +381,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
+ __RISCV_ISA_EXT_DATA(svvptc, RISCV_ISA_EXT_SVVPTC),
};
const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index 11c0d2e0becf..3c37661801f9 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -451,6 +451,12 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
*(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) |
ENCODE_CJTYPE_IMM(val - addr);
break;
+ case R_RISCV_ADD16:
+ *(u16 *)loc += val;
+ break;
+ case R_RISCV_SUB16:
+ *(u16 *)loc -= val;
+ break;
case R_RISCV_ADD32:
*(u32 *)loc += val;
break;
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index ac2e908d4418..c200d329d4bd 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -19,6 +19,79 @@
.section .irqentry.text, "ax"
+.macro new_vmalloc_check
+ REG_S a0, TASK_TI_A0(tp)
+ csrr a0, CSR_CAUSE
+ /* Exclude IRQs */
+ blt a0, zero, _new_vmalloc_restore_context_a0
+
+ REG_S a1, TASK_TI_A1(tp)
+ /* Only check new_vmalloc if we are in page/protection fault */
+ li a1, EXC_LOAD_PAGE_FAULT
+ beq a0, a1, _new_vmalloc_kernel_address
+ li a1, EXC_STORE_PAGE_FAULT
+ beq a0, a1, _new_vmalloc_kernel_address
+ li a1, EXC_INST_PAGE_FAULT
+ bne a0, a1, _new_vmalloc_restore_context_a1
+
+_new_vmalloc_kernel_address:
+ /* Is it a kernel address? */
+ csrr a0, CSR_TVAL
+ bge a0, zero, _new_vmalloc_restore_context_a1
+
+ /* Check if a new vmalloc mapping appeared that could explain the trap */
+ REG_S a2, TASK_TI_A2(tp)
+ /*
+ * Computes:
+ * a0 = &new_vmalloc[BIT_WORD(cpu)]
+ * a1 = BIT_MASK(cpu)
+ */
+ REG_L a2, TASK_TI_CPU(tp)
+ /*
+ * Compute the new_vmalloc element position:
+ * (cpu / 64) * 8 = (cpu >> 6) << 3
+ */
+ srli a1, a2, 6
+ slli a1, a1, 3
+ la a0, new_vmalloc
+ add a0, a0, a1
+ /*
+ * Compute the bit position in the new_vmalloc element:
+ * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - (cpu >> 6) << 6
+ * = cpu - ((cpu >> 6) << 3) << 3
+ */
+ slli a1, a1, 3
+ sub a1, a2, a1
+ /* Compute the "get mask": 1 << bit_pos */
+ li a2, 1
+ sll a1, a2, a1
+
+ /* Check the value of new_vmalloc for this cpu */
+ REG_L a2, 0(a0)
+ and a2, a2, a1
+ beq a2, zero, _new_vmalloc_restore_context
+
+ /* Atomically reset the current cpu bit in new_vmalloc */
+ amoxor.d a0, a1, (a0)
+
+ /* Only emit a sfence.vma if the uarch caches invalid entries */
+ ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)
+
+ REG_L a0, TASK_TI_A0(tp)
+ REG_L a1, TASK_TI_A1(tp)
+ REG_L a2, TASK_TI_A2(tp)
+ csrw CSR_SCRATCH, x0
+ sret
+
+_new_vmalloc_restore_context:
+ REG_L a2, TASK_TI_A2(tp)
+_new_vmalloc_restore_context_a1:
+ REG_L a1, TASK_TI_A1(tp)
+_new_vmalloc_restore_context_a0:
+ REG_L a0, TASK_TI_A0(tp)
+.endm
+
+
SYM_CODE_START(handle_exception)
/*
* If coming from userspace, preserve the user thread pointer and load
@@ -30,6 +103,20 @@ SYM_CODE_START(handle_exception)
.Lrestore_kernel_tpsp:
csrr tp, CSR_SCRATCH
+
+#ifdef CONFIG_64BIT
+ /*
+ * The RISC-V kernel does not eagerly emit a sfence.vma after each
+ * new vmalloc mapping, which may result in exceptions:
+ * - if the uarch caches invalid entries, the new mapping would not be
+ * observed by the page table walker and an invalidation is needed.
+ * - if the uarch does not cache invalid entries, a reordered access
+ * could "miss" the new mapping and traps: in that case, we only need
+ * to retry the access, no sfence.vma is required.
+ */
+ new_vmalloc_check
+#endif
+
REG_S sp, TASK_TI_KERNEL_SP(tp)
#ifdef CONFIG_VMAP_STACK
@@ -239,8 +326,8 @@ SYM_CODE_START(ret_from_fork)
jalr s0
1:
move a0, sp /* pt_regs */
- la ra, ret_from_exception
- tail syscall_exit_to_user_mode
+ call syscall_exit_to_user_mode
+ j ret_from_exception
SYM_CODE_END(ret_from_fork)
#ifdef CONFIG_IRQ_STACKS
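[Note] new_vmalloc_check must run in assembly with only the three scratch slots added to thread_info, because no kernel stack is known to be valid at that point. A C-level restatement of the same logic, illustrative only (new_vmalloc_fault_handled is a hypothetical name; new_vmalloc is the per-CPU bitmap set by flush_cache_vmap() above):

	static bool new_vmalloc_fault_handled(unsigned int cpu)
	{
		u64 *word = &new_vmalloc[cpu / 64];	/* a0 = &new_vmalloc[BIT_WORD(cpu)] */
		u64 mask = BIT_MASK(cpu);		/* a1 = BIT_MASK(cpu) */

		if (!(READ_ONCE(*word) & mask))
			return false;	/* no new mapping: fall into handle_exception */

		*word ^= mask;		/* the asm clears the bit atomically with amoxor.d */
		/* sfence.vma here, unless Svvptc guarantees timely visibility */
		return true;		/* sret: simply retry the faulting access */
	}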
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 906f9a3a5d65..1cd461f3d872 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -787,8 +787,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
int res;
unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
struct hlist_head *relocation_hashtable;
- struct list_head used_buckets_list;
unsigned int hashtable_bits;
+ LIST_HEAD(used_buckets_list);
hashtable_bits = initialize_relocation_hashtable(num_relocations,
&relocation_hashtable);
@@ -796,8 +796,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
if (!relocation_hashtable)
return -ENOMEM;
- INIT_LIST_HEAD(&used_buckets_list);
-
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
index 3348a61de7d9..c7468af77c66 100644
--- a/arch/riscv/kernel/perf_callchain.c
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -6,37 +6,9 @@
#include <asm/stacktrace.h>
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
- unsigned long fp, unsigned long reg_ra)
+static bool fill_callchain(void *entry, unsigned long pc)
{
- struct stackframe buftail;
- unsigned long ra = 0;
- unsigned long __user *user_frame_tail =
- (unsigned long __user *)(fp - sizeof(struct stackframe));
-
- /* Check accessibility of one struct frame_tail beyond */
- if (!access_ok(user_frame_tail, sizeof(buftail)))
- return 0;
- if (__copy_from_user_inatomic(&buftail, user_frame_tail,
- sizeof(buftail)))
- return 0;
-
- if (reg_ra != 0)
- ra = reg_ra;
- else
- ra = buftail.ra;
-
- fp = buftail.fp;
- if (ra != 0)
- perf_callchain_store(entry, ra);
- else
- return 0;
-
- return fp;
+ return perf_callchain_store(entry, pc) == 0;
}
/*
@@ -56,19 +28,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- unsigned long fp = 0;
-
- fp = regs->s0;
- perf_callchain_store(entry, regs->epc);
-
- fp = user_backtrace(entry, fp, regs->ra);
- while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
- fp = user_backtrace(entry, fp, 0);
-}
-
-static bool fill_callchain(void *entry, unsigned long pc)
-{
- return perf_callchain_store(entry, pc) == 0;
+ arch_stack_walk_user(fill_callchain, entry, regs);
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile
index 50bc5ef7dd2f..d5bf1bc7de62 100644
--- a/arch/riscv/kernel/pi/Makefile
+++ b/arch/riscv/kernel/pi/Makefile
@@ -5,6 +5,7 @@ KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
-Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \
$(call cc-option,-mbranch-protection=none) \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
+ -include $(srctree)/include/linux/hidden.h \
-D__DISABLE_EXPORTS -ffreestanding \
-fno-asynchronous-unwind-tables -fno-unwind-tables \
$(call cc-option,-fno-addrsig)
@@ -16,6 +17,7 @@ KBUILD_CFLAGS += -mcmodel=medany
CFLAGS_cmdline_early.o += -D__NO_FORTIFY
CFLAGS_lib-fdt_ro.o += -D__NO_FORTIFY
+CFLAGS_fdt_early.o += -D__NO_FORTIFY
$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \
--remove-section=.note.gnu.property \
@@ -32,5 +34,5 @@ $(obj)/string.o: $(srctree)/lib/string.c FORCE
$(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE
$(call if_changed_rule,cc_o_c)
-obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
+obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o archrandom_early.pi.o
extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
diff --git a/arch/riscv/kernel/pi/archrandom_early.c b/arch/riscv/kernel/pi/archrandom_early.c
new file mode 100644
index 000000000000..3f05d3cf3b7b
--- /dev/null
+++ b/arch/riscv/kernel/pi/archrandom_early.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/csr.h>
+#include <linux/processor.h>
+
+#include "pi.h"
+
+/*
+ * To avoid rewriting code include asm/archrandom.h and create macros
+ * for the functions that won't be included.
+ */
+#undef riscv_has_extension_unlikely
+#define riscv_has_extension_likely(...) false
+#undef pr_err_once
+#define pr_err_once(...)
+
+#include <asm/archrandom.h>
+
+u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa)
+{
+ unsigned long seed = 0;
+
+ if (!fdt_early_match_extension_isa((const void *)dtb_pa, "zkr"))
+ return 0;
+
+ if (!csr_seed_long(&seed))
+ return 0;
+
+ return seed;
+}
diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c
index f6d4dedffb84..fbcdc9e4e143 100644
--- a/arch/riscv/kernel/pi/cmdline_early.c
+++ b/arch/riscv/kernel/pi/cmdline_early.c
@@ -6,15 +6,9 @@
#include <asm/pgtable.h>
#include <asm/setup.h>
-static char early_cmdline[COMMAND_LINE_SIZE];
+#include "pi.h"
-/*
- * Declare the functions that are exported (but prefixed) here so that LLVM
- * does not complain it lacks the 'static' keyword (which, if added, makes
- * LLVM complain because the function is actually unused in this file).
- */
-u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa);
-bool set_nokaslr_from_cmdline(uintptr_t dtb_pa);
+static char early_cmdline[COMMAND_LINE_SIZE];
static char *get_early_cmdline(uintptr_t dtb_pa)
{
diff --git a/arch/riscv/kernel/pi/fdt_early.c b/arch/riscv/kernel/pi/fdt_early.c
index 899610e042ab..9bdee2fafe47 100644
--- a/arch/riscv/kernel/pi/fdt_early.c
+++ b/arch/riscv/kernel/pi/fdt_early.c
@@ -2,13 +2,9 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/libfdt.h>
+#include <linux/ctype.h>
-/*
- * Declare the functions that are exported (but prefixed) here so that LLVM
- * does not complain it lacks the 'static' keyword (which, if added, makes
- * LLVM complain because the function is actually unused in this file).
- */
-u64 get_kaslr_seed(uintptr_t dtb_pa);
+#include "pi.h"
u64 get_kaslr_seed(uintptr_t dtb_pa)
{
@@ -28,3 +24,162 @@ u64 get_kaslr_seed(uintptr_t dtb_pa)
*prop = 0;
return ret;
}
+
+/**
+ * fdt_device_is_available - check if a device is available for use
+ *
+ * @fdt: pointer to the device tree blob
+ * @node: offset of the node whose property to find
+ *
+ * Returns true if the status property is absent or set to "okay" or "ok",
+ * false otherwise
+ */
+static bool fdt_device_is_available(const void *fdt, int node)
+{
+ const char *status;
+ int statlen;
+
+ status = fdt_getprop(fdt, node, "status", &statlen);
+ if (!status)
+ return true;
+
+ if (statlen > 0) {
+ if (!strcmp(status, "okay") || !strcmp(status, "ok"))
+ return true;
+ }
+
+ return false;
+}
+
+/* Copy of fdt_nodename_eq_ */
+static int fdt_node_name_eq(const void *fdt, int offset,
+ const char *s)
+{
+ int olen;
+ int len = strlen(s);
+ const char *p = fdt_get_name(fdt, offset, &olen);
+
+ if (!p || olen < len)
+ /* short match */
+ return 0;
+
+ if (memcmp(p, s, len) != 0)
+ return 0;
+
+ if (p[len] == '\0')
+ return 1;
+ else if (!memchr(s, '@', len) && (p[len] == '@'))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * isa_string_contains - check if isa string contains an extension
+ *
+ * @isa_str: isa string to search
+ * @ext_name: the extension to search for
+ *
+ * Returns true if the extension is in the given isa string,
+ * false otherwise
+ */
+static bool isa_string_contains(const char *isa_str, const char *ext_name)
+{
+ size_t i, single_end, len = strlen(ext_name);
+ char ext_end;
+
+ /* A valid ISA string must contain at least "rv32" or "rv64" */
+ if (strlen(isa_str) < 4)
+ return false;
+
+ if (len == 1) {
+ single_end = strcspn(isa_str, "sSxXzZ");
+ /* Search for single chars between rv32/64 and multi-letter extensions */
+ for (i = 4; i < single_end; i++) {
+ if (tolower(isa_str[i]) == ext_name[0])
+ return true;
+ }
+ return false;
+ }
+
+ /* Skip to start of multi-letter extensions */
+ isa_str = strpbrk(isa_str, "sSxXzZ");
+ while (isa_str) {
+ if (strncasecmp(isa_str, ext_name, len) == 0) {
+ ext_end = isa_str[len];
+ /* Check if matches the whole extension. */
+ if (ext_end == '\0' || ext_end == '_')
+ return true;
+ }
+ /* Multi-letter extensions must be split from other multi-letter
+ * extensions with an "_", the end of a multi-letter extension will
+ * either be the null character or the "_" at the start of the next
+ * multi-letter extension.
+ */
+ isa_str = strchr(isa_str, '_');
+ if (isa_str)
+ isa_str++;
+ }
+
+ return false;
+}
+
+/**
+ * early_cpu_isa_ext_available - check if cpu node has an extension
+ *
+ * @fdt: pointer to the device tree blob
+ * @node: offset of the cpu node
+ * @ext_name: the extension to search for
+ *
+ * Returns true if the cpu node has the extension,
+ * false otherwise
+ */
+static bool early_cpu_isa_ext_available(const void *fdt, int node, const char *ext_name)
+{
+ const void *prop;
+ int len;
+
+ prop = fdt_getprop(fdt, node, "riscv,isa-extensions", &len);
+ if (prop && fdt_stringlist_contains(prop, len, ext_name))
+ return true;
+
+ prop = fdt_getprop(fdt, node, "riscv,isa", &len);
+ if (prop && isa_string_contains(prop, ext_name))
+ return true;
+
+ return false;
+}
+
+/**
+ * fdt_early_match_extension_isa - check if all cpu nodes have an extension
+ *
+ * @fdt: pointer to the device tree blob
+ * @ext_name: the extension to search for
+ *
+ * Returns true if all available cpu nodes have the extension,
+ * false otherwise
+ */
+bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name)
+{
+ int node, parent;
+ bool ret = false;
+
+ parent = fdt_path_offset(fdt, "/cpus");
+ if (parent < 0)
+ return false;
+
+ fdt_for_each_subnode(node, fdt, parent) {
+ if (!fdt_node_name_eq(fdt, node, "cpu"))
+ continue;
+
+ if (!fdt_device_is_available(fdt, node))
+ continue;
+
+ if (!early_cpu_isa_ext_available(fdt, node, ext_name))
+ return false;
+
+ ret = true;
+ }
+
+ return ret;
+}
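[Note] A few illustrative calls showing the matching rules implemented by isa_string_contains() above (the ISA strings are hypothetical):

	isa_string_contains("rv64imafdc_zicbom_zkr", "zkr");	/* true: full multi-letter match */
	isa_string_contains("rv64imafdc_zicbom_zkr", "zk");	/* false: "zk" is only a prefix of "zkr" */
	isa_string_contains("rv64imac", "f");			/* false: 'f' absent from the single-letter run */
	isa_string_contains("RV64IMAFDC", "d");			/* true: matching is case-insensitive */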
diff --git a/arch/riscv/kernel/pi/pi.h b/arch/riscv/kernel/pi/pi.h
new file mode 100644
index 000000000000..21141d84fea6
--- /dev/null
+++ b/arch/riscv/kernel/pi/pi.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _RISCV_PI_H_
+#define _RISCV_PI_H_
+
+#include <linux/types.h>
+
+/*
+ * The following functions are exported (but prefixed). Declare them here so
+ * that LLVM does not complain it lacks the 'static' keyword (which, if
+ * added, makes LLVM complain because the function is unused).
+ */
+
+u64 get_kaslr_seed(uintptr_t dtb_pa);
+u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa);
+bool set_nokaslr_from_cmdline(uintptr_t dtb_pa);
+u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa);
+
+bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name);
+
+#endif /* _RISCV_PI_H_ */
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index e4bc61c4e58a..e3142d8a6e28 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -15,6 +15,7 @@
#include <linux/tick.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
+#include <linux/personality.h>
#include <asm/unistd.h>
#include <asm/processor.h>
@@ -26,6 +27,7 @@
#include <asm/cpuidle.h>
#include <asm/vector.h>
#include <asm/cpufeature.h>
+#include <asm/exec.h>
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
@@ -99,6 +101,13 @@ void show_regs(struct pt_regs *regs)
dump_backtrace(regs, NULL, KERN_DEFAULT);
}
+unsigned long arch_align_stack(unsigned long sp)
+{
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= get_random_u32_below(PAGE_SIZE);
+ return sp & ~0xf;
+}
+
#ifdef CONFIG_COMPAT
static bool compat_mode_supported __read_mostly;
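[Note] With randomization enabled, each new process therefore starts with its stack top lowered by up to PAGE_SIZE bytes and re-aligned to the 16-byte boundary the RISC-V psABI requires. A worked example with hypothetical values:

	sp = 0x3ffffff000;			/* initial stack top */
	sp -= get_random_u32_below(PAGE_SIZE);	/* e.g. subtract 0xa34 -> 0x3fffffe5cc */
	sp &= ~0xfUL;				/* 16-byte align -> 0x3fffffe5c0 */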
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
index a72879b4249a..5ab1c7e1a6ed 100644
--- a/arch/riscv/kernel/riscv_ksyms.c
+++ b/arch/riscv/kernel/riscv_ksyms.c
@@ -12,9 +12,6 @@
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 8e6eb64459af..c180a647a30e 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kexec.h>
+#include <linux/kgdb.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/smp.h>
@@ -21,6 +22,7 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
+#include <linux/nmi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -33,6 +35,8 @@ enum ipi_message_type {
IPI_CPU_CRASH_STOP,
IPI_IRQ_WORK,
IPI_TIMER,
+ IPI_CPU_BACKTRACE,
+ IPI_KGDB_ROUNDUP,
IPI_MAX
};
@@ -113,6 +117,7 @@ void arch_irq_work_raise(void)
static irqreturn_t handle_IPI(int irq, void *data)
{
+ unsigned int cpu = smp_processor_id();
int ipi = irq - ipi_virq_base;
switch (ipi) {
@@ -126,7 +131,7 @@ static irqreturn_t handle_IPI(int irq, void *data)
ipi_stop();
break;
case IPI_CPU_CRASH_STOP:
- ipi_cpu_crash_stop(smp_processor_id(), get_irq_regs());
+ ipi_cpu_crash_stop(cpu, get_irq_regs());
break;
case IPI_IRQ_WORK:
irq_work_run();
@@ -136,8 +141,14 @@ static irqreturn_t handle_IPI(int irq, void *data)
tick_receive_broadcast();
break;
#endif
+ case IPI_CPU_BACKTRACE:
+ nmi_cpu_backtrace(get_irq_regs());
+ break;
+ case IPI_KGDB_ROUNDUP:
+ kgdb_nmicallback(cpu, get_irq_regs());
+ break;
default:
- pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi);
+ pr_warn("CPU%d: unhandled IPI%d\n", cpu, ipi);
break;
}
@@ -203,6 +214,8 @@ static const char * const ipi_names[] = {
[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_TIMER] = "Timer broadcast interrupts",
+ [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
+ [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
};
void show_ipi_stats(struct seq_file *p, int prec)
@@ -323,3 +336,29 @@ void arch_smp_send_reschedule(int cpu)
send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
+
+static void riscv_backtrace_ipi(cpumask_t *mask)
+{
+ send_ipi_mask(mask, IPI_CPU_BACKTRACE);
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_cpu, riscv_backtrace_ipi);
+}
+
+#ifdef CONFIG_KGDB
+void kgdb_roundup_cpus(void)
+{
+ int this_cpu = raw_smp_processor_id();
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ /* No need to roundup ourselves */
+ if (cpu == this_cpu)
+ continue;
+
+ send_ipi_single(cpu, IPI_KGDB_ROUNDUP);
+ }
+}
+#endif
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index c6d5de22463f..153a2db4c5fa 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -162,3 +162,46 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void
{
walk_stackframe(task, regs, consume_entry, cookie);
}
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
+ void *cookie, unsigned long fp,
+ unsigned long reg_ra)
+{
+ struct stackframe buftail;
+ unsigned long ra = 0;
+ unsigned long __user *user_frame_tail =
+ (unsigned long __user *)(fp - sizeof(struct stackframe));
+
+ /* Check accessibility of one struct frame_tail beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+ if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+ sizeof(buftail)))
+ return 0;
+
+ ra = reg_ra ? : buftail.ra;
+
+ fp = buftail.fp;
+ if (!ra || !consume_entry(cookie, ra))
+ return 0;
+
+ return fp;
+}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ unsigned long fp = 0;
+
+ fp = regs->s0;
+ if (!consume_entry(cookie, regs->epc))
+ return;
+
+ fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
+ while (fp && !(fp & 0x7))
+ fp = unwind_user_frame(consume_entry, cookie, fp, 0);
+}
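[Note] The unwinder assumes the standard RISC-V frame layout, where s0/fp points just past a two-word frame record; this is the struct stackframe from asm/stacktrace.h:

	struct stackframe {
		unsigned long fp;	/* caller's frame pointer */
		unsigned long ra;	/* return address */
	};

Hence the "fp - sizeof(struct stackframe)" read in unwind_user_frame(), and the walk stops once fp is NULL or loses 8-byte alignment (the fp & 0x7 test).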
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index f7ef8ad9b550..960feb1526ca 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -45,7 +45,7 @@ $(obj)/vdso.o: $(obj)/vdso.so
# link rule for the .so file, .lds has to be first
$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
-LDFLAGS_vdso.so.dbg = -shared -S -soname=linux-vdso.so.1 \
+LDFLAGS_vdso.so.dbg = -shared -soname=linux-vdso.so.1 \
--build-id=sha1 --hash-style=both --eh-frame-hdr
# strip rule for the .so file
diff --git a/arch/riscv/kernel/vendor_extensions/andes.c b/arch/riscv/kernel/vendor_extensions/andes.c
index ec688c88456a..51f302b6d503 100644
--- a/arch/riscv/kernel/vendor_extensions/andes.c
+++ b/arch/riscv/kernel/vendor_extensions/andes.c
@@ -8,7 +8,7 @@
#include <linux/types.h>
/* All Andes vendor extensions supported in Linux */
-const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = {
+static const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = {
__RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_VENDOR_EXT_XANDESPMU),
};
diff --git a/arch/riscv/kernel/vmcore_info.c b/arch/riscv/kernel/vmcore_info.c
index 6d7a22522d63..d5e448aa90e7 100644
--- a/arch/riscv/kernel/vmcore_info.c
+++ b/arch/riscv/kernel/vmcore_info.c
@@ -19,6 +19,13 @@ void arch_crash_save_vmcoreinfo(void)
#endif
#endif
vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
+#ifdef CONFIG_XIP_KERNEL
+ /* TODO: Communicate with crash-utility developers on the information to
+ * export. The XIP case is more complicated, because the virtual-physical
+ * address offset depends on whether the address is in ROM or in RAM.
+ */
+#else
vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n",
kernel_map.va_kernel_pa_offset);
+#endif
}
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 8c3daa1b0531..a7611789bad5 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -14,6 +14,7 @@
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
+#include <asm/set_memory.h>
OUTPUT_ARCH(riscv)
ENTRY(_start)
@@ -65,10 +66,10 @@ SECTIONS
* From this point, stuff is considered writable and will be copied to RAM
*/
__data_loc = ALIGN(PAGE_SIZE); /* location in file */
- . = KERNEL_LINK_ADDR + XIP_OFFSET; /* location in memory */
+ . = ALIGN(SECTION_ALIGN); /* location in memory */
#undef LOAD_OFFSET
-#define LOAD_OFFSET (KERNEL_LINK_ADDR + XIP_OFFSET - (__data_loc & XIP_OFFSET_MASK))
+#define LOAD_OFFSET (KERNEL_LINK_ADDR + _sdata - __data_loc)
_sdata = .; /* Start of data section */
_data = .;
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 2b369f51b0a5..8eec6b69a875 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -3,9 +3,11 @@ lib-y += delay.o
lib-y += memcpy.o
lib-y += memset.o
lib-y += memmove.o
+ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),)
lib-y += strcmp.o
lib-y += strlen.o
lib-y += strncmp.o
+endif
lib-y += csum.o
ifeq ($(CONFIG_MMU), y)
lib-$(CONFIG_RISCV_ISA_V) += uaccess_vector.o
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
index 35f358e70bdb..da23b8347e2d 100644
--- a/arch/riscv/lib/memset.S
+++ b/arch/riscv/lib/memset.S
@@ -111,3 +111,5 @@ SYM_FUNC_START(__memset)
ret
SYM_FUNC_END(__memset)
SYM_FUNC_ALIAS_WEAK(memset, __memset)
+SYM_FUNC_ALIAS(__pi_memset, __memset)
+SYM_FUNC_ALIAS(__pi___memset, __memset)
diff --git a/arch/riscv/lib/strcmp.S b/arch/riscv/lib/strcmp.S
index 687b2bea5c43..57a5c0066231 100644
--- a/arch/riscv/lib/strcmp.S
+++ b/arch/riscv/lib/strcmp.S
@@ -120,3 +120,5 @@ strcmp_zbb:
.option pop
#endif
SYM_FUNC_END(strcmp)
+SYM_FUNC_ALIAS(__pi_strcmp, strcmp)
+EXPORT_SYMBOL(strcmp)
diff --git a/arch/riscv/lib/strlen.S b/arch/riscv/lib/strlen.S
index 8ae3064e45ff..962983b73251 100644
--- a/arch/riscv/lib/strlen.S
+++ b/arch/riscv/lib/strlen.S
@@ -131,3 +131,4 @@ strlen_zbb:
#endif
SYM_FUNC_END(strlen)
SYM_FUNC_ALIAS(__pi_strlen, strlen)
+EXPORT_SYMBOL(strlen)
diff --git a/arch/riscv/lib/strncmp.S b/arch/riscv/lib/strncmp.S
index aba5b3148621..7b2d0ff9ed6c 100644
--- a/arch/riscv/lib/strncmp.S
+++ b/arch/riscv/lib/strncmp.S
@@ -136,3 +136,5 @@ strncmp_zbb:
.option pop
#endif
SYM_FUNC_END(strncmp)
+SYM_FUNC_ALIAS(__pi_strncmp, strncmp)
+EXPORT_SYMBOL(strncmp)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 1785782c2e55..0e8c20adcd98 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -37,6 +37,8 @@
#include "../kernel/head.h"
+u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
+
struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);
#ifdef CONFIG_XIP_KERNEL
@@ -917,7 +919,7 @@ static void __init relocate_kernel(void)
static void __init create_kernel_page_table(pgd_t *pgdir,
__always_unused bool early)
{
- uintptr_t va, end_va;
+ uintptr_t va, start_va, end_va;
/* Map the flash resident part */
end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
@@ -927,10 +929,11 @@ static void __init create_kernel_page_table(pgd_t *pgdir,
PMD_SIZE, PAGE_KERNEL_EXEC);
/* Map the data in RAM */
+ start_va = kernel_map.virt_addr + (uintptr_t)&_sdata - (uintptr_t)&_start;
end_va = kernel_map.virt_addr + kernel_map.size;
- for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
+ for (va = start_va; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
- kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
+ kernel_map.phys_addr + (va - start_va),
PMD_SIZE, PAGE_KERNEL);
}
#else
@@ -1048,6 +1051,7 @@ static void __init pt_ops_set_late(void)
#ifdef CONFIG_RANDOMIZE_BASE
extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);
+extern u64 __init __pi_get_kaslr_seed_zkr(const uintptr_t dtb_pa);
static int __init print_nokaslr(char *p)
{
@@ -1068,10 +1072,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#ifdef CONFIG_RANDOMIZE_BASE
if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
- u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
+ u64 kaslr_seed = __pi_get_kaslr_seed_zkr(dtb_pa);
u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
u32 nr_pos;
+ if (kaslr_seed == 0)
+ kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
/*
* Compute the number of positions available: we are limited
* by the early page table that only has one PUD and we must
@@ -1098,11 +1104,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
- kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
+ kernel_map.va_kernel_xip_text_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
+ kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr
+ + (uintptr_t)&_sdata - (uintptr_t)&_start;
#else
kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
+ kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
#endif
#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
@@ -1124,15 +1133,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
*/
kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ?
0UL : PAGE_OFFSET - kernel_map.phys_addr;
- kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
- /*
- * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit
- * kernel, whereas for 64-bit kernel, the end of the virtual address
- * space is occupied by the modules/BPF/kernel mappings which reduces
- * the available size of the linear mapping.
- */
- memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0);
+ memory_limit = KERN_VIRT_SIZE;
/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index 533ec9055fa0..4ae67324f992 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -9,6 +9,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
+ asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
+ : : : : svvptc);
+
if (!pte_same(ptep_get(ptep), entry))
__set_pte_at(vma->vm_mm, ptep, entry);
/*
@@ -16,6 +19,16 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
* the case that the PTE changed and the spurious fault case.
*/
return true;
+
+svvptc:
+ if (!pte_same(ptep_get(ptep), entry)) {
+ __set_pte_at(vma->vm_mm, ptep, entry);
+ /* Only uarchs without Svadu are impacted here */
+ flush_tlb_page(vma, address);
+ return true;
+ }
+
+ return false;
}
int ptep_test_and_clear_young(struct vm_area_struct *vma,
diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
index f11945ee2490..fb9c917c9b45 100644
--- a/arch/riscv/purgatory/Makefile
+++ b/arch/riscv/purgatory/Makefile
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
purgatory-y := purgatory.o sha256.o entry.o string.o ctype.o memcpy.o memset.o
+ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),)
purgatory-y += strcmp.o strlen.o strncmp.o
+endif
targets += $(purgatory-y)
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))