Diffstat (limited to 'arch/riscv/include')
-rw-r--r--  arch/riscv/include/asm/alternative-macros.h  |   2
-rw-r--r--  arch/riscv/include/asm/efi.h                  |   2
-rw-r--r--  arch/riscv/include/asm/elf.h                  |  10
-rw-r--r--  arch/riscv/include/asm/errata_list.h          |   8
-rw-r--r--  arch/riscv/include/asm/ftrace.h               |  50
-rw-r--r--  arch/riscv/include/asm/hwcap.h                |  52
-rw-r--r--  arch/riscv/include/asm/insn-def.h             |  58
-rw-r--r--  arch/riscv/include/asm/insn.h                 |  14
-rw-r--r--  arch/riscv/include/asm/jump_label.h           |   2
-rw-r--r--  arch/riscv/include/asm/kvm_host.h             |  11
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_pmu.h         | 107
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_sbi.h         |  13
-rw-r--r--  arch/riscv/include/asm/page.h                 |   5
-rw-r--r--  arch/riscv/include/asm/patch.h                |   2
-rw-r--r--  arch/riscv/include/asm/pgtable-bits.h         |   3
-rw-r--r--  arch/riscv/include/asm/pgtable.h              |  34
-rw-r--r--  arch/riscv/include/asm/sbi.h                  |   7
-rw-r--r--  arch/riscv/include/asm/semihost.h             |  26
-rw-r--r--  arch/riscv/include/asm/thread_info.h          |   1
-rw-r--r--  arch/riscv/include/asm/uaccess.h              |   2
-rw-r--r--  arch/riscv/include/asm/vdso/processor.h       |  28
-rw-r--r--  arch/riscv/include/uapi/asm/setup.h           |   8
22 files changed, 351 insertions(+), 94 deletions(-)
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index cc6a81c00f2f..51c6867e02f3 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -46,7 +46,7 @@
.macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
new_c_2, vendor_id_2, errata_id_2, enable_2
- ALTERNATIVE_CFG \old_c, \new_c_1, \vendor_id_1, \errata_id_1, \enable_1
+ ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \errata_id_1, \enable_1
ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
.endm
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 47d3ab0fcc36..29e9a0d84b16 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -19,7 +19,7 @@ extern void efi_init(void);
#endif
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
-int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);
#define arch_efi_call_virt_setup() ({ \
sync_kernel_mappings(efi_mm.pgd); \
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index e7acffdf21d2..30e7d2455960 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -14,6 +14,7 @@
#include <asm/auxvec.h>
#include <asm/byteorder.h>
#include <asm/cacheinfo.h>
+#include <asm/hwcap.h>
/*
* These are used to set parameters in the core dumps.
@@ -59,12 +60,13 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif
#endif
+
/*
- * This yields a mask that user programs can use to figure out what
- * instruction set this CPU supports. This could be done in user space,
- * but it's not easy, and we've already done it here.
+ * Provides information on the available set of ISA extensions to userspace,
+ * via a bitmap that corresponds to each single-letter ISA extension. This is
+ * essentially defunct, but will remain for compatibility with userspace.
*/
-#define ELF_HWCAP (elf_hwcap)
+#define ELF_HWCAP (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1))
extern unsigned long elf_hwcap;
/*
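To illustrate the new ELF_HWCAP definition, a minimal sketch of the masking (the hwcap value is made up; with RISCV_ISA_EXT_BASE = 26, only bits 0-25, the single-letter extensions, survive):

	unsigned long hwcap = (1UL << 30) | (1UL << ('i' - 'a')) | (1UL << ('m' - 'a'));
	unsigned long user_view = hwcap & ((1UL << 26) - 1);
	/* user_view reports only 'i' and 'm'; the multi-letter bit 30 is filtered out */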
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index e158439029ce..fb1a810f3d8c 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -7,6 +7,7 @@
#include <asm/alternative.h>
#include <asm/csr.h>
+#include <asm/insn-def.h>
#include <asm/hwcap.h>
#include <asm/vendorid_list.h>
@@ -23,11 +24,6 @@
#define ERRATA_THEAD_NUMBER 3
#endif
-#define CPUFEATURE_SVPBMT 0
-#define CPUFEATURE_ZICBOM 1
-#define CPUFEATURE_ZBB 2
-#define CPUFEATURE_NUMBER 3
-
#ifdef __ASSEMBLY__
#define ALT_INSN_FAULT(x) \
@@ -127,7 +123,7 @@ asm volatile(ALTERNATIVE_2( \
"mv a0, %1\n\t" \
"j 2f\n\t" \
"3:\n\t" \
- "cbo." __stringify(_op) " (a0)\n\t" \
+ CBO_##_op(a0) \
"add a0, a0, %0\n\t" \
"2:\n\t" \
"bltu a0, %2, 3b\n\t" \
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 04dad3380041..9e73922e1e2e 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -42,6 +42,14 @@ struct dyn_arch_ftrace {
* 2) jalr: setting low-12 offset to ra, jump to ra, and set ra to
* return address (original pc + 4)
*
+ *<ftrace enable>:
+ * 0: auipc t0/ra, 0x?
+ * 4: jalr t0/ra, ?(t0/ra)
+ *
+ *<ftrace disable>:
+ * 0: nop
+ * 4: nop
+ *
* Dynamic ftrace generates probes to call sites, so we must deal with
* both auipc and jalr at the same time.
*/
@@ -52,25 +60,43 @@ struct dyn_arch_ftrace {
#define AUIPC_OFFSET_MASK (0xfffff000)
#define AUIPC_PAD (0x00001000)
#define JALR_SHIFT 20
-#define JALR_BASIC (0x000080e7)
-#define AUIPC_BASIC (0x00000097)
+#define JALR_RA (0x000080e7)
+#define AUIPC_RA (0x00000097)
+#define JALR_T0 (0x000282e7)
+#define AUIPC_T0 (0x00000297)
#define NOP4 (0x00000013)
-#define make_call(caller, callee, call) \
+#define to_jalr_t0(offset) \
+ (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)
+
+#define to_auipc_t0(offset) \
+ ((offset & JALR_SIGN_MASK) ? \
+ (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) : \
+ ((offset & AUIPC_OFFSET_MASK) | AUIPC_T0))
+
+#define make_call_t0(caller, callee, call) \
do { \
- call[0] = to_auipc_insn((unsigned int)((unsigned long)callee - \
- (unsigned long)caller)); \
- call[1] = to_jalr_insn((unsigned int)((unsigned long)callee - \
- (unsigned long)caller)); \
+ unsigned int offset = \
+ (unsigned long) callee - (unsigned long) caller; \
+ call[0] = to_auipc_t0(offset); \
+ call[1] = to_jalr_t0(offset); \
} while (0)
-#define to_jalr_insn(offset) \
- (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_BASIC)
+#define to_jalr_ra(offset) \
+ (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_RA)
-#define to_auipc_insn(offset) \
+#define to_auipc_ra(offset) \
((offset & JALR_SIGN_MASK) ? \
- (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_BASIC) : \
- ((offset & AUIPC_OFFSET_MASK) | AUIPC_BASIC))
+ (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_RA) : \
+ ((offset & AUIPC_OFFSET_MASK) | AUIPC_RA))
+
+#define make_call_ra(caller, callee, call) \
+do { \
+ unsigned int offset = \
+ (unsigned long) callee - (unsigned long) caller; \
+ call[0] = to_auipc_ra(offset); \
+ call[1] = to_jalr_ra(offset); \
+} while (0)
/*
* Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
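To make the new t0-based sequence concrete, here is a sketch of what make_call_t0() produces for a hypothetical call site (addresses are illustrative only):

	unsigned int call[2];
	unsigned long caller = 0x80001000UL, callee = 0x80003008UL;
	unsigned int offset = callee - caller;      /* 0x2008 */
	call[0] = to_auipc_t0(offset);              /* auipc t0, 0x2    -> 0x00002297 */
	call[1] = to_jalr_t0(offset);               /* jalr  t0, 8(t0)  -> 0x008282e7 */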
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 6e368d3f6631..0ed15c2a2816 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -24,47 +24,33 @@
#define RISCV_ISA_EXT_u ('u' - 'a')
/*
- * Increse this to higher value as kernel support more ISA extensions.
+ * These macros represent the logical IDs of each multi-letter RISC-V ISA
+ * extension and are used in the ISA bitmap. The logical IDs start from
+ * RISCV_ISA_EXT_BASE, which allows the 0-25 range to be reserved for single
+ * letter extensions. The maximum, RISCV_ISA_EXT_MAX, is defined in order
+ * to allocate the bitmap and may be increased when necessary.
+ *
+ * New extensions should just be added to the bottom, rather than added
+ * alphabetically, in order to avoid unnecessary shuffling.
*/
-#define RISCV_ISA_EXT_MAX 64
-#define RISCV_ISA_EXT_NAME_LEN_MAX 32
+#define RISCV_ISA_EXT_BASE 26
-/* The base ID for multi-letter ISA extensions */
-#define RISCV_ISA_EXT_BASE 26
+#define RISCV_ISA_EXT_SSCOFPMF 26
+#define RISCV_ISA_EXT_SSTC 27
+#define RISCV_ISA_EXT_SVINVAL 28
+#define RISCV_ISA_EXT_SVPBMT 29
+#define RISCV_ISA_EXT_ZBB 30
+#define RISCV_ISA_EXT_ZICBOM 31
+#define RISCV_ISA_EXT_ZIHINTPAUSE 32
+#define RISCV_ISA_EXT_SVNAPOT 33
-/*
- * These macros represent the logical ID for each multi-letter RISC-V ISA extension.
- * The logical ID should start from RISCV_ISA_EXT_BASE and must not exceed
- * RISCV_ISA_EXT_MAX. 0-25 range is reserved for single letter
- * extensions while all the multi-letter extensions should define the next
- * available logical extension id.
- * Entries are sorted alphabetically.
- */
-#define RISCV_ISA_EXT_SSCOFPMF 26
-#define RISCV_ISA_EXT_SSTC 27
-#define RISCV_ISA_EXT_SVINVAL 28
-#define RISCV_ISA_EXT_SVNAPOT 29
-#define RISCV_ISA_EXT_SVPBMT 30
-#define RISCV_ISA_EXT_ZBB 31
-#define RISCV_ISA_EXT_ZICBOM 32
-#define RISCV_ISA_EXT_ZIHINTPAUSE 33
+#define RISCV_ISA_EXT_MAX 64
+#define RISCV_ISA_EXT_NAME_LEN_MAX 32
#ifndef __ASSEMBLY__
#include <linux/jump_label.h>
-/*
- * This yields a mask that user programs can use to figure out what
- * instruction set this cpu supports.
- */
-#define ELF_HWCAP (elf_hwcap)
-
-enum {
- CAP_HWCAP = 1,
-};
-
-extern unsigned long elf_hwcap;
-
struct riscv_isa_ext_data {
/* Name of the extension displayed to userspace via /proc/cpuinfo */
char uprop[RISCV_ISA_EXT_NAME_LEN_MAX];
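As a usage sketch, the multi-letter IDs are looked up in the per-hart ISA bitmap exactly like the single-letter ones; assuming the riscv_isa_extension_available() helper already provided by this header:

	/* sketch: true only if every hart advertises Zicbom */
	if (riscv_isa_extension_available(NULL, ZICBOM))
		enable_cbo_based_dma_sync();   /* hypothetical caller */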
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
index 16044affa57c..e01ab51f50d2 100644
--- a/arch/riscv/include/asm/insn-def.h
+++ b/arch/riscv/include/asm/insn-def.h
@@ -12,6 +12,12 @@
#define INSN_R_RD_SHIFT 7
#define INSN_R_OPCODE_SHIFT 0
+#define INSN_I_SIMM12_SHIFT 20
+#define INSN_I_RS1_SHIFT 15
+#define INSN_I_FUNC3_SHIFT 12
+#define INSN_I_RD_SHIFT 7
+#define INSN_I_OPCODE_SHIFT 0
+
#ifdef __ASSEMBLY__
#ifdef CONFIG_AS_HAS_INSN
@@ -20,6 +26,10 @@
.insn r \opcode, \func3, \func7, \rd, \rs1, \rs2
.endm
+ .macro insn_i, opcode, func3, rd, rs1, simm12
+ .insn i \opcode, \func3, \rd, \rs1, \simm12
+ .endm
+
#else
#include <asm/gpr-num.h>
@@ -33,9 +43,18 @@
(.L__gpr_num_\rs2 << INSN_R_RS2_SHIFT))
.endm
+ .macro insn_i, opcode, func3, rd, rs1, simm12
+ .4byte ((\opcode << INSN_I_OPCODE_SHIFT) | \
+ (\func3 << INSN_I_FUNC3_SHIFT) | \
+ (.L__gpr_num_\rd << INSN_I_RD_SHIFT) | \
+ (.L__gpr_num_\rs1 << INSN_I_RS1_SHIFT) | \
+ (\simm12 << INSN_I_SIMM12_SHIFT))
+ .endm
+
#endif
#define __INSN_R(...) insn_r __VA_ARGS__
+#define __INSN_I(...) insn_i __VA_ARGS__
#else /* ! __ASSEMBLY__ */
@@ -44,6 +63,9 @@
#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \
".insn r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n"
+#define __INSN_I(opcode, func3, rd, rs1, simm12) \
+ ".insn i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n"
+
#else
#include <linux/stringify.h>
@@ -60,14 +82,32 @@
" (.L__gpr_num_\\rs2 << " __stringify(INSN_R_RS2_SHIFT) "))\n" \
" .endm\n"
+#define DEFINE_INSN_I \
+ __DEFINE_ASM_GPR_NUMS \
+" .macro insn_i, opcode, func3, rd, rs1, simm12\n" \
+" .4byte ((\\opcode << " __stringify(INSN_I_OPCODE_SHIFT) ") |" \
+" (\\func3 << " __stringify(INSN_I_FUNC3_SHIFT) ") |" \
+" (.L__gpr_num_\\rd << " __stringify(INSN_I_RD_SHIFT) ") |" \
+" (.L__gpr_num_\\rs1 << " __stringify(INSN_I_RS1_SHIFT) ") |" \
+" (\\simm12 << " __stringify(INSN_I_SIMM12_SHIFT) "))\n" \
+" .endm\n"
+
#define UNDEFINE_INSN_R \
" .purgem insn_r\n"
+#define UNDEFINE_INSN_I \
+" .purgem insn_i\n"
+
#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \
DEFINE_INSN_R \
"insn_r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" \
UNDEFINE_INSN_R
+#define __INSN_I(opcode, func3, rd, rs1, simm12) \
+ DEFINE_INSN_I \
+ "insn_i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n" \
+ UNDEFINE_INSN_I
+
#endif
#endif /* ! __ASSEMBLY__ */
@@ -76,9 +116,14 @@
__INSN_R(RV_##opcode, RV_##func3, RV_##func7, \
RV_##rd, RV_##rs1, RV_##rs2)
+#define INSN_I(opcode, func3, rd, rs1, simm12) \
+ __INSN_I(RV_##opcode, RV_##func3, RV_##rd, \
+ RV_##rs1, RV_##simm12)
+
#define RV_OPCODE(v) __ASM_STR(v)
#define RV_FUNC3(v) __ASM_STR(v)
#define RV_FUNC7(v) __ASM_STR(v)
+#define RV_SIMM12(v) __ASM_STR(v)
#define RV_RD(v) __ASM_STR(v)
#define RV_RS1(v) __ASM_STR(v)
#define RV_RS2(v) __ASM_STR(v)
@@ -87,6 +132,7 @@
#define RV___RS1(v) __RV_REG(v)
#define RV___RS2(v) __RV_REG(v)
+#define RV_OPCODE_MISC_MEM RV_OPCODE(15)
#define RV_OPCODE_SYSTEM RV_OPCODE(115)
#define HFENCE_VVMA(vaddr, asid) \
@@ -134,4 +180,16 @@
INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51), \
__RD(0), RS1(gaddr), RS2(vmid))
+#define CBO_inval(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(0))
+
+#define CBO_clean(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(1))
+
+#define CBO_flush(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(2))
+
#endif /* __ASM_INSN_DEF_H */
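As a sanity check on the new I-type emitter, the word it produces for cbo.clean with a0 as the base register can be computed by hand; a small self-contained sketch (field values per the Zicbom spec, shifts matching INSN_I_*_SHIFT above):

	#include <stdio.h>

	int main(void)
	{
		unsigned int insn = (1u << 20) |   /* simm12 = 1 (clean) */
				    (10u << 15) |  /* rs1 = a0 (x10)     */
				    (2u << 12)  |  /* funct3 = 2         */
				    (0u << 7)   |  /* rd = x0            */
				    0x0f;          /* opcode = MISC-MEM  */
		printf("cbo.clean (a0) = 0x%08x\n", insn);   /* prints 0x0015200f */
		return 0;
	}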
diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
index 25ef9c0b19e7..8d5c84f2d5ef 100644
--- a/arch/riscv/include/asm/insn.h
+++ b/arch/riscv/include/asm/insn.h
@@ -3,6 +3,9 @@
* Copyright (C) 2020 SiFive
*/
+#ifndef _ASM_RISCV_INSN_H
+#define _ASM_RISCV_INSN_H
+
#include <linux/bits.h>
#define RV_INSN_FUNCT3_MASK GENMASK(14, 12)
@@ -128,6 +131,7 @@
#define RVC_C2_RD_OPOFF 7
/* parts of opcode for RVG*/
+#define RVG_OPCODE_FENCE 0x0f
#define RVG_OPCODE_AUIPC 0x17
#define RVG_OPCODE_BRANCH 0x63
#define RVG_OPCODE_JALR 0x67
@@ -163,6 +167,7 @@
#define RVG_MATCH_AUIPC (RVG_OPCODE_AUIPC)
#define RVG_MATCH_JALR (RV_ENCODE_FUNCT3(JALR) | RVG_OPCODE_JALR)
#define RVG_MATCH_JAL (RVG_OPCODE_JAL)
+#define RVG_MATCH_FENCE (RVG_OPCODE_FENCE)
#define RVG_MATCH_BEQ (RV_ENCODE_FUNCT3(BEQ) | RVG_OPCODE_BRANCH)
#define RVG_MATCH_BNE (RV_ENCODE_FUNCT3(BNE) | RVG_OPCODE_BRANCH)
#define RVG_MATCH_BLT (RV_ENCODE_FUNCT3(BLT) | RVG_OPCODE_BRANCH)
@@ -182,6 +187,7 @@
#define RVG_MASK_AUIPC (RV_INSN_OPCODE_MASK)
#define RVG_MASK_JALR (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
#define RVG_MASK_JAL (RV_INSN_OPCODE_MASK)
+#define RVG_MASK_FENCE (RV_INSN_OPCODE_MASK)
#define RVC_MASK_C_JALR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK)
#define RVC_MASK_C_JR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK)
#define RVC_MASK_C_JAL (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
@@ -233,6 +239,13 @@ __RISCV_INSN_FUNCS(c_bnez, RVC_MASK_C_BNEZ, RVC_MATCH_C_BNEZ)
__RISCV_INSN_FUNCS(c_ebreak, RVC_MASK_C_EBREAK, RVC_MATCH_C_EBREAK)
__RISCV_INSN_FUNCS(ebreak, RVG_MASK_EBREAK, RVG_MATCH_EBREAK)
__RISCV_INSN_FUNCS(sret, RVG_MASK_SRET, RVG_MATCH_SRET)
+__RISCV_INSN_FUNCS(fence, RVG_MASK_FENCE, RVG_MATCH_FENCE);
+
+/* special case to catch _any_ system instruction */
+static __always_inline bool riscv_insn_is_system(u32 code)
+{
+ return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_SYSTEM;
+}
/* special case to catch _any_ branch instruction */
static __always_inline bool riscv_insn_is_branch(u32 code)
@@ -365,3 +378,4 @@ static inline void riscv_insn_insert_utype_itype_imm(u32 *utype_insn, u32 *itype
*utype_insn |= (imm & RV_U_IMM_31_12_MASK) + ((imm & BIT(11)) << 1);
*itype_insn |= ((imm & RV_I_IMM_11_0_MASK) << RV_I_IMM_11_0_OPOFF);
}
+#endif /* _ASM_RISCV_INSN_H */
diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
index 6d58bbb5da46..14a5ea8d8ef0 100644
--- a/arch/riscv/include/asm/jump_label.h
+++ b/arch/riscv/include/asm/jump_label.h
@@ -18,6 +18,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key,
const bool branch)
{
asm_volatile_goto(
+ " .align 2 \n\t"
" .option push \n\t"
" .option norelax \n\t"
" .option norvc \n\t"
@@ -39,6 +40,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key * const ke
const bool branch)
{
asm_volatile_goto(
+ " .align 2 \n\t"
" .option push \n\t"
" .option norelax \n\t"
" .option norvc \n\t"
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 93f43a3e7886..cc7da66ee0c0 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -18,6 +18,7 @@
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_pmu.h>
#define KVM_MAX_VCPUS 1024
@@ -228,9 +229,11 @@ struct kvm_vcpu_arch {
/* Don't run the VCPU (blocked) */
bool pause;
+
+ /* Performance monitoring context */
+ struct kvm_pmu pmu_context;
};
-static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
@@ -297,11 +300,11 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
-void kvm_riscv_gstage_mode_detect(void);
-unsigned long kvm_riscv_gstage_mode(void);
+void __init kvm_riscv_gstage_mode_detect(void);
+unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);
-void kvm_riscv_gstage_vmid_detect(void);
+void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h
new file mode 100644
index 000000000000..395518a1664e
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Rivos Inc
+ *
+ * Authors:
+ * Atish Patra <atishp@rivosinc.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_PMU_H
+#define __KVM_VCPU_RISCV_PMU_H
+
+#include <linux/perf/riscv_pmu.h>
+#include <asm/sbi.h>
+
+#ifdef CONFIG_RISCV_PMU_SBI
+#define RISCV_KVM_MAX_FW_CTRS 32
+#define RISCV_KVM_MAX_HW_CTRS 32
+#define RISCV_KVM_MAX_COUNTERS (RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
+static_assert(RISCV_KVM_MAX_COUNTERS <= 64);
+
+struct kvm_fw_event {
+ /* Current value of the event */
+ unsigned long value;
+
+ /* Event monitoring status */
+ bool started;
+};
+
+/* Per virtual pmu counter data */
+struct kvm_pmc {
+ u8 idx;
+ struct perf_event *perf_event;
+ u64 counter_val;
+ union sbi_pmu_ctr_info cinfo;
+ /* Event monitoring status */
+ bool started;
+ /* Monitoring event ID */
+ unsigned long event_idx;
+};
+
+/* PMU data structure per vcpu */
+struct kvm_pmu {
+ struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
+ struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
+ /* Number of the virtual firmware counters available */
+ int num_fw_ctrs;
+ /* Number of the virtual hardware counters available */
+ int num_hw_ctrs;
+ /* A flag to indicate that pmu initialization is done */
+ bool init_done;
+ /* Bit map of all the virtual counters used */
+ DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
+};
+
+#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
+#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu_context))
+
+#if defined(CONFIG_32BIT)
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
+{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+#else
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+#endif
+
+int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
+int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask);
+
+int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags, u64 ival,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags,
+ unsigned long eidx, u64 evtdata,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
+ struct kvm_vcpu_sbi_return *retdata);
+void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
+
+#else
+struct kvm_pmu {
+};
+
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = 0, .count = 0, .func = NULL },
+
+static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
+static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
+{
+ return 0;
+}
+
+static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
+#endif /* CONFIG_RISCV_PMU_SBI */
+#endif /* !__KVM_VCPU_RISCV_PMU_H */
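The two conversion macros round-trip between a vCPU and its PMU context; a brief usage sketch (illustrative, inside any function that already has a struct kvm_vcpu *vcpu):

	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);     /* &vcpu->arch.pmu_context     */
	struct kvm_vcpu *owner = pmu_to_vcpu(kvpmu);   /* container_of() back to vcpu */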
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index f79478a85d2d..8425556af7d1 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -18,6 +18,13 @@ struct kvm_vcpu_sbi_context {
int return_handled;
};
+struct kvm_vcpu_sbi_return {
+ unsigned long out_val;
+ unsigned long err_val;
+ struct kvm_cpu_trap *utrap;
+ bool uexit;
+};
+
struct kvm_vcpu_sbi_extension {
unsigned long extid_start;
unsigned long extid_end;
@@ -27,8 +34,10 @@ struct kvm_vcpu_sbi_extension {
* specific error codes.
*/
int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run,
- unsigned long *out_val, struct kvm_cpu_trap *utrap,
- bool *exit);
+ struct kvm_vcpu_sbi_return *retdata);
+
+ /* Extension specific probe function */
+ unsigned long (*probe)(struct kvm_vcpu *vcpu);
};
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
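A sketch of an extension handler under the new prototype, filling in retdata instead of the old out_val/utrap/exit parameters (the extension itself is hypothetical; SBI_ERR_NOT_SUPPORTED is the existing error code from asm/sbi.h):

	static int kvm_sbi_ext_foo_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
					   struct kvm_vcpu_sbi_return *retdata)
	{
		/* nothing to do here: report the call as unsupported to the guest */
		retdata->out_val = 0;
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		return 0;
	}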
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 24a3dd265183..0f3220e253bc 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -166,11 +166,6 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
-#ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn) \
- (((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr))
-#endif
-
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) ({ \
diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
index 9a7d7346001e..f433121774c0 100644
--- a/arch/riscv/include/asm/patch.h
+++ b/arch/riscv/include/asm/patch.h
@@ -7,6 +7,6 @@
#define _ASM_RISCV_PATCH_H
int patch_text_nosync(void *addr, const void *insns, size_t len);
-int patch_text(void *addr, u32 insn);
+int patch_text(void *addr, u32 *insns, int ninsns);
#endif /* _ASM_RISCV_PATCH_H */
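Callers now hand patch_text() an array of instruction words plus a count; a minimal sketch of the updated call (the values are illustrative NOPs, not taken from a real call site):

	u32 insns[2] = { 0x00000013, 0x00000013 };   /* two NOPs (addi x0, x0, 0)  */
	patch_text(addr, insns, 2);                  /* was: patch_text(addr, insn) */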
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index b9e13a8fe2b7..f896708e8331 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -27,6 +27,9 @@
*/
#define _PAGE_PROT_NONE _PAGE_GLOBAL
+/* Used for swap PTEs only. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_ACCESSED
+
#define _PAGE_PFN_SHIFT 10
/*
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 76502bc7bef2..3303fc03d724 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -452,7 +452,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
* Relying on flush_tlb_fix_spurious_fault would suffice, but
* the extra traps reduce performance. So, eagerly SFENCE.VMA.
*/
- flush_tlb_page(vma, address);
+ local_flush_tlb_page(address);
}
#define __HAVE_ARCH_UPDATE_MMU_TLB
@@ -758,19 +758,25 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
+
+#define pmdp_collapse_flush pmdp_collapse_flush
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
- * Encode and decode a swap entry
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
*
* Format of swap PTE:
* bit 0: _PAGE_PRESENT (zero)
* bit 1 to 3: _PAGE_LEAF (zero)
* bit 5: _PAGE_PROT_NONE (zero)
- * bits 6 to 10: swap type
- * bits 10 to XLEN-1: swap offset
+ * bit 6: exclusive marker
+ * bits 7 to 11: swap type
+ * bits 11 to XLEN-1: swap offset
*/
-#define __SWP_TYPE_SHIFT 6
+#define __SWP_TYPE_SHIFT 7
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
@@ -781,11 +787,27 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
- { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+ { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
+ ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
+}
+
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
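A worked example of the new swap-PTE layout (type in bits 7..11, offset from bit 12 up, bit 6 left free for the exclusive marker); the numbers are made up:

	unsigned long val    = ((3UL & 0x1f) << 7) | (0x1234UL << 12);  /* 0x01234180 */
	unsigned long type   = (val >> 7) & 0x1f;                       /* 3          */
	unsigned long offset = val >> 12;                               /* 0x1234     */
	/* bit 6 (_PAGE_SWP_EXCLUSIVE == _PAGE_ACCESSED) is toggled by
	 * pte_swp_mkexclusive()/pte_swp_clear_exclusive() above */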
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 4ca7fbacff42..945b7be249c1 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -169,9 +169,9 @@ enum sbi_pmu_fw_generic_events_t {
SBI_PMU_FW_ILLEGAL_INSN = 4,
SBI_PMU_FW_SET_TIMER = 5,
SBI_PMU_FW_IPI_SENT = 6,
- SBI_PMU_FW_IPI_RECVD = 7,
+ SBI_PMU_FW_IPI_RCVD = 7,
SBI_PMU_FW_FENCE_I_SENT = 8,
- SBI_PMU_FW_FENCE_I_RECVD = 9,
+ SBI_PMU_FW_FENCE_I_RCVD = 9,
SBI_PMU_FW_SFENCE_VMA_SENT = 10,
SBI_PMU_FW_SFENCE_VMA_RCVD = 11,
SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12,
@@ -215,6 +215,9 @@ enum sbi_pmu_ctr_type {
#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK 0x06
#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK 0x01
+#define SBI_PMU_EVENT_CACHE_ID_SHIFT 3
+#define SBI_PMU_EVENT_CACHE_OP_SHIFT 1
+
#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF
/* Flags defined for config matching function */
diff --git a/arch/riscv/include/asm/semihost.h b/arch/riscv/include/asm/semihost.h
new file mode 100644
index 000000000000..557a34938193
--- /dev/null
+++ b/arch/riscv/include/asm/semihost.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 tinylab.org
+ * Author: Bin Meng <bmeng@tinylab.org>
+ */
+
+#ifndef _RISCV_SEMIHOST_H_
+#define _RISCV_SEMIHOST_H_
+
+struct uart_port;
+
+static inline void smh_putc(struct uart_port *port, unsigned char c)
+{
+ asm volatile("addi a1, %0, 0\n"
+ "addi a0, zero, 3\n"
+ ".balign 16\n"
+ ".option push\n"
+ ".option norvc\n"
+ "slli zero, zero, 0x1f\n"
+ "ebreak\n"
+ "srai zero, zero, 0x7\n"
+ ".option pop\n"
+ : : "r" (&c) : "a0", "a1", "memory");
+}
+
+#endif /* _RISCV_SEMIHOST_H_ */
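The slli/ebreak/srai triple is the RISC-V semihosting trap sequence, with a0 = 3 (SYS_WRITEC) and a1 pointing at the character; a trivial usage sketch on top of it (the helper name is made up):

	static void smh_puts(struct uart_port *port, const char *s)
	{
		while (*s)
			smh_putc(port, *s++);   /* one semihosting trap per character */
	}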
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 67322f878e0d..f704c8dd57e0 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -43,6 +43,7 @@
#ifndef __ASSEMBLY__
extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
+extern unsigned long spin_shadow_stack;
#include <asm/processor.h>
#include <asm/csr.h>
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 855450bed9f5..ec0cab9fbddd 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -165,7 +165,7 @@ do { \
might_fault(); \
access_ok(__p, sizeof(*__p)) ? \
__get_user((x), __p) : \
- ((x) = 0, -EFAULT); \
+ ((x) = (__force __typeof__(x))0, -EFAULT); \
})
#define __put_user_asm(insn, x, ptr, err) \
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
index edf0e25e43d1..14f5d27783b8 100644
--- a/arch/riscv/include/asm/vdso/processor.h
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -4,30 +4,26 @@
#ifndef __ASSEMBLY__
-#include <linux/jump_label.h>
#include <asm/barrier.h>
-#include <asm/hwcap.h>
static inline void cpu_relax(void)
{
- if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZIHINTPAUSE)) {
#ifdef __riscv_muldiv
- int dummy;
- /* In lieu of a halt instruction, induce a long-latency stall. */
- __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
- } else {
- /*
- * Reduce instruction retirement.
- * This assumes the PC changes.
- */
-#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
- __asm__ __volatile__ ("pause");
+
+#ifdef __riscv_zihintpause
+ /*
+ * Reduce instruction retirement.
+ * This assumes the PC changes.
+ */
+ __asm__ __volatile__ ("pause");
#else
- /* Encoding of the pause instruction */
- __asm__ __volatile__ (".4byte 0x100000F");
+ /* Encoding of the pause instruction */
+ __asm__ __volatile__ (".4byte 0x100000F");
#endif
- }
barrier();
}
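For reference, the raw word in the fallback branch is the PAUSE encoding, i.e. a FENCE with fm=0, pred=W, succ=0 and x0 operands; recomputing it as a sketch (constants per the Zihintpause spec):

	unsigned int pause_insn = (0x1u << 24) | 0x0f;   /* pred=W at insn bits 27:24, FENCE opcode */
	/* == 0x0100000f, matching the ".4byte 0x100000F" above */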
diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h
new file mode 100644
index 000000000000..66b13a522880
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/setup.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+
+#ifndef _UAPI_ASM_RISCV_SETUP_H
+#define _UAPI_ASM_RISCV_SETUP_H
+
+#define COMMAND_LINE_SIZE 1024
+
+#endif /* _UAPI_ASM_RISCV_SETUP_H */