Diffstat (limited to 'arch/riscv/include/asm')
 arch/riscv/include/asm/acpi.h        |  2
 arch/riscv/include/asm/cacheflush.h  | 18
 arch/riscv/include/asm/exec.h        |  8
 arch/riscv/include/asm/fence.h       |  1
 arch/riscv/include/asm/hwcap.h       |  1
 arch/riscv/include/asm/irq.h         |  5
 arch/riscv/include/asm/page.h        | 29
 arch/riscv/include/asm/pgtable.h     | 28
 arch/riscv/include/asm/set_memory.h  |  2
 arch/riscv/include/asm/thread_info.h | 11
 arch/riscv/include/asm/vmalloc.h     |  1
 arch/riscv/include/asm/xip_fixup.h   | 30
 12 files changed, 103 insertions(+), 33 deletions(-)
diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h
index e0a1f84404f3..6e13695120bc 100644
--- a/arch/riscv/include/asm/acpi.h
+++ b/arch/riscv/include/asm/acpi.h
@@ -91,10 +91,8 @@ static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI_NUMA
-int acpi_numa_get_nid(unsigned int cpu);
void acpi_map_cpus_to_nodes(void);
#else
-static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
static inline void acpi_map_cpus_to_nodes(void) { }
#endif /* CONFIG_ACPI_NUMA */
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index ce79c558a4c8..8de73f91bfa3 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -46,7 +46,23 @@ do { \
} while (0)
#ifdef CONFIG_64BIT
-#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
+extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
+extern char _end[];
+#define flush_cache_vmap flush_cache_vmap
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ if (is_vmalloc_or_module_addr((void *)start)) {
+ int i;
+
+ /*
+ * We don't care if a cpu concurrently resets this value, since the
+ * only place that can happen is in handle_exception(), where an
+ * sfence.vma is emitted.
+ */
+ for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i)
+ new_vmalloc[i] = -1ULL;
+ }
+}
#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end)
#endif
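
The hunk above is only the producer side: flush_cache_vmap() sets every cpu's slot in new_vmalloc, and each cpu later clears its own slot and emits an sfence.vma from the fault path in handle_exception(). A minimal C sketch of that consumer logic follows, assuming a conventional one-bit-per-cpu bitmap layout; the function name and exact indexing are illustrative only, since the real check is hand-written assembly in entry.S:

#include <linux/bits.h>

/*
 * Illustrative only: if this cpu's bit is set, a new vmalloc mapping may
 * have appeared since the last check, so clear the bit and emit an
 * sfence.vma instead of taking the full vmalloc fault path.
 */
static inline bool new_vmalloc_check_sketch(int cpu)
{
	u64 *word = &new_vmalloc[BIT_WORD(cpu)];

	if (!(READ_ONCE(*word) & BIT_MASK(cpu)))
		return false;

	WRITE_ONCE(*word, READ_ONCE(*word) & ~BIT_MASK(cpu));
	local_flush_tlb_all();	/* sfence.vma */
	return true;
}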
diff --git a/arch/riscv/include/asm/exec.h b/arch/riscv/include/asm/exec.h
new file mode 100644
index 000000000000..07d9942682e0
--- /dev/null
+++ b/arch/riscv/include/asm/exec.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_EXEC_H
+#define __ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* __ASM_EXEC_H */
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 6bcd80325dfc..182db7930edc 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 5a0bd27fd11a..46d9de54179e 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -92,6 +92,7 @@
#define RISCV_ISA_EXT_ZCF 83
#define RISCV_ISA_EXT_ZCMOP 84
#define RISCV_ISA_EXT_ZAWRS 85
+#define RISCV_ISA_EXT_SVVPTC 86
#define RISCV_ISA_EXT_XLINUXENVCFG 127
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 8e10a94430a2..8330d16b05b5 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -12,6 +12,11 @@
#include <asm-generic/irq.h>
+#ifdef CONFIG_SMP
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+#endif
+
void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void));
struct fwnode_handle *riscv_get_intc_hwnode(void);
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 7ede2111c591..32d308a3355f 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -112,11 +112,13 @@ struct kernel_mapping {
/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
/* Offset between kernel mapping virtual address and kernel load address */
- unsigned long va_kernel_pa_offset;
- unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
+ unsigned long va_kernel_xip_text_pa_offset;
+ unsigned long va_kernel_xip_data_pa_offset;
uintptr_t xiprom;
uintptr_t xiprom_sz;
+#else
+ unsigned long va_kernel_pa_offset;
#endif
};
@@ -134,12 +136,18 @@ extern phys_addr_t phys_ram_base;
#else
void *linear_mapping_pa_to_va(unsigned long x);
#endif
+
+#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y) ({ \
unsigned long _y = (unsigned long)(y); \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
- (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \
- (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
+ (_y < phys_ram_base) ? \
+ (void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) : \
+ (void *)(_y + kernel_map.va_kernel_xip_data_pa_offset); \
})
+#else
+#define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
+#endif
+
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
#ifndef CONFIG_DEBUG_VIRTUAL
@@ -147,12 +155,17 @@ void *linear_mapping_pa_to_va(unsigned long x);
#else
phys_addr_t linear_mapping_va_to_pa(unsigned long x);
#endif
+
+#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y) ({ \
unsigned long _y = (unsigned long)(y); \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
- (_y - kernel_map.va_kernel_xip_pa_offset) : \
- (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
+ (_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ? \
+ (_y - kernel_map.va_kernel_xip_text_pa_offset) : \
+ (_y - kernel_map.va_kernel_xip_data_pa_offset); \
})
+#else
+#define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
+#endif
#define __va_to_pa_nodebug(x) ({ \
unsigned long _x = x; \
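
The new text/data offsets split the old IS_ENABLED() conditional into two dedicated fields. Their assignments are out of scope of this header; below is a sketch of how they could be derived at boot, assuming kernel_map.xiprom carries CONFIG_XIP_PHYS_ADDR and kernel_map.phys_addr carries CONFIG_PHYS_RAM_BASE (these field roles are assumptions here; the real code lives in arch/riscv/mm/init.c):

/* Sketch under the assumptions stated above, not part of this diff. */
static void __init setup_xip_offsets_sketch(void)
{
	extern char _start[], _sdata[];

	/* text is executed in place: kernel VMA minus its LMA in Flash */
	kernel_map.va_kernel_xip_text_pa_offset =
		kernel_map.virt_addr - kernel_map.xiprom;

	/* data keeps its link-time distance from _start, but its writable
	 * copy starts at the RAM base */
	kernel_map.va_kernel_xip_data_pa_offset =
		kernel_map.virt_addr + ((uintptr_t)_sdata - (uintptr_t)_start) -
		kernel_map.phys_addr;
}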
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 089f3c9f56a3..e79f15293492 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -107,13 +107,6 @@
#endif
-#ifdef CONFIG_XIP_KERNEL
-#define XIP_OFFSET SZ_32M
-#define XIP_OFFSET_MASK (SZ_32M - 1)
-#else
-#define XIP_OFFSET 0
-#endif
-
#ifndef __ASSEMBLY__
#include <asm/page.h>
@@ -142,11 +135,14 @@
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({ \
+ extern char _sdata[], _start[], _end[]; \
+ uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR \
+ + (uintptr_t)&_sdata - (uintptr_t)&_start; \
+ uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR \
+ + (uintptr_t)&_end - (uintptr_t)&_start; \
uintptr_t __a = (uintptr_t)(addr); \
- (__a >= CONFIG_XIP_PHYS_ADDR && \
- __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
- __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
- __a; \
+ (__a >= __rom_start_data && __a < __rom_end_data) ? \
+ __a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a; \
})
#else
#define XIP_FIXUP(addr) (addr)
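
Worked example with hypothetical values: with CONFIG_XIP_PHYS_ADDR = 0x21000000, CONFIG_PHYS_RAM_BASE = 0x80000000 and _sdata - _start = 0x00400000, we get __rom_start_data = 0x21400000. A pointer 0x21400010 into the Flash copy of .data falls inside [__rom_start_data, __rom_end_data) and is fixed up to 0x21400010 - 0x21400000 + 0x80000000 = 0x80000010, whereas a text address such as 0x21000040 lies below __rom_start_data and passes through unchanged. The old test against a fixed window of 2 * XIP_OFFSET (2 * 32MB) is thus replaced by the exact extent of the data image.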
@@ -501,6 +497,9 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, unsigned int nr)
{
+ asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
+ : : : : svvptc);
+
/*
* The kernel assumes that TLBs don't cache invalid entries, but
* in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
@@ -510,6 +509,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
*/
while (nr--)
local_flush_tlb_page(address + nr * PAGE_SIZE);
+
+svvptc:;
+ /*
+ * Svvptc guarantees that the new valid pte will be visible within
+ * a bounded timeframe, so when the uarch does not cache invalid
+ * entries, we don't have to do anything.
+ */
}
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
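
For readers unfamiliar with the asm-goto form of ALTERNATIVE used above, here is a stripped-down sketch of the pattern (illustrative; the function name is made up):

/*
 * The instruction is assembled as a nop. When the boot cpu advertises
 * RISCV_ISA_EXT_SVVPTC, the alternatives framework patches it into the
 * "j" instruction, so everything between the asm and the label is
 * skipped at zero runtime cost on Svvptc hardware.
 */
static inline void alternative_branch_sketch(void)
{
	asm goto(ALTERNATIVE("nop", "j %l[fast]", 0, RISCV_ISA_EXT_SVVPTC, 1)
		 : : : : fast);

	/* slow path: preventive per-page sfence.vma, Svvptc absent */
	return;

fast:;
	/* fast path: rely on Svvptc's bounded visibility, do nothing */
}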
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index ec11001c3fe0..ab92fc84e1fc 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -46,7 +46,7 @@ bool kernel_page_present(struct page *page);
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_STRICT_KERNEL_RWX
+#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL)
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index fca5c6be2b81..ebe52f96da34 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -61,6 +61,13 @@ struct thread_info {
void *scs_base;
void *scs_sp;
#endif
+#ifdef CONFIG_64BIT
+ /*
+ * Used in handle_exception() to save a0, a1 and a2 before knowing if we
+ * can access the kernel stack.
+ */
+ unsigned long a0, a1, a2;
+#endif
};
#ifdef CONFIG_SHADOW_CALL_STACK
@@ -112,8 +119,4 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
-#define _TIF_WORK_MASK \
- (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
-
#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index 51f6dfe19745..fefe94dc98e2 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
index b65bf6306f69..f3d56299bc22 100644
--- a/arch/riscv/include/asm/xip_fixup.h
+++ b/arch/riscv/include/asm/xip_fixup.h
@@ -9,18 +9,36 @@
#ifdef CONFIG_XIP_KERNEL
.macro XIP_FIXUP_OFFSET reg
- REG_L t0, _xip_fixup
+ /* Fix up an address in Flash into an address in RAM early during boot,
+ * before the MMU is up. The generated code "thinks" data is in Flash,
+ * but it actually resides in RAM (the data is also in Flash, but Flash
+ * is read-only, so we must use the copy residing in RAM).
+ *
+ * The start of data in Flash is _sdata and the start of data in RAM is
+ * CONFIG_PHYS_RAM_BASE. So this fix-up essentially does:
+ * reg += CONFIG_PHYS_RAM_BASE - _sdata
+ */
+ li t0, CONFIG_PHYS_RAM_BASE
add \reg, \reg, t0
+ la t0, _sdata
+ sub \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
+ /* In the linker script, at the transition from the read-only section
+ * to the writable section, the VMA is increased while the LMA remains
+ * the same. (See in the linker script how _sdata, __data_loc and
+ * LOAD_OFFSET are changed.)
+ *
+ * Consequently, early during boot before the MMU is up, the generated
+ * code reads the "writable" section at wrong addresses: the compiler
+ * generates code using the VMA, but the data is located in Flash at
+ * the LMA.
+ */
+ la t0, _sdata
+ sub \reg, \reg, t0
la t0, __data_loc
- REG_L t1, _xip_phys_offset
- sub \reg, \reg, t1
add \reg, \reg, t0
.endm
-
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
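
Continuing the hypothetical numbers from the XIP_FIXUP example above, and assuming that before the MMU is up the pc-relative "la t0, _sdata" resolves to the Flash-resident address 0x21400000: XIP_FIXUP_OFFSET applied to a register holding 0x21400010 computes 0x21400010 + 0x80000000 - 0x21400000 = 0x80000010, the same RAM address the C-side XIP_FIXUP macro produces.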