Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--  arch/riscv/mm/Makefile     |  2
-rw-r--r--  arch/riscv/mm/cacheflush.c | 26
-rw-r--r--  arch/riscv/mm/context.c    |  2
-rw-r--r--  arch/riscv/mm/extable.c    |  4
-rw-r--r--  arch/riscv/mm/fault.c      |  6
-rw-r--r--  arch/riscv/mm/init.c       | 28
-rw-r--r--  arch/riscv/mm/tlbflush.c   | 25
7 files changed, 68 insertions, 25 deletions
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index b3a356c80c1f..3c8b33258457 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -6,8 +6,8 @@ CFLAGS_REMOVE_init.o = -pg
 endif

 obj-y += init.o
-obj-y += fault.o
 obj-y += extable.o
+obj-$(CONFIG_MMU) += fault.o
 obj-y += cacheflush.o
 obj-y += context.o
 obj-y += sifive_l2_cache.o

diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 3f15938dec89..8f1900686640 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -10,9 +10,17 @@

 #include <asm/sbi.h>

+static void ipi_remote_fence_i(void *info)
+{
+	return local_flush_icache_all();
+}
+
 void flush_icache_all(void)
 {
-	sbi_remote_fence_i(NULL);
+	if (IS_ENABLED(CONFIG_RISCV_SBI))
+		sbi_remote_fence_i(NULL);
+	else
+		on_each_cpu(ipi_remote_fence_i, NULL, 1);
 }

 /*
@@ -28,7 +36,7 @@ void flush_icache_all(void)
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
 	unsigned int cpu;
-	cpumask_t others, hmask, *mask;
+	cpumask_t others, *mask;

 	preempt_disable();

@@ -46,10 +54,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 	 */
 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
 	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local) {
-		riscv_cpuid_to_hartid_mask(&others, &hmask);
-		sbi_remote_fence_i(hmask.bits);
-	} else {
+	if (mm == current->active_mm && local) {
 		/*
 		 * It's assumed that at least one strongly ordered operation is
 		 * performed on this hart between setting a hart's cpumask bit
@@ -59,6 +64,13 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 		 * with flush_icache_deferred().
 		 */
 		smp_mb();
+	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+		cpumask_t hartid_mask;
+
+		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+	} else {
+		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
 	}

 	preempt_enable();
@@ -66,6 +78,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)

 #endif /* CONFIG_SMP */

+#ifdef CONFIG_MMU
 void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);
@@ -73,3 +86,4 @@ void flush_icache_pte(pte_t pte)
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		flush_icache_all();
 }
+#endif /* CONFIG_MMU */
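With this change, flush_icache_all() no longer hard-depends on SBI firmware: in CONFIG_RISCV_SBI=n configurations (M-mode/nommu builds) the remote fence.i is delivered by IPI instead, with every hart running local_flush_icache_all() itself. As an illustration of when this path fires, here is a minimal sketch of a code-patching caller; patch_text_example() and its arguments are hypothetical, only flush_icache_all() comes from the patch:

#include <linux/string.h>
#include <asm/cacheflush.h>

/* Hypothetical helper: rewrite instructions, then resync all harts. */
static void patch_text_example(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);	/* modify the instruction stream */
	flush_icache_all();	/* fence.i on every hart: via an SBI call
				 * when available, via on_each_cpu() IPIs
				 * when not */
}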
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index ca66d44156b6..613ec81a8979 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -58,8 +58,10 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	cpumask_set_cpu(cpu, mm_cpumask(next));

+#ifdef CONFIG_MMU
 	csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
 	local_flush_tlb_all();
+#endif

 	flush_icache_deferred(next);
 }

diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
index 7aed9178d365..2fc729422151 100644
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -15,9 +15,9 @@ int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;

-	fixup = search_exception_tables(regs->sepc);
+	fixup = search_exception_tables(regs->epc);
 	if (fixup) {
-		regs->sepc = fixup->fixup;
+		regs->epc = fixup->fixup;
 		return 1;
 	}
 	return 0;

diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 247b8c859c44..cf7248e07f43 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -34,8 +34,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	int code = SEGV_MAPERR;
 	vm_fault_t fault;

-	cause = regs->scause;
-	addr = regs->sbadaddr;
+	cause = regs->cause;
+	addr = regs->badaddr;
 	tsk = current;
 	mm = tsk->mm;

@@ -53,7 +53,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 		goto vmalloc_fault;

 	/* Enable interrupts if they were enabled in the parent context. */
-	if (likely(regs->sstatus & SR_SPIE))
+	if (likely(regs->status & SR_PIE))
 		local_irq_enable();

 	/*

diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 573463d1c799..b2fe9d1be833 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -26,6 +26,7 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 EXPORT_SYMBOL(empty_zero_page);

 extern char _start[];
+void *dtb_early_va;

 static void __init zone_sizes_init(void)
 {
@@ -40,7 +41,7 @@ static void __init zone_sizes_init(void)
 	free_area_init_nodes(max_zone_pfns);
 }

-void setup_zero_page(void)
+static void setup_zero_page(void)
 {
 	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 }
@@ -142,12 +143,12 @@ void __init setup_bootmem(void)
 	}
 }

+#ifdef CONFIG_MMU
 unsigned long va_pa_offset;
 EXPORT_SYMBOL(va_pa_offset);
 unsigned long pfn_base;
 EXPORT_SYMBOL(pfn_base);

-void *dtb_early_va;
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
@@ -273,7 +274,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
 #define get_pgd_next_virt(__pa)	get_pmd_virt(__pa)
 #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
 	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
-#define PTE_PARENT_SIZE	PMD_SIZE
 #define fixmap_pgd_next	fixmap_pmd
 #else
 #define pgd_next_t		pte_t
@@ -281,7 +281,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
 #define get_pgd_next_virt(__pa)	get_pte_virt(__pa)
 #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
 	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
-#define PTE_PARENT_SIZE	PGDIR_SIZE
 #define fixmap_pgd_next	fixmap_pte
 #endif

@@ -314,14 +313,11 @@ static void __init create_pgd_mapping(pgd_t *pgdp,

 static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
 {
-	uintptr_t map_size = PAGE_SIZE;
-
-	/* Upgrade to PMD/PGDIR mappings whenever possible */
-	if (!(base & (PTE_PARENT_SIZE - 1)) &&
-	    !(size & (PTE_PARENT_SIZE - 1)))
-		map_size = PTE_PARENT_SIZE;
+	/* Upgrade to PMD_SIZE mappings whenever possible */
+	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
+		return PAGE_SIZE;

-	return map_size;
+	return PMD_SIZE;
 }

 /*
@@ -449,6 +445,16 @@ static void __init setup_vm_final(void)
 	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
 	local_flush_tlb_all();
 }
+#else
+asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+{
+	dtb_early_va = (void *)dtb_pa;
+}
+
+static inline void setup_vm_final(void)
+{
+}
+#endif /* CONFIG_MMU */

 void __init paging_init(void)
 {
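The new !CONFIG_MMU stubs in init.c only record the DTB's physical address, since without paging physical and virtual addresses coincide. The best_map_size() rewrite drops the PTE_PARENT_SIZE indirection: the linear mapping is only ever upgraded to a PMD leaf (2 MiB with 4 KiB pages), and only when both the physical base and the size are PMD-aligned. A standalone, userspace-compilable sketch of that alignment rule (constants and addresses are illustrative, assuming 4 KiB pages):

#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE 0x1000ULL   /* 4 KiB */
#define EX_PMD_SIZE  0x200000ULL /* 2 MiB */

/* Mirrors the simplified best_map_size() decision. */
static uint64_t best_map_size(uint64_t base, uint64_t size)
{
	/* Any misalignment of base or size forces 4 KiB pages. */
	if ((base & (EX_PMD_SIZE - 1)) || (size & (EX_PMD_SIZE - 1)))
		return EX_PAGE_SIZE;
	return EX_PMD_SIZE;
}

int main(void)
{
	/* 2 MiB-aligned base and size: PMD mapping (prints 0x200000). */
	printf("%#llx\n", (unsigned long long)best_map_size(0x80200000ULL, 0x200000ULL));
	/* 4 KiB-sized tail: falls back to PTEs (prints 0x1000). */
	printf("%#llx\n", (unsigned long long)best_map_size(0x80200000ULL, 0x1000ULL));
	return 0;
}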
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 24cd33d2c48f..720b443c4528 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -2,6 +2,7 @@

 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/sched.h>
 #include <asm/sbi.h>

 void flush_tlb_all(void)
@@ -9,13 +10,33 @@ void flush_tlb_all(void)
 	sbi_remote_sfence_vma(NULL, 0, -1);
 }

+/*
+ * This function must not be called with cmask being null.
+ * Kernel may panic if cmask is NULL.
+ */
 static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
 				  unsigned long size)
 {
 	struct cpumask hmask;
+	unsigned int cpuid;

-	riscv_cpuid_to_hartid_mask(cmask, &hmask);
-	sbi_remote_sfence_vma(hmask.bits, start, size);
+	if (cpumask_empty(cmask))
+		return;
+
+	cpuid = get_cpu();
+
+	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+		/* local cpu is the only cpu present in cpumask */
+		if (size <= PAGE_SIZE)
+			local_flush_tlb_page(start);
+		else
+			local_flush_tlb_all();
+	} else {
+		riscv_cpuid_to_hartid_mask(cmask, &hmask);
+		sbi_remote_sfence_vma(cpumask_bits(&hmask), start, size);
+	}
+
+	put_cpu();
 }

 void flush_tlb_mm(struct mm_struct *mm)
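The key observation in the tlbflush.c change is that cpumask_any_but(cmask, cpuid) >= nr_cpu_ids exactly when the current CPU is the only one in the mask; in that case a local sfence.vma (single page or global) suffices and the SBI ecall round-trip is skipped. The get_cpu()/put_cpu() pair disables preemption so the "local only" decision cannot go stale under migration. The callers sit outside this hunk, but going by the rest of this file they plausibly funnel through __sbi_tlb_flush_range() roughly like this (a sketch for orientation, not part of the patch):

void flush_tlb_mm(struct mm_struct *mm)
{
	/* Flush the whole address space on every hart that used this mm. */
	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	/* One page: size <= PAGE_SIZE takes the new local fast path. */
	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
}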