Diffstat (limited to 'arch/riscv/mm')
 arch/riscv/mm/Makefile |  3 +--
 arch/riscv/mm/fault.c  | 49 +++++++++++++++++---------
 arch/riscv/mm/init.c   | 62 ++++++++++++++++++++++++-
 3 files changed, 96 insertions(+), 18 deletions(-)
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index b85e9e82f082..9c454f90fd3d 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,8 +13,7 @@ endif
 KCOV_INSTRUMENT_init.o := n
 
 obj-y += init.o
-obj-y += extable.o
-obj-$(CONFIG_MMU) += fault.o pageattr.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o
 obj-y += cacheflush.o
 obj-y += context.o
 obj-y += pgtable.o
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 35a84ec69a9f..6ea2cce4cc17 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -247,24 +247,12 @@ void handle_page_fault(struct pt_regs *regs)
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
-	if (unlikely((addr >= VMALLOC_START) && (addr < VMALLOC_END))) {
+	if ((!IS_ENABLED(CONFIG_MMU) || !IS_ENABLED(CONFIG_64BIT)) &&
+	    unlikely(addr >= VMALLOC_START && addr < VMALLOC_END)) {
 		vmalloc_fault(regs, code, addr);
 		return;
 	}
 
-#ifdef CONFIG_64BIT
-	/*
-	 * Modules in 64bit kernels lie in their own virtual region which is not
-	 * in the vmalloc region, but dealing with page faults in this region
-	 * or the vmalloc region amounts to doing the same thing: checking that
-	 * the mapping exists in init_mm.pgd and updating user page table, so
-	 * just use vmalloc_fault.
-	 */
-	if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) {
-		vmalloc_fault(regs, code, addr);
-		return;
-	}
-#endif
 	/* Enable interrupts if they were enabled in the parent context. */
 	if (!regs_irqs_disabled(regs))
 		local_irq_enable();
@@ -295,6 +283,36 @@
 		flags |= FAULT_FLAG_WRITE;
 	else if (cause == EXC_INST_PAGE_FAULT)
 		flags |= FAULT_FLAG_INSTRUCTION;
+#ifdef CONFIG_PER_VMA_LOCK
+	if (!(flags & FAULT_FLAG_USER))
+		goto lock_mmap;
+
+	vma = lock_vma_under_rcu(mm, addr);
+	if (!vma)
+		goto lock_mmap;
+
+	if (unlikely(access_error(cause, vma))) {
+		vma_end_read(vma);
+		goto lock_mmap;
+	}
+
+	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
+	vma_end_read(vma);
+
+	if (!(fault & VM_FAULT_RETRY)) {
+		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+		goto done;
+	}
+	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			no_context(regs, addr);
+		return;
+	}
+lock_mmap:
+#endif /* CONFIG_PER_VMA_LOCK */
+
 retry:
 	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
@@ -350,6 +368,9 @@ retry:
 
 	mmap_read_unlock(mm);
 
+#ifdef CONFIG_PER_VMA_LOCK
+done:
+#endif
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		tsk->thread.bad_cause = cause;
 		mm_fault_error(regs, addr, fault);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4fa420faa780..4b95d8999120 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -357,7 +357,7 @@ static phys_addr_t __init alloc_pte_late(uintptr_t va)
 	unsigned long vaddr;
 
 	vaddr = __get_free_page(GFP_KERNEL);
-	BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));
+	BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page((void *)vaddr)));
 
 	return __pa(vaddr);
 }
@@ -440,7 +440,7 @@ static phys_addr_t __init alloc_pmd_late(uintptr_t va)
 	unsigned long vaddr;
 
 	vaddr = __get_free_page(GFP_KERNEL);
-	BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page(vaddr)));
+	BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page((void *)vaddr)));
 
 	return __pa(vaddr);
 }
@@ -1389,3 +1389,61 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	return vmemmap_populate_basepages(start, end, node, NULL);
 }
 #endif
+
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+/*
+ * Pre-allocates page-table pages for a specific area in the kernel
+ * page-table. Only the level which needs to be synchronized between
+ * all page-tables is allocated because the synchronization can be
+ * expensive.
+ */
+static void __init preallocate_pgd_pages_range(unsigned long start, unsigned long end,
+					       const char *area)
+{
+	unsigned long addr;
+	const char *lvl;
+
+	for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+		pgd_t *pgd = pgd_offset_k(addr);
+		p4d_t *p4d;
+		pud_t *pud;
+		pmd_t *pmd;
+
+		lvl = "p4d";
+		p4d = p4d_alloc(&init_mm, pgd, addr);
+		if (!p4d)
+			goto failed;
+
+		if (pgtable_l5_enabled)
+			continue;
+
+		lvl = "pud";
+		pud = pud_alloc(&init_mm, p4d, addr);
+		if (!pud)
+			goto failed;
+
+		if (pgtable_l4_enabled)
+			continue;
+
+		lvl = "pmd";
+		pmd = pmd_alloc(&init_mm, pud, addr);
+		if (!pmd)
+			goto failed;
+	}
+	return;
+
+failed:
+	/*
+	 * The pages have to be there now or they will be missing in
+	 * process page-tables later.
+	 */
+	panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
+}
+
+void __init pgtable_cache_init(void)
+{
+	preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
+	if (IS_ENABLED(CONFIG_MODULES))
+		preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
+}
+#endif
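
A note on the first fault.c hunk: because the init.c hunk above pre-allocates the PGD entries for the vmalloc and modules areas at boot, a 64-bit MMU kernel can never take a kernel page fault in those regions, so the vmalloc_fault() branch is written with IS_ENABLED() rather than #ifdef. The condition folds to a compile-time constant and the branch is discarded, yet the body is still parsed and type-checked in every configuration. A minimal sketch of that pattern; the IS_ENABLED() here is a simplified stand-in (the kernel's real macro decodes CONFIG_* symbols with preprocessor tricks) and the demo addresses are made up:

#include <stdio.h>

/* Simplified stand-in for the kernel's IS_ENABLED(): plain 0/1 constants. */
#define CONFIG_64BIT_DEMO  1
#define IS_ENABLED(opt)    (opt)

#define VMALLOC_START_DEMO 0xffffffc600000000ULL /* illustrative */
#define VMALLOC_END_DEMO   0xffffffc800000000ULL /* illustrative */

static void vmalloc_fault_demo(unsigned long long addr)
{
        /* Dead code when CONFIG_64BIT_DEMO is 1: the optimizer drops the
         * call because the enclosing branch is constant-false. */
        printf("synchronizing kernel mapping for %#llx\n", addr);
}

int main(void)
{
        unsigned long long addr = 0xffffffc6fee00000ULL;

        /* Same shape as the diff: unlike an #ifdef'ed-out block, the body
         * must still compile in every configuration. */
        if (!IS_ENABLED(CONFIG_64BIT_DEMO) &&
            addr >= VMALLOC_START_DEMO && addr < VMALLOC_END_DEMO)
                vmalloc_fault_demo(addr);

        printf("fast path: no vmalloc fault handling needed\n");
        return 0;
}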
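
The CONFIG_PER_VMA_LOCK hunk above follows a try-fast-path/fall-back structure: look up and read-lock a single VMA without taking mmap_lock, handle the fault, and jump to the classic mmap_lock path ("goto lock_mmap") whenever the VMA cannot be found or locked, the access check fails, or the fault must retry. Below is a rough userspace analogue of that shape using pthreads; struct region, find_region() and the fixed table are invented for illustration, and the kernel additionally relies on RCU to make the lockless lookup itself safe (compile with: cc -O2 demo.c -lpthread):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for a VMA: an address range with its own rwlock. */
struct region {
        unsigned long long start, end;
        pthread_rwlock_t lock;
};

/* Coarse lock over the whole table, playing the role of mmap_lock. */
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct region regions[] = {
        { 0x1000, 0x2000, PTHREAD_RWLOCK_INITIALIZER },
        { 0x4000, 0x8000, PTHREAD_RWLOCK_INITIALIZER },
};

static struct region *find_region(unsigned long long addr)
{
        for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
                if (addr >= regions[i].start && addr < regions[i].end)
                        return &regions[i];
        return NULL;
}

static void handle_fault(unsigned long long addr)
{
        /* Fast path: take only the per-region lock, like the diff's
         * lock_vma_under_rcu()/vma_end_read() pair. The static table makes
         * the unlocked lookup safe in this toy; the kernel uses RCU. */
        struct region *r = find_region(addr);

        if (r && pthread_rwlock_tryrdlock(&r->lock) == 0) {
                printf("%#llx: handled under per-region lock\n", addr);
                pthread_rwlock_unlock(&r->lock);
                return;
        }

        /* Slow path, the "goto lock_mmap" fallback: take the coarse lock
         * and redo the lookup under it. */
        pthread_rwlock_rdlock(&table_lock);
        r = find_region(addr);
        printf("%#llx: %s under coarse lock\n", addr,
               r ? "handled" : "no region (fault error)");
        pthread_rwlock_unlock(&table_lock);
}

int main(void)
{
        handle_fault(0x1800); /* fast path */
        handle_fault(0x9000); /* no region: falls back to the coarse lock */
        return 0;
}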
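
In preallocate_pgd_pages_range() above, the loop visits each top-level (PGD) entry covering the range exactly once: ALIGN(addr + 1, PGDIR_SIZE) jumps to the next PGDIR_SIZE boundary, and the extra "addr >= start" test stops the walk if ALIGN() wraps past the top of the address space. Only that one level has to be populated in init_mm.pgd, since it is the level copied into every process page table; everything below it is shared once the PGD entry exists. A standalone sketch of just that iteration; the Sv39 constants and the example range are illustrative, not taken from any particular kernel configuration:

#include <stdio.h>

#define PGDIR_SHIFT 30 /* Sv39: one PGD entry maps 1 GiB */
#define PGDIR_SIZE  (1ULL << PGDIR_SHIFT)
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        /* Made-up range spanning several PGD entries. */
        unsigned long long start = 0xffffffc6fee00000ULL;
        unsigned long long end   = 0xffffffc800000000ULL;
        unsigned long long addr;

        /* Same loop shape as the diff: one iteration per PGD slot;
         * "addr >= start" guards against ALIGN() overflowing to zero
         * when the range ends at the top of the address space. */
        for (addr = start; addr < end && addr >= start;
             addr = ALIGN(addr + 1, PGDIR_SIZE))
                printf("would pre-allocate next level for PGD index %llu\n",
                       (addr >> PGDIR_SHIFT) & 0x1ff); /* 9 index bits in Sv39 */

        return 0;
}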
