author		Alexandre Ghiti <alexghiti@rivosinc.com>	2024-02-02 13:47:11 +0100
committer	Palmer Dabbelt <palmer@rivosinc.com>	2024-03-20 08:56:08 -0700
commit		01261e24cfab69c65043e1e61168348ae23a64c2 (patch)
tree		448b71b90c07b95ce5f83d9eff3b7c9697d97c7d /arch/riscv
parent		28e4748e5e3d313056ed38abeff4b455abd02c1b (diff)
riscv: Only flush the mm icache when setting an exec pte
We used to emit a flush_icache_all() whenever a dirty executable
mapping was set in the page table. We can instead call
flush_icache_mm(), which only sends IPIs to the cores currently
running this mm and queues a deferred icache flush for all the others.
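
For illustration, here is a minimal sketch of what flush_icache_mm()
does conceptually. This is a simplified version, not the kernel code
verbatim; it assumes the per-mm icache_stale_mask cpumask used by the
RISC-V port and the SBI remote-fence helper:

#include <linux/mm_types.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/sbi.h>

/*
 * Simplified sketch: flush the local hart immediately, fence only
 * the harts currently running this mm, and mark every other hart's
 * icache as stale so it is flushed lazily when that hart next
 * switches to this mm. See arch/riscv/mm/cacheflush.c for the real
 * implementation.
 */
static void sketch_flush_icache_mm(struct mm_struct *mm)
{
	unsigned int cpu;
	cpumask_t others;

	preempt_disable();

	/* Mark all harts stale; they will flush lazily in switch_mm(). */
	cpumask_setall(&mm->context.icache_stale_mask);

	/* This hart flushes right now, so it is no longer stale. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, &mm->context.icache_stale_mask);
	local_flush_icache_all();

	/* Only harts currently running this mm need an immediate fence. */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	if (!cpumask_empty(&others))
		sbi_remote_fence_i(&others);

	preempt_enable();
}

Compared to flush_icache_all(), which fences every online hart
unconditionally, only the harts that can actually observe stale
instructions pay the cost up front.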
The number of calls to sbi_remote_fence_i() (tested without IPI
support):
With a simple buildroot rootfs:
* Before: ~5k
* After : 4 (!)
Tested on HW, boot to login is ~4.5% faster.
With an Ubuntu rootfs:
* Before: ~24k
* After : ~13k
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: Charlie Jenkins <charlie@rivosinc.com>
Link: https://lore.kernel.org/r/20240202124711.256146-1-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 14
-rw-r--r--  arch/riscv/mm/cacheflush.c       |  4
-rw-r--r--  arch/riscv/mm/pgtable.c          |  2
3 files changed, 10 insertions, 10 deletions
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 9c45810806ff..b2e6965748f2 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -513,12 +513,12 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 	WRITE_ONCE(*ptep, pteval);
 }
 
-void flush_icache_pte(pte_t pte);
+void flush_icache_pte(struct mm_struct *mm, pte_t pte);
 
-static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
+static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
 {
 	if (pte_present(pteval) && pte_exec(pteval))
-		flush_icache_pte(pteval);
+		flush_icache_pte(mm, pteval);
 
 	set_pte(ptep, pteval);
 }
@@ -529,7 +529,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
 	page_table_check_ptes_set(mm, ptep, pteval, nr);
 
 	for (;;) {
-		__set_pte_at(ptep, pteval);
+		__set_pte_at(mm, ptep, pteval);
 		if (--nr == 0)
 			break;
 		ptep++;
@@ -541,7 +541,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
 static inline void pte_clear(struct mm_struct *mm,
 	unsigned long addr, pte_t *ptep)
 {
-	__set_pte_at(ptep, __pte(0));
+	__set_pte_at(mm, ptep, __pte(0));
 }
 
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */
@@ -713,14 +713,14 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 				pmd_t *pmdp, pmd_t pmd)
 {
 	page_table_check_pmd_set(mm, pmdp, pmd);
-	return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
+	return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
 }
 
 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
 				pud_t *pudp, pud_t pud)
 {
 	page_table_check_pud_set(mm, pudp, pud);
-	return __set_pte_at((pte_t *)pudp, pud_pte(pud));
+	return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
 }
 
 #ifdef CONFIG_PAGE_TABLE_CHECK
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 55a34f2020a8..bc61ee5975e4 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -82,12 +82,12 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_MMU
-void flush_icache_pte(pte_t pte)
+void flush_icache_pte(struct mm_struct *mm, pte_t pte)
 {
 	struct folio *folio = page_folio(pte_page(pte));
 
 	if (!test_bit(PG_dcache_clean, &folio->flags)) {
-		flush_icache_all();
+		flush_icache_mm(mm, false);
 		set_bit(PG_dcache_clean, &folio->flags);
 	}
 }
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index ef887efcb679..533ec9055fa0 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -10,7 +10,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 			  pte_t entry, int dirty)
 {
 	if (!pte_same(ptep_get(ptep), entry))
-		__set_pte_at(ptep, entry);
+		__set_pte_at(vma->vm_mm, ptep, entry);
 	/*
 	 * update_mmu_cache will unconditionally execute, handling both
 	 * the case that the PTE changed and the spurious fault case.
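
For completeness, the consumer side of the deferred flush runs at
context-switch time. A sketch of that logic, modeled on
flush_icache_deferred() in arch/riscv/mm/context.c (simplified here):

#include <linux/cpumask.h>
#include <linux/mm_types.h>
#include <asm/cacheflush.h>

/*
 * Sketch of the deferred-flush consumer: a hart whose icache was
 * marked stale by flush_icache_mm() flushes it before running the
 * mm's code. The real logic is invoked from switch_mm().
 */
static void sketch_flush_icache_deferred(struct mm_struct *mm, unsigned int cpu)
{
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/* Order clearing the stale bit against the flush itself. */
		smp_mb();
		local_flush_icache_all();
	}
}

This is what makes the optimization safe: a hart that was not fenced
immediately still flushes its icache before it can execute any code
from the modified mapping.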