From 895a37028a4854962def6e2f2820a23c84062f66 Mon Sep 17 00:00:00 2001
From: Ryan Roberts
Date: Wed, 19 Jun 2024 13:18:56 +0100
Subject: arm64: mm: Permit PTE SW bits to change in live mappings

Previously pgattr_change_is_safe() was overly strict and complained
(e.g. "[ 116.262743] __check_safe_pte_update: unsafe attribute change:
0x0560000043768fc3 -> 0x0160000043768fc3") if it saw any SW bits change
in a live PTE. There is no such restriction on SW bits in the Arm ARM.

Until now, no SW bits have been updated in live mappings via the
set_ptes() route. PTE_DIRTY would be updated live, but this is handled
by ptep_set_access_flags() which does not call pgattr_change_is_safe().
However, with the introduction of uffd-wp for arm64, there is core-mm
code that does ptep_get(); pte_clear_uffd_wp(); set_ptes(); which
triggers this false warning.

Silence this warning by masking out the SW bits during checks.

The bug isn't technically in the highlighted commit below, but that's
where bisecting would likely lead as it's what made the bug
user-visible.

Signed-off-by: Ryan Roberts
Fixes: 5b32510af77b ("arm64/mm: Add uffd write-protect support")
Link: https://lore.kernel.org/r/20240619121859.4153966-1-ryan.roberts@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable-hwdef.h | 1 +
 arch/arm64/mm/mmu.c                    | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 9943ff0af4c9..1f60aa1bc750 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -170,6 +170,7 @@
 #define PTE_CONT        (_AT(pteval_t, 1) << 52)  /* Contiguous range */
 #define PTE_PXN         (_AT(pteval_t, 1) << 53)  /* Privileged XN */
 #define PTE_UXN         (_AT(pteval_t, 1) << 54)  /* User XN */
+#define PTE_SWBITS_MASK _AT(pteval_t, (BIT(63) | GENMASK(58, 55)))
 
 #define PTE_ADDR_LOW    (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
 #ifdef CONFIG_ARM64_PA_BITS_52
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c927e9312f10..353ea5dc32b8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -124,7 +124,8 @@ bool pgattr_change_is_safe(u64 old, u64 new)
 	 * The following mapping attributes may be updated in live
 	 * kernel mappings without the need for break-before-make.
 	 */
-	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
+	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG |
+			PTE_SWBITS_MASK;
 
 	/* creating or taking down mappings is always safe */
 	if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
--
cgit v1.2.3-70-g09d2
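As a rough illustration of the relaxed check, the user-space sketch below
models only the SW-bit part of the mask (bits 55-58 and 63) and feeds it the
two PTE values from the warning quoted above. BIT()/GENMASK() are local
stand-ins for the kernel macros, and the helper name change_is_safe() is
invented for this example; it is not the kernel's pgattr_change_is_safe().

    /* User-space sketch only: models the SW-bit relaxation, not the kernel code. */
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)          (1ULL << (n))
    #define GENMASK(h, l)   ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
    #define PTE_SWBITS_MASK (BIT(63) | GENMASK(58, 55))

    /* Hypothetical helper: a live change is safe if only masked bits differ. */
    static int change_is_safe(uint64_t old, uint64_t new)
    {
            return ((old ^ new) & ~PTE_SWBITS_MASK) == 0;
    }

    int main(void)
    {
            uint64_t old = 0x0560000043768fc3ULL;   /* SW bit 58 set   */
            uint64_t new = 0x0160000043768fc3ULL;   /* SW bit 58 clear */

            /* With the SW bits masked out, this transition is treated as safe. */
            printf("safe: %d\n", change_is_safe(old, new));
            return 0;
    }

In the kernel, the mask additionally includes PTE_PXN, PTE_RDONLY, PTE_WRITE
and PTE_NG, and the check is skipped when either PTE is invalid, as the mmu.c
hunk above shows.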
From ecc54006f158ae0245a13e59026da2f0239c1b86 Mon Sep 17 00:00:00 2001
From: Zenghui Yu
Date: Fri, 21 Jun 2024 17:28:09 +0800
Subject: arm64: Clear the initial ID map correctly before remapping

In the attempt to clear and recreate the initial ID map for LPA2, we
wrongly use 'start - end' as the map size, which makes the memset()
almost a nop. Fix it by passing the correct map size.

Fixes: 9684ec186f8f ("arm64: Enable LPA2 at boot if supported by the system")
Signed-off-by: Zenghui Yu
Reviewed-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20240621092809.162-1-yuzenghui@huawei.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/pi/map_kernel.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
index 5fa08e13e17e..f374a3e5a5fe 100644
--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -173,7 +173,7 @@ static void __init remap_idmap_for_lpa2(void)
 	 * Don't bother with the FDT, we no longer need it after this.
 	 */
 	memset(init_idmap_pg_dir, 0,
-	       (u64)init_idmap_pg_dir - (u64)init_idmap_pg_end);
+	       (u64)init_idmap_pg_end - (u64)init_idmap_pg_dir);
 
 	create_init_idmap(init_idmap_pg_dir, mask);
 	dsb(ishst);
--
cgit v1.2.3-70-g09d2
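A trivial user-space sketch of the size calculation the fix restores: the
number of bytes to clear between two addresses is end - start. The buffer and
pointer names below are invented for illustration and merely stand in for
init_idmap_pg_dir/init_idmap_pg_end.

    /* Illustration only: clearing a region bounded by a start and an end address. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static char idmap[4096];                        /* stand-in for the ID map pages */

    int main(void)
    {
            char *pg_dir = idmap;                   /* start of the region */
            char *pg_end = idmap + sizeof(idmap);   /* one past the end    */

            /* The region size is end - start; the bug had the operands swapped. */
            memset(pg_dir, 0, (uint64_t)pg_end - (uint64_t)pg_dir);

            printf("cleared %llu bytes\n",
                   (unsigned long long)((uint64_t)pg_end - (uint64_t)pg_dir));
            return 0;
    }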