author     Linus Torvalds <torvalds@linux-foundation.org>  2024-04-19 13:04:21 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-04-19 13:04:21 -0700
commit     25ec51ec86a75fbe27e08bf0887a2ecaeb04b2ef (patch)
tree       95cbed082f681488b70c012ce5481daca12e9734 /arch
parent     3cdb455946193bb7ad13df15333c7fe0054db6c3 (diff)
parent     50449ca66cc5a8cbc64749cf4b9f3d3fc5f4b457 (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Catalin Marinas:

 - Fix a kernel fault during page table walking in huge_pte_alloc() with
   CONFIG_PGTABLE_LEVELS=5 due to using p4d_offset() instead of p4d_alloc()

 - head.S fix and cleanup to disable the MMU before toggling the
   HCR_EL2.E2H bit when entering the kernel with the MMU on from the EFI
   stub. Changing this bit (currently from VHE to nVHE) causes some system
   registers as well as page table descriptors to be interpreted
   differently, potentially resulting in spurious MMU faults

 - Fix translation fault in swsusp_save() accessing MEMBLOCK_NOMAP memory
   ranges due to kernel_page_present() returning true in most
   configurations other than rodata_full == true, CONFIG_DEBUG_PAGEALLOC=y
   or CONFIG_KFENCE=y

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: hibernate: Fix level3 translation fault in swsusp_save()
  arm64/head: Disable MMU at EL2 before clearing HCR_EL2.E2H
  arm64/head: Drop unnecessary pre-disable-MMU workaround
  arm64/hugetlb: Fix page table walk in huge_pte_alloc()
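The huge_pte_alloc() fix in the first bullet comes down to the allocate-as-you-walk pattern: the *_offset() helpers only compute a pointer into a table that is assumed to exist, while the *_alloc() helpers populate a missing intermediate level on the way down. A minimal, illustrative sketch of that pattern follows (not the kernel's exact code; the function name walk_and_alloc is only for illustration):

    #include <linux/mm.h>

    /*
     * Illustrative only: walk down to the PTE level for 'addr', allocating
     * any missing intermediate table along the way.  With
     * CONFIG_PGTABLE_LEVELS=5 the p4d level is a real table, so using
     * p4d_offset() instead of p4d_alloc() here walks through a level that
     * may never have been allocated.
     */
    static pte_t *walk_and_alloc(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgdp = pgd_offset(mm, addr);     /* top level always exists */
            p4d_t *p4dp;
            pud_t *pudp;
            pmd_t *pmdp;

            p4dp = p4d_alloc(mm, pgdp, addr);       /* may allocate a p4d table */
            if (!p4dp)
                    return NULL;

            pudp = pud_alloc(mm, p4dp, addr);
            if (!pudp)
                    return NULL;

            pmdp = pmd_alloc(mm, pudp, addr);
            if (!pmdp)
                    return NULL;

            return pte_alloc_map(mm, pmdp, addr);   /* leaf-level PTE */
    }

This is exactly the shape the hugetlbpage.c hunk below restores at the p4d level.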
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/kernel/head.S     7
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  5
-rw-r--r--  arch/arm64/mm/pageattr.c     3
3 files changed, 9 insertions, 6 deletions
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 06234c3a15f3..cb68adcabe07 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -289,6 +289,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
adr_l x1, __hyp_text_end
adr_l x2, dcache_clean_poc
blr x2
+
+ mov_q x0, INIT_SCTLR_EL2_MMU_OFF
+ pre_disable_mmu_workaround
+ msr sctlr_el2, x0
+ isb
0:
mov_q x0, HCR_HOST_NVHE_FLAGS
@@ -323,13 +328,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
cbz x0, 2f
/* Set a sane SCTLR_EL1, the VHE way */
- pre_disable_mmu_workaround
msr_s SYS_SCTLR_EL12, x1
mov x2, #BOOT_CPU_FLAG_E2H
b 3f
2:
- pre_disable_mmu_workaround
msr sctlr_el1, x1
mov x2, xzr
3:
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0f0e10bb0a95..b872b003a55f 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -276,7 +276,10 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t *ptep = NULL;
pgdp = pgd_offset(mm, addr);
- p4dp = p4d_offset(pgdp, addr);
+ p4dp = p4d_alloc(mm, pgdp, addr);
+ if (!p4dp)
+ return NULL;
+
pudp = pud_alloc(mm, p4dp, addr);
if (!pudp)
return NULL;
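The pageattr.c hunk below drops the can_set_direct_map() early return from kernel_page_present(), so the function now always walks the linear-map page tables; for a MEMBLOCK_NOMAP range nothing is mapped there, the walk hits a *_none() entry and the page is reported as not present, which is what swsusp_save() relies on. A simplified sketch of that presence-check pattern (illustrative only, arm64-style block-mapping helpers assumed, not the file's exact code):

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /* Illustrative only: is 'addr' mapped in the kernel page tables? */
    static bool addr_mapped_in_kernel(unsigned long addr)
    {
            pgd_t *pgdp = pgd_offset_k(addr);
            p4d_t *p4dp;
            pud_t *pudp, pud;
            pmd_t *pmdp, pmd;

            if (pgd_none(READ_ONCE(*pgdp)))
                    return false;           /* unmapped (e.g. NOMAP) ranges bail out here */

            p4dp = p4d_offset(pgdp, addr);
            if (p4d_none(READ_ONCE(*p4dp)))
                    return false;

            pudp = pud_offset(p4dp, addr);
            pud = READ_ONCE(*pudp);
            if (pud_none(pud))
                    return false;
            if (pud_sect(pud))              /* PUD-level block mapping */
                    return true;

            pmdp = pmd_offset(pudp, addr);
            pmd = READ_ONCE(*pmdp);
            if (pmd_none(pmd))
                    return false;
            if (pmd_sect(pmd))              /* PMD-level block mapping */
                    return true;

            return pte_valid(READ_ONCE(*pte_offset_kernel(pmdp, addr)));
    }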
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 0c4e3ecf989d..0e270a1c51e6 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -219,9 +219,6 @@ bool kernel_page_present(struct page *page)
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
- if (!can_set_direct_map())
- return true;
-
pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;