Diffstat (limited to 'arch/arm64/mm')
-rw-r--r-- | arch/arm64/mm/hugetlbpage.c | 12 ++++++++++++
-rw-r--r-- | arch/arm64/mm/init.c        |  7 ++++++-
-rw-r--r-- | arch/arm64/mm/mmu.c         |  3 ++-
-rw-r--r-- | arch/arm64/mm/pgd.c         |  4 ++--
-rw-r--r-- | arch/arm64/mm/proc.S        |  5 +++--
-rw-r--r-- | arch/arm64/mm/trans_pgd.c   |  2 +-
6 files changed, 26 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 3215adf48a1b..98a2a0e64e25 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -519,6 +519,18 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
 
 static int __init hugetlbpage_init(void)
 {
+	/*
+	 * HugeTLB pages are supported on maximum four page table
+	 * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base
+	 * page size, corresponding to hugetlb_add_hstate() calls
+	 * here.
+	 *
+	 * HUGE_MAX_HSTATE should at least match maximum supported
+	 * HugeTLB page sizes on the platform. Any new addition to
+	 * supported HugeTLB page sizes will also require changing
+	 * HUGE_MAX_HSTATE as well.
+	 */
+	BUILD_BUG_ON(HUGE_MAX_HSTATE < 4);
 	if (pud_sect_supported())
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ccdef53872a0..9c0b8d9558fc 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -279,7 +279,12 @@ void __init arm64_memblock_init(void)
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
 		extern u16 memstart_offset_seed;
-		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+
+		/*
+		 * Use the sanitised version of id_aa64mmfr0_el1 so that linear
+		 * map randomization can be enabled by shrinking the IPA space.
+		 */
+		u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 		int parange = cpuid_feature_extract_unsigned_field(
 					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
 		s64 range = linear_region_size -
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e2739b69e11b..b4df5bc5b1b8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1169,7 +1169,8 @@ int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
 				unsigned long addr, unsigned long next)
 {
 	vmemmap_verify((pte_t *)pmdp, node, addr, next);
-	return 1;
+
+	return pmd_sect(READ_ONCE(*pmdp));
 }
 
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 0c501cabc238..8160cff35089 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -33,7 +33,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	gfp_t gfp = GFP_PGTABLE_USER;
 
 	if (pgdir_is_page_size())
-		return (pgd_t *)__get_free_page(gfp);
+		return __pgd_alloc(mm, 0);
 	else
 		return kmem_cache_alloc(pgd_cache, gfp);
 }
@@ -41,7 +41,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 	if (pgdir_is_page_size())
-		free_page((unsigned long)pgd);
+		__pgd_free(mm, pgd);
 	else
 		kmem_cache_free(pgd_cache, pgd);
 }
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index b8edc5765441..fb30c8804f87 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -501,7 +501,7 @@ alternative_else_nop_endif
 #ifdef CONFIG_ARM64_HAFT
 	cmp	x9, ID_AA64MMFR1_EL1_HAFDBS_HAFT
 	b.lt	1f
-	orr	tcr2, tcr2, TCR2_EL1x_HAFT
+	orr	tcr2, tcr2, TCR2_EL1_HAFT
 #endif /* CONFIG_ARM64_HAFT */
 1:
 #endif /* CONFIG_ARM64_HW_AFDBM */
@@ -532,7 +532,8 @@ alternative_else_nop_endif
 #undef PTE_MAYBE_NG
 #undef PTE_MAYBE_SHARED
 
-	orr	tcr2, tcr2, TCR2_EL1x_PIE
+	orr	tcr2, tcr2, TCR2_EL1_PIE
+	msr	REG_TCR2_EL1, x0
 
 .Lskip_indirection:
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index 0f7b484cb2ff..19c67ed1a21f 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -57,7 +57,7 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
 		 */
 		BUG_ON(!pfn_valid(pte_pfn(pte)));
 
-		__set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_novma(pte)));
+		__set_pte(dst_ptep, pte_mkvalid(pte_mkwrite_novma(pte)));
 	}
 }