Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/flush.c        14
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  12
-rw-r--r--  arch/arm64/mm/mmap.c         25
3 files changed, 44 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index a06c6ac770d4..fc4f710e9820 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -75,6 +75,20 @@ EXPORT_SYMBOL_GPL(__sync_icache_dcache);
*/
void flush_dcache_page(struct page *page)
{
+ /*
+ * Only the head page's flags of a HugeTLB page can be cleared, since
+ * the tail vmemmap pages associated with each HugeTLB page are mapped
+ * read-only when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is enabled (see
+ * vmemmap_remap_pte() for details). Although __sync_icache_dcache()
+ * only sets the PG_dcache_clean flag on the head page struct, more
+ * than one page struct with PG_dcache_clean can be associated with the
+ * same HugeTLB page, since the head vmemmap page frame is reused for
+ * the tail pages (see the comments above page_fixed_fake_head() for
+ * details).
+ */
+ if (hugetlb_optimize_vmemmap_enabled() && PageHuge(page))
+ page = compound_head(page);
+
if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
}
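
For reference, the head-page lookup added above relies on the compound_head encoding in struct page. A minimal sketch of that lookup, assuming the upstream encoding (bit 0 of page->compound_head marks a tail page and the remaining bits hold the head pointer); compound_head_sketch is an illustrative name, not a kernel symbol:

static inline struct page *compound_head_sketch(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (head & 1)				/* bit 0 set: tail page */
		return (struct page *)(head - 1);	/* strip the tag bit */
	return page;				/* already a head page */
}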
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 64bb078e2e7b..0f0c17dfeb9c 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -502,19 +502,17 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
-void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
size_t pgsize;
int ncontig;
- if (!pte_cont(READ_ONCE(*ptep))) {
- ptep_clear_flush(vma, addr, ptep);
- return;
- }
+ if (!pte_cont(READ_ONCE(*ptep)))
+ return ptep_clear_flush(vma, addr, ptep);
ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
- clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
+ return get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
}
static int __init hugetlbpage_init(void)
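
The signature change above is what lets callers see the PTE that was cleared. A hedged sketch of the caller pattern this enables (illustrative, not an exact upstream call site; vma, address, ptep and page are assumed to be in scope):

	pte_t pteval = huge_ptep_clear_flush(vma, address, ptep);

	/*
	 * The cleared entry can now be inspected, e.g. to carry over
	 * hardware dirty state before the page is unmapped.
	 */
	if (pte_dirty(pteval))
		set_page_dirty(page);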
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 77ada00280d9..78e9490f748d 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -55,3 +55,28 @@ static int __init adjust_protection_map(void)
return 0;
}
arch_initcall(adjust_protection_map);
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ pteval_t prot = pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
+
+ if (vm_flags & VM_ARM64_BTI)
+ prot |= PTE_GP;
+
+ /*
+ * There are two conditions required for returning a Normal Tagged
+ * memory type: (1) the user requested it via PROT_MTE passed to
+ * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
+ * register (1) as VM_MTE in the vma->vm_flags and (2) as
+ * VM_MTE_ALLOWED. Note that the latter can only be set during the
+ * mmap() call since mprotect() does not accept MAP_* flags.
+ * Checking for VM_MTE only is sufficient since arch_validate_flags()
+ * does not permit (VM_MTE & !VM_MTE_ALLOWED).
+ */
+ if (vm_flags & VM_MTE)
+ prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+
+ return __pgprot(prot);
+}
+EXPORT_SYMBOL(vm_get_page_prot);
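
To see the MTE path above end to end: userspace passes PROT_MTE to mmap(), the kernel records it as VM_MTE in vma->vm_flags, and vm_get_page_prot() then selects the MT_NORMAL_TAGGED attribute index. A minimal userspace sketch (PROT_MTE is 0x20 in the arm64 uapi headers; requires an MTE-capable CPU and kernel):

#include <stdio.h>
#include <sys/mman.h>

#ifndef PROT_MTE
#define PROT_MTE 0x20	/* arm64-specific mmap()/mprotect() flag */
#endif

int main(void)
{
	/* Request a Normal Tagged mapping; fails without MTE support. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(PROT_MTE)");
		return 1;
	}
	munmap(p, 4096);
	return 0;
}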