Diffstat (limited to 'include/linux/huge_mm.h')
-rw-r--r--  include/linux/huge_mm.h  106
1 file changed, 46 insertions(+), 60 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index de29821231c9..768e5261fdae 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -116,9 +116,30 @@ extern struct kobj_attribute shmem_enabled_attr;
extern unsigned long transparent_hugepage_flags;
+#define hugepage_flags_enabled() \
+ (transparent_hugepage_flags & \
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
+#define hugepage_flags_always() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_FLAG))
+
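These helpers test the sysfs-controlled bits in transparent_hugepage_flags: hugepage_flags_enabled() is true when either "always" or "madvise" mode is set, hugepage_flags_always() only when "always" is. A minimal caller sketch, not part of this patch (wants_thp_example() is a hypothetical name; the VM_HUGEPAGE test mirrors part of the __transparent_hugepage_enabled() logic removed below, omitting the DAX, stack and prctl special cases):

	static inline bool wants_thp_example(struct vm_area_struct *vma)
	{
		if (!hugepage_flags_enabled())
			return false;	/* neither "always" nor "madvise" set */
		if (hugepage_flags_always())
			return true;	/* "always": no MADV_HUGEPAGE needed */
		/* "madvise": the VMA must have opted in via MADV_HUGEPAGE */
		return !!(vma->vm_flags & VM_HUGEPAGE);
	}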
+/*
+ * Perform the following checks:
+ * - For file vma, check if the linear page offset of vma is
+ * HPAGE_PMD_NR aligned within the file. The hugepage is
+ * guaranteed to be hugepage-aligned within the file, but we must
+ * check that the PMD-aligned addresses in the VMA map to
+ * PMD-aligned offsets within the file, else the hugepage will
+ * not be PMD-mappable.
+ * - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
+ * area.
+ */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long haddr)
+ unsigned long addr)
{
+ unsigned long haddr;
+
/* Don't have to check pgoff for anonymous vma */
if (!vma_is_anonymous(vma)) {
if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
@@ -126,53 +147,13 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
return false;
}
- if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
- return false;
- return true;
-}
+ haddr = addr & HPAGE_PMD_MASK;
-static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
- unsigned long vm_flags)
-{
- /* Explicitly disabled through madvise. */
- if ((vm_flags & VM_NOHUGEPAGE) ||
- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+ if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
return false;
return true;
}
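With the rename, callers may now pass any address inside the VMA; the helper masks it down to haddr itself. A worked example of the file-offset check (values illustrative; assumes PAGE_SHIFT == 12 and HPAGE_PMD_NR == 512, as on x86-64):

	/*
	 * vm_start = 0x40200000 -> page index 0x40200, vm_pgoff = 0x200:
	 *   0x40200 - 0x200 = 0x40000, a multiple of 512 -> suitable;
	 *   PMD-aligned addresses map PMD-aligned file offsets.
	 *
	 * Same vm_start, but vm_pgoff = 0x201:
	 *   0x40200 - 0x201 = 0x3ffff, not a multiple of 512 -> unsuitable;
	 *   no address in this VMA can be PMD-aligned while also mapping
	 *   a PMD-aligned file offset, so a PMD mapping is impossible.
	 */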
-/*
- * to be used on vmas which are known to support THP.
- * Use transparent_hugepage_active otherwise
- */
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
-{
-
- /*
- * If the hardware/firmware marked hugepage support disabled.
- */
- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
- return false;
-
- if (!transhuge_vma_enabled(vma, vma->vm_flags))
- return false;
-
- if (vma_is_temporary_stack(vma))
- return false;
-
- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
- return true;
-
- if (vma_is_dax(vma))
- return true;
-
- if (transparent_hugepage_flags &
- (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
- return !!(vma->vm_flags & VM_HUGEPAGE);
-
- return false;
-}
-
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
struct inode *inode;
@@ -187,7 +168,9 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
!inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
-bool transparent_hugepage_active(struct vm_area_struct *vma);
+bool hugepage_vma_check(struct vm_area_struct *vma,
+ unsigned long vm_flags,
+ bool smaps, bool in_pf);
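hugepage_vma_check() centralizes the eligibility tests previously spread across transparent_hugepage_active() and __transparent_hugepage_enabled(). Judging from the signature, smaps marks a report-only query (the /proc/<pid>/smaps "THPeligible" path) and in_pf marks the page fault path. A fault-path call might look like this sketch (illustrative, not quoted from the patch):

	if (!hugepage_vma_check(vma, vma->vm_flags, false, true))
		return VM_FAULT_FALLBACK;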
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
@@ -290,7 +273,7 @@ static inline bool is_huge_zero_page(struct page *page)
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
+ return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}
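The reordering above makes pmd_present() the first test, so pmd_pfn() is only consulted for present entries; for a non-present pmd (e.g. a swap or migration entry) the pfn bits are not a real pfn and could spuriously match huge_zero_pfn.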
static inline bool is_huge_zero_pud(pud_t pud)
@@ -311,8 +294,8 @@ static inline bool thp_migration_supported(void)
static inline struct list_head *page_deferred_list(struct page *page)
{
/*
- * Global or memcg deferred list in the second tail pages is
- * occupied by compound_head.
+ * See organization of tail pages of compound page in
+ * "struct page" definition.
*/
return &page[2].deferred_list;
}
@@ -331,24 +314,15 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
return false;
}
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
-{
- return false;
-}
-
-static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
-{
- return false;
-}
-
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long haddr)
+ unsigned long addr)
{
return false;
}
-static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline bool hugepage_vma_check(struct vm_area_struct *vma,
+ unsigned long vm_flags,
+ bool smaps, bool in_pf)
{
return false;
}
@@ -461,4 +435,16 @@ static inline int split_folio_to_list(struct folio *folio,
return split_huge_page_to_list(&folio->page, list);
}
+/*
+ * Architectures that select ARCH_WANTS_THP_SWAP but cannot support THP_SWP
+ * due to implementation limitations (e.g. arm64 with MTE) can override
+ * this to false.
+ */
+#ifndef arch_thp_swp_supported
+static inline bool arch_thp_swp_supported(void)
+{
+ return true;
+}
+#endif
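For reference, an architecture override defines the function and then the macro, so the #ifndef above sees it. A sketch modeled on the companion arm64 change, where swapping out a whole THP while MTE is in use would lose the memory tags (system_supports_mte() is arm64's capability check):

	/* arch/arm64/include/asm/pgtable.h */
	static inline bool arch_thp_swp_supported(void)
	{
		return !system_supports_mte();
	}
	#define arch_thp_swp_supported arch_thp_swp_supported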
+
#endif /* _LINUX_HUGE_MM_H */