summaryrefslogtreecommitdiff
path: root/mm/memfd.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memfd.c')
-rw-r--r--  mm/memfd.c  45
1 files changed, 45 insertions, 0 deletions
diff --git a/mm/memfd.c b/mm/memfd.c
index 7d8d3ab3fa37..e7b7c5294d59 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -60,6 +60,51 @@ static void memfd_tag_pins(struct xa_state *xas)
}
/*
+ * This is a helper function used by memfd_pin_user_pages() in GUP (gup.c).
+ * It is mainly called to allocate a folio in a memfd when the caller
+ * (memfd_pin_folios()) cannot find a folio in the page cache at a given
+ * index in the mapping.
+ *
+ * Returns the folio on success or an ERR_PTR() on failure; the hugetlb
+ * path never returns NULL, so callers should test with IS_ERR().
+ * NOTE(review): presumably shmem_read_folio() follows the same
+ * ERR_PTR-on-failure convention -- verify against its definition.
+ */
+struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+ struct folio *folio;
+ gfp_t gfp_mask;
+ int err;
+
+ if (is_file_hugepages(memfd)) {
+ /*
+ * The folio would most likely be accessed by a DMA driver,
+ * therefore, we have zone memory constraints where we can
+ * alloc from. Also, the folio will be pinned for an indefinite
+ * amount of time, so it is not expected to be migrated away.
+ */
+ gfp_mask = htlb_alloc_mask(hstate_file(memfd));
+ gfp_mask &= ~(__GFP_HIGHMEM | __GFP_MOVABLE);
+
+ folio = alloc_hugetlb_folio_nodemask(hstate_file(memfd),
+ numa_node_id(),
+ NULL,
+ gfp_mask,
+ false);
+ /*
+ * Take the reference handed back to the caller. If
+ * folio_try_get() fails (presumably because the folio is
+ * already being torn down -- confirm), fall through to the
+ * -ENOMEM return below.
+ */
+ if (folio && folio_try_get(folio)) {
+ err = hugetlb_add_to_page_cache(folio,
+ memfd->f_mapping,
+ idx);
+ if (err) {
+ /*
+ * Undo on page-cache insertion failure: drop the
+ * reference taken above, then release the folio.
+ * NOTE(review): free_huge_folio() is called
+ * directly rather than letting the final
+ * folio_put() release it -- confirm this matches
+ * hugetlb's refcount/release convention.
+ */
+ folio_put(folio);
+ free_huge_folio(folio);
+ return ERR_PTR(err);
+ }
+ return folio;
+ }
+ return ERR_PTR(-ENOMEM);
+ }
+#endif
+ /* Non-hugetlb memfds are shmem-backed; delegate to shmem. */
+ return shmem_read_folio(memfd->f_mapping, idx);
+}
+
+/*
* Setting SEAL_WRITE requires us to verify there's no pending writer. However,
* via get_user_pages(), drivers might have some pending I/O without any active
* user-space mappings (eg., direct-IO, AIO). Therefore, we look at all folios