author		Matthew Wilcox (Oracle) <willy@infradead.org>	2024-08-07 20:37:32 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2024-09-01 20:26:05 -0700
commit		94dc8bffd8b7fe83ba8382a3410a2f218dc20cb0 (patch)
tree		efb2372e8f788ee781ef1770654ed0787ed007b5 /mm/memory.c
parent		09022bc196d23484a7a5d48cf373f8583e3fcf23 (diff)
mm: return the folio from swapin_readahead
The unuse_pte_range() caller only wants the folio while do_swap_page()
wants both the page and the folio.  Since do_swap_page() already has
logic for handling both the folio and the page, move the folio-to-page
logic there.  This also lets us allocate larger folios in the
SWP_SYNCHRONOUS_IO path in future.

Link: https://lkml.kernel.org/r/20240807193734.1865400-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
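For illustration, a minimal sketch of the new calling convention from a
folio-only caller, modelled on what unuse_pte_range() can now do; the
function name and error handling here are hypothetical and not taken from
the patch:

#include <linux/mm.h>
#include <linux/swap.h>
#include "swap.h"	/* mm-internal header declaring swapin_readahead() */

/*
 * Hypothetical caller: with this change, swapin_readahead() hands back
 * the folio directly, so a folio-only user no longer needs the
 * page_folio() round-trip on a struct page.
 */
static int swapin_folio_example(swp_entry_t entry, struct vm_fault *vmf)
{
	struct folio *folio;

	folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
	if (!folio)
		return -ENOMEM;		/* hypothetical error path */

	/* ... operate on the folio directly ... */
	folio_put(folio);		/* drop the returned reference */
	return 0;
}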
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
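The single added line in do_swap_page() below maps the folio back to the
exact page for the faulting entry via folio_file_page(). A rough sketch of
that mapping, assuming folio_file_page() picks the subpage at the entry's
offset within the folio; the helper name below is hypothetical and the
modulo form is a simplification, not the kernel's implementation:

#include <linux/mm.h>
#include <linux/swapops.h>	/* swp_offset() */

/*
 * Sketch: choose the subpage of a (possibly large) folio that backs a
 * given swap entry. folio_file_page() does effectively this; since a
 * folio's page count is a power of two, the modulo here matches its
 * mask-based arithmetic.
 */
static inline struct page *subpage_for_swap_entry(struct folio *folio,
						  swp_entry_t entry)
{
	pgoff_t offset = swp_offset(entry);

	return folio_page(folio, offset % folio_nr_pages(folio));
}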
diff --git a/mm/memory.c b/mm/memory.c
index 46a44dc702fa..2ca87ceafede 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4091,7 +4091,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			/* skip swapcache */
 			folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
 						vma, vmf->address, false);
-			page = &folio->page;
 			if (folio) {
 				__folio_set_locked(folio);
 				__folio_set_swapbacked(folio);
@@ -4116,10 +4115,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 				folio->private = NULL;
 			}
 		} else {
-			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+			folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 						vmf);
-			if (page)
-				folio = page_folio(page);
 			swapcache = folio;
 		}
 
@@ -4140,6 +4137,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		ret = VM_FAULT_MAJOR;
 		count_vm_event(PGMAJFAULT);
 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
+		page = folio_file_page(folio, swp_offset(entry));
 	} else if (PageHWPoison(page)) {
 		/*
 		 * hwpoisoned dirty swapcache pages are kept for killing