| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2024-08-07 20:37:32 +0100 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2024-09-01 20:26:05 -0700 |
| commit | 94dc8bffd8b7fe83ba8382a3410a2f218dc20cb0 (patch) | |
| tree | efb2372e8f788ee781ef1770654ed0787ed007b5 /mm/swap.h | |
| parent | 09022bc196d23484a7a5d48cf373f8583e3fcf23 (diff) | |
mm: return the folio from swapin_readahead
The unuse_pte_range() caller only wants the folio while do_swap_page()
wants both the page and the folio. Since do_swap_page() already has logic
for handling both the folio and the page, move the folio-to-page logic
there. This also lets us allocate larger folios in the SWP_SYNCHRONOUS_IO
path in future.
Link: https://lkml.kernel.org/r/20240807193734.1865400-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
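For context, here is a minimal, illustrative sketch (not the verbatim patch) of what the do_swap_page() side looks like after this change: it takes the folio returned by swapin_readahead() and derives the precise page itself, using the existing folio_file_page() helper to pick the right subpage of a (potentially large) folio. Variable names here are illustrative.

```c
/*
 * Illustrative sketch, not the verbatim patch: do_swap_page() now
 * receives a folio from swapin_readahead() and performs the
 * folio-to-page conversion itself, selecting the subpage that
 * corresponds to the faulting swap entry.
 */
folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
if (folio)
	page = folio_file_page(folio, swp_offset(entry));
```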
Diffstat (limited to 'mm/swap.h')
-rw-r--r-- | mm/swap.h | 6 |
1 file changed, 3 insertions(+), 3 deletions(-)
```diff
diff --git a/mm/swap.h b/mm/swap.h
index 7c6330561d84..f8711ff82f84 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -73,8 +73,8 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
 		bool skip_if_exists);
 struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 		struct mempolicy *mpol, pgoff_t ilx);
-struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
-		struct vm_fault *vmf);
+struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
+		struct vm_fault *vmf);
 
 static inline unsigned int folio_swap_flags(struct folio *folio)
 {
@@ -109,7 +109,7 @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
 	return NULL;
 }
 
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
 		struct vm_fault *vmf)
 {
 	return NULL;
```
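On the other side, the commit message notes that unuse_pte_range() only wants the folio, so it can consume the return value directly. A hedged sketch of that simplification (names illustrative, surrounding error handling elided):

```c
/*
 * Illustrative sketch: unuse_pte_range() needs only the folio, so the
 * old pattern of taking a struct page and converting it, e.g.
 *
 *	page = swapin_readahead(entry, gfp, &vmf);
 *	folio = page_folio(page);
 *
 * collapses to a single call once swapin_readahead() returns the folio.
 */
folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf);
if (!folio)
	return -ENOMEM;
```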