author		Vishal Moola (Oracle) <vishal.moola@gmail.com>	2023-01-04 13:14:41 -0800
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-02 22:33:16 -0800
commit		580e7a4926089ea735fa09d42030d90e21537f7f
tree		d34e659d1ca0c0c2fb57aee6203eaf0b51a8feac
parent		4f4a4f0febe6009a4cdc8acac52cc5dc980f185c
f2fs: convert f2fs_sync_meta_pages() to use filemap_get_folios_tag()
Convert function to use folios throughout. This is in preparation for the
removal of find_get_pages_range_tag(). This change removes 5 calls to
compound_head().
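For context on where those compound_head() calls go: each legacy page helper used here resolves the head page of a compound page internally, while the folio variants take the folio directly. A rough sketch of the equivalence (paraphrased for illustration, not the kernel source verbatim):

	/* Legacy helpers perform a head-page lookup on every call: */
	lock_page(page);		/* ~ folio_lock(page_folio(page)) */
	if (!PageDirty(page))		/* ~ !folio_test_dirty(page_folio(page)) */
		return;
	clear_page_dirty_for_io(page);	/* ~ folio_clear_dirty_for_io(page_folio(page)) */

	/* Folio variants operate on the folio itself, so no lookup is
	 * needed when the folio came straight from the batch: */
	folio_lock(folio);
	if (!folio_test_dirty(folio))
		return;
	folio_clear_dirty_for_io(folio);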
Initially the function checked whether the previous page index was truly the previous page, i.e. exactly 1 index behind the current page. To convert to folios and maintain this check, the condition becomes folio->index != prev + folio_nr_pages(previous folio), since we don't know how many pages are in a folio.
At index i == 0 the check is guaranteed to succeed, so to work around the indexing bounds we can simply skip the check for that specific index. This makes the initial assignment of prev trivial, so I removed that as well.
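Concretely, the check changes shape as follows (a distilled sketch of the hunk in the diff below, using its identifiers):

	/* Before: pages are single, so the next expected index is prev + 1. */
	if (nr_to_write != LONG_MAX && page->index != prev + 1)
		goto stop;

	/* After: a folio covers folio_nr_pages() indices, so the next
	 * expected index follows from the previous folio in the batch;
	 * at i == 0 the check would always pass, so it is skipped. */
	if (nr_to_write != LONG_MAX && i != 0 &&
			folio->index != prev + folio_nr_pages(fbatch.folios[i - 1]))
		goto stop;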
Also modify a comment in commit_checkpoint for consistency.
Link: https://lkml.kernel.org/r/20230104211448.4804-17-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Acked-by: Chao Yu <chao@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r-- | fs/f2fs/checkpoint.c | 49
1 file changed, 26 insertions(+), 23 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 56f7d0d6a8b2..5a5515d83a1b 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -395,59 +395,62 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
 {
 	struct address_space *mapping = META_MAPPING(sbi);
 	pgoff_t index = 0, prev = ULONG_MAX;
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	long nwritten = 0;
-	int nr_pages;
+	int nr_folios;
 	struct writeback_control wbc = {
 		.for_reclaim = 0,
 	};
 	struct blk_plug plug;
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 
 	blk_start_plug(&plug);
 
-	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-				PAGECACHE_TAG_DIRTY))) {
+	while ((nr_folios = filemap_get_folios_tag(mapping, &index,
+				(pgoff_t)-1,
+				PAGECACHE_TAG_DIRTY, &fbatch))) {
 		int i;
 
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
+		for (i = 0; i < nr_folios; i++) {
+			struct folio *folio = fbatch.folios[i];
 
-			if (prev == ULONG_MAX)
-				prev = page->index - 1;
-			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
-				pagevec_release(&pvec);
+			if (nr_to_write != LONG_MAX && i != 0 &&
+					folio->index != prev +
+					folio_nr_pages(fbatch.folios[i-1])) {
+				folio_batch_release(&fbatch);
 				goto stop;
 			}
 
-			lock_page(page);
+			folio_lock(folio);
 
-			if (unlikely(page->mapping != mapping)) {
+			if (unlikely(folio->mapping != mapping)) {
 continue_unlock:
-				unlock_page(page);
+				folio_unlock(folio);
 				continue;
 			}
-			if (!PageDirty(page)) {
+			if (!folio_test_dirty(folio)) {
 				/* someone wrote it for us */
 				goto continue_unlock;
 			}
 
-			f2fs_wait_on_page_writeback(page, META, true, true);
+			f2fs_wait_on_page_writeback(&folio->page, META,
+					true, true);
 
-			if (!clear_page_dirty_for_io(page))
+			if (!folio_clear_dirty_for_io(folio))
 				goto continue_unlock;
 
-			if (__f2fs_write_meta_page(page, &wbc, io_type)) {
-				unlock_page(page);
+			if (__f2fs_write_meta_page(&folio->page, &wbc,
+					io_type)) {
+				folio_unlock(folio);
 				break;
 			}
-			nwritten++;
-			prev = page->index;
+			nwritten += folio_nr_pages(folio);
+			prev = folio->index;
 			if (unlikely(nwritten >= nr_to_write))
 				break;
 		}
-		pagevec_release(&pvec);
+		folio_batch_release(&fbatch);
 		cond_resched();
 	}
 stop:
@@ -1403,7 +1406,7 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi,
 	};
 
 	/*
-	 * pagevec_lookup_tag and lock_page again will take
+	 * filemap_get_folios_tag and lock_page again will take
 	 * some extra time. Therefore, f2fs_update_meta_pages and
 	 * f2fs_sync_meta_pages are combined in this function.
 	 */