Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r-- | mm/page-writeback.c | 66
1 file changed, 25 insertions, 41 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 91d163f8d36b..7e2da284e427 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -324,18 +324,6 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 	}
 
 	/*
-	 * Unreclaimable memory (kernel memory or anonymous memory
-	 * without swap) can bring down the dirtyable pages below
-	 * the zone's dirty balance reserve and the above calculation
-	 * will underflow.  However we still want to add in nodes
-	 * which are below threshold (negative values) to get a more
-	 * accurate calculation but make sure that the total never
-	 * underflows.
-	 */
-	if ((long)x < 0)
-		x = 0;
-
-	/*
 	 * Make sure that the number of highmem pages is never larger
 	 * than the number of the total dirtyable memory. This can only
 	 * occur in very strange VM situations but we want to make sure
@@ -2430,13 +2418,13 @@ EXPORT_SYMBOL(folio_write_one);
 /*
  * For address_spaces which do not use buffers nor write back.
  */
-int __set_page_dirty_no_writeback(struct page *page)
+bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-	if (!PageDirty(page))
-		return !TestSetPageDirty(page);
-	return 0;
+	if (!folio_test_dirty(folio))
+		return !folio_test_set_dirty(folio);
+	return false;
 }
-EXPORT_SYMBOL(__set_page_dirty_no_writeback);
+EXPORT_SYMBOL(noop_dirty_folio);
 
 /*
  * Helper function for set_page_dirty family.
@@ -2477,16 +2465,14 @@ static void folio_account_dirtied(struct folio *folio,
  *
  * Caller must hold lock_page_memcg().
  */
-void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
-			   struct bdi_writeback *wb)
+void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
 {
-	if (mapping_can_writeback(mapping)) {
-		long nr = folio_nr_pages(folio);
-		lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
-		zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
-		wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
-		task_io_account_cancelled_write(nr * PAGE_SIZE);
-	}
+	long nr = folio_nr_pages(folio);
+
+	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
+	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+	wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
+	task_io_account_cancelled_write(nr * PAGE_SIZE);
 }
 
 /*
@@ -2530,7 +2516,7 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
  * This is also sometimes used by filesystems which use buffer_heads when
  * a single buffer is being dirtied: we want to set the folio dirty in
  * that case, but not all the buffers.  This is a "bottom-up" dirtying,
- * whereas __set_page_dirty_buffers() is a "top-down" dirtying.
+ * whereas block_dirty_folio() is a "top-down" dirtying.
  *
  * The caller must ensure this doesn't race with truncation.  Most will
  * simply hold the folio lock, but e.g. zap_pte_range() calls with the
@@ -2616,7 +2602,7 @@ EXPORT_SYMBOL(folio_redirty_for_writepage);
  * folio_mark_dirty - Mark a folio as being modified.
  * @folio: The folio.
  *
- * For folios with a mapping this should be done under the page lock
+ * For folios with a mapping this should be done with the folio lock held
 * for the benefit of asynchronous memory errors who prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but should be better not to.
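The noop_dirty_folio() conversion above gives the helper the same signature as the new dirty_folio address_space operation invoked later in this diff, so filesystems that neither use buffer heads nor write back can plug the exported helper in directly. A minimal sketch of that wiring, assuming the post-conversion aops layout; example_aops is a hypothetical name, not part of this diff:

	static const struct address_space_operations example_aops = {
		.dirty_folio	= noop_dirty_folio,
		/* other operations as the filesystem requires */
	};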
@@ -2630,23 +2616,21 @@ bool folio_mark_dirty(struct folio *folio)
 
 	if (likely(mapping)) {
 		/*
 		 * readahead/lru_deactivate_page could remain
-		 * PG_readahead/PG_reclaim due to race with end_page_writeback
-		 * About readahead, if the page is written, the flags would be
+		 * PG_readahead/PG_reclaim due to race with folio_end_writeback
+		 * About readahead, if the folio is written, the flags would be
 		 * reset. So no problem.
-		 * About lru_deactivate_page, if the page is redirty, the flag
-		 * will be reset. So no problem. but if the page is used by readahead
-		 * it will confuse readahead and make it restart the size rampup
-		 * process. But it's a trivial problem.
+		 * About lru_deactivate_page, if the folio is redirtied,
+		 * the flag will be reset. So no problem. but if the
+		 * folio is used by readahead it will confuse readahead
+		 * and make it restart the size rampup process. But it's
+		 * a trivial problem.
 		 */
 		if (folio_test_reclaim(folio))
 			folio_clear_reclaim(folio);
-		return mapping->a_ops->set_page_dirty(&folio->page);
+		return mapping->a_ops->dirty_folio(mapping, folio);
 	}
-	if (!folio_test_dirty(folio)) {
-		if (!folio_test_set_dirty(folio))
-			return true;
-	}
-	return false;
+
+	return noop_dirty_folio(mapping, folio);
 }
 EXPORT_SYMBOL(folio_mark_dirty);
@@ -2697,7 +2681,7 @@ void __folio_cancel_dirty(struct folio *folio)
 		wb = unlocked_inode_to_wb_begin(inode, &cookie);
 
 		if (folio_test_clear_dirty(folio))
-			folio_account_cleaned(folio, mapping, wb);
+			folio_account_cleaned(folio, wb);
 
 		unlocked_inode_to_wb_end(inode, &cookie);
 		folio_memcg_unlock(folio);
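The folio_mark_dirty() hunk shows the shape of the new operation: dirty_folio() takes both the mapping and the folio, returns bool, and the mapping-less fallback now reuses noop_dirty_folio() instead of open-coding the test-and-set. The mapping_can_writeback() check dropped from folio_account_cleaned() presumably moves to callers such as __folio_cancel_dirty() in the final hunk. A filesystem that wants default pagecache dirtying plus private bookkeeping would implement the method along these lines; example_dirty_folio is a hypothetical name, and filemap_dirty_folio() is assumed to be the folio replacement for __set_page_dirty_nobuffers() introduced elsewhere in this series:

	static bool example_dirty_folio(struct address_space *mapping,
					struct folio *folio)
	{
		/* Dirty the folio and update pagecache accounting as usual. */
		bool newly_dirtied = filemap_dirty_folio(mapping, folio);

		if (newly_dirtied) {
			/* e.g. kick filesystem-private writeback state here */
		}
		return newly_dirtied;
	}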