Add __folio_clear_dirty_for_io() which takes in an argument indicating whether the folio and wb stats should be updated as part of the call or not. Signed-off-by: Joanne Koong --- mm/page-writeback.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 1f862ab3c68d..fe39137f01d6 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2955,7 +2955,7 @@ EXPORT_SYMBOL(__folio_cancel_dirty); * This incoherency between the folio's dirty flag and xarray tag is * unfortunate, but it only exists while the folio is locked. */ -bool folio_clear_dirty_for_io(struct folio *folio) +static bool __folio_clear_dirty_for_io(struct folio *folio, bool update_stats) { struct address_space *mapping = folio_mapping(folio); bool ret = false; @@ -3004,10 +3004,14 @@ bool folio_clear_dirty_for_io(struct folio *folio) */ wb = unlocked_inode_to_wb_begin(inode, &cookie); if (folio_test_clear_dirty(folio)) { - long nr = folio_nr_pages(folio); - lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr); - zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); - wb_stat_mod(wb, WB_RECLAIMABLE, -nr); + if (update_stats) { + long nr = folio_nr_pages(folio); + lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, + -nr); + zone_stat_mod_folio(folio, + NR_ZONE_WRITE_PENDING, -nr); + wb_stat_mod(wb, WB_RECLAIMABLE, -nr); + } ret = true; } unlocked_inode_to_wb_end(inode, &cookie); @@ -3015,6 +3019,11 @@ bool folio_clear_dirty_for_io(struct folio *folio) } return folio_test_clear_dirty(folio); } + +bool folio_clear_dirty_for_io(struct folio *folio) +{ + return __folio_clear_dirty_for_io(folio, true); +} EXPORT_SYMBOL(folio_clear_dirty_for_io); static void wb_inode_writeback_start(struct bdi_writeback *wb) -- 2.47.3