Now that the zero range pagecache flush is purely isolated to providing zeroing correctness in this case, we can remove it and replace it with the folio batch mechanism that is used for handling unwritten extents. This is still slightly odd in that XFS reports a hole vs. a mapping that reflects the COW fork extents, but that has always been the case in this situation and so is a separate issue. We drop the iomap warning that assumes the folio batch is always associated with unwritten mappings, but this is mainly a development assertion as otherwise the core iomap fbatch code doesn't care much about the mapping type if it's handed the set of folios to process. Signed-off-by: Brian Foster Reviewed-by: Christoph Hellwig --- fs/iomap/buffered-io.c | 4 ---- fs/xfs/xfs_iomap.c | 20 ++++++-------------- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 0999aca6e5cc..4422a6d477d7 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -1633,10 +1633,6 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, while ((ret = iomap_iter(&iter, ops)) > 0) { const struct iomap *srcmap = iomap_iter_srcmap(&iter); - if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) && - srcmap->type != IOMAP_UNWRITTEN)) - return -EIO; - if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) && (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)) { diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index ce342b9ce2f0..df240931f07a 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -1781,7 +1781,6 @@ xfs_buffered_write_iomap_begin( { struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); - struct address_space *mapping = inode->i_mapping; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); @@ -1813,7 +1812,6 @@ xfs_buffered_write_iomap_begin( if (error) return error; -restart: error = 
xfs_ilock_for_iomap(ip, flags, &lockmode); if (error) return error; @@ -1866,8 +1864,8 @@ xfs_buffered_write_iomap_begin( /* * We may need to zero over a hole in the data fork if it's fronted by - * COW blocks and dirty pagecache. To make sure zeroing occurs, force - * writeback to remap pending blocks and restart the lookup. + * COW blocks and dirty pagecache. Scan such file ranges for dirty + * cache and fill the iomap batch with folios that need zeroing. */ if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) { loff_t start, end; @@ -1889,16 +1887,10 @@ xfs_buffered_write_iomap_begin( xfs_trim_extent(&imap, offset_fsb, cmap.br_startoff + cmap.br_blockcount - offset_fsb); start = XFS_FSB_TO_B(mp, imap.br_startoff); - end = XFS_FSB_TO_B(mp, - imap.br_startoff + imap.br_blockcount) - 1; - if (filemap_range_needs_writeback(mapping, start, end)) { - xfs_iunlock(ip, lockmode); - error = filemap_write_and_wait_range(mapping, start, - end); - if (error) - return error; - goto restart; - } + end = XFS_FSB_TO_B(mp, imap.br_startoff + imap.br_blockcount); + iomap_fill_dirty_folios(iter, &start, end, &iomap_flags); + xfs_trim_extent(&imap, offset_fsb, + XFS_B_TO_FSB(mp, start) - offset_fsb); goto found_imap; } -- 2.52.0