Move the iomap_readpage_iter() async bio read logic into a separate
helper function. This is needed to make iomap read/readahead more
generically usable, especially for filesystems that do not require
CONFIG_BLOCK.

Signed-off-by: Joanne Koong
---
 fs/iomap/buffered-io.c | 68 ++++++++++++++++++++++++++----------------
 1 file changed, 42 insertions(+), 26 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index fd827398afd2..13854fb6ad86 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -357,36 +357,21 @@ struct iomap_readpage_ctx {
 	struct readahead_control	*rac;
 };
 
-static int iomap_readpage_iter(struct iomap_iter *iter,
-		struct iomap_readpage_ctx *ctx)
+/**
+ * Read in a folio range asynchronously through bios.
+ *
+ * This should only be used for read/readahead, not for buffered writes.
+ * Buffered writes must read in the folio synchronously.
+ */
+static void iomap_read_folio_range_bio_async(const struct iomap_iter *iter,
+		struct iomap_readpage_ctx *ctx, loff_t pos, size_t plen)
 {
+	struct folio *folio = ctx->cur_folio;
 	const struct iomap *iomap = &iter->iomap;
-	loff_t pos = iter->pos;
+	struct iomap_folio_state *ifs = folio->private;
+	size_t poff = offset_in_folio(folio, pos);
 	loff_t length = iomap_length(iter);
-	struct folio *folio = ctx->cur_folio;
-	struct iomap_folio_state *ifs;
-	size_t poff, plen;
 	sector_t sector;
-	int ret;
-
-	if (iomap->type == IOMAP_INLINE) {
-		ret = iomap_read_inline_data(iter, folio);
-		if (ret)
-			return ret;
-		return iomap_iter_advance(iter, &length);
-	}
-
-	/* zero post-eof blocks as the page may be mapped */
-	ifs = ifs_alloc(iter->inode, folio, iter->flags);
-	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
-	if (plen == 0)
-		goto done;
-
-	if (iomap_block_needs_zeroing(iter, pos)) {
-		folio_zero_range(folio, poff, plen);
-		iomap_set_range_uptodate(folio, poff, plen);
-		goto done;
-	}
 
 	ctx->cur_folio_in_bio = true;
 	if (ifs) {
@@ -425,6 +410,37 @@ static int iomap_readpage_iter(struct iomap_iter *iter,
 		ctx->bio->bi_end_io = iomap_read_end_io;
 		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
 	}
+}
+
+static int iomap_readpage_iter(struct iomap_iter *iter,
+		struct iomap_readpage_ctx *ctx)
+{
+	const struct iomap *iomap = &iter->iomap;
+	loff_t pos = iter->pos;
+	loff_t length = iomap_length(iter);
+	struct folio *folio = ctx->cur_folio;
+	size_t poff, plen;
+	int ret;
+
+	if (iomap->type == IOMAP_INLINE) {
+		ret = iomap_read_inline_data(iter, folio);
+		if (ret)
+			return ret;
+		return iomap_iter_advance(iter, &length);
+	}
+
+	/* zero post-eof blocks as the page may be mapped */
+	ifs_alloc(iter->inode, folio, iter->flags);
+	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
+	if (plen == 0)
+		goto done;
+
+	if (iomap_block_needs_zeroing(iter, pos)) {
+		folio_zero_range(folio, poff, plen);
+		iomap_set_range_uptodate(folio, poff, plen);
+	} else {
+		iomap_read_folio_range_bio_async(iter, ctx, pos, plen);
+	}
 
 done:
 	/*
-- 
2.47.3
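
For context, a rough sketch of where this split could lead (not part of
this patch; the ops table, field, and callback below are made up purely
for illustration): once the bio submission lives in its own helper, the
read step in iomap_readpage_iter() could dispatch to a filesystem-supplied
folio-range read for !CONFIG_BLOCK users, and fall back to the bio helper
otherwise, along these lines:

	/*
	 * Hypothetical illustration only: a filesystem that cannot use
	 * bios supplies its own synchronous folio-range read.
	 */
	struct iomap_read_folio_ops {
		int (*read_folio_range)(const struct iomap_iter *iter,
				struct folio *folio, loff_t pos, size_t plen);
	};

	/* inside iomap_readpage_iter(), in place of the direct call: */
	if (ctx->ops && ctx->ops->read_folio_range)
		ret = ctx->ops->read_folio_range(iter, folio, pos, plen);
	else
		iomap_read_folio_range_bio_async(iter, ctx, pos, plen);

Where such an ops table would actually hang (the ctx, the iomap, or a new
argument) is left open here; the point is only that the bio-specific work
now sits behind a single call.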