Move the read/readahead bio submission logic into a separate helper

This is needed to make iomap read/readahead more generically usable,
especially for filesystems that do not require CONFIG_BLOCK.

Signed-off-by: Joanne Koong
---
 fs/iomap/buffered-io.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 13854fb6ad86..a3b02ed5328f 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -357,6 +357,14 @@ struct iomap_readpage_ctx {
 	struct readahead_control *rac;
 };
 
+static void iomap_submit_read_bio(struct iomap_readpage_ctx *ctx)
+{
+	struct bio *bio = ctx->bio;
+
+	if (bio)
+		submit_bio(bio);
+}
+
 /**
  * Read in a folio range asynchronously through bios.
  *
@@ -388,8 +396,7 @@ static void iomap_read_folio_range_bio_async(const struct iomap_iter *iter,
 	gfp_t orig_gfp = gfp;
 	unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
 
-	if (ctx->bio)
-		submit_bio(ctx->bio);
+	iomap_submit_read_bio(ctx);
 
 	if (ctx->rac) /* same as readahead_gfp_mask */
 		gfp |= __GFP_NORETRY | __GFP_NOWARN;
@@ -484,13 +491,10 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
 	while ((ret = iomap_iter(&iter, ops)) > 0)
 		iter.status = iomap_read_folio_iter(&iter, &ctx);
 
-	if (ctx.bio) {
-		submit_bio(ctx.bio);
-		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
-	} else {
-		WARN_ON_ONCE(ctx.cur_folio_in_bio);
+	iomap_submit_read_bio(&ctx);
+
+	if (!ctx.cur_folio_in_bio)
 		folio_unlock(folio);
-	}
 
 	/*
 	 * Just like mpage_readahead and block_read_full_folio, we always
@@ -556,12 +560,10 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 	while (iomap_iter(&iter, ops) > 0)
 		iter.status = iomap_readahead_iter(&iter, &ctx);
 
-	if (ctx.bio)
-		submit_bio(ctx.bio);
-	if (ctx.cur_folio) {
-		if (!ctx.cur_folio_in_bio)
-			folio_unlock(ctx.cur_folio);
-	}
+	iomap_submit_read_bio(&ctx);
+
+	if (ctx.cur_folio && !ctx.cur_folio_in_bio)
+		folio_unlock(ctx.cur_folio);
 }
 EXPORT_SYMBOL_GPL(iomap_readahead);
-- 
2.47.3