Iterate over all non-uptodate ranges in a single call to
iomap_readpage_iter() instead of leaving the partial folio iteration to
the caller. This will be useful for supporting caller-provided async
folio read callbacks (added in a later commit), which will require
tracking when the first and last async read requests for a folio are
sent in order to prevent premature read completion of the folio. This
additionally makes the iomap_readahead_iter() logic a bit simpler.

Signed-off-by: Joanne Koong
---
 fs/iomap/buffered-io.c | 67 ++++++++++++++++++------------------------
 1 file changed, 29 insertions(+), 38 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 51d204f0e077..fc8fa24ae7db 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -431,6 +431,7 @@ static int iomap_readpage_iter(struct iomap_iter *iter,
 	loff_t length = iomap_length(iter);
 	struct folio *folio = ctx->cur_folio;
 	size_t poff, plen;
+	loff_t count;
 	int ret;
 
 	if (iomap->type == IOMAP_INLINE) {
@@ -442,39 +443,29 @@ static int iomap_readpage_iter(struct iomap_iter *iter,
 
 	/* zero post-eof blocks as the page may be mapped */
 	ifs_alloc(iter->inode, folio, iter->flags);
-	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
-	if (plen == 0)
-		goto done;
 
-	if (iomap_block_needs_zeroing(iter, pos)) {
-		folio_zero_range(folio, poff, plen);
-		iomap_set_range_uptodate(folio, poff, plen);
-	} else {
-		iomap_read_folio_range_bio_async(iter, ctx, pos, plen);
-	}
-
-done:
-	/*
-	 * Move the caller beyond our range so that it keeps making progress.
-	 * For that, we have to include any leading non-uptodate ranges, but
-	 * we can skip trailing ones as they will be handled in the next
-	 * iteration.
-	 */
-	length = pos - iter->pos + plen;
-	return iomap_iter_advance(iter, &length);
-}
+	length = min_t(loff_t, length,
+			folio_size(folio) - offset_in_folio(folio, pos));
+	while (length) {
+		iomap_adjust_read_range(iter->inode, folio, &pos,
+				length, &poff, &plen);
+		count = pos - iter->pos + plen;
+		if (plen == 0)
+			return iomap_iter_advance(iter, &count);
 
-static int iomap_read_folio_iter(struct iomap_iter *iter,
-		struct iomap_readpage_ctx *ctx)
-{
-	int ret;
+		if (iomap_block_needs_zeroing(iter, pos)) {
+			folio_zero_range(folio, poff, plen);
+			iomap_set_range_uptodate(folio, poff, plen);
+		} else {
+			iomap_read_folio_range_bio_async(iter, ctx, pos, plen);
+		}
 
-	while (iomap_length(iter)) {
-		ret = iomap_readpage_iter(iter, ctx);
+		length -= count;
+		ret = iomap_iter_advance(iter, &count);
 		if (ret)
 			return ret;
+		pos = iter->pos;
 	}
-
 	return 0;
 }
 
@@ -493,7 +484,7 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
 	trace_iomap_readpage(iter.inode, 1);
 
 	while ((ret = iomap_iter(&iter, ops)) > 0)
-		iter.status = iomap_read_folio_iter(&iter, &ctx);
+		iter.status = iomap_readpage_iter(&iter, &ctx);
 
 	iomap_submit_read_bio(&ctx);
 
@@ -510,16 +501,16 @@ static int iomap_readahead_iter(struct iomap_iter *iter,
 	int ret;
 
 	while (iomap_length(iter)) {
-		if (ctx->cur_folio &&
-		    offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
-			if (!ctx->folio_owned)
-				folio_unlock(ctx->cur_folio);
-			ctx->cur_folio = NULL;
-		}
-		if (!ctx->cur_folio) {
-			ctx->cur_folio = readahead_folio(ctx->rac);
-			ctx->folio_owned = false;
-		}
+		if (ctx->cur_folio && !ctx->folio_owned)
+			folio_unlock(ctx->cur_folio);
+		ctx->cur_folio = readahead_folio(ctx->rac);
+		/*
+		 * We should never in practice hit this case since
+		 * the iter length matches the readahead length.
+		 */
+		if (WARN_ON_ONCE(!ctx->cur_folio))
+			return -EINVAL;
+		ctx->folio_owned = false;
 		ret = iomap_readpage_iter(iter, ctx);
 		if (ret)
 			return ret;
-- 
2.47.3
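For readers following the new loop shape, the iteration pattern the patch
introduces can be sketched stand-alone as below. This is a minimal
illustrative sketch, not the kernel code: read_folio_ranges(),
adjust_read_range(), issue_read() and advance() are hypothetical stand-ins
for iomap_readpage_iter(), iomap_adjust_read_range(),
iomap_read_folio_range_bio_async() and iomap_iter_advance(), with the types
simplified.

/*
 * Sketch of the single-call iteration: clamp the mapping length to the
 * current folio, then walk every non-uptodate sub-range in one call
 * instead of returning after the first range and letting the caller
 * re-invoke the iterator.
 */
#include <stddef.h>

struct range {
	long long pos;	/* start of the next non-uptodate range */
	size_t len;	/* its length; 0 if the remainder is uptodate */
};

/* hypothetical stand-ins, declared here so the sketch compiles */
extern struct range adjust_read_range(long long pos, long long len);
extern void issue_read(long long pos, size_t len);
extern int advance(long long *iter_pos, long long count);

static int read_folio_ranges(long long *iter_pos, long long length,
			     long long folio_end)
{
	long long pos = *iter_pos;

	/* never iterate past the end of the current folio */
	if (length > folio_end - pos)
		length = folio_end - pos;

	while (length > 0) {
		struct range r = adjust_read_range(pos, length);
		/* count includes any leading already-uptodate gap */
		long long count = r.pos - *iter_pos + (long long)r.len;

		if (r.len == 0)		/* remainder already uptodate */
			return advance(iter_pos, count);

		issue_read(r.pos, r.len);

		length -= count;
		if (advance(iter_pos, count))
			return -1;
		pos = *iter_pos;
	}
	return 0;
}

The property mirrored here is that count covers the leading uptodate gap as
well as the range just handled, so the iterator always makes forward
progress even when adjust_read_range() skips blocks that need no I/O.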