Store the iomap_readpage_ctx bio generically as a "void *read_ctx". This
makes the read/readahead interface more generic, which allows it to be
used by filesystems that may not be block-based and may not have
CONFIG_BLOCK set.

Signed-off-by: Joanne Koong
Reviewed-by: "Darrick J. Wong"
---
 fs/iomap/buffered-io.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index f8b985bb5a6b..b06b532033ad 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -363,13 +363,13 @@ static void iomap_read_end_io(struct bio *bio)
 struct iomap_readpage_ctx {
 	struct folio		*cur_folio;
 	bool			cur_folio_in_bio;
-	struct bio		*bio;
+	void			*read_ctx;
 	struct readahead_control *rac;
 };
 
 static void iomap_bio_submit_read(struct iomap_readpage_ctx *ctx)
 {
-	struct bio *bio = ctx->bio;
+	struct bio *bio = ctx->read_ctx;
 
 	if (bio)
 		submit_bio(bio);
@@ -384,6 +384,7 @@ static void iomap_bio_read_folio_range(const struct iomap_iter *iter,
 	size_t poff = offset_in_folio(folio, pos);
 	loff_t length = iomap_length(iter);
 	sector_t sector;
+	struct bio *bio = ctx->read_ctx;
 
 	ctx->cur_folio_in_bio = true;
 	if (ifs) {
@@ -393,9 +394,8 @@
 	}
 
 	sector = iomap_sector(iomap, pos);
-	if (!ctx->bio ||
-	    bio_end_sector(ctx->bio) != sector ||
-	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
+	if (!bio || bio_end_sector(bio) != sector ||
+	    !bio_add_folio(bio, folio, plen, poff)) {
 		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
 		gfp_t orig_gfp = gfp;
 		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -404,22 +404,21 @@
 
 		if (ctx->rac) /* same as readahead_gfp_mask */
 			gfp |= __GFP_NORETRY | __GFP_NOWARN;
-		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
-				REQ_OP_READ, gfp);
+		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
+				gfp);
 		/*
 		 * If the bio_alloc fails, try it again for a single page to
 		 * avoid having to deal with partial page reads. This emulates
 		 * what do_mpage_read_folio does.
 		 */
-		if (!ctx->bio) {
-			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
-					orig_gfp);
-		}
+		if (!bio)
+			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
 		if (ctx->rac)
-			ctx->bio->bi_opf |= REQ_RAHEAD;
-		ctx->bio->bi_iter.bi_sector = sector;
-		ctx->bio->bi_end_io = iomap_read_end_io;
-		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
+			bio->bi_opf |= REQ_RAHEAD;
+		bio->bi_iter.bi_sector = sector;
+		bio->bi_end_io = iomap_read_end_io;
+		bio_add_folio_nofail(bio, folio, plen, poff);
+		ctx->read_ctx = bio;
 	}
 }
 
-- 
2.47.3
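
For illustration only, here is a minimal standalone C sketch of the pattern the
patch relies on: the per-read context keeps an opaque "void *read_ctx", the
block-based path stashes a bio-like object in it, and a hypothetical non-block
path stashes its own state there instead. Every name below (readpage_ctx,
fake_bio, nonblock_read_state, the *_read_folio_range helpers) is invented for
the sketch and is not iomap or kernel API; the point is only that each path
loads the slot into a typed local and stores it back, just as the patched code
does with "struct bio *bio = ctx->read_ctx;" and "ctx->read_ctx = bio;".

#include <stdio.h>
#include <stdlib.h>

/* Opaque per-read context, mirroring the "void *read_ctx" idea. */
struct readpage_ctx {
	void *read_ctx;
};

/* Stand-in for a block I/O container (not the kernel's struct bio). */
struct fake_bio {
	unsigned long next_sector;
};

/* Stand-in for what a non-block filesystem might track instead. */
struct nonblock_read_state {
	size_t bytes_requested;
};

/* Block-style path: keep a fake_bio in the opaque slot. */
static void block_read_folio_range(struct readpage_ctx *ctx, size_t len)
{
	struct fake_bio *bio = ctx->read_ctx;	/* typed view of the slot */

	if (!bio) {
		bio = calloc(1, sizeof(*bio));
		if (!bio)
			return;
	}
	bio->next_sector += len / 512;
	ctx->read_ctx = bio;			/* store back generically */
}

/* Non-block path: keep private state in the same slot. */
static void nonblock_read_folio_range(struct readpage_ctx *ctx, size_t len)
{
	struct nonblock_read_state *state = ctx->read_ctx;

	if (!state) {
		state = calloc(1, sizeof(*state));
		if (!state)
			return;
	}
	state->bytes_requested += len;
	ctx->read_ctx = state;
}

int main(void)
{
	struct readpage_ctx a = { 0 }, b = { 0 };

	block_read_folio_range(&a, 4096);
	nonblock_read_folio_range(&b, 4096);

	printf("block path next sector: %lu\n",
	       ((struct fake_bio *)a.read_ctx)->next_sector);
	printf("non-block path bytes requested: %zu\n",
	       ((struct nonblock_read_state *)b.read_ctx)->bytes_requested);

	free(a.read_ctx);
	free(b.read_ctx);
	return 0;
}

Because the slot is opaque, only the code that owns a given read path ever
interprets it, which is what lets the bio-specific pieces stay behind
CONFIG_BLOCK while the generic read/readahead machinery remains shared.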