There is no longer a dependency on CONFIG_BLOCK in the iomap read and
readahead logic. Move this logic out of the CONFIG_BLOCK guard. This
allows non-block-based filesystems to use iomap for reads/readahead.

Signed-off-by: Joanne Koong
---
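With the bio plumbing no longer required, a non-block filesystem would
wire up reads by passing its own struct iomap_read_ops rather than
iomap_read_bios_ops. The sketch below shows what a synchronous
implementation could look like; myfs_read_folio_range() and
myfs_rpc_read() are hypothetical names, and it assumes .read_submit may
be left unset when there is no batched I/O to kick off:

/* Hypothetical transport helper the filesystem provides elsewhere. */
int myfs_rpc_read(struct folio *folio, size_t off, size_t len, loff_t pos);

static int myfs_read_folio_range(const struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx, loff_t pos, size_t plen)
{
	struct folio *folio = ctx->cur_folio;
	size_t poff = offset_in_folio(folio, pos);
	int error;

	/* Account the range as under I/O, as the bio path does. */
	iomap_start_folio_read(folio, plen);

	/* Fetch the data over the filesystem's own transport. */
	error = myfs_rpc_read(folio, poff, plen, pos);

	/* Completion handles uptodate/unlock bookkeeping for the range. */
	iomap_finish_folio_read(folio, poff, plen, error);
	return error;
}

static const struct iomap_read_ops myfs_read_ops = {
	.read_folio_range	= myfs_read_folio_range,
	/* No .read_submit: reads complete before returning. */
};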
 fs/iomap/buffered-io.c | 151 +++++++++++++++++++++--------------------
 1 file changed, 76 insertions(+), 75 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index f673e03f4ffb..c424e8c157dd 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -358,81 +358,6 @@ void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
 }
 EXPORT_SYMBOL_GPL(iomap_finish_folio_read);
 
-#ifdef CONFIG_BLOCK
-static void iomap_read_end_io(struct bio *bio)
-{
-	int error = blk_status_to_errno(bio->bi_status);
-	struct folio_iter fi;
-
-	bio_for_each_folio_all(fi, bio)
-		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
-	bio_put(bio);
-}
-
-static int iomap_submit_read_bio(struct iomap_read_folio_ctx *ctx)
-{
-	struct bio *bio = ctx->private;
-
-	if (bio)
-		submit_bio(bio);
-
-	return 0;
-}
-
-/**
- * Read in a folio range asynchronously through bios.
- *
- * This should only be used for read/readahead, not for buffered writes.
- * Buffered writes must read in the folio synchronously.
- */
-static int iomap_read_folio_range_bio_async(const struct iomap_iter *iter,
-		struct iomap_read_folio_ctx *ctx, loff_t pos, size_t plen)
-{
-	struct folio *folio = ctx->cur_folio;
-	const struct iomap *iomap = &iter->iomap;
-	size_t poff = offset_in_folio(folio, pos);
-	loff_t length = iomap_length(iter);
-	sector_t sector;
-	struct bio *bio = ctx->private;
-
-	iomap_start_folio_read(folio, plen);
-
-	sector = iomap_sector(iomap, pos);
-	if (!bio || bio_end_sector(bio) != sector ||
-	    !bio_add_folio(bio, folio, plen, poff)) {
-		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
-		gfp_t orig_gfp = gfp;
-		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
-
-		iomap_submit_read_bio(ctx);
-
-		if (ctx->rac) /* same as readahead_gfp_mask */
-			gfp |= __GFP_NORETRY | __GFP_NOWARN;
-		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
-				REQ_OP_READ, gfp);
-		/*
-		 * If the bio_alloc fails, try it again for a single page to
-		 * avoid having to deal with partial page reads. This emulates
-		 * what do_mpage_read_folio does.
-		 */
-		if (!bio)
-			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
-		if (ctx->rac)
-			bio->bi_opf |= REQ_RAHEAD;
-		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = iomap_read_end_io;
-		bio_add_folio_nofail(bio, folio, plen, poff);
-		ctx->private = bio;
-	}
-	return 0;
-}
-
-const struct iomap_read_ops iomap_read_bios_ops = {
-	.read_folio_range	= iomap_read_folio_range_bio_async,
-	.read_submit		= iomap_submit_read_bio,
-};
-EXPORT_SYMBOL_GPL(iomap_read_bios_ops);
-
 static int iomap_read_folio_iter(struct iomap_iter *iter,
 		struct iomap_read_folio_ctx *ctx, bool *cur_folio_owned)
 {
@@ -601,6 +526,82 @@ void iomap_readahead(const struct iomap_ops *ops,
 }
 EXPORT_SYMBOL_GPL(iomap_readahead);
 
+#ifdef CONFIG_BLOCK
+static void iomap_read_end_io(struct bio *bio)
+{
+	int error = blk_status_to_errno(bio->bi_status);
+	struct folio_iter fi;
+
+	bio_for_each_folio_all(fi, bio)
+		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
+	bio_put(bio);
+}
+
+static int iomap_submit_read_bio(struct iomap_read_folio_ctx *ctx)
+{
+	struct bio *bio = ctx->private;
+
+	if (bio)
+		submit_bio(bio);
+
+	return 0;
+}
+
+/**
+ * Read in a folio range asynchronously through bios.
+ *
+ * This should only be used for read/readahead, not for buffered writes.
+ * Buffered writes must read in the folio synchronously.
+ */
+static int iomap_read_folio_range_bio_async(const struct iomap_iter *iter,
+		struct iomap_read_folio_ctx *ctx, loff_t pos, size_t plen)
+{
+	struct folio *folio = ctx->cur_folio;
+	const struct iomap *iomap = &iter->iomap;
+	size_t poff = offset_in_folio(folio, pos);
+	loff_t length = iomap_length(iter);
+	sector_t sector;
+	struct bio *bio = ctx->private;
+
+	iomap_start_folio_read(folio, plen);
+
+	sector = iomap_sector(iomap, pos);
+	if (!bio || bio_end_sector(bio) != sector ||
+	    !bio_add_folio(bio, folio, plen, poff)) {
+		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
+		gfp_t orig_gfp = gfp;
+		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
+
+		if (bio)
+			submit_bio(bio);
+
+		if (ctx->rac) /* same as readahead_gfp_mask */
+			gfp |= __GFP_NORETRY | __GFP_NOWARN;
+		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
+				REQ_OP_READ, gfp);
+		/*
+		 * If the bio_alloc fails, try it again for a single page to
+		 * avoid having to deal with partial page reads. This emulates
+		 * what do_mpage_read_folio does.
+		 */
+		if (!bio)
+			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
+		if (ctx->rac)
+			bio->bi_opf |= REQ_RAHEAD;
+		bio->bi_iter.bi_sector = sector;
+		bio->bi_end_io = iomap_read_end_io;
+		bio_add_folio_nofail(bio, folio, plen, poff);
+		ctx->private = bio;
+	}
+	return 0;
+}
+
+const struct iomap_read_ops iomap_read_bios_ops = {
+	.read_folio_range	= iomap_read_folio_range_bio_async,
+	.read_submit		= iomap_submit_read_bio,
+};
+EXPORT_SYMBOL_GPL(iomap_read_bios_ops);
+
 static int iomap_read_folio_range(const struct iomap_iter *iter,
 		struct folio *folio, loff_t pos, size_t len)
 {
-- 
2.47.3