iomap: move buffered io bio logic into new file

Move the bio logic in the buffered I/O code into its own file, and
remove the CONFIG_BLOCK gating for iomap read/readahead [1].
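
As an illustration of the interface this enables (not part of this
patch), a non-block-based filesystem could supply the same two hooks
that iomap_bio_read_ops provides below. foo_read_async() and
foo_flush_reads() are hypothetical stand-ins for such a filesystem's
own read machinery:

	/*
	 * Hypothetical sketch of a non-bio iomap_read_ops implementation.
	 * The completion side of foo_read_async() must end up calling
	 * iomap_finish_folio_read(folio, poff, plen, error), just as
	 * iomap_read_end_io() does for bio-based reads.
	 */
	static int foo_read_folio_range(const struct iomap_iter *iter,
			struct iomap_read_folio_ctx *ctx, size_t plen)
	{
		struct folio *folio = ctx->cur_folio;
		size_t poff = offset_in_folio(folio, iter->pos);

		return foo_read_async(folio, poff, plen, ctx->read_ctx);
	}

	static void foo_submit_read(struct iomap_read_folio_ctx *ctx)
	{
		/* Kick off anything batched in ctx->read_ctx. */
		foo_flush_reads(ctx->read_ctx);
	}

	static const struct iomap_read_ops foo_read_ops = {
		.read_folio_range = foo_read_folio_range,
		.submit_read = foo_submit_read,
	};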

[1] https://lore.kernel.org/linux-fsdevel/aMK2GuumUf93ep99@infradead.org/

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Author:    Christoph Hellwig
Date:      2025-09-25 17:26:05 -07:00
Committer: Christian Brauner
Commit:    c2b1adc462 (parent: b2f35ac414)
4 changed files with 103 additions and 88 deletions


fs/iomap/Makefile
@@ -14,5 +14,6 @@ iomap-y += trace.o \
 iomap-$(CONFIG_BLOCK)	+= direct-io.o \
			   ioend.o \
			   fiemap.o \
-			   seek.o
+			   seek.o \
+			   bio.o
 iomap-$(CONFIG_SWAP)	+= swapfile.o

fs/iomap/bio.c (new file, 88 lines)

@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (C) 2016-2023 Christoph Hellwig.
+ */
+#include <linux/iomap.h>
+#include <linux/pagemap.h>
+
+#include "internal.h"
+#include "trace.h"
+
+static void iomap_read_end_io(struct bio *bio)
+{
+	int error = blk_status_to_errno(bio->bi_status);
+	struct folio_iter fi;
+
+	bio_for_each_folio_all(fi, bio)
+		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
+	bio_put(bio);
+}
+
+static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
+{
+	struct bio *bio = ctx->read_ctx;
+
+	if (bio)
+		submit_bio(bio);
+}
+
+static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+		struct iomap_read_folio_ctx *ctx, size_t plen)
+{
+	struct folio *folio = ctx->cur_folio;
+	const struct iomap *iomap = &iter->iomap;
+	loff_t pos = iter->pos;
+	size_t poff = offset_in_folio(folio, pos);
+	loff_t length = iomap_length(iter);
+	sector_t sector;
+	struct bio *bio = ctx->read_ctx;
+
+	sector = iomap_sector(iomap, pos);
+	if (!bio || bio_end_sector(bio) != sector ||
+	    !bio_add_folio(bio, folio, plen, poff)) {
+		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
+		gfp_t orig_gfp = gfp;
+		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
+
+		if (bio)
+			submit_bio(bio);
+		if (ctx->rac) /* same as readahead_gfp_mask */
+			gfp |= __GFP_NORETRY | __GFP_NOWARN;
+		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
+				gfp);
+		/*
+		 * If the bio_alloc fails, try it again for a single page to
+		 * avoid having to deal with partial page reads. This emulates
+		 * what do_mpage_read_folio does.
+		 */
+		if (!bio)
+			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
+		if (ctx->rac)
+			bio->bi_opf |= REQ_RAHEAD;
+		bio->bi_iter.bi_sector = sector;
+		bio->bi_end_io = iomap_read_end_io;
+		bio_add_folio_nofail(bio, folio, plen, poff);
+		ctx->read_ctx = bio;
+	}
+	return 0;
+}
+
+const struct iomap_read_ops iomap_bio_read_ops = {
+	.read_folio_range = iomap_bio_read_folio_range,
+	.submit_read = iomap_bio_submit_read,
+};
+EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
+
+int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+		struct folio *folio, loff_t pos, size_t len)
+{
+	const struct iomap *srcmap = iomap_iter_srcmap(iter);
+	struct bio_vec bvec;
+	struct bio bio;
+
+	bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
+	bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
+	bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
+	return submit_bio_wait(&bio);
+}


fs/iomap/buffered-io.c
@@ -8,6 +8,7 @@
 #include <linux/writeback.h>
 #include <linux/swap.h>
 #include <linux/migrate.h>
+#include "internal.h"
 #include "trace.h"
 
 #include "../internal.h"
@@ -327,7 +328,6 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
 	return 0;
 }
 
-#ifdef CONFIG_BLOCK
 void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
 		int error)
 {
@@ -351,71 +351,6 @@ void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
 }
 EXPORT_SYMBOL_GPL(iomap_finish_folio_read);
-
-static void iomap_read_end_io(struct bio *bio)
-{
-	int error = blk_status_to_errno(bio->bi_status);
-	struct folio_iter fi;
-
-	bio_for_each_folio_all(fi, bio)
-		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
-	bio_put(bio);
-}
-
-static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
-{
-	struct bio *bio = ctx->read_ctx;
-
-	if (bio)
-		submit_bio(bio);
-}
-
-static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
-		struct iomap_read_folio_ctx *ctx, size_t plen)
-{
-	struct folio *folio = ctx->cur_folio;
-	const struct iomap *iomap = &iter->iomap;
-	loff_t pos = iter->pos;
-	size_t poff = offset_in_folio(folio, pos);
-	loff_t length = iomap_length(iter);
-	sector_t sector;
-	struct bio *bio = ctx->read_ctx;
-
-	sector = iomap_sector(iomap, pos);
-	if (!bio || bio_end_sector(bio) != sector ||
-	    !bio_add_folio(bio, folio, plen, poff)) {
-		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
-		gfp_t orig_gfp = gfp;
-		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
-
-		iomap_bio_submit_read(ctx);
-
-		if (ctx->rac) /* same as readahead_gfp_mask */
-			gfp |= __GFP_NORETRY | __GFP_NOWARN;
-		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
-				gfp);
-		/*
-		 * If the bio_alloc fails, try it again for a single page to
-		 * avoid having to deal with partial page reads. This emulates
-		 * what do_mpage_read_folio does.
-		 */
-		if (!bio)
-			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
-		if (ctx->rac)
-			bio->bi_opf |= REQ_RAHEAD;
-		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = iomap_read_end_io;
-		bio_add_folio_nofail(bio, folio, plen, poff);
-		ctx->read_ctx = bio;
-	}
-	return 0;
-}
-
-const struct iomap_read_ops iomap_bio_read_ops = {
-	.read_folio_range = iomap_bio_read_folio_range,
-	.submit_read = iomap_bio_submit_read,
-};
-EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
 
 static void iomap_read_init(struct folio *folio)
 {
 	struct iomap_folio_state *ifs = folio->private;
@@ -656,27 +591,6 @@ void iomap_readahead(const struct iomap_ops *ops,
 }
 EXPORT_SYMBOL_GPL(iomap_readahead);
 
-static int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
-		struct folio *folio, loff_t pos, size_t len)
-{
-	const struct iomap *srcmap = iomap_iter_srcmap(iter);
-	struct bio_vec bvec;
-	struct bio bio;
-
-	bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
-	bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
-	bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
-	return submit_bio_wait(&bio);
-}
-#else
-static int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
-		struct folio *folio, loff_t pos, size_t len)
-{
-	WARN_ON_ONCE(1);
-	return -EIO;
-}
-#endif /* CONFIG_BLOCK */
-
 /*
  * iomap_is_partially_uptodate checks whether blocks within a folio are
  * uptodate or not.


fs/iomap/internal.h
@@ -6,4 +6,16 @@
 u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);
 
+#ifdef CONFIG_BLOCK
+int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+		struct folio *folio, loff_t pos, size_t len);
+#else
+static inline int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+		struct folio *folio, loff_t pos, size_t len)
+{
+	WARN_ON_ONCE(1);
+	return -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
 #endif /* _IOMAP_INTERNAL_H */