author		Christoph Hellwig <hch@lst.de>	2024-10-30 06:18:51 +0100
committer	Jens Axboe <axboe@kernel.dk>	2024-10-31 10:54:25 -0600
commit		cafd00d0e90956c1c570a0a96cd86298897d247b
tree		322663bb86012bf04c31bd6ed588ee787c5fbc83 /block
parent		496a51b37143c690a06612a6bd58827ef2341761
block: remove zone append special casing from the direct I/O path
This code is unused, and all future zoned file systems should follow
the btrfs lead of splitting the bios themselves to the zoned limits
in the I/O submission handler, because if they didn't they would be
hit by commit ed9832bc08db ("block: introduce folio awareness and add
a bigger size from folio") breaking this code when the zone append
limit (that is usually the max_hw_sectors limit) is smaller than the
largest possible folio size.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20241030051859.280923-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
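[Editor's note: a rough sketch of the splitting approach the message refers
to, illustrative only and not the btrfs code. The file system caps each
write bio at the queue's zone append limit in its own submission handler
and only then converts the pieces to REQ_OP_ZONE_APPEND, since a zone
append bio must not be split once issued because each one completes at a
device-chosen sector. my_fs_submit_zoned_write, my_bio_set and
my_zone_append_end_io are hypothetical names.]

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio_set my_bio_set;	/* assumed initialized with bioset_init() */

static void my_zone_append_end_io(struct bio *bio)
{
	/*
	 * For zone append, bio->bi_iter.bi_sector now holds the sector the
	 * device actually wrote to; a real file system would record it in
	 * its metadata here before dropping the bio.
	 */
	bio_put(bio);
}

static void my_fs_submit_zoned_write(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned int max_sectors = queue_max_zone_append_sectors(q);

	while (bio_sectors(bio) > max_sectors) {
		/* Carve the first max_sectors off into their own bio. */
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
					      &my_bio_set);

		/* Convert the piece to a zone append; each piece needs its
		 * own completion since each lands at its own sector.
		 */
		split->bi_opf &= ~REQ_OP_WRITE;
		split->bi_opf |= REQ_OP_ZONE_APPEND;
		split->bi_end_io = my_zone_append_end_io;
		submit_bio(split);
	}

	bio->bi_opf &= ~REQ_OP_WRITE;
	bio->bi_opf |= REQ_OP_ZONE_APPEND;
	bio->bi_end_io = my_zone_append_end_io;
	submit_bio(bio);
}

[With splits handled at submission time like this, no bio larger than the
zone append limit ever reaches the block layer, which is why the clamping
removed below from bio_iov_bvec_set() and __bio_iov_iter_get_pages() has
nothing left to do.]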
Diffstat (limited to 'block')
-rw-r--r--	block/bio.c	34
1 file changed, 2 insertions(+), 32 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index ac4d77c88932..6a60d62a529d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1206,21 +1206,12 @@ EXPORT_SYMBOL_GPL(__bio_release_pages);
 
 void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
 {
-	size_t size = iov_iter_count(iter);
-
 	WARN_ON_ONCE(bio->bi_max_vecs);
-	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-		size_t max_sectors = queue_max_zone_append_sectors(q);
-
-		size = min(size, max_sectors << SECTOR_SHIFT);
-	}
-
 	bio->bi_vcnt = iter->nr_segs;
 	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
 	bio->bi_iter.bi_bvec_done = iter->iov_offset;
-	bio->bi_iter.bi_size = size;
+	bio->bi_iter.bi_size = iov_iter_count(iter);
 	bio_set_flag(bio, BIO_CLONED);
 }
@@ -1245,20 +1236,6 @@ static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len,
 	return 0;
 }
 
-static int bio_iov_add_zone_append_folio(struct bio *bio, struct folio *folio,
-		size_t len, size_t offset)
-{
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	bool same_page = false;
-
-	if (bio_add_hw_folio(q, bio, folio, len, offset,
-			queue_max_zone_append_sectors(q), &same_page) != len)
-		return -EINVAL;
-	if (same_page && bio_flagged(bio, BIO_PAGE_PINNED))
-		unpin_user_folio(folio, 1);
-	return 0;
-}
-
 static unsigned int get_contig_folio_len(unsigned int *num_pages,
 		struct page **pages, unsigned int i,
 		struct folio *folio, size_t left,
@@ -1365,14 +1342,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 		len = get_contig_folio_len(&num_pages, pages, i,
 				folio, left, offset);
 
-		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-			ret = bio_iov_add_zone_append_folio(bio, folio, len,
-					folio_offset);
-			if (ret)
-				break;
-		} else
-			bio_iov_add_folio(bio, folio, len, folio_offset);
-
+		bio_iov_add_folio(bio, folio, len, folio_offset);
 		offset = 0;
 	}