author     Christoph Hellwig <hch@lst.de>  2024-11-08 16:46:51 +0100
committer  Jens Axboe <axboe@kernel.dk>    2024-11-11 09:20:36 -0700
commit     559218d43ec9dde3d2847c7aa127e88d6ab1c9ed (patch)
tree       c17616ebdb0586770eb78eaf26ca136abb89b12b /block
parent     0ef2b9e698dbf9ba78f67952a747f35eb7060470 (diff)
block: pre-calculate max_zone_append_sectors
max_zone_append_sectors differs from all other queue limits in that the
final value used is not stored in the queue_limits but needs to be
obtained using the queue_limits_max_zone_append_sectors helper. This not
only adds (tiny) extra overhead to the I/O path, but can also easily be
forgotten in file system code.

To fix this, add a new max_hw_zone_append_sectors value to queue_limits,
which is set by the driver, and calculate max_zone_append_sectors from
that and the other inputs in blk_validate_zoned_limits, similar to how
max_sectors is calculated.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20241104073955.112324-3-hch@lst.de
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20241108154657.845768-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
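[Editor's note] For context: before this patch, the effective Zone Append
limit was derived on every call by a helper in include/linux/blkdev.h. The
following is a minimal sketch of what that helper computed, reconstructed
from the min()/min_not_zero() logic visible in this patch; it is not
necessarily the verbatim pre-patch code:

/* Sketch of the removed helper: the effective Zone Append limit was
 * recomputed on each call instead of being stored in queue_limits. */
static inline unsigned int
queue_limits_max_zone_append_sectors(const struct queue_limits *l)
{
	unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);

	return min_not_zero(l->max_zone_append_sectors, max_sectors);
}

The patch moves this calculation into blk_validate_zoned_limits() so that
the I/O path can read lim->max_zone_append_sectors directly.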
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c      2
-rw-r--r--  block/blk-merge.c     3
-rw-r--r--  block/blk-settings.c  27
-rw-r--r--  block/blk-sysfs.c     17
4 files changed, 18 insertions, 31 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 4f791a3114a1..0387172e8259 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -607,7 +607,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
return BLK_STS_IOERR;
/* Make sure the BIO is small enough and will not get split */
- if (nr_sectors > queue_max_zone_append_sectors(q))
+ if (nr_sectors > q->limits.max_zone_append_sectors)
return BLK_STS_IOERR;
bio->bi_opf |= REQ_NOMERGE;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 859875a5ee90..7b0af8317c1c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -392,11 +392,10 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
struct bio *bio_split_zone_append(struct bio *bio,
const struct queue_limits *lim, unsigned *nr_segs)
{
- unsigned int max_sectors = queue_limits_max_zone_append_sectors(lim);
int split_sectors;
split_sectors = bio_split_rw_at(bio, lim, nr_segs,
- max_sectors << SECTOR_SHIFT);
+ lim->max_zone_append_sectors << SECTOR_SHIFT);
if (WARN_ON_ONCE(split_sectors > 0))
split_sectors = -EINVAL;
return bio_submit_split(bio, split_sectors);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5ee3d6d1448d..7d6b296997c2 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -50,7 +50,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->max_sectors = UINT_MAX;
lim->max_dev_sectors = UINT_MAX;
lim->max_write_zeroes_sectors = UINT_MAX;
- lim->max_zone_append_sectors = UINT_MAX;
+ lim->max_hw_zone_append_sectors = UINT_MAX;
lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -91,17 +91,16 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
if (lim->zone_write_granularity < lim->logical_block_size)
lim->zone_write_granularity = lim->logical_block_size;
- if (lim->max_zone_append_sectors) {
- /*
- * The Zone Append size is limited by the maximum I/O size
- * and the zone size given that it can't span zones.
- */
- lim->max_zone_append_sectors =
- min3(lim->max_hw_sectors,
- lim->max_zone_append_sectors,
- lim->chunk_sectors);
- }
-
+ /*
+ * The Zone Append size is limited by the maximum I/O size and the zone
+ * size given that it can't span zones.
+ *
+ * If no max_hw_zone_append_sectors limit is provided, the block layer
+ * will emulate it; otherwise we are also bound by the hardware limit.
+ */
+ lim->max_zone_append_sectors =
+ min_not_zero(lim->max_hw_zone_append_sectors,
+ min(lim->chunk_sectors, lim->max_hw_sectors));
return 0;
}
@@ -527,8 +526,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
b->max_write_zeroes_sectors);
- t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
- queue_limits_max_zone_append_sectors(b));
+ t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
+ b->max_hw_zone_append_sectors);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
b->seg_boundary_mask);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0ef4e13e247d..d80a202cd170 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -131,6 +131,7 @@ QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
+QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)
#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field) \
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page) \
@@ -178,18 +179,6 @@ static ssize_t queue_max_discard_sectors_store(struct gendisk *disk,
return ret;
}
-/*
- * For zone append queue_max_zone_append_sectors does not just return the
- * underlying queue limits, but actually contains a calculation. Because of
- * that we can't simply use QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES here.
- */
-static ssize_t queue_zone_append_max_show(struct gendisk *disk, char *page)
-{
- return sprintf(page, "%llu\n",
- (u64)queue_max_zone_append_sectors(disk->queue) <<
- SECTOR_SHIFT);
-}
-
static ssize_t
queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count)
{
@@ -479,7 +468,7 @@ QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");
QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
-QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
+QUEUE_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
QUEUE_RO_ENTRY(queue_zoned, "zoned");
@@ -607,7 +596,7 @@ static struct attribute *queue_attrs[] = {
&queue_atomic_write_unit_max_entry.attr,
&queue_write_same_max_entry.attr,
&queue_max_write_zeroes_sectors_entry.attr,
- &queue_zone_append_max_entry.attr,
+ &queue_max_zone_append_sectors_entry.attr,
&queue_zone_write_granularity_entry.attr,
&queue_rotational_entry.attr,
&queue_zoned_entry.attr,
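[Editor's note] To see how the new calculation behaves in the emulation
case (the driver reports no hardware Zone Append limit), here is a
minimal, self-contained userspace sketch. The min_u32()/min_not_zero()
helpers and the example limit values are stand-ins for the kernel's
macros and for real device limits, not kernel code:

#include <stdio.h>

/* Stand-ins for the kernel's min() and min_not_zero() macros. */
static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* min of a and b, treating 0 as "no limit set". */
static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return min_u32(a, b);
}

int main(void)
{
	unsigned int chunk_sectors = 524288;          /* hypothetical zone size */
	unsigned int max_hw_sectors = 1024;           /* hypothetical max I/O size */
	unsigned int max_hw_zone_append_sectors = 0;  /* no hardware limit */

	/* Mirrors the new blk_validate_zoned_limits() calculation: with no
	 * hardware limit, the result falls back to min(chunk_sectors,
	 * max_hw_sectors) and the block layer emulates Zone Append. */
	unsigned int max_zone_append_sectors =
		min_not_zero(max_hw_zone_append_sectors,
			     min_u32(chunk_sectors, max_hw_sectors));

	printf("max_zone_append_sectors = %u\n", max_zone_append_sectors);
	return 0;
}

Compiled and run, this prints max_zone_append_sectors = 1024: with no
hardware limit, the value is bounded only by the zone size and the
maximum I/O size.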