author		Keith Busch <kbusch@kernel.org>		2024-02-23 07:59:10 -0800
committer	Jens Axboe <axboe@kernel.dk>		2024-02-24 12:46:46 -0700
commit		8a08c5fd89b447a7de7eb293a7a274c46b932ba2 (patch)
tree		d7c9d860ed39b9419d0bf674ddb89976962e54a6 /block
parent		0eb4db4706603db09644ec3bc9bb0d63ea5d326c (diff)
blk-lib: check for kill signal
Some of these block operations can access a significant capacity and
take longer than the user expected. A user may change their mind about
wanting to run that command and attempt to kill the process and do
something else with their device. But since the task is uninterruptible,
they have to wait for it to finish, which could take many hours.
Check for a fatal signal at each iteration so the user doesn't have to
wait for their regretted operation to complete naturally.
Reported-by: Conrad Meyer <conradmeyer@meta.com>
Tested-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20240223155910.3622666-5-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
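
For context, the loops patched below sit behind user-visible ioctls: BLKDISCARD
reaches __blkdev_issue_discard(), BLKZEROOUT reaches blkdev_issue_zeroout(), and
BLKSECDISCARD reaches blkdev_issue_secure_erase(). Below is a minimal userspace
sketch that exercises one of these paths; the device node and discard length are
placeholders, not anything taken from the patch.

/*
 * Minimal sketch: discard the first 1 GiB of a block device via the
 * BLKDISCARD ioctl. With this patch applied, a fatal signal (e.g.
 * "kill -9") delivered while the ioctl runs makes the kernel loop bail
 * out early instead of running to completion. "/dev/sdX" and the range
 * are placeholder values.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	uint64_t range[2] = { 0, 1ULL << 30 };	/* byte offset, byte length */
	int fd = open("/dev/sdX", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKDISCARD, range) < 0)
		perror("BLKDISCARD");
	close(fd);
	return 0;
}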
Diffstat (limited to 'block')
-rw-r--r--	block/blk-lib.c	40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index a6954eafb8c8..dc8e35d0a51d 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -35,6 +35,26 @@ static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
 	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
 }
 
+static void await_bio_endio(struct bio *bio)
+{
+	complete(bio->bi_private);
+	bio_put(bio);
+}
+
+/*
+ * await_bio_chain - ends @bio and waits for every chained bio to complete
+ */
+static void await_bio_chain(struct bio *bio)
+{
+	DECLARE_COMPLETION_ONSTACK_MAP(done,
+			bio->bi_bdev->bd_disk->lockdep_map);
+
+	bio->bi_private = &done;
+	bio->bi_end_io = await_bio_endio;
+	bio_endio(bio);
+	blk_wait_io(&done);
+}
+
 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
 {
@@ -77,6 +97,10 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 * is disabled.
 		 */
 		cond_resched();
+		if (fatal_signal_pending(current)) {
+			await_bio_chain(bio);
+			return -EINTR;
+		}
 	}
 
 	*biop = bio;
@@ -143,6 +167,10 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
 		nr_sects -= len;
 		sector += len;
 		cond_resched();
+		if (fatal_signal_pending(current)) {
+			await_bio_chain(bio);
+			return -EINTR;
+		}
 	}
 
 	*biop = bio;
@@ -187,6 +215,10 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
 			break;
 		}
 		cond_resched();
+		if (fatal_signal_pending(current)) {
+			await_bio_chain(bio);
+			return -EINTR;
+		}
 	}
 
 	*biop = bio;
@@ -277,7 +309,7 @@ retry:
 		bio_put(bio);
 	}
 	blk_finish_plug(&plug);
-	if (ret && try_write_zeroes) {
+	if (ret && ret != -EINTR && try_write_zeroes) {
 		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
 			try_write_zeroes = false;
 			goto retry;
@@ -329,6 +361,12 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
 		sector += len;
 		nr_sects -= len;
 		cond_resched();
+		if (fatal_signal_pending(current)) {
+			await_bio_chain(bio);
+			ret = -EINTR;
+			bio = NULL;
+			break;
+		}
 	}
 	if (bio) {
 		ret = submit_bio_wait(bio);
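
Two details in the diff are worth noting. First, await_bio_chain() ends the
current bio and waits for every bio already chained to it, so no in-flight I/O
can outlive the on-stack completion after the function returns -EINTR. Second,
blkdev_issue_zeroout() deliberately skips its zero-pages fallback when ret is
-EINTR, since retrying an operation the user just aborted would defeat the
signal. A hedged sketch of how a hypothetical in-kernel caller might treat the
new return value follows; the caller name is illustrative, not from the patch.

/*
 * Hypothetical in-kernel caller, for illustration only: submit a
 * discard and treat -EINTR as "the user gave up" rather than as a
 * device error. blkdev_issue_discard() is the real exported helper
 * patched above; example_trim_device() is made up.
 */
#include <linux/blkdev.h>

static int example_trim_device(struct block_device *bdev, sector_t nr_sects)
{
	int ret = blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL);

	/* A fatal signal aborted the loop; nothing to report. */
	if (ret == -EINTR)
		return ret;
	if (ret)
		pr_err("discard failed: %d\n", ret);
	return ret;
}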