author		Linus Torvalds <torvalds@linux-foundation.org>	2023-12-02 06:02:16 +0900
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-12-02 06:02:16 +0900
commit		e6861be452a53a5de3e1a048eabd811a05a44915 (patch)
tree		065281129fb5e7b1ed51d6298fbd1faa0902063d /drivers/md/bcache/request.c
parent		994d5c58e50e91bb02c7be4a91d5186292a895c8 (diff)
parent		415e5107b0dce0e5407ae4a46700cd7e8859e252 (diff)
Merge tag 'bcachefs-2023-11-29' of https://evilpiepirate.org/git/bcachefs
Pull more bcachefs bugfixes from Kent Overstreet:
- bcache & bcachefs were broken with CFI enabled; patch for closures to
  fix type punning (a minimal sketch of the new callback pattern appears
  just before the diff below)
- mark erasure coding as extra-experimental; there are incompatible
disk space accounting changes coming for erasure coding, and I'm
still seeing checksum errors in some tests
- several fixes for durability-related issues (durability is a
  device-specific setting where we can tell bcachefs that data on a given
  device should be counted as replicated x times; a worked example follows
  this list)
- a fix for a rare livelock when a btree node merge then needs to update a
  parent node that is almost full
- fix a race in the device removal path, where dropping a btree node's
  pointer to a device could be clobbered by an in-flight btree write
  updating the btree node key on completion
- fix one SRCU lock hold time warning in the btree gc code - there's
  still a bunch more of these to fix
- fix a rare race where we'd start copygc before initializing the "are
we rw" percpu refcount; copygc would think we were already ro and die
immediately
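A worked illustration of the durability setting described above (hypothetical
code, not bcachefs internals; the dev_sketch struct and device names are made
up): an extent's effective replica count is the sum of the durability of each
device holding a copy, so one copy on a durability-1 device plus one copy on a
durability-2 device already satisfies replicas=3.

/*
 * Hypothetical sketch, not bcachefs code: how per-device "durability"
 * feeds into an extent's effective replica count.
 */
#include <stdio.h>

struct dev_sketch {
        const char      *name;
        unsigned        durability;     /* copies a write to this device counts as */
};

int main(void)
{
        /* one copy on a plain SSD, one on a device configured with durability=2 */
        struct dev_sketch copies[] = {
                { "ssd0",   1 },
                { "raid6a", 2 },
        };
        unsigned replicas = 0;

        for (unsigned i = 0; i < sizeof(copies) / sizeof(copies[0]); i++)
                replicas += copies[i].durability;

        /* prints 3: replicas=3 is satisfied by just these two copies */
        printf("effective replicas: %u\n", replicas);
        return 0;
}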
* tag 'bcachefs-2023-11-29' of https://evilpiepirate.org/git/bcachefs: (23 commits)
bcachefs: Extra kthread_should_stop() calls for copygc
bcachefs: Convert gc_alloc_start() to for_each_btree_key2()
bcachefs: Fix race between btree writes and metadata drop
bcachefs: move journal seq assertion
bcachefs: -EROFS doesn't count as move_extent_start_fail
bcachefs: trace_move_extent_start_fail() now includes errcode
bcachefs: Fix split_race livelock
bcachefs: Fix bucket data type for stripe buckets
bcachefs: Add missing validation for jset_entry_data_usage
bcachefs: Fix zstd compress workspace size
bcachefs: bpos is misaligned on big endian
bcachefs: Fix ec + durability calculation
bcachefs: Data update path won't accidentaly grow replicas
bcachefs: deallocate_extra_replicas()
bcachefs: Proper refcounting for journal_keys
bcachefs: preserve device path as device name
bcachefs: Fix an endianness conversion
bcachefs: Start gc, copygc, rebalance threads after initing writes ref
bcachefs: Don't stop copygc thread on device resize
bcachefs: Make sure bch2_move_ratelimit() also waits for move_ops
...
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--	drivers/md/bcache/request.c	74
1 file changed, 37 insertions, 37 deletions
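The diff below is the bcache half of the closure/CFI fix called out in the
pull message: callbacks that took a struct closure * become CLOSURE_CALLBACK()
functions, and direct calls pass &cl->work. The following standalone sketch
(all names invented; this is not the kernel's closure.h) shows why taking the
work pointer and walking back to the container avoids the function-pointer
type punning that CFI rejects:

/*
 * Standalone sketch, not kernel code.  With CFI, an indirect call checks
 * that the target function's prototype matches the type of the pointer it
 * is called through.  Casting a void (*)(struct closure *) callback to the
 * workqueue's void (*)(struct work_struct *) therefore traps.  The fix is
 * for the callback to genuinely take the work pointer and walk back to its
 * container, which is what CLOSURE_CALLBACK()/closure_type() do.
 */
#include <stddef.h>
#include <stdio.h>

struct sketch_work {
        void (*func)(struct sketch_work *ws);   /* stand-in for work_func_t */
};

struct sketch_closure {
        struct sketch_work work;                /* closures embed a work item */
};

/* Callbacks are declared with the work-queue prototype... */
#define SKETCH_CLOSURE_CALLBACK(name)   void name(struct sketch_work *ws)

/* ...and recover the closure, then the enclosing object, by pointer math. */
#define sketch_closure_type(name, _type, member)                        \
        struct sketch_closure *cl = (struct sketch_closure *)           \
                ((char *)ws - offsetof(struct sketch_closure, work));   \
        _type *name = (_type *)((char *)cl - offsetof(_type, member))

struct data_op {
        struct sketch_closure cl;
        int value;
};

static SKETCH_CLOSURE_CALLBACK(data_op_done)
{
        sketch_closure_type(op, struct data_op, cl);

        printf("data_op_done: value = %d\n", op->value);
}

int main(void)
{
        struct data_op op = { .value = 42 };

        /* no cast needed: the stored pointer has exactly the declared type */
        op.cl.work.func = data_op_done;
        op.cl.work.func(&op.cl.work);
        return 0;
}

In the real diff, closure_type(op, struct data_insert_op, cl) plays the role
of the second macro above, which is why bch_data_insert_error() now calls
bch_data_insert_keys(&cl->work) rather than passing the closure pointer
directly.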
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a9b1f3896249..83d112bd2b1c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,7 +25,7 @@
 
 struct kmem_cache *bch_search_cache;
 
-static void bch_data_insert_start(struct closure *cl);
+static CLOSURE_CALLBACK(bch_data_insert_start);
 
 static unsigned int cache_mode(struct cached_dev *dc)
 {
@@ -55,9 +55,9 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 
 /* Insert data into cache */
 
-static void bch_data_insert_keys(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_keys)
 {
-        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+        closure_type(op, struct data_insert_op, cl);
         atomic_t *journal_ref = NULL;
         struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
         int ret;
@@ -136,9 +136,9 @@ out:
         continue_at(cl, bch_data_insert_keys, op->wq);
 }
 
-static void bch_data_insert_error(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_error)
 {
-        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+        closure_type(op, struct data_insert_op, cl);
 
         /*
          * Our data write just errored, which means we've got a bunch of keys to
@@ -163,7 +163,7 @@ static void bch_data_insert_error(struct closure *cl)
 
         op->insert_keys.top = dst;
 
-        bch_data_insert_keys(cl);
+        bch_data_insert_keys(&cl->work);
 }
 
 static void bch_data_insert_endio(struct bio *bio)
@@ -184,9 +184,9 @@ static void bch_data_insert_endio(struct bio *bio)
         bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 }
 
-static void bch_data_insert_start(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_start)
 {
-        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+        closure_type(op, struct data_insert_op, cl);
         struct bio *bio = op->bio, *n;
 
         if (op->bypass)
@@ -305,16 +305,16 @@ err:
  * If op->bypass is true, instead of inserting the data it invalidates the
  * region of the cache represented by op->bio and op->inode.
  */
-void bch_data_insert(struct closure *cl)
+CLOSURE_CALLBACK(bch_data_insert)
 {
-        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+        closure_type(op, struct data_insert_op, cl);
 
         trace_bcache_write(op->c, op->inode, op->bio,
                            op->writeback, op->bypass);
 
         bch_keylist_init(&op->insert_keys);
         bio_get(op->bio);
-        bch_data_insert_start(cl);
+        bch_data_insert_start(&cl->work);
 }
 
 /*
@@ -575,9 +575,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
         return n == bio ? MAP_DONE : MAP_CONTINUE;
 }
 
-static void cache_lookup(struct closure *cl)
+static CLOSURE_CALLBACK(cache_lookup)
 {
-        struct search *s = container_of(cl, struct search, iop.cl);
+        closure_type(s, struct search, iop.cl);
         struct bio *bio = &s->bio.bio;
         struct cached_dev *dc;
         int ret;
@@ -698,9 +698,9 @@ static void do_bio_hook(struct search *s,
         bio_cnt_set(bio, 3);
 }
 
-static void search_free(struct closure *cl)
+static CLOSURE_CALLBACK(search_free)
 {
-        struct search *s = container_of(cl, struct search, cl);
+        closure_type(s, struct search, cl);
 
         atomic_dec(&s->iop.c->search_inflight);
 
@@ -749,20 +749,20 @@ static inline struct search *search_alloc(struct bio *bio,
 
 /* Cached devices */
 
-static void cached_dev_bio_complete(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_bio_complete)
 {
-        struct search *s = container_of(cl, struct search, cl);
+        closure_type(s, struct search, cl);
         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
         cached_dev_put(dc);
-        search_free(cl);
+        search_free(&cl->work);
 }
 
 /* Process reads */
 
-static void cached_dev_read_error_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_error_done)
 {
-        struct search *s = container_of(cl, struct search, cl);
+        closure_type(s, struct search, cl);
 
         if (s->iop.replace_collision)
                 bch_mark_cache_miss_collision(s->iop.c, s->d);
@@ -770,12 +770,12 @@ static void cached_dev_read_error_done(struct closure *cl)
         if (s->iop.bio)
                 bio_free_pages(s->iop.bio);
 
-        cached_dev_bio_complete(cl);
+        cached_dev_bio_complete(&cl->work);
 }
 
-static void cached_dev_read_error(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_error)
 {
-        struct search *s = container_of(cl, struct search, cl);
+        closure_type(s, struct search, cl);
         struct bio *bio = &s->bio.bio;
 
         /*
@@ -801,9 +801,9 @@ static void cached_dev_read_error(struct closure *cl)
         continue_at(cl, cached_dev_read_error_done, NULL);
 }
 
-static void cached_dev_cache_miss_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_cache_miss_done)
 {
-        struct search *s = container_of(cl, struct search, cl);
+        closure_type(s, struct search, cl);
         struct bcache_device *d = s->d;
 
         if (s->iop.replace_collision)
@@ -812,13 +812,13 @@ static void cached_dev_cache_miss_done(struct closure *cl)
         if (s->iop.bio)
                 bio_free_pages(s->iop.bio);
 
-        cached_dev_bio_complete(cl);
+        cached_dev_bio_complete(&cl->work);
         closure_put(&d->cl);
 }
 
-static void cached_dev_read_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_done)
 {
-        struct search *s = container_of(cl, struct search, cl);
+        closure_type(s, struct search, cl);
         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
         /*
@@ -858,9 +858,9 @@ static void cached_dev_read_done(struct closure *cl)
         continue_at(cl, cached_dev_cache_miss_done, NULL);
 }
 
-static void cached_dev_read_done_bh(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_done_bh)
 {
-        struct search *s = container_of(cl, struct search, cl);
+        closure_type(s, struct search, cl);
         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
         bch_mark_cache_accounting(s->iop.c, s->d,
@@ -955,13 +955,13 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 
 /* Process writes */
 
-static void cached_dev_write_complete(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_write_complete)
 {
-        struct search *s = container_of(cl, struct search, cl);
+        closure_type(s, struct search, cl);
         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
         up_read_non_owner(&dc->writeback_lock);
-        cached_dev_bio_complete(cl);
+        cached_dev_bio_complete(&cl->work);
 }
 
 static void cached_dev_write(struct cached_dev *dc, struct search *s)
@@ -1048,9 +1048,9 @@ insert_data:
         continue_at(cl, cached_dev_write_complete, NULL);
 }
 
-static void cached_dev_nodata(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_nodata)
 {
-        struct search *s = container_of(cl, struct search, cl);
+        closure_type(s, struct search, cl);
         struct bio *bio = &s->bio.bio;
 
         if (s->iop.flush_journal)
@@ -1265,9 +1265,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
         return MAP_CONTINUE;
 }
 
-static void flash_dev_nodata(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_nodata)
 {
-        struct search *s = container_of(cl, struct search, cl);
+        closure_type(s, struct search, cl);
 
         if (s->iop.flush_journal)
                 bch_journal_meta(s->iop.c, cl);