author		Kent Overstreet <kent.overstreet@gmail.com>	2018-10-21 16:32:51 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:08:10 -0400
commit		198d67006b6015724a840e8586a484c6590fc975 (patch)
tree		7508961820b1d48faf4cca7d86407c5e63dc28fe /fs
parent		2252aa271c1761589ae584ca738233c7d89c083c (diff)
bcachefs: add functionality for heaps to update backpointers
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs')
-rw-r--r--	fs/bcachefs/alloc_background.c	16
-rw-r--r--	fs/bcachefs/btree_io.c		 6
-rw-r--r--	fs/bcachefs/clock.c		 6
-rw-r--r--	fs/bcachefs/extents.c		10
-rw-r--r--	fs/bcachefs/movinggc.c		 4
-rw-r--r--	fs/bcachefs/util.h		59
6 files changed, 59 insertions, 42 deletions
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 45e8b124a9f3..88be5f4be4b1 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -583,7 +583,8 @@ static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
 			e.nr++;
 		} else {
 			if (e.nr)
-				heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+				heap_add_or_replace(&ca->alloc_heap, e,
+						    -bucket_alloc_cmp, NULL);
 
 			e = (struct alloc_heap_entry) {
 				.bucket = b,
@@ -596,14 +597,15 @@ static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
 	}
 
 	if (e.nr)
-		heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+		heap_add_or_replace(&ca->alloc_heap, e,
+				    -bucket_alloc_cmp, NULL);
 
 	for (i = 0; i < ca->alloc_heap.used; i++)
 		nr += ca->alloc_heap.data[i].nr;
 
 	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
 		nr -= ca->alloc_heap.data[0].nr;
-		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp);
+		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
 	}
 
 	up_read(&ca->bucket_lock);
@@ -633,7 +635,7 @@ static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
 		if (bch2_can_invalidate_bucket(ca, b, m)) {
 			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
 
-			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp);
+			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
 			if (heap_full(&ca->alloc_heap))
 				break;
 		}
@@ -660,7 +662,7 @@ static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca
 		if (bch2_can_invalidate_bucket(ca, b, m)) {
 			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
 
-			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp);
+			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
 			if (heap_full(&ca->alloc_heap))
 				break;
 		}
@@ -698,7 +700,7 @@ static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
 		break;
 	}
 
-	heap_resort(&ca->alloc_heap, bucket_alloc_cmp);
+	heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
 
 	for (i = 0; i < ca->alloc_heap.used; i++)
 		nr += ca->alloc_heap.data[i].nr;
@@ -719,7 +721,7 @@ static inline long next_alloc_bucket(struct bch_dev *ca)
 			return b;
 		}
 
-		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp);
+		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
 	}
 
 	return -1;
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index e64e53e9d9ab..8f8e5fab1086 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -35,7 +35,7 @@ void bch2_btree_node_iter_large_push(struct btree_node_iter_large *iter,
 				 __btree_node_key_to_offset(b, end)
 			 });
 
-		__heap_add(iter, n, btree_node_iter_cmp_heap);
+		__heap_add(iter, n, btree_node_iter_cmp_heap, NULL);
 	}
 }
 
@@ -48,9 +48,9 @@ void bch2_btree_node_iter_large_advance(struct btree_node_iter_large *iter,
 	EBUG_ON(iter->data->k > iter->data->end);
 
 	if (iter->data->k == iter->data->end)
-		heap_del(iter, 0, btree_node_iter_cmp_heap);
+		heap_del(iter, 0, btree_node_iter_cmp_heap, NULL);
 	else
-		heap_sift_down(iter, 0, btree_node_iter_cmp_heap);
+		heap_sift_down(iter, 0, btree_node_iter_cmp_heap, NULL);
 }
 
 static void verify_no_dups(struct btree *b,
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index 96f8030384fa..e4486fcbea19 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -22,7 +22,7 @@ void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
 		if (clock->timers.data[i] == timer)
 			goto out;
 
-	BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp));
+	BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
 out:
 	spin_unlock(&clock->timer_lock);
 }
@@ -35,7 +35,7 @@ void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
 
 	for (i = 0; i < clock->timers.used; i++)
 		if (clock->timers.data[i] == timer) {
-			heap_del(&clock->timers, i, io_timer_cmp);
+			heap_del(&clock->timers, i, io_timer_cmp, NULL);
 			break;
 		}
 
@@ -128,7 +128,7 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
 
 	if (clock->timers.used &&
 	    time_after_eq(now, clock->timers.data[0]->expire))
-		heap_pop(&clock->timers, ret, io_timer_cmp);
+		heap_pop(&clock->timers, ret, io_timer_cmp, NULL);
 
 	spin_unlock(&clock->timer_lock);
 
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index ae6b1a17abfa..5dd552bf1d1b 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -88,7 +88,7 @@ struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
 
 	memset(&nr, 0, sizeof(nr));
 
-	heap_resort(iter, key_sort_cmp);
+	heap_resort(iter, key_sort_cmp, NULL);
 
 	while (!bch2_btree_node_iter_large_end(iter)) {
 		if (!should_drop_next_key(iter, b)) {
@@ -101,7 +101,7 @@ struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
 		}
 
 		sort_key_next(iter, b, iter->data);
-		heap_sift_down(iter, 0, key_sort_cmp);
+		heap_sift_down(iter, 0, key_sort_cmp, NULL);
 	}
 
 	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
@@ -841,7 +841,7 @@ static bool extent_i_save(struct btree *b, struct bkey_packed *dst,
 static inline void extent_sort_sift(struct btree_node_iter_large *iter,
 				    struct btree *b, size_t i)
 {
-	heap_sift_down(iter, i, extent_sort_cmp);
+	heap_sift_down(iter, i, extent_sort_cmp, NULL);
 }
 
 static inline void extent_sort_next(struct btree_node_iter_large *iter,
@@ -849,7 +849,7 @@ static inline void extent_sort_next(struct btree_node_iter_large *iter,
 				    struct btree_node_iter_set *i)
 {
 	sort_key_next(iter, b, i);
-	heap_sift_down(iter, i - iter->data, extent_sort_cmp);
+	heap_sift_down(iter, i - iter->data, extent_sort_cmp, NULL);
 }
 
 static void extent_sort_append(struct bch_fs *c,
@@ -897,7 +897,7 @@ struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
 
 	memset(&nr, 0, sizeof(nr));
 
-	heap_resort(iter, extent_sort_cmp);
+	heap_resort(iter, extent_sort_cmp, NULL);
 
 	while (!bch2_btree_node_iter_large_end(iter)) {
 		lk = __btree_node_offset_to_key(b, _l->k);
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index b7def19bdd85..80577661e008 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -161,7 +161,7 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
 			.sectors	= bucket_sectors_used(m),
 			.offset		= bucket_to_sector(ca, b),
 		};
-		heap_add_or_replace(h, e, -sectors_used_cmp);
+		heap_add_or_replace(h, e, -sectors_used_cmp, NULL);
 	}
 	up_read(&ca->bucket_lock);
 	up_read(&c->gc_lock);
@@ -170,7 +170,7 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
 		sectors_to_move += i->sectors;
 
 	while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
-		BUG_ON(!heap_pop(h, e, -sectors_used_cmp));
+		BUG_ON(!heap_pop(h, e, -sectors_used_cmp, NULL));
 		sectors_to_move -= e.sectors;
 	}
 
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 44e2c96b6509..9caf2487ee63 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -127,7 +127,19 @@ do {								\
 	(heap)->data = NULL;					\
 } while (0)
 
-#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])
+#define heap_set_backpointer(h, i, _fn)				\
+do {								\
+	void (*fn)(typeof(h), size_t) = _fn;			\
+	if (fn)							\
+		fn(h, i);					\
+} while (0)
+
+#define heap_swap(h, i, j, set_backpointer)			\
+do {								\
+	swap((h)->data[i], (h)->data[j]);			\
+	heap_set_backpointer(h, i, set_backpointer);		\
+	heap_set_backpointer(h, j, set_backpointer);		\
+} while (0)
 
 #define heap_peek(h)						\
 ({								\
@@ -137,7 +149,7 @@ do {								\
 
 #define heap_full(h)	((h)->used == (h)->size)
 
-#define heap_sift_down(h, i, cmp)				\
+#define heap_sift_down(h, i, cmp, set_backpointer)		\
 do {								\
 	size_t _c, _j = i;					\
 								\
@@ -149,72 +161,75 @@ do {								\
 								\
 		if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0)	\
 			break;					\
-		heap_swap(h, _c, _j);				\
+		heap_swap(h, _c, _j, set_backpointer);		\
 	}							\
 } while (0)
 
-#define heap_sift_up(h, i, cmp)					\
+#define heap_sift_up(h, i, cmp, set_backpointer)		\
 do {								\
 	while (i) {						\
 		size_t p = (i - 1) / 2;				\
 		if (cmp(h, (h)->data[i], (h)->data[p]) >= 0)	\
 			break;					\
-		heap_swap(h, i, p);				\
+		heap_swap(h, i, p, set_backpointer);		\
 		i = p;						\
 	}							\
 } while (0)
 
-#define __heap_add(h, d, cmp)					\
-do {								\
+#define __heap_add(h, d, cmp, set_backpointer)			\
+({								\
 	size_t _i = (h)->used++;				\
 	(h)->data[_i] = d;					\
+	heap_set_backpointer(h, _i, set_backpointer);		\
 								\
-	heap_sift_up(h, _i, cmp);				\
-} while (0)
+	heap_sift_up(h, _i, cmp, set_backpointer);		\
+	_i;							\
+})
 
-#define heap_add(h, d, cmp)					\
+#define heap_add(h, d, cmp, set_backpointer)			\
 ({								\
 	bool _r = !heap_full(h);				\
 	if (_r)							\
-		__heap_add(h, d, cmp);				\
+		__heap_add(h, d, cmp, set_backpointer);		\
 	_r;							\
 })
 
-#define heap_add_or_replace(h, new, cmp)			\
+#define heap_add_or_replace(h, new, cmp, set_backpointer)	\
 do {								\
-	if (!heap_add(h, new, cmp) &&				\
+	if (!heap_add(h, new, cmp, set_backpointer) &&		\
 	    cmp(h, new, heap_peek(h)) >= 0) {			\
 		(h)->data[0] = new;				\
-		heap_sift_down(h, 0, cmp);			\
+		heap_set_backpointer(h, 0, set_backpointer);	\
+		heap_sift_down(h, 0, cmp, set_backpointer);	\
 	}							\
 } while (0)
 
-#define heap_del(h, i, cmp)					\
+#define heap_del(h, i, cmp, set_backpointer)			\
 do {								\
 	size_t _i = (i);					\
 								\
 	BUG_ON(_i >= (h)->used);				\
 	(h)->used--;						\
-	heap_swap(h, _i, (h)->used);				\
-	heap_sift_up(h, _i, cmp);				\
-	heap_sift_down(h, _i, cmp);				\
+	heap_swap(h, _i, (h)->used, set_backpointer);		\
+	heap_sift_up(h, _i, cmp, set_backpointer);		\
+	heap_sift_down(h, _i, cmp, set_backpointer);		\
 } while (0)
 
-#define heap_pop(h, d, cmp)					\
+#define heap_pop(h, d, cmp, set_backpointer)			\
 ({								\
 	bool _r = (h)->used;					\
 	if (_r) {						\
 		(d) = (h)->data[0];				\
-		heap_del(h, 0, cmp);				\
+		heap_del(h, 0, cmp, set_backpointer);		\
 	}							\
 	_r;							\
 })
 
-#define heap_resort(heap, cmp)					\
+#define heap_resort(heap, cmp, set_backpointer)			\
 do {								\
 	ssize_t _i;						\
 	for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i)	\
-		heap_sift_down(heap, _i, cmp);			\
+		heap_sift_down(heap, _i, cmp, set_backpointer);	\
 } while (0)
 
 #define ANYSINT_MAX(t)						\
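
For context, here is a minimal sketch (not part of this commit) of how a heap user could supply the new set_backpointer argument: the callback is invoked whenever an element is stored at or moved to a new index, so the element's owner can always know where it currently sits in the heap, which in turn makes heap_del() by index usable. The my_* names below are hypothetical; every caller converted in this patch simply passes NULL.

/*
 * Illustrative sketch only; assumes the HEAP() and heap_*() macros from
 * fs/bcachefs/util.h as modified above.  The my_* types and helpers are
 * hypothetical, not part of bcachefs.
 */
#include "util.h"

struct my_obj {
	size_t		heap_idx;	/* current position of this object in the heap */
	u64		key;
};

struct my_heap_entry {
	struct my_obj	*obj;
};

typedef HEAP(struct my_heap_entry) my_heap;

/*
 * Called by the heap macros each time data[i] is assigned or swapped,
 * keeping the owner's backpointer in sync with the element's heap index:
 */
static void my_set_backpointer(my_heap *h, size_t i)
{
	h->data[i].obj->heap_idx = i;
}

static inline int my_cmp(my_heap *h, struct my_heap_entry l,
			 struct my_heap_entry r)
{
	return (l.obj->key > r.obj->key) - (l.obj->key < r.obj->key);
}

static void my_heap_example(my_heap *h, struct my_obj *obj)
{
	struct my_heap_entry e = { .obj = obj };

	/* obj->heap_idx stays valid across all sifts and swaps: */
	heap_add(h, e, my_cmp, my_set_backpointer);

	/* ...so the object can later be removed by index: */
	heap_del(h, obj->heap_idx, my_cmp, my_set_backpointer);
}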