author    Kent Overstreet <kent.overstreet@gmail.com>    2021-03-23 23:52:27 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 17:08:57 -0400
commit    a9d79c6e8ba18665bed30702be5fb238c50e8c63 (patch)
tree      1e6119b8da52ff6757ea078f75191f02c3bc3fa1 /fs/bcachefs
parent    08070cba4a378ca02fdb954c45cf9b8797907fe9 (diff)
bcachefs: Use pcpu mode of six locks for interior nodes
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
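
The commit carries no body text beyond the sign-offs. As a rough sketch of the pattern the hunks below apply (hypothetical helper name, not code from the patch): interior nodes (level > 0) switch their six lock to per-CPU reader counting, while leaf nodes keep the single atomic counter; the hash-insert path uses the _rcu variant of the free, presumably because the node can still be reached under RCU.

#include "btree_types.h"	/* struct btree (assumed location of the definition) */
#include "six.h"		/* six_lock_pcpu_alloc() / six_lock_pcpu_free() */

/*
 * Sketch only, not code from this patch: pick the six-lock mode from the
 * node's level.  Interior nodes are mostly read/intent locked during
 * traversal, so per-CPU reader counts presumably avoid bouncing a shared
 * cacheline; leaves keep the plain atomic counter.
 */
static void btree_node_pick_lock_mode(struct btree *b)
{
	if (b->c.level)
		six_lock_pcpu_alloc(&b->c.lock);	/* per-CPU read counts */
	else
		six_lock_pcpu_free(&b->c.lock);		/* single atomic state word */
}
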
Diffstat (limited to 'fs/bcachefs')
-rw-r--r--	fs/bcachefs/btree_cache.c		 6
-rw-r--r--	fs/bcachefs/btree_iter.c		16
-rw-r--r--	fs/bcachefs/btree_update_interior.c	 5
3 files changed, 23 insertions, 4 deletions
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 775b3e8468da..f32fc45c85d2 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -147,6 +147,11 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
 	b->c.level = level;
 	b->c.btree_id = id;
 
+	if (level)
+		six_lock_pcpu_alloc(&b->c.lock);
+	else
+		six_lock_pcpu_free_rcu(&b->c.lock);
+
 	mutex_lock(&bc->lock);
 	ret = __bch2_btree_node_hash_insert(bc, b);
 	if (!ret)
@@ -393,6 +398,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	while (!list_empty(&bc->freed)) {
 		b = list_first_entry(&bc->freed, struct btree, list);
 		list_del(&b->list);
+		six_lock_pcpu_free(&b->c.lock);
 		kfree(b);
 	}
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 5f30626d1852..cf41ece0d66e 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -79,11 +79,19 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
 	 * goes to 0, and it's safe because we have the node intent
 	 * locked:
 	 */
-	atomic64_sub(__SIX_VAL(read_lock, readers),
-		     &b->c.lock.state.counter);
+	if (!b->c.lock.readers)
+		atomic64_sub(__SIX_VAL(read_lock, readers),
+			     &b->c.lock.state.counter);
+	else
+		this_cpu_sub(*b->c.lock.readers, readers);
+
 	btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
-	atomic64_add(__SIX_VAL(read_lock, readers),
-		     &b->c.lock.state.counter);
+
+	if (!b->c.lock.readers)
+		atomic64_add(__SIX_VAL(read_lock, readers),
+			     &b->c.lock.state.counter);
+	else
+		this_cpu_add(*b->c.lock.readers, readers);
 }
 
 bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
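
The __bch2_btree_node_lock_write() hunk above is where the new mode shows up on a hot path: the iterator's read locks are dropped and re-taken around acquiring the write lock, and the count now lives in one of two places. A hedged sketch of that accounting pattern, reusing the field names from the diff (the helper itself is hypothetical):

/* Sketch only: drop 'nr' read locks in whichever representation the six
 * lock currently uses -- the packed atomic state word, or the per-CPU
 * counters when lock->readers is non-NULL. */
static void six_readers_sub(struct six_lock *lock, unsigned nr)
{
	if (!lock->readers)
		atomic64_sub(__SIX_VAL(read_lock, nr),
			     &lock->state.counter);
	else
		this_cpu_sub(*lock->readers, nr);
}

The re-acquire after btree_node_lock_type() is the mirror image with atomic64_add()/this_cpu_add().
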
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 4ad8084714f9..2c202dd01766 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -988,6 +988,11 @@ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
 	list_del_init(&b->list);
 	mutex_unlock(&c->btree_cache.lock);
 
+	if (b->c.level)
+		six_lock_pcpu_alloc(&b->c.lock);
+	else
+		six_lock_pcpu_free(&b->c.lock);
+
 	mutex_lock(&c->btree_root_lock);
 	BUG_ON(btree_node_root(c, b) &&
 	       (b->c.level < btree_node_root(c, b)->c.level ||