author    Kent Overstreet <kent.overstreet@linux.dev>  2024-06-08 15:24:14 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>  2024-07-14 19:00:15 -0400
commit    c30402e5483df785a5319ffe07127bfd7238d8d9 (patch)
tree      90802d9a66f1b0843f4aac312a93e5e4ae1dfd62 /fs/bcachefs
parent    71fdc0b5a678766e874c5001e8bc83bde5931655 (diff)
bcachefs: btree_path_cached_set()
new helper - small refactoring

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs')
-rw-r--r--  fs/bcachefs/btree_key_cache.c | 50
 1 file changed, 24 insertions(+), 26 deletions(-)
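For context, the patch collapses a repeated three-line sequence (record the level-0 lock sequence, point the path at the cached key, mark the node locked) into the new btree_path_cached_set() helper, and the three open-coded sites in btree_key_cache.c each shrink to one call. The sketch below is a minimal, self-contained illustration of that refactoring shape only: the struct definitions, the mark_btree_node_locked() stub, and the dropped btree_trans argument are simplified stand-ins for this example, not the real bcachefs types or API.

```c
/*
 * Stand-alone sketch of the refactoring pattern in this commit:
 * a repeated "wire path level 0 to a cached key" sequence is pulled
 * into one static inline helper so every call site stays in sync.
 * All types below are simplified stand-ins, not the bcachefs ones.
 */
#include <stdio.h>

struct six_lock    { unsigned seq; };          /* stand-in for struct six_lock */
struct bkey_cached { struct six_lock lock; };  /* stand-in for struct bkey_cached */

struct btree_path_level {
	unsigned		 lock_seq;
	struct bkey_cached	*b;
};

struct btree_path { struct btree_path_level l[1]; };

enum btree_node_locked_type { BTREE_NODE_READ_LOCKED, BTREE_NODE_INTENT_LOCKED };

static void mark_btree_node_locked(struct btree_path *path, unsigned level,
				   enum btree_node_locked_type t)
{
	/* the real code records the lock type in the path; here we just log it */
	printf("level %u locked, type %d\n", level, t);
}

/* the helper: one place that points a btree_path at a cached key */
static inline void btree_path_cached_set(struct btree_path *path,
					 struct bkey_cached *ck,
					 enum btree_node_locked_type lock_held)
{
	path->l[0].lock_seq = ck->lock.seq;
	path->l[0].b	    = ck;
	mark_btree_node_locked(path, 0, lock_held);
}

int main(void)
{
	struct bkey_cached ck = { .lock = { .seq = 42 } };
	struct btree_path path = { 0 };

	/* each formerly open-coded site becomes a single call */
	btree_path_cached_set(&path, &ck, BTREE_NODE_INTENT_LOCKED);
	printf("lock_seq %u\n", path.l[0].lock_seq);
	return 0;
}
```

The payoff of the pattern is visible in the diff below: the lock-sequence/pointer/lock-mark bookkeeping now lives in exactly one place, so a future change to how a cached key is attached to a path cannot miss a call site.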
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 2d3c0d45c37f..8b2fd0ae6028 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -39,6 +39,15 @@ static const struct rhashtable_params bch2_btree_key_cache_params = {
 	.automatic_shrinking	= true,
 };
 
+static inline void btree_path_cached_set(struct btree_trans *trans, struct btree_path *path,
+					 struct bkey_cached *ck,
+					 enum btree_node_locked_type lock_held)
+{
+	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
+	path->l[0].b		= (void *) ck;
+	mark_btree_node_locked(trans, path, 0, lock_held);
+}
+
 __flatten
 inline struct bkey_cached *
 bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
@@ -259,9 +268,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
 		return ERR_PTR(ret);
 	}
 
-	path->l[0].b		= (void *) ck;
-	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
-	mark_btree_node_locked(trans, path, 0, BTREE_NODE_INTENT_LOCKED);
+	btree_path_cached_set(trans, path, ck, BTREE_NODE_INTENT_LOCKED);
 
 	ret = bch2_btree_node_lock_write(trans, path, &ck->c);
 	if (unlikely(ret)) {
@@ -489,7 +496,7 @@ retry:
 		if (!ck)
 			goto retry;
 
-		mark_btree_node_locked(trans, path, 0, BTREE_NODE_INTENT_LOCKED);
+		btree_path_cached_set(trans, path, ck, BTREE_NODE_INTENT_LOCKED);
 		path->locks_want = 1;
 	} else {
 		enum six_lock_type lock_want = __btree_lock_want(path, 0);
@@ -507,12 +514,8 @@ retry:
 			goto retry;
 		}
 
-		mark_btree_node_locked(trans, path, 0,
-				       (enum btree_node_locked_type) lock_want);
+		btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
 	}
-
-	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
-	path->l[0].b		= (void *) ck;
 fill:
 	path->uptodate = BTREE_ITER_UPTODATE;
 
@@ -559,30 +562,25 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
 	}
 retry:
 	ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
-	if (!ck) {
+	if (!ck)
 		return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);
-	} else {
-		enum six_lock_type lock_want = __btree_lock_want(path, 0);
 
-		ret = btree_node_lock(trans, path, (void *) ck, 0,
-				      lock_want, _THIS_IP_);
-		EBUG_ON(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart));
+	enum six_lock_type lock_want = __btree_lock_want(path, 0);
 
-		if (ret)
-			return ret;
+	ret = btree_node_lock(trans, path, (void *) ck, 0,
+			      lock_want, _THIS_IP_);
+	EBUG_ON(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart));
 
-		if (ck->key.btree_id != path->btree_id ||
-		    !bpos_eq(ck->key.pos, path->pos)) {
-			six_unlock_type(&ck->c.lock, lock_want);
-			goto retry;
-		}
+	if (ret)
+		return ret;
 
-		mark_btree_node_locked(trans, path, 0,
-				       (enum btree_node_locked_type) lock_want);
+	if (ck->key.btree_id != path->btree_id ||
+	    !bpos_eq(ck->key.pos, path->pos)) {
+		six_unlock_type(&ck->c.lock, lock_want);
+		goto retry;
 	}
 
-	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
-	path->l[0].b		= (void *) ck;
+	btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
 fill:
 	if (!ck->valid)
 		return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);