author     Kent Overstreet <kent.overstreet@linux.dev>  2023-12-21 18:54:09 -0500
committer  Kent Overstreet <kent.overstreet@linux.dev>  2024-07-14 19:00:16 -0400
commit     375476c41405ff6fc379cdbf1ad1df35c737500c (patch)
tree       12a16c18e72755f57e23f143b8275c9decb298f0
parent     1a616c2fe96b357894b74b41787d4ea6987f6199 (diff)
bcachefs: Add lockdep support for btree node locks
This adds lockdep tracking for held btree locks with a single dep_map
in btree_trans, i.e. tracking all held btree locks as one object.

This is more practical and more useful than having lockdep track held
btree locks individually, because
 - we can take more locks than lockdep can track (unbounded, now that
   we have dynamically resizable btree paths)
 - there's no lock ordering between btree locks for lockdep to track
   (we do cycle detection)
 - and this makes it easy to teach lockdep that btree locks are not
   safe to hold while invoking memory reclaim.

The last rule is one that lockdep would never learn, because we only
do trylock() from within shrinkers - but we very much do not want to
be invoking memory reclaim while holding btree node locks.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--  fs/bcachefs/btree_iter.c     24
-rw-r--r--  fs/bcachefs/btree_locking.h   2
-rw-r--r--  fs/bcachefs/btree_types.h     3
3 files changed, 26 insertions, 3 deletions
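
The mechanism, in outline: a single lockdep_map in btree_trans is
acquired and released as one exclusive "lock" standing in for every held
btree node lock. A minimal sketch of that pattern - hypothetical names
(lock_group, lock_group_lock, ...), not bcachefs code, assuming a kernel
built with lockdep:

	#include <linux/lockdep.h>

	struct lock_group {
		bool			locked;
		struct lockdep_map	dep_map;
	};

	static struct lock_class_key group_key;

	static void lock_group_init(struct lock_group *g)
	{
		g->locked = false;
		/* one lockdep object stands in for a whole family of locks */
		lockdep_init_map(&g->dep_map, "example_group", &group_key, 0);
	}

	static void lock_group_lock(struct lock_group *g)
	{
		/* exclusive acquire: lockdep now considers "the group" held */
		lock_acquire_exclusive(&g->dep_map, 0, 0, NULL, _THIS_IP_);
		g->locked = true;
	}

	static void lock_group_unlock(struct lock_group *g)
	{
		lock_release(&g->dep_map, _THIS_IP_);
		g->locked = false;
	}

Ordering among the individual locks inside the group is deliberately not
lockdep's problem: bcachefs already checks that itself via cycle
detection (bch2_six_check_for_deadlock()).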
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 9485208b6758..803cc58ff577 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -3171,6 +3171,9 @@ got_trans:
 	trans->paths_allocated[0] = 1;
 
+	static struct lock_class_key lockdep_key;
+	lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
+
 	if (fn_idx < BCH_TRANSACTIONS_NR) {
 		trans->fn = bch2_btree_transaction_fns[fn_idx];
@@ -3440,7 +3443,22 @@ int bch2_fs_btree_iter_init(struct bch_fs *c)
 		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
 					  BTREE_TRANS_MEM_MAX) ?:
 		init_srcu_struct(&c->btree_trans_barrier);
-	if (!ret)
-		c->btree_trans_barrier_initialized = true;
-	return ret;
+	if (ret)
+		return ret;
+
+	/*
+	 * static annotation (hackily done) for lock ordering of reclaim vs.
+	 * btree node locks:
+	 */
+#ifdef CONFIG_LOCKDEP
+	fs_reclaim_acquire(GFP_KERNEL);
+	struct btree_trans *trans = bch2_trans_get(c);
+	trans_set_locked(trans);
+	bch2_trans_put(trans);
+	fs_reclaim_release(GFP_KERNEL);
+#endif
+
+	c->btree_trans_barrier_initialized = true;
+	return 0;
+
 }
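
The #ifdef CONFIG_LOCKDEP block above is the annotation trick: at init
time, a transaction is locked once inside a simulated reclaim context,
so lockdep records the ordering "fs_reclaim, then btree_trans". From
then on, entering reclaim while a btree_trans is locked is the inverse
order and gets reported as a potential deadlock. The same trick in
generic form, reusing the hypothetical lock_group sketch from above:

	#include <linux/sched/mm.h>

	static void teach_lockdep_reclaim_ordering(struct lock_group *g)
	{
	#ifdef CONFIG_LOCKDEP
		fs_reclaim_acquire(GFP_KERNEL);	/* pretend we're in reclaim */
		lock_group_lock(g);	/* records fs_reclaim -> g->dep_map */
		lock_group_unlock(g);
		fs_reclaim_release(GFP_KERNEL);
	#endif
	}

This is what the "static annotation (hackily done)" comment means: the
dependency is taught once, up front, rather than waiting for lockdep to
observe it at runtime - which it never would, since shrinkers only ever
trylock btree node locks.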
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 8f5f1973c7d8..8dbceec8ec25 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -197,6 +197,7 @@ int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);
 static inline void trans_set_locked(struct btree_trans *trans)
 {
 	if (!trans->locked) {
+		lock_acquire_exclusive(&trans->dep_map, 0, 0, NULL, _THIS_IP_);
 		trans->locked = true;
 		trans->last_unlock_ip = 0;
@@ -208,6 +209,7 @@ static inline void trans_set_locked(struct btree_trans *trans)
 static inline void trans_set_unlocked(struct btree_trans *trans)
 {
 	if (trans->locked) {
+		lock_release(&trans->dep_map, _THIS_IP_);
 		trans->locked = false;
 		trans->last_unlock_ip = _RET_IP_;
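
Note that both annotations sit inside the pre-existing trans->locked
guards, so lockdep only ever sees state transitions: a redundant
trans_set_locked() on an already-locked transaction (or the unlocked
counterpart) adds no unbalanced acquire/release.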
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 4fe77d7f7242..79898f687772 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -523,6 +523,9 @@ struct btree_trans {
 	unsigned		journal_u64s;
 	unsigned		extra_disk_res; /* XXX kill */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
 
 	/* Entries before this are zeroed out on every bch2_trans_get() call */
 	struct list_head	list;
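
One detail worth noting: the dep_map field is guarded by
CONFIG_DEBUG_LOCK_ALLOC, while the annotations need no guard of their
own - lockdep_init_map(), lock_acquire_exclusive() and lock_release()
all compile to no-ops on non-lockdep builds, so the whole scheme costs
nothing in a production configuration.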