From f05441c7e16e6a720cf24e08999661a76ff77478 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Tue, 31 Oct 2023 00:45:40 -0400
Subject: fold the call of retain_dentry() into fast_dput()

Calls of retain_dentry() happen immediately after getting false from
fast_dput() and getting true from retain_dentry() is treated the same
way as non-zero refcount would be treated by fast_dput() - unlock
dentry and bugger off.

Doing that in fast_dput() itself is simpler.

Reviewed-by: Christian Brauner
Signed-off-by: Al Viro
---
 fs/dcache.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

(limited to 'fs')

diff --git a/fs/dcache.c b/fs/dcache.c
index 8ce0fe70f303..b69ff3a0b30f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -757,6 +757,8 @@ got_locks:
  * Try to do a lockless dput(), and return whether that was successful.
  *
  * If unsuccessful, we return false, having already taken the dentry lock.
+ * In that case refcount is guaranteed to be zero and we have already
+ * decided that it's not worth keeping around.
  *
  * The caller needs to hold the RCU read lock, so that the dentry is
  * guaranteed to stay around even if the refcount goes down to zero!
@@ -842,7 +844,7 @@ static inline bool fast_dput(struct dentry *dentry)
 	 * don't need to do anything else.
 	 */
 locked:
-	if (dentry->d_lockref.count) {
+	if (dentry->d_lockref.count || retain_dentry(dentry)) {
 		spin_unlock(&dentry->d_lock);
 		return true;
 	}
@@ -889,12 +891,6 @@ void dput(struct dentry *dentry)
 
 	/* Slow case: now with the dentry lock held */
 	rcu_read_unlock();
-
-	if (likely(retain_dentry(dentry))) {
-		spin_unlock(&dentry->d_lock);
-		return;
-	}
-
 	dentry->d_lockref.count = 1;
 	dentry = dentry_kill(dentry);
 }
@@ -920,8 +916,7 @@ void dput_to_list(struct dentry *dentry, struct list_head *list)
 		return;
 	}
 	rcu_read_unlock();
-	if (!retain_dentry(dentry))
-		to_shrink_list(dentry, list);
+	to_shrink_list(dentry, list);
 	spin_unlock(&dentry->d_lock);
 }
--
cgit v1.2.3-70-g09d2
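
The shape of the change can be illustrated with a small standalone sketch. The
following is a toy userspace model, not kernel code: the names (toy_dentry,
toy_fast_dput, toy_retain, toy_dput) and the fields of the struct are invented
for illustration only, and locking/refcounting are stand-ins for d_lock and
d_lockref. It only shows why folding the "is this worth keeping?" check into
the fast-path helper lets the callers drop their separate retain check.

/*
 * Toy model of the refactoring above -- NOT the real fs/dcache.c code.
 * All names here are hypothetical.
 *
 * The point: once the retain check is folded into the fast-path helper,
 * a false return means both "refcount hit zero" and "not worth keeping",
 * so the caller can go straight to the kill path.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_dentry {
	int refcount;
	bool worth_keeping;	/* stands in for retain_dentry()'s verdict */
	bool locked;		/* stands in for dentry->d_lock */
};

static bool toy_retain(struct toy_dentry *d)
{
	return d->worth_keeping;
}

/*
 * Fast-path drop.  Returns true if the caller is done (refcount still
 * positive, or the object should be retained); returns false with the
 * "lock" held when the caller must dispose of the object.
 */
static bool toy_fast_dput(struct toy_dentry *d)
{
	if (--d->refcount > 0)
		return true;

	d->locked = true;
	/* The folded check: the retain decision now happens in here. */
	if (d->refcount > 0 || toy_retain(d)) {
		d->locked = false;
		return true;
	}
	/* refcount == 0 and not worth keeping: caller must kill it. */
	return false;
}

static void toy_dput(struct toy_dentry *d)
{
	if (toy_fast_dput(d))
		return;
	/* No separate retain check needed here any more. */
	printf("killing dentry\n");
	d->locked = false;
}

int main(void)
{
	struct toy_dentry keep = { .refcount = 1, .worth_keeping = true };
	struct toy_dentry drop = { .refcount = 1, .worth_keeping = false };

	toy_dput(&keep);	/* retained inside the fast path, prints nothing */
	toy_dput(&drop);	/* prints "killing dentry" */
	return 0;
}

In the same spirit, after the patch both dput() and dput_to_list() treat a
false return from fast_dput() as "refcount is zero and the dentry is not to
be retained", which is what allows the retain_dentry() calls to disappear
from the callers.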