author    Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 12:38:26 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 12:38:26 -0800
commit    8e9a2dba8686187d8c8179e5b86640e653963889 (patch)
tree      a4ba543649219cbb28d91aab65b785d763f5d069  /fs/dcache.c
parent    6098850e7e6978f95a958f79a645a653228d0002 (diff)
parent    450cbdd0125cfa5d7bbf9e2a6b6961cc48d29730 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle are:

   - Another attempt at enabling cross-release lockdep dependency
     tracking (automatically part of CONFIG_PROVE_LOCKING=y), this time
     with better performance and fewer false positives. (Byungchul Park)

   - Introduce lockdep_assert_irqs_enabled()/disabled() and convert
     open-coded equivalents to lockdep variants. (Frederic Weisbecker)

   - Add down_read_killable() and use it in the VFS's iterate_dir()
     method. (Kirill Tkhai)

   - Convert remaining uses of ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE().
     Most of the conversion was Coccinelle driven. (Mark Rutland,
     Paul E. McKenney)

   - Get rid of lockless_dereference(), by strengthening Alpha atomics,
     strengthening READ_ONCE() with smp_read_barrier_depends() and thus
     being able to convert users of lockless_dereference() to
     READ_ONCE(). (Will Deacon)

   - Various micro-optimizations:

       - better PV qspinlocks (Waiman Long),
       - better x86 barriers (Michael S. Tsirkin)
       - better x86 refcounts (Kees Cook)

   - ... plus other fixes and enhancements. (Borislav Petkov,
     Juergen Gross, Miguel Bernal Marin)"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  locking/x86: Use LOCK ADD for smp_mb() instead of MFENCE
  rcu: Use lockdep to assert IRQs are disabled/enabled
  netpoll: Use lockdep to assert IRQs are disabled/enabled
  timers/posix-cpu-timers: Use lockdep to assert IRQs are disabled/enabled
  sched/clock, sched/cputime: Use lockdep to assert IRQs are disabled/enabled
  irq_work: Use lockdep to assert IRQs are disabled/enabled
  irq/timings: Use lockdep to assert IRQs are disabled/enabled
  perf/core: Use lockdep to assert IRQs are disabled/enabled
  x86: Use lockdep to assert IRQs are disabled/enabled
  smp/core: Use lockdep to assert IRQs are disabled/enabled
  timers/hrtimer: Use lockdep to assert IRQs are disabled/enabled
  timers/nohz: Use lockdep to assert IRQs are disabled/enabled
  workqueue: Use lockdep to assert IRQs are disabled/enabled
  irq/softirqs: Use lockdep to assert IRQs are disabled/enabled
  locking/lockdep: Add IRQs disabled/enabled assertion APIs: lockdep_assert_irqs_enabled()/disabled()
  locking/pvqspinlock: Implement hybrid PV queued/unfair locks
  locking/rwlocks: Fix comments
  x86/paravirt: Set up the virt_spin_lock_key after static keys get initialized
  block, locking/lockdep: Assign a lock_class per gendisk used for wait_for_completion()
  workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
  ...
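The fs/dcache.c hunks below are part of the ACCESS_ONCE()/lockless_dereference() to READ_ONCE() conversion described above. As a minimal, self-contained userspace sketch of the same pattern (snapshot a shared pointer exactly once, pin it with a reference, then re-check the snapshot, as dget_parent() does), consider the following; the cut-down READ_ONCE() macro and the names node, grab_ref(), put_ref() and get_parent() are illustrative stand-ins, not the kernel's implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Cut-down stand-in for the kernel macro: one untorn load via a volatile access. */
#define READ_ONCE(x)  (*(const volatile __typeof__(x) *)&(x))

struct node {
	struct node *parent;
	atomic_int refcount;
};

/* Illustrative helper: take a reference only while the count is still non-zero. */
static bool grab_ref(struct node *n)
{
	int old = atomic_load(&n->refcount);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&n->refcount, &old, old + 1))
			return true;
	}
	return false;
}

static void put_ref(struct node *n)
{
	atomic_fetch_sub(&n->refcount, 1);
}

/* Mirrors the dget_parent() pattern: snapshot the pointer once, pin it, re-check. */
static struct node *get_parent(struct node *n)
{
	struct node *p = READ_ONCE(n->parent);	/* single snapshot of the pointer */

	if (p && grab_ref(p)) {
		/* A concurrent "rename" may have switched parents; validate the snapshot. */
		if (p == READ_ONCE(n->parent))
			return p;
		put_ref(p);	/* lost the race: drop the stale reference */
	}
	return NULL;
}

int main(void)
{
	struct node root  = { .parent = NULL,  .refcount = 1 };
	struct node child = { .parent = &root, .refcount = 1 };

	struct node *p = get_parent(&child);
	printf("pinned parent: %s\n", p == &root ? "yes" : "no");
	return 0;
}

With a plain, non-volatile load the compiler would be free to re-read n->parent between the snapshot and the later comparison, which is exactly the refetch/tearing hazard the ACCESS_ONCE() and lockless_dereference() call sites in the hunks below were guarding against.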
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c  22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index f90141387f01..bcc9f6981569 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
{
/*
* Be careful about RCU walk racing with rename:
- * use 'lockless_dereference' to fetch the name pointer.
+ * use 'READ_ONCE' to fetch the name pointer.
*
* NOTE! Even if a rename will mean that the length
* was not loaded atomically, we don't care. The
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
* early because the data cannot match (there can
* be no NUL in the ct/tcount data)
*/
- const unsigned char *cs = lockless_dereference(dentry->d_name.name);
+ const unsigned char *cs = READ_ONCE(dentry->d_name.name);
return dentry_string_cmp(cs, ct, tcount);
}
@@ -630,7 +630,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
rcu_read_lock();
spin_unlock(&dentry->d_lock);
again:
- parent = ACCESS_ONCE(dentry->d_parent);
+ parent = READ_ONCE(dentry->d_parent);
spin_lock(&parent->d_lock);
/*
* We can't blindly lock dentry until we are sure
@@ -721,7 +721,7 @@ static inline bool fast_dput(struct dentry *dentry)
* around with a zero refcount.
*/
smp_rmb();
- d_flags = ACCESS_ONCE(dentry->d_flags);
+ d_flags = READ_ONCE(dentry->d_flags);
d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
/* Nothing to do? Dropping the reference was all we needed? */
@@ -850,11 +850,11 @@ struct dentry *dget_parent(struct dentry *dentry)
* locking.
*/
rcu_read_lock();
- ret = ACCESS_ONCE(dentry->d_parent);
+ ret = READ_ONCE(dentry->d_parent);
gotref = lockref_get_not_zero(&ret->d_lockref);
rcu_read_unlock();
if (likely(gotref)) {
- if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
+ if (likely(ret == READ_ONCE(dentry->d_parent)))
return ret;
dput(ret);
}
@@ -3040,7 +3040,7 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
* @buflen: allocated length of the buffer
* @name: name string and length qstr structure
*
- * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
+ * With RCU path tracing, it may race with d_move(). Use READ_ONCE() to
* make sure that either the old or the new name pointer and length are
* fetched. However, there may be mismatch between length and pointer.
* The length cannot be trusted, we need to copy it byte-by-byte until
@@ -3054,8 +3054,8 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
*/
static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
{
- const char *dname = ACCESS_ONCE(name->name);
- u32 dlen = ACCESS_ONCE(name->len);
+ const char *dname = READ_ONCE(name->name);
+ u32 dlen = READ_ONCE(name->len);
char *p;
smp_read_barrier_depends();
@@ -3120,7 +3120,7 @@ restart:
struct dentry * parent;
if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
- struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
+ struct mount *parent = READ_ONCE(mnt->mnt_parent);
/* Escaped? */
if (dentry != vfsmnt->mnt_root) {
bptr = *buffer;
@@ -3130,7 +3130,7 @@ restart:
}
/* Global root? */
if (mnt != parent) {
- dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
+ dentry = READ_ONCE(mnt->mnt_mountpoint);
mnt = parent;
vfsmnt = &mnt->mnt;
continue;