Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--	kernel/mutex.c	32
1 file changed, 16 insertions, 16 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 6d647aedffea..d24105b1b794 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
 static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		    struct lockdep_map *nest_lock, unsigned long ip,
-		    struct ww_acquire_ctx *ww_ctx)
+		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
@@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		struct task_struct *owner;
 		struct mspin_node  node;
 
-		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
 
 			ww = container_of(lock, struct ww_mutex, base);
@@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
 			lock_acquired(&lock->dep_map, ip);
-			if (!__builtin_constant_p(ww_ctx == NULL)) {
+			if (use_ww_ctx) {
 				struct ww_mutex *ww;
 
 				ww = container_of(lock, struct ww_mutex, base);
@@ -551,7 +551,7 @@ slowpath:
 			goto err;
 		}
 
-		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			ret = __mutex_lock_check_stamp(lock, ww_ctx);
 			if (ret)
 				goto err;
@@ -575,7 +575,7 @@ skip_wait:
 	lock_acquired(&lock->dep_map, ip);
 	mutex_set_owner(lock);
 
-	if (!__builtin_constant_p(ww_ctx == NULL)) {
+	if (use_ww_ctx) {
 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 		struct mutex_waiter *cur;
 
@@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    subclass, NULL, _RET_IP_, NULL);
+			    subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
 	might_sleep();
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-			    0, nest, _RET_IP_, NULL);
+			    0, nest, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_KILLABLE,
-				   subclass, NULL, _RET_IP_, NULL);
+				   subclass, NULL, _RET_IP_, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-				   subclass, NULL, _RET_IP_, NULL);
+				   subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-				   0, &ctx->dep_map, _RET_IP_, ctx);
+				   0, &ctx->dep_map, _RET_IP_, ctx, 1);
 	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx);
+				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
 	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -809,28 +809,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-			    NULL, _RET_IP_, NULL);
+			    NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
 	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-				   NULL, _RET_IP_, NULL);
+				   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, NULL);
+				   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx);
+				   NULL, _RET_IP_, ctx, 1);
 }
 
 static noinline int __sched
@@ -838,7 +838,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
 					    struct ww_acquire_ctx *ctx)
 {
 	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-				   NULL, _RET_IP_, ctx);
+				   NULL, _RET_IP_, ctx, 1);
 }
 
 #endif
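For reference, the pattern this diff switches to — passing a compile-time-constant bool into an __always_inline function so each call site's dead branch is eliminated after inlining — can be shown outside the kernel. The sketch below uses hypothetical names (struct ctx, lock_common) and is not the kernel code itself; it only illustrates why an explicit use_ww_ctx flag is safer than !__builtin_constant_p(ww_ctx == NULL), which on some gcc versions is not folded to a constant and can leave a NULL dereference in the generated code:

#include <stdbool.h>
#include <stdio.h>

struct ctx { int acquired; };

/*
 * Mirror of the diff's approach: 'use_ctx' is a literal constant at
 * every call site, so after inlining the compiler can discard the dead
 * branch. The replaced !__builtin_constant_p(c == NULL) test depended
 * on gcc proving constness; when it could not, the condition evaluated
 * as true and 'c->acquired' was reached with c == NULL.
 */
static inline __attribute__((always_inline))
int lock_common(struct ctx *c, const bool use_ctx)
{
	if (use_ctx && c->acquired > 0)	/* never reached when use_ctx == false */
		return c->acquired;
	return 0;
}

int lock_plain(void)       { return lock_common(NULL, false); }	/* branch folded away */
int lock_ww(struct ctx *c) { return lock_common(c, true); }

int main(void)
{
	struct ctx c = { .acquired = 2 };
	printf("%d %d\n", lock_plain(), lock_ww(&c));	/* prints "0 2" */
	return 0;
}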
