diff options
Diffstat (limited to 'net/core/sock.c')
-rw-r--r--   net/core/sock.c | 32 ++++++++++++++++++++--------------
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index fac2b4d80de5..a515392ba84b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -522,7 +522,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
 		bh_unlock_sock(sk);
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
@@ -785,7 +785,8 @@ set_sndbuf:
 		 */
 		val = min_t(int, val, INT_MAX / 2);
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+		WRITE_ONCE(sk->sk_sndbuf,
+			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
 		/* Wake up sending tasks if we upped the value. */
 		sk->sk_write_space(sk);
 		break;
@@ -831,7 +832,8 @@ set_rcvbuf:
 		 * returning the value we actually used in getsockopt
 		 * is the most desirable behavior.
 		 */
-		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+		WRITE_ONCE(sk->sk_rcvbuf,
+			   max_t(int, val * 2, SOCK_MIN_RCVBUF));
 		break;
 
 	case SO_RCVBUFFORCE:
@@ -974,7 +976,7 @@ set_rcvbuf:
 		if (sock->ops->set_rcvlowat)
 			ret = sock->ops->set_rcvlowat(sk, val);
 		else
-			sk->sk_rcvlowat = val ? : 1;
+			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
 		break;
 
 	case SO_RCVTIMEO_OLD:
@@ -2088,8 +2090,10 @@ EXPORT_SYMBOL(sock_i_ino);
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 			     gfp_t priority)
 {
-	if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+	if (force ||
+	    refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
 		struct sk_buff *skb = alloc_skb(size, priority);
+
 		if (skb) {
 			skb_set_owner_w(skb, sk);
 			return skb;
@@ -2190,7 +2194,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
 			break;
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-		if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
 			break;
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			break;
@@ -2225,7 +2229,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			goto failure;
 
-		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
 			break;
 
 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -2334,8 +2338,8 @@ static void sk_leave_memory_pressure(struct sock *sk)
 	} else {
 		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
 
-		if (memory_pressure && *memory_pressure)
-			*memory_pressure = 0;
+		if (memory_pressure && READ_ONCE(*memory_pressure))
+			WRITE_ONCE(*memory_pressure, 0);
 	}
 }
 
@@ -2806,7 +2810,7 @@ static void sock_def_write_space(struct sock *sk)
 	/* Do not wake up a writer until he can make "significant"
 	 * progress.  --DaveM
 	 */
-	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
 		wq = rcu_dereference(sk->sk_wq);
 		if (skwq_has_sleeper(wq))
 			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
@@ -3204,13 +3208,13 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
 	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
 
 	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
-	mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
 	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
-	mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
 	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
-	mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
-	mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
 	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
 }
