diff options
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index fec6d67bfd14..0488607c5cd3 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -67,7 +67,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)  	struct tcp_sock *tp = tcp_sk(sk);  	unsigned int prior_packets = tp->packets_out; -	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; +	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);  	__skb_unlink(skb, &sk->sk_write_queue);  	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); @@ -1196,10 +1196,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)  	struct tcp_sock *tp = tcp_sk(sk);  	/* Advance write_seq and place onto the write_queue. */ -	tp->write_seq = TCP_SKB_CB(skb)->end_seq; +	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);  	__skb_header_release(skb);  	tcp_add_write_queue_tail(sk, skb); -	sk->sk_wmem_queued += skb->truesize; +	sk_wmem_queued_add(sk, skb->truesize);  	sk_mem_charge(sk, skb->truesize);  } @@ -1333,7 +1333,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,  		return -ENOMEM; /* We'll just try again later. 
*/  	skb_copy_decrypted(buff, skb); -	sk->sk_wmem_queued += buff->truesize; +	sk_wmem_queued_add(sk, buff->truesize);  	sk_mem_charge(sk, buff->truesize);  	nlen = skb->len - len - nsize;  	buff->truesize += nlen; @@ -1443,7 +1443,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)  	if (delta_truesize) {  		skb->truesize	   -= delta_truesize; -		sk->sk_wmem_queued -= delta_truesize; +		sk_wmem_queued_add(sk, -delta_truesize);  		sk_mem_uncharge(sk, delta_truesize);  		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);  	} @@ -1888,7 +1888,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,  		return -ENOMEM;  	skb_copy_decrypted(buff, skb); -	sk->sk_wmem_queued += buff->truesize; +	sk_wmem_queued_add(sk, buff->truesize);  	sk_mem_charge(sk, buff->truesize);  	buff->truesize += nlen;  	skb->truesize -= nlen; @@ -2152,7 +2152,7 @@ static int tcp_mtu_probe(struct sock *sk)  	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);  	if (!nskb)  		return -1; -	sk->sk_wmem_queued += nskb->truesize; +	sk_wmem_queued_add(sk, nskb->truesize);  	sk_mem_charge(sk, nskb->truesize);  	skb = tcp_send_head(sk); @@ -2482,7 +2482,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)  	/* Don't do any loss probe on a Fast Open connection before 3WHS  	 * finishes.  	 */ -	if (tp->fastopen_rsk) +	if (rcu_access_pointer(tp->fastopen_rsk))  		return false;  	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans; @@ -3142,7 +3142,7 @@ void tcp_send_fin(struct sock *sk)  			 * if FIN had been sent. This is because retransmit path  			 * does not change tp->snd_nxt.  			 
*/ -			tp->snd_nxt++; +			WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);  			return;  		}  	} else { @@ -3222,7 +3222,7 @@ int tcp_send_synack(struct sock *sk)  			tcp_rtx_queue_unlink_and_free(skb, sk);  			__skb_header_release(nskb);  			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); -			sk->sk_wmem_queued += nskb->truesize; +			sk_wmem_queued_add(sk, nskb->truesize);  			sk_mem_charge(sk, nskb->truesize);  			skb = nskb;  		} @@ -3426,14 +3426,14 @@ static void tcp_connect_init(struct sock *sk)  	tp->snd_una = tp->write_seq;  	tp->snd_sml = tp->write_seq;  	tp->snd_up = tp->write_seq; -	tp->snd_nxt = tp->write_seq; +	WRITE_ONCE(tp->snd_nxt, tp->write_seq);  	if (likely(!tp->repair))  		tp->rcv_nxt = 0;  	else  		tp->rcv_tstamp = tcp_jiffies32;  	tp->rcv_wup = tp->rcv_nxt; -	tp->copied_seq = tp->rcv_nxt; +	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);  	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);  	inet_csk(sk)->icsk_retransmits = 0; @@ -3447,9 +3447,9 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)  	tcb->end_seq += skb->len;  	__skb_header_release(skb); -	sk->sk_wmem_queued += skb->truesize; +	sk_wmem_queued_add(sk, skb->truesize);  	sk_mem_charge(sk, skb->truesize); -	tp->write_seq = tcb->end_seq; +	WRITE_ONCE(tp->write_seq, tcb->end_seq);  	tp->packets_out += tcp_skb_pcount(skb);  } @@ -3586,11 +3586,11 @@ int tcp_connect(struct sock *sk)  	/* We change tp->snd_nxt after the tcp_transmit_skb() call  	 * in order to make this packet get counted in tcpOutSegs.  	 */ -	tp->snd_nxt = tp->write_seq; +	WRITE_ONCE(tp->snd_nxt, tp->write_seq);  	tp->pushed_seq = tp->write_seq;  	buff = tcp_send_head(sk);  	if (unlikely(buff)) { -		tp->snd_nxt	= TCP_SKB_CB(buff)->seq; +		WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);  		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;  	}  	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);  | 
