diff options
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c | 7
1 file changed, 5 insertions, 2 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bdaef7fd6e47..5288cec4a2b2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2605,7 +2605,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 	 * copying overhead: fragmentation, tunneling, mangling etc.
 	 */
 	if (atomic_read(&sk->sk_wmem_alloc) >
-	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+		  sk->sk_sndbuf))
 		return -EAGAIN;
 
 	if (skb_still_in_host_queue(sk, skb))
@@ -2830,7 +2831,7 @@ begin_fwd:
 		if (tcp_retransmit_skb(sk, skb, segs))
 			return;
 
-		NET_INC_STATS(sock_net(sk), mib_idx);
+		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
 
 		if (tcp_in_cwnd_reduction(sk))
 			tp->prr_out += tcp_skb_pcount(skb);
@@ -3567,6 +3568,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 	if (!res) {
 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+		if (unlikely(tcp_passive_fastopen(sk)))
+			tcp_sk(sk)->total_retrans++;
 	}
 	return res;
 }
