Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c | 51
1 file changed, 35 insertions, 16 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9c34b97d365d..d1676d8a6ed7 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -180,10 +180,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (unlikely(tp->compressed_ack)) {
+       if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
                NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
-                             tp->compressed_ack);
-               tp->compressed_ack = 0;
+                             tp->compressed_ack - TCP_FASTRETRANS_THRESH);
+               tp->compressed_ack = TCP_FASTRETRANS_THRESH;
                if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
                        __sock_put(sk);
        }
@@ -1904,7 +1904,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
  * This algorithm is from John Heffner.
  */
 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
-                                bool *is_cwnd_limited, u32 max_segs)
+                                bool *is_cwnd_limited,
+                                bool *is_rwnd_limited,
+                                u32 max_segs)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 age, send_win, cong_win, limit, in_flight;
@@ -1912,9 +1914,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        struct sk_buff *head;
        int win_divisor;
 
-       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
-               goto send_now;
-
        if (icsk->icsk_ca_state >= TCP_CA_Recovery)
                goto send_now;
 
@@ -1973,10 +1972,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        if (age < (tp->srtt_us >> 4))
                goto send_now;
 
-       /* Ok, it looks like it is advisable to defer. */
+       /* Ok, it looks like it is advisable to defer.
+        * Three cases are tracked :
+        * 1) We are cwnd-limited
+        * 2) We are rwnd-limited
+        * 3) We are application limited.
+        */
+       if (cong_win < send_win) {
+               if (cong_win <= skb->len) {
+                       *is_cwnd_limited = true;
+                       return true;
+               }
+       } else {
+               if (send_win <= skb->len) {
+                       *is_rwnd_limited = true;
+                       return true;
+               }
+       }
 
-       if (cong_win < send_win && cong_win <= skb->len)
-               *is_cwnd_limited = true;
+       /* If this packet won't get more data, do not wait. */
+       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+               goto send_now;
 
        return true;
 
@@ -2356,7 +2372,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                } else {
                        if (!push_one &&
                            tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
-                                                max_segs))
+                                                &is_rwnd_limited, max_segs))
                                break;
                }
 
@@ -2494,15 +2510,18 @@ void tcp_send_loss_probe(struct sock *sk)
                goto rearm_timer;
        }
        skb = skb_rb_last(&sk->tcp_rtx_queue);
+       if (unlikely(!skb)) {
+               WARN_ONCE(tp->packets_out,
+                         "invalid inflight: %u state %u cwnd %u mss %d\n",
+                         tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+               inet_csk(sk)->icsk_pending = 0;
+               return;
+       }
 
        /* At most one outstanding TLP retransmission. */
        if (tp->tlp_high_seq)
                goto rearm_timer;
 
-       /* Retransmit last segment. */
-       if (WARN_ON(!skb))
-               goto rearm_timer;
-
        if (skb_still_in_host_queue(sk, skb))
                goto rearm_timer;
 
@@ -2920,7 +2939,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
                TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
                trace_tcp_retransmit_skb(sk, skb);
        } else if (err != -EBUSY) {
-               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+               NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
        }
        return err;
 }
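
The main behavioural change above is in tcp_tso_should_defer(): once the heuristics agree that deferring is advisable, the function now records whether the flow is cwnd-limited or rwnd-limited, and only in the remaining application-limited case does a FIN force an immediate send (no further data can ever be appended to that skb). The user-space sketch below mirrors that decision order; struct defer_ctx, classify_and_defer() and has_fin are stand-ins invented for this illustration, not kernel structures or API.

/*
 * Standalone sketch (not kernel code) of the reworked deferral decision
 * in tcp_tso_should_defer().  The fields below are simplified stand-ins
 * for the relevant skb/tcp_sock state.
 */
#include <stdbool.h>
#include <stdio.h>

struct defer_ctx {
        unsigned int cong_win;          /* bytes the congestion window still allows */
        unsigned int send_win;          /* bytes the receiver window still allows */
        unsigned int skb_len;           /* length of the skb considered for deferral */
        bool has_fin;                   /* skb carries TCPHDR_FIN */
        bool is_cwnd_limited;
        bool is_rwnd_limited;
};

/* Returns true to defer the skb, false to send it now. */
static bool classify_and_defer(struct defer_ctx *ctx)
{
        if (ctx->cong_win < ctx->send_win) {
                /* Case 1: the congestion window is the tighter limit. */
                if (ctx->cong_win <= ctx->skb_len) {
                        ctx->is_cwnd_limited = true;
                        return true;
                }
        } else {
                /* Case 2: the receiver window is the tighter limit. */
                if (ctx->send_win <= ctx->skb_len) {
                        ctx->is_rwnd_limited = true;
                        return true;
                }
        }

        /*
         * Case 3: application limited.  A FIN means no more data can be
         * appended to this skb, so waiting for more data is pointless.
         */
        if (ctx->has_fin)
                return false;

        return true;
}

int main(void)
{
        struct defer_ctx ctx = {
                .cong_win = 4000,       /* cwnd allows less than one full skb ... */
                .send_win = 64000,
                .skb_len  = 6000,       /* ... so this flow is cwnd-limited */
        };
        bool defer = classify_and_defer(&ctx);

        printf("defer=%d cwnd_limited=%d rwnd_limited=%d\n",
               defer, ctx.is_cwnd_limited, ctx.is_rwnd_limited);
        return 0;
}

Because the TCPHDR_FIN test now runs only after the two limited cases have returned, a FIN-carrying skb that is cwnd- or rwnd-limited is still deferred rather than sent immediately, which is exactly what moving the check from the top of the function achieves.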
