diff options
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
 net/ipv4/tcp_ipv4.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 58207c7769d0..777306b5bc22 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1595,6 +1595,8 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,  		tcp_move_syn(newtp, req);  		ireq->ireq_opt = NULL;  	} else { +		newinet->inet_opt = NULL; +  		if (!req_unhash && found_dup_sk) {  			/* This code path should only be executed in the  			 * syncookie case only @@ -1602,8 +1604,6 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,  			bh_unlock_sock(newsk);  			sock_put(newsk);  			newsk = NULL; -		} else { -			newinet->inet_opt = NULL;  		}  	}  	return newsk; @@ -1760,6 +1760,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)  bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)  {  	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf); +	u32 tail_gso_size, tail_gso_segs;  	struct skb_shared_info *shinfo;  	const struct tcphdr *th;  	struct tcphdr *thtail; @@ -1767,6 +1768,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)  	unsigned int hdrlen;  	bool fragstolen;  	u32 gso_segs; +	u32 gso_size;  	int delta;  	/* In case all data was pulled from skb frags (in __pskb_pull_tail()), @@ -1792,13 +1794,6 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)  	 */  	th = (const struct tcphdr *)skb->data;  	hdrlen = th->doff * 4; -	shinfo = skb_shinfo(skb); - -	if (!shinfo->gso_size) -		shinfo->gso_size = skb->len - hdrlen; - -	if (!shinfo->gso_segs) -		shinfo->gso_segs = 1;  	tail = sk->sk_backlog.tail;  	if (!tail) @@ -1821,6 +1816,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)  		goto no_coalesce;  	__skb_pull(skb, hdrlen); + +	shinfo = skb_shinfo(skb); +	gso_size = shinfo->gso_size ?: skb->len; +	gso_segs = shinfo->gso_segs ?: 1; + +	shinfo = skb_shinfo(tail); +	tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen); +	tail_gso_segs = shinfo->gso_segs ?: 1; +  	if 
(skb_try_coalesce(tail, skb, &fragstolen, &delta)) {  		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq; @@ -1847,11 +1851,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)  		}  		/* Not as strict as GRO. We only need to carry mss max value */ -		skb_shinfo(tail)->gso_size = max(shinfo->gso_size, -						 skb_shinfo(tail)->gso_size); - -		gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs; -		skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF); +		shinfo->gso_size = max(gso_size, tail_gso_size); +		shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);  		sk->sk_backlog.len += delta;  		__NET_INC_STATS(sock_net(sk),  | 
