diff options
Diffstat (limited to 'net/ipv4/tcp.c')
| -rw-r--r-- | net/ipv4/tcp.c | 57 | 
1 file changed, 51 insertions, 6 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c1728771cf89..7bb1b091efd1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -517,8 +517,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)  			if (sk_stream_is_writeable(sk)) {  				mask |= POLLOUT | POLLWRNORM;  			} else {  /* send SIGIO later */ -				set_bit(SOCK_ASYNC_NOSPACE, -					&sk->sk_socket->flags); +				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);  				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);  				/* Race breaker. If space is freed after @@ -906,7 +905,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,  			goto out_err;  	} -	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); +	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);  	mss_now = tcp_send_mss(sk, &size_goal, flags);  	copied = 0; @@ -1019,7 +1018,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,  	ssize_t res;  	if (!(sk->sk_route_caps & NETIF_F_SG) || -	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) +	    !sk_check_csum_caps(sk))  		return sock_no_sendpage(sk->sk_socket, page, offset, size,  					flags); @@ -1134,7 +1133,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)  	}  	/* This should be in poll */ -	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); +	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);  	mss_now = tcp_send_mss(sk, &size_goal, flags); @@ -1176,7 +1175,7 @@ new_segment:  			/*  			 * Check whether we can use HW checksum.  			 
*/ -			if (sk->sk_route_caps & NETIF_F_ALL_CSUM) +			if (sk_check_csum_caps(sk))  				skb->ip_summed = CHECKSUM_PARTIAL;  			skb_entail(sk, skb); @@ -3081,6 +3080,52 @@ void tcp_done(struct sock *sk)  }  EXPORT_SYMBOL_GPL(tcp_done); +int tcp_abort(struct sock *sk, int err) +{ +	if (!sk_fullsock(sk)) { +		if (sk->sk_state == TCP_NEW_SYN_RECV) { +			struct request_sock *req = inet_reqsk(sk); + +			local_bh_disable(); +			inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, +							  req); +			local_bh_enable(); +			return 0; +		} +		sock_gen_put(sk); +		return -EOPNOTSUPP; +	} + +	/* Don't race with userspace socket closes such as tcp_close. */ +	lock_sock(sk); + +	if (sk->sk_state == TCP_LISTEN) { +		tcp_set_state(sk, TCP_CLOSE); +		inet_csk_listen_stop(sk); +	} + +	/* Don't race with BH socket closes such as inet_csk_listen_stop. */ +	local_bh_disable(); +	bh_lock_sock(sk); + +	if (!sock_flag(sk, SOCK_DEAD)) { +		sk->sk_err = err; +		/* This barrier is coupled with smp_rmb() in tcp_poll() */ +		smp_wmb(); +		sk->sk_error_report(sk); +		if (tcp_need_reset(sk->sk_state)) +			tcp_send_active_reset(sk, GFP_ATOMIC); +		tcp_done(sk); +	} + +	bh_unlock_sock(sk); +	local_bh_enable(); +	release_sock(sk); +	sock_put(sk); +	return 0; +} +EXPORT_SYMBOL_GPL(tcp_abort); +  extern struct tcp_congestion_ops tcp_reno;  static __initdata unsigned long thash_entries;  | 
