| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2016-07-04 08:19:21 -0700 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2016-07-04 08:19:21 -0700 |
| commit | c318a821b9828ef3be97d5d896d146e7daa43c86 | |
| tree | 7ed40911ffebcbabf87ec4be692f6fb154053687 /net/tipc/socket.c | |
| parent | 322832f2f19e04c866a0ce4bdac8cff8e695f2b3 | |
| parent | a99cde438de0c4c0cecc1d1af1a55a75b10bfdef | |
Merge 4.7-rc6 into usb-next
We want the USB fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'net/tipc/socket.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | net/tipc/socket.c | 54 |

1 file changed, 42 insertions, 12 deletions
```diff
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 88bfcd707064..c49b8df438cb 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -796,9 +796,11 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
  * @tsk: receiving socket
  * @skb: pointer to message buffer.
  */
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+			      struct sk_buff_head *xmitq)
 {
 	struct sock *sk = &tsk->sk;
+	u32 onode = tsk_own_node(tsk);
 	struct tipc_msg *hdr = buf_msg(skb);
 	int mtyp = msg_type(hdr);
 	bool conn_cong;
@@ -811,7 +813,8 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
 
 	if (mtyp == CONN_PROBE) {
 		msg_set_type(hdr, CONN_PROBE_REPLY);
-		tipc_sk_respond(sk, skb, TIPC_OK);
+		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
+			__skb_queue_tail(xmitq, skb);
 		return;
 	} else if (mtyp == CONN_ACK) {
 		conn_cong = tsk_conn_cong(tsk);
@@ -1686,7 +1689,8 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
  *
  * Returns true if message was added to socket receive queue, otherwise false
  */
-static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
+		       struct sk_buff_head *xmitq)
 {
 	struct socket *sock = sk->sk_socket;
 	struct tipc_sock *tsk = tipc_sk(sk);
@@ -1696,7 +1700,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
 	int usr = msg_user(hdr);
 
 	if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
-		tipc_sk_proto_rcv(tsk, skb);
+		tipc_sk_proto_rcv(tsk, skb, xmitq);
 		return false;
 	}
 
@@ -1739,7 +1743,8 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
 	return true;
 
 reject:
-	tipc_sk_respond(sk, skb, err);
+	if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
+		__skb_queue_tail(xmitq, skb);
 	return false;
 }
 
@@ -1755,9 +1760,24 @@ reject:
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	unsigned int truesize = skb->truesize;
+	struct sk_buff_head xmitq;
+	u32 dnode, selector;
 
-	if (likely(filter_rcv(sk, skb)))
+	__skb_queue_head_init(&xmitq);
+
+	if (likely(filter_rcv(sk, skb, &xmitq))) {
 		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
+		return 0;
+	}
+
+	if (skb_queue_empty(&xmitq))
+		return 0;
+
+	/* Send response/rejected message */
+	skb = __skb_dequeue(&xmitq);
+	dnode = msg_destnode(buf_msg(skb));
+	selector = msg_origport(buf_msg(skb));
+	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
 	return 0;
 }
 
@@ -1771,12 +1791,13 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
  * Caller must hold socket lock
  */
 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
-			    u32 dport)
+			    u32 dport, struct sk_buff_head *xmitq)
 {
+	unsigned long time_limit = jiffies + 2;
+	struct sk_buff *skb;
 	unsigned int lim;
 	atomic_t *dcnt;
-	struct sk_buff *skb;
-	unsigned long time_limit = jiffies + 2;
+	u32 onode;
 
 	while (skb_queue_len(inputq)) {
 		if (unlikely(time_after_eq(jiffies, time_limit)))
@@ -1788,7 +1809,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 
 		/* Add message directly to receive queue if possible */
 		if (!sock_owned_by_user(sk)) {
-			filter_rcv(sk, skb);
+			filter_rcv(sk, skb, xmitq);
 			continue;
 		}
 
@@ -1801,7 +1822,9 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 			continue;
 
 		/* Overload => reject message back to sender */
-		tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+		onode = tipc_own_addr(sock_net(sk));
+		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+			__skb_queue_tail(xmitq, skb);
 		break;
 	}
 }
@@ -1814,12 +1837,14 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 */
 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
+	struct sk_buff_head xmitq;
 	u32 dnode, dport = 0;
 	int err;
 	struct tipc_sock *tsk;
 	struct sock *sk;
 	struct sk_buff *skb;
 
+	__skb_queue_head_init(&xmitq);
 	while (skb_queue_len(inputq)) {
 		dport = tipc_skb_peek_port(inputq, dport);
 		tsk = tipc_sk_lookup(net, dport);
@@ -1827,9 +1852,14 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 		if (likely(tsk)) {
 			sk = &tsk->sk;
 			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
-				tipc_sk_enqueue(inputq, sk, dport);
+				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
 				spin_unlock_bh(&sk->sk_lock.slock);
 			}
+			/* Send pending response/rejected messages, if any */
+			while ((skb = __skb_dequeue(&xmitq))) {
+				dnode = msg_destnode(buf_msg(skb));
+				tipc_node_xmit_skb(net, skb, dnode, dport);
+			}
 			sock_put(sk);
 			continue;
 		}
```
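The TIPC hunks above follow a single pattern: paths that previously called tipc_sk_respond() from inside the filtering code now only reverse the message and append it to a caller-supplied xmitq, and the outer receive functions (tipc_backlog_rcv() and tipc_sk_rcv()) flush that queue via tipc_node_xmit_skb() afterwards, apparently so that nothing is transmitted from inside filter_rcv()/tipc_sk_proto_rcv() while they run under the socket lock. The sketch below is a minimal userspace analogue of that deferred-transmit idea, based only on what the diff shows; it is not kernel code, and every name in it (struct msg, msg_queue, filter_rcv, sk_rcv, node_xmit, sock_lock) is invented for illustration.

```c
/*
 * Illustrative userspace sketch of the "queue now, transmit after unlock"
 * pattern used by the TIPC patch above. All names are made up.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct msg {
	int dest;
	int err;
	struct msg *next;
};

/* Simple singly linked FIFO standing in for the kernel's sk_buff_head. */
struct msg_queue {
	struct msg *head, *tail;
};

static void queue_init(struct msg_queue *q)
{
	q->head = q->tail = NULL;
}

static void queue_tail(struct msg_queue *q, struct msg *m)
{
	m->next = NULL;
	if (q->tail)
		q->tail->next = m;
	else
		q->head = m;
	q->tail = m;
}

static struct msg *queue_dequeue(struct msg_queue *q)
{
	struct msg *m = q->head;

	if (m) {
		q->head = m->next;
		if (!q->head)
			q->tail = NULL;
	}
	return m;
}

/* Stands in for the per-socket lock held while messages are filtered. */
static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for tipc_node_xmit_skb(); only called after sock_lock is dropped. */
static void node_xmit(struct msg *m)
{
	printf("xmit reject err=%d to dest=%d\n", m->err, m->dest);
	free(m);
}

/*
 * Stand-in for filter_rcv(): runs under sock_lock and never transmits;
 * rejected messages are only appended to the caller's xmitq.
 */
static int filter_rcv(struct msg *m, struct msg_queue *xmitq)
{
	if (m->dest % 2) {	/* pretend odd destinations are overloaded */
		m->err = -1;	/* e.g. an overload error code */
		queue_tail(xmitq, m);
		return 0;	/* not accepted */
	}
	free(m);		/* "delivered" to the receive queue */
	return 1;
}

/* Stand-in for tipc_sk_rcv(): lock, filter, unlock, then flush xmitq. */
static void sk_rcv(struct msg *m)
{
	struct msg_queue xmitq;

	queue_init(&xmitq);

	pthread_mutex_lock(&sock_lock);
	filter_rcv(m, &xmitq);
	pthread_mutex_unlock(&sock_lock);

	/* Send pending response/rejected messages, if any */
	while ((m = queue_dequeue(&xmitq)))
		node_xmit(m);
}

int main(void)
{
	for (int dest = 0; dest < 4; dest++) {
		struct msg *m = calloc(1, sizeof(*m));

		m->dest = dest;
		sk_rcv(m);
	}
	return 0;
}
```

The point mirrored here is the design choice visible in the diff: the filtering code never calls the transmit path while the lock is held, it only queues, so any actual sending happens from the caller's context once the lock has been released.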
