Diffstat (limited to 'net/tls/tls_device.c')
-rw-r--r--	net/tls/tls_device.c	63
1 file changed, 56 insertions, 7 deletions
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 0e55f8365ce2..b74e2741f74f 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -561,7 +561,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct iov_iter	msg_iter;
-	char *kaddr = kmap(page);
+	char *kaddr;
 	struct kvec iov;
 	int rc;
 
@@ -576,6 +576,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
 		goto out;
 	}
 
+	kaddr = kmap(page);
 	iov.iov_base = kaddr + offset;
 	iov.iov_len = size;
 	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
@@ -690,15 +691,55 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
 	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
 }
 
+static bool
+tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
+			   s64 resync_req, u32 *seq)
+{
+	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
+	u32 req_seq = resync_req >> 32;
+	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
+
+	if (is_async) {
+		/* asynchronous stage: log all headers seq such that
+		 * req_seq <= seq <= end_seq, and wait for real resync request
+		 */
+		if (between(*seq, req_seq, req_end) &&
+		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
+			resync_async->log[resync_async->loglen++] = *seq;
+
+		return false;
+	}
+
+	/* synchronous stage: check against the logged entries and
+	 * proceed to check the next entries if no match was found
+	 */
+	while (resync_async->loglen) {
+		if (req_seq == resync_async->log[resync_async->loglen - 1] &&
+		    atomic64_try_cmpxchg(&resync_async->req,
+					 &resync_req, 0)) {
+			resync_async->loglen = 0;
+			*seq = req_seq;
+			return true;
+		}
+		resync_async->loglen--;
+	}
+
+	if (req_seq == *seq &&
+	    atomic64_try_cmpxchg(&resync_async->req,
+				 &resync_req, 0))
+		return true;
+
+	return false;
+}
+
 void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx;
-	bool is_req_pending, is_force_resync;
 	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+	u32 sock_data, is_req_pending;
 	struct tls_prot_info *prot;
 	s64 resync_req;
-	u32 sock_data;
 	u32 req_seq;
 
 	if (tls_ctx->rx_conf != TLS_HW)
@@ -713,11 +754,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 		resync_req = atomic64_read(&rx_ctx->resync_req);
 		req_seq = resync_req >> 32;
 		seq += TLS_HEADER_SIZE - 1;
-		is_req_pending = resync_req & RESYNC_REQ;
-		is_force_resync = resync_req & RESYNC_REQ_FORCE;
+		is_req_pending = resync_req;
 
-		if (likely(!is_req_pending) ||
-		    (!is_force_resync && req_seq != seq) ||
+		if (likely(!is_req_pending) || req_seq != seq ||
 		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
 			return;
 		break;
@@ -739,6 +778,16 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 		seq += rcd_len;
 		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
 		break;
+	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
+		resync_req = atomic64_read(&rx_ctx->resync_async->req);
+		is_req_pending = resync_req;
+		if (likely(!is_req_pending))
+			return;
+
+		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
+						resync_req, &seq))
+			return;
+		break;
 	}
 
 	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
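
For context, the sketch below is a minimal user-space model of the log-and-match scheme that the new tls_device_rx_resync_async() helper implements: while the device's resync request is still asynchronous, record-header sequence numbers inside the requested window are logged; once the synchronous request arrives, the log is searched newest-first for the sequence number the device reported. The struct, function and constant names here (resync_async_model, log_async_seq, match_sync_req, LOGMAX) are made up for illustration, and the atomic64_try_cmpxchg() handshake on resync_async->req that the kernel performs against the driver is deliberately left out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOGMAX 16	/* arbitrary stand-in for TLS_DEVICE_RESYNC_ASYNC_LOGMAX */

struct resync_async_model {
	uint32_t log[LOGMAX];
	uint16_t loglen;
};

/* Asynchronous stage: remember every record-header sequence number that
 * falls inside the window [req_seq, req_end] requested by the device.
 */
static void log_async_seq(struct resync_async_model *ra,
			  uint32_t req_seq, uint32_t req_end, uint32_t seq)
{
	/* wraparound-safe check for req_seq <= seq <= req_end */
	if (seq - req_seq <= req_end - req_seq && ra->loglen < LOGMAX)
		ra->log[ra->loglen++] = seq;
}

/* Synchronous stage: walk the log newest-first looking for the sequence
 * number the device finally reported; fall back to the record currently
 * being parsed if the log holds no match.
 */
static bool match_sync_req(struct resync_async_model *ra, uint32_t req_seq,
			   uint32_t cur_seq, uint32_t *resync_seq)
{
	while (ra->loglen) {
		if (req_seq == ra->log[ra->loglen - 1]) {
			ra->loglen = 0;
			*resync_seq = req_seq;
			return true;
		}
		ra->loglen--;
	}

	if (req_seq == cur_seq) {
		*resync_seq = req_seq;
		return true;
	}

	return false;
}

int main(void)
{
	struct resync_async_model ra = { .loglen = 0 };
	uint32_t resync_seq;

	/* async stage: three record headers observed inside the window */
	log_async_seq(&ra, 1000, 2000, 1100);
	log_async_seq(&ra, 1000, 2000, 1400);
	log_async_seq(&ra, 1000, 2000, 1900);

	/* sync stage: the device reports it locked on at sequence 1400 */
	if (match_sync_req(&ra, 1400, 2200, &resync_seq))
		printf("resync record starts at TCP seq %u\n", resync_seq);

	return 0;
}

Searching newest-first mirrors the kernel loop, which pops logged entries until the reported sequence is found and then clears the whole log before handing the sequence to tls_device_resync_rx().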
