author     Paolo Abeni <pabeni@redhat.com>  2023-06-01 13:41:40 +0200
committer  Paolo Abeni <pabeni@redhat.com>  2023-06-01 13:41:40 +0200
commit     4ff3dfc91c8458f65366f283167d1cd6f16be06f
tree       dc8bda1f1b16dd2fb670d9081b751bb82efcb410
parent     735c9ee9a374769b78c716de3c19a6c9440ede85
parent     26acc982c1c5c2835b0c6981d896329efa3557c3
Merge branch 'splice-net-handle-msg_splice_pages-in-chelsio-tls'
David Howells says:
====================
splice, net: Handle MSG_SPLICE_PAGES in Chelsio-TLS
Here are patches to make Chelsio-TLS handle the MSG_SPLICE_PAGES internal
sendmsg flag. MSG_SPLICE_PAGES is an internal hint that tells the protocol
to splice in the supplied pages if it can rather than copy them. The
driver's sendpage implementation is then turned into a wrapper around that
sendmsg path.
====================
Link: https://lore.kernel.org/r/20230531110008.642903-1-dhowells@redhat.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
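
As a rough sketch of the wrapper shape described in the cover letter (illustrative only; the real conversion is in the diff below, and my_proto_sendpage/my_proto_sendmsg are placeholder names, not functions from this series), a ->sendpage() handler built on MSG_SPLICE_PAGES wraps the single page in a one-segment bio_vec iterator and forwards it to the protocol's own sendmsg:

#include <linux/bvec.h>
#include <linux/socket.h>
#include <linux/uio.h>
#include <net/sock.h>

/*
 * Sketch only: ->sendpage() expressed as a MSG_SPLICE_PAGES sendmsg call.
 * my_proto_sendmsg() stands in for the protocol's real sendmsg handler
 * (chtls_sendmsg() in the patch below).
 */
static int my_proto_sendpage(struct sock *sk, struct page *page,
			     int offset, size_t size, int flags)
{
	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
	struct bio_vec bvec;

	/* Preserve the old "more data follows" hint across the conversion. */
	if (flags & MSG_SENDPAGE_NOTLAST)
		msg.msg_flags |= MSG_MORE;

	/* Describe the single page as a one-segment source iterator. */
	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

	return my_proto_sendmsg(sk, &msg, size);
}

Translating MSG_SENDPAGE_NOTLAST into MSG_MORE is what lets the sendmsg path keep batching frames the way the old sendpage loop did.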
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 121
1 file changed, 18 insertions(+), 103 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index ae6b17b96bf1..5724bbbb6ee0 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1092,7 +1092,17 @@ new_buf:
 		if (copy > size)
 			copy = size;
 
-		if (skb_tailroom(skb) > 0) {
+		if (msg->msg_flags & MSG_SPLICE_PAGES) {
+			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
+						   sk->sk_allocation);
+			if (err < 0) {
+				if (err == -EMSGSIZE)
+					goto new_buf;
+				goto do_fault;
+			}
+			copy = err;
+			sk_wmem_queued_add(sk, copy);
+		} else if (skb_tailroom(skb) > 0) {
 			copy = min(copy, skb_tailroom(skb));
 			if (is_tls_tx(csk))
 				copy = min_t(int, copy, csk->tlshws.txleft);
@@ -1230,110 +1240,15 @@ out_err:
 int chtls_sendpage(struct sock *sk, struct page *page,
 		   int offset, size_t size, int flags)
 {
-	struct chtls_sock *csk;
-	struct chtls_dev *cdev;
-	int mss, err, copied;
-	struct tcp_sock *tp;
-	long timeo;
-
-	tp = tcp_sk(sk);
-	copied = 0;
-	csk = rcu_dereference_sk_user_data(sk);
-	cdev = csk->cdev;
-	lock_sock(sk);
-	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-
-	err = sk_stream_wait_connect(sk, &timeo);
-	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
-	    err != 0)
-		goto out_err;
-
-	mss = csk->mss;
-	csk_set_flag(csk, CSK_TX_MORE_DATA);
-
-	while (size > 0) {
-		struct sk_buff *skb = skb_peek_tail(&csk->txq);
-		int copy, i;
+	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
+	struct bio_vec bvec;
 
-		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
-		    (copy = mss - skb->len) <= 0) {
-new_buf:
-			if (!csk_mem_free(cdev, sk))
-				goto wait_for_sndbuf;
-
-			if (is_tls_tx(csk)) {
-				skb = get_record_skb(sk,
-						     select_size(sk, size,
-								 flags,
-								 TX_TLSHDR_LEN),
-						     true);
-			} else {
-				skb = get_tx_skb(sk, 0);
-			}
-			if (!skb)
-				goto wait_for_memory;
-			copy = mss;
-		}
-		if (copy > size)
-			copy = size;
-
-		i = skb_shinfo(skb)->nr_frags;
-		if (skb_can_coalesce(skb, i, page, offset)) {
-			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
-		} else if (i < MAX_SKB_FRAGS) {
-			get_page(page);
-			skb_fill_page_desc(skb, i, page, offset, copy);
-		} else {
-			tx_skb_finalize(skb);
-			push_frames_if_head(sk);
-			goto new_buf;
-		}
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		msg.msg_flags |= MSG_MORE;
 
-		skb->len += copy;
-		if (skb->len == mss)
-			tx_skb_finalize(skb);
-		skb->data_len += copy;
-		skb->truesize += copy;
-		sk->sk_wmem_queued += copy;
-		tp->write_seq += copy;
-		copied += copy;
-		offset += copy;
-		size -= copy;
-
-		if (corked(tp, flags) &&
-		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
-			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;
-
-		if (!size)
-			break;
-
-		if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
-			push_frames_if_head(sk);
-		continue;
-wait_for_sndbuf:
-		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
-		err = csk_wait_memory(cdev, sk, &timeo);
-		if (err)
-			goto do_error;
-	}
-out:
-	csk_reset_flag(csk, CSK_TX_MORE_DATA);
-	if (copied)
-		chtls_tcp_push(sk, flags);
-done:
-	release_sock(sk);
-	return copied;
-
-do_error:
-	if (copied)
-		goto out;
-
-out_err:
-	if (csk_conn_inline(csk))
-		csk_reset_flag(csk, CSK_TX_MORE_DATA);
-	copied = sk_stream_error(sk, flags, err);
-	goto done;
+	bvec_set_page(&bvec, page, size, offset);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+	return chtls_sendmsg(sk, &msg, size);
 }
 
 static void chtls_select_window(struct sock *sk)
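
For readers unfamiliar with skb_splice_from_iter(), here is a minimal, commented sketch of what the new MSG_SPLICE_PAGES branch in chtls_sendmsg() does with one chunk (splice_msg_into_skb is a hypothetical helper name used only for illustration; the real code sits inline in the driver's sendmsg loop):

#include <linux/skbuff.h>
#include <net/sock.h>

/*
 * Sketch: splice the pages referenced by a MSG_SPLICE_PAGES msghdr into an
 * skb as fragments (no copy) and charge them to the socket's write-memory
 * accounting.  Returns the number of bytes spliced or a negative error.
 */
static ssize_t splice_msg_into_skb(struct sock *sk, struct sk_buff *skb,
				   struct msghdr *msg, ssize_t want)
{
	ssize_t spliced;

	/* Attach up to "want" bytes of the iterator's pages to the skb. */
	spliced = skb_splice_from_iter(skb, &msg->msg_iter, want,
				       sk->sk_allocation);
	if (spliced < 0)
		return spliced;	/* e.g. -EMSGSIZE: skb has no room for frags */

	/* Account the spliced bytes against the socket send buffer. */
	sk_wmem_queued_add(sk, spliced);
	return spliced;
}

On -EMSGSIZE the skb cannot take further fragments, which is why the hunk above jumps back to new_buf to finalise the current skb and allocate a fresh one before retrying.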