Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c |  8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/ccm.h                |  4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/ccm_mbox.c           | 31
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/fw.h          |  2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/tls.c         | 93
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h            |  3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c     | 32
7 files changed, 128 insertions(+), 45 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index f8b93b62a7d2..ca07c86427a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -160,9 +160,9 @@ static void mlx5e_tls_del(struct net_device *netdev,
 			    direction == TLS_OFFLOAD_CTX_DIR_TX);
 }
 
-static void mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
-			     u32 seq, u8 *rcd_sn_data,
-			     enum tls_offload_ctx_dir direction)
+static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
+			    u32 seq, u8 *rcd_sn_data,
+			    enum tls_offload_ctx_dir direction)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -177,6 +177,8 @@ static void mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
 		    be64_to_cpu(rcd_sn));
 	mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
 	atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
+
+	return 0;
 }
 
 static const struct tlsdev_ops mlx5e_tls_ops = {
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
index da1b1e20df51..a460c75522be 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm.h
+++ b/drivers/net/ethernet/netronome/nfp/ccm.h
@@ -118,6 +118,10 @@ bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size);
 struct sk_buff *
 nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
 		       unsigned int reply_size, gfp_t flags);
+int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+			       enum nfp_ccm_type type,
+			       unsigned int reply_size,
+			       unsigned int max_reply_size, bool critical);
 int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
 			     enum nfp_ccm_type type,
 			     unsigned int reply_size,
diff --git a/drivers/net/ethernet/netronome/nfp/ccm_mbox.c b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
index 02fccd90961d..f0783aa9e66e 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
+++ b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
@@ -13,7 +13,7 @@
  * form a batch. Threads come in with CMSG formed in an skb, then
  * enqueue that skb onto the request queue. If threads skb is first
  * in queue this thread will handle the mailbox operation. It copies
- * up to 16 messages into the mailbox (making sure that both requests
+ * up to 64 messages into the mailbox (making sure that both requests
  * and replies will fit. After FW is done processing the batch it
  * copies the data out and wakes waiting threads.
  * If a thread is waiting it either gets its the message completed
@@ -23,9 +23,9 @@
  * to limit potential cache line bounces.
  */
 
-#define NFP_CCM_MBOX_BATCH_LIMIT	16
+#define NFP_CCM_MBOX_BATCH_LIMIT	64
 #define NFP_CCM_TIMEOUT			(NFP_NET_POLL_TIMEOUT * 1000)
-#define NFP_CCM_MAX_QLEN		256
+#define NFP_CCM_MAX_QLEN		1024
 
 enum nfp_net_mbox_cmsg_state {
 	NFP_NET_MBOX_CMSG_STATE_QUEUED,
@@ -515,13 +515,13 @@ nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
 
 static int
 nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
-			 enum nfp_ccm_type type)
+			 enum nfp_ccm_type type, bool critical)
 {
 	struct nfp_ccm_hdr *hdr;
 
 	assert_spin_locked(&nn->mbox_cmsg.queue.lock);
 
-	if (nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
+	if (!critical && nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
 		nn_dp_warn(&nn->dp, "mailbox request queue too long\n");
 		return -EBUSY;
 	}
@@ -536,10 +536,10 @@ nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
 	return 0;
 }
 
-int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
-			     enum nfp_ccm_type type,
-			     unsigned int reply_size,
-			     unsigned int max_reply_size)
+int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+			       enum nfp_ccm_type type,
+			       unsigned int reply_size,
+			       unsigned int max_reply_size, bool critical)
 {
 	int err;
 
@@ -550,7 +550,7 @@ int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
 
 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
 
-	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type);
+	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, critical);
 	if (err)
 		goto err_unlock;
 
@@ -594,6 +594,15 @@ err_free_skb:
 	return err;
 }
 
+int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+			     enum nfp_ccm_type type,
+			     unsigned int reply_size,
+			     unsigned int max_reply_size)
+{
+	return __nfp_ccm_mbox_communicate(nn, skb, type, reply_size,
+					  max_reply_size, false);
+}
+
 static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
 {
 	struct sk_buff *skb;
@@ -650,7 +659,7 @@ int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
 
 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
 
-	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type);
+	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, false);
 	if (err)
 		goto err_unlock;
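The batching comment above explains why the request queue is gated at all; the new critical flag lets requests that must not be lost bypass the NFP_CCM_MAX_QLEN check (the TLS code below passes it for crypto DEL, presumably so stale key state is always removed from the firmware). Below is a minimal userspace sketch of that gating logic; all names here (mbox_queue, mbox_enqueue) are invented for illustration and this is not the kernel code:

/*
 * Minimal userspace model of the queue gating above -- a sketch under
 * invented names, not the kernel implementation.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_QLEN 1024	/* mirrors the new NFP_CCM_MAX_QLEN */

struct mbox_queue {
	unsigned int qlen;
};

/* critical requests skip the length check, so they are never dropped
 * just because the queue is congested */
static int mbox_enqueue(struct mbox_queue *q, bool critical)
{
	if (!critical && q->qlen >= MAX_QLEN)
		return -EBUSY;
	q->qlen++;
	return 0;
}

int main(void)
{
	struct mbox_queue q = { .qlen = MAX_QLEN };

	printf("normal:   %d\n", mbox_enqueue(&q, false));	/* -EBUSY */
	printf("critical: %d\n", mbox_enqueue(&q, true));	/* 0 */
	return 0;
}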
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
index 192ba907d91b..67413d946c4a 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
@@ -31,6 +31,8 @@ struct nfp_crypto_req_add_front {
 	u8 key_len;
 	__be16 ipver_vlan __packed;
 	u8 l4_proto;
+#define NFP_NET_TLS_NON_ADDR_KEY_LEN	8
+	u8 l3_addrs[0];
 };
 
 struct nfp_crypto_req_add_back {
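The fw.h change names the eight bytes that precede the addresses (NFP_NET_TLS_NON_ADDR_KEY_LEN) and adds a flexible l3_addrs[] member, which is what lets the TX path overwrite the address slots with a unique connection id (see nfp_net_tls_assign_conn_id in the next file). A standalone sketch of that packing follows; the field sizes are illustrative stand-ins, not the firmware ABI:

/*
 * Standalone sketch of packing a 64-bit connection id into the space
 * the L3 addresses would otherwise occupy. Sizes are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NON_ADDR_KEY_LEN 8	/* mirrors NFP_NET_TLS_NON_ADDR_KEY_LEN */

struct req_front {
	uint8_t key_len;	/* NON_ADDR_KEY_LEN plus address bytes */
	uint8_t l3_addrs[32];	/* room for two IPv6 addresses */
};

static void assign_conn_id(struct req_front *front, uint64_t id)
{
	unsigned int len = front->key_len - NON_ADDR_KEY_LEN;

	/* the id takes the first 8 bytes, the remainder is zeroed */
	memcpy(front->l3_addrs, &id, sizeof(id));
	memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
}

int main(void)
{
	struct req_front front = { .key_len = NON_ADDR_KEY_LEN + 32 };

	assign_conn_id(&front, 42);
	printf("%u\n", front.l3_addrs[0]);	/* 42 on little-endian */
	return 0;
}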
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 9f7ccb7da417..96a96b35c0ca 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -4,6 +4,7 @@
 #include <linux/bitfield.h>
 #include <linux/ipv6.h>
 #include <linux/skbuff.h>
+#include <linux/string.h>
 #include <net/tls.h>
 
 #include "../ccm.h"
@@ -112,8 +113,9 @@ nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
 	struct nfp_crypto_reply_simple *reply;
 	int err;
 
-	err = nfp_ccm_mbox_communicate(nn, skb, type,
-				       sizeof(*reply), sizeof(*reply));
+	err = __nfp_ccm_mbox_communicate(nn, skb, type,
+					 sizeof(*reply), sizeof(*reply),
+					 type == NFP_CCM_TYPE_CRYPTO_DEL);
 	if (err) {
 		nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
 		return err;
@@ -146,20 +148,38 @@ static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
 				       NFP_CCM_TYPE_CRYPTO_DEL);
 }
 
+static void
+nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
+{
+	front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
+					FIELD_PREP(NFP_NET_TLS_VLAN,
+						   NFP_NET_TLS_VLAN_UNUSED));
+}
+
+static void
+nfp_net_tls_assign_conn_id(struct nfp_net *nn,
+			   struct nfp_crypto_req_add_front *front)
+{
+	u32 len;
+	u64 id;
+
+	id = atomic64_inc_return(&nn->ktls_conn_id_gen);
+	len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;
+
+	memcpy(front->l3_addrs, &id, sizeof(id));
+	memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
+}
+
 static struct nfp_crypto_req_add_back *
-nfp_net_tls_set_ipv4(struct nfp_crypto_req_add_v4 *req, struct sock *sk,
-		     int direction)
+nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
+		     struct sock *sk, int direction)
 {
 	struct inet_sock *inet = inet_sk(sk);
 
 	req->front.key_len += sizeof(__be32) * 2;
-	req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 4) |
-					    FIELD_PREP(NFP_NET_TLS_VLAN,
-						       NFP_NET_TLS_VLAN_UNUSED));
 
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-		req->src_ip = inet->inet_saddr;
-		req->dst_ip = inet->inet_daddr;
+		nfp_net_tls_assign_conn_id(nn, &req->front);
 	} else {
 		req->src_ip = inet->inet_daddr;
 		req->dst_ip = inet->inet_saddr;
@@ -169,20 +189,16 @@ nfp_net_tls_set_ipv4(struct nfp_crypto_req_add_v4 *req, struct sock *sk,
 }
 
 static struct nfp_crypto_req_add_back *
-nfp_net_tls_set_ipv6(struct nfp_crypto_req_add_v6 *req, struct sock *sk,
-		     int direction)
+nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
+		     struct sock *sk, int direction)
 {
 #if IS_ENABLED(CONFIG_IPV6)
 	struct ipv6_pinfo *np = inet6_sk(sk);
 
 	req->front.key_len += sizeof(struct in6_addr) * 2;
-	req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 6) |
-					    FIELD_PREP(NFP_NET_TLS_VLAN,
-						       NFP_NET_TLS_VLAN_UNUSED));
 
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-		memcpy(req->src_ip, &np->saddr, sizeof(req->src_ip));
-		memcpy(req->dst_ip, &sk->sk_v6_daddr, sizeof(req->dst_ip));
+		nfp_net_tls_assign_conn_id(nn, &req->front);
 	} else {
 		memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
 		memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
@@ -202,8 +218,8 @@ nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
 	front->l4_proto = IPPROTO_TCP;
 
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-		back->src_port = inet->inet_sport;
-		back->dst_port = inet->inet_dport;
+		back->src_port = 0;
+		back->dst_port = 0;
 	} else {
 		back->src_port = inet->inet_dport;
 		back->dst_port = inet->inet_sport;
@@ -257,6 +273,7 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
 	struct nfp_crypto_reply_add *reply;
 	struct sk_buff *skb;
 	size_t req_sz;
+	void *req;
 	bool ipv6;
 	int err;
 
@@ -299,14 +316,17 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
 
 	front = (void *)skb->data;
 	front->ep_id = 0;
-	front->key_len = 8;
+	front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
 	front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
 	memset(front->resv, 0, sizeof(front->resv));
 
+	nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);
+
+	req = (void *)skb->data;
 	if (ipv6)
-		back = nfp_net_tls_set_ipv6((void *)skb->data, sk, direction);
+		back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
 	else
-		back = nfp_net_tls_set_ipv4((void *)skb->data, sk, direction);
+		back = nfp_net_tls_set_ipv4(nn, req, sk, direction);
 
 	nfp_net_tls_set_l4(front, back, sk, direction);
 
@@ -321,15 +341,29 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
 	memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 	memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));
 
+	/* Get an extra ref on the skb so we can wipe the key after */
+	skb_get(skb);
+
 	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
 				       sizeof(*reply), sizeof(*reply));
+	reply = (void *)skb->data;
+
+	/* We depend on CCM MBOX code not reallocating skb we sent
+	 * so we can clear the key material out of the memory.
+	 */
+	if (!WARN_ON_ONCE((u8 *)back < skb->head ||
+			  (u8 *)back > skb_end_pointer(skb)) &&
+	    !WARN_ON_ONCE((u8 *)&reply[1] > (u8 *)back))
+		memzero_explicit(back, sizeof(*back));
+	dev_consume_skb_any(skb); /* the extra ref from skb_get() above */
+
 	if (err) {
-		nn_dp_warn(&nn->dp, "failed to add TLS: %d\n", err);
+		nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
+			   err, direction == TLS_OFFLOAD_CTX_DIR_TX);
 		/* communicate frees skb on error */
 		goto err_conn_remove;
 	}
 
-	reply = (void *)skb->data;
 	err = -be32_to_cpu(reply->error);
 	if (err) {
 		if (err == -ENOSPC) {
@@ -383,7 +417,7 @@ nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
 	nfp_net_tls_del_fw(nn, ntls->fw_handle);
 }
 
-static void
+static int
 nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
 		   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
 {
@@ -392,11 +426,12 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
 	struct nfp_crypto_req_update *req;
 	struct sk_buff *skb;
 	gfp_t flags;
+	int err;
 
 	flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
 	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
 	if (!skb)
-		return;
+		return -ENOMEM;
 
 	ntls = tls_driver_ctx(sk, direction);
 	req = (void *)skb->data;
@@ -408,13 +443,17 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
 	memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
 
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-		nfp_net_tls_communicate_simple(nn, skb, "sync",
-					       NFP_CCM_TYPE_CRYPTO_UPDATE);
+		err = nfp_net_tls_communicate_simple(nn, skb, "sync",
+						     NFP_CCM_TYPE_CRYPTO_UPDATE);
+		if (err)
+			return err;
 		ntls->next_seq = seq;
 	} else {
 		nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
 				  sizeof(struct nfp_crypto_reply_simple));
 	}
+
+	return 0;
 }
 
 static const struct tlsdev_ops nfp_net_tls_ops = {
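The add path above takes an extra skb reference across the mailbox round trip so the key material can be cleared afterwards with memzero_explicit(), which, unlike a plain memset that the compiler may drop as a dead store, is guaranteed to happen. A userspace sketch of the wipe-after-send pattern, with stand-in names and a volatile-store emulation of memzero_explicit():

/*
 * Userspace sketch of the wipe-after-send pattern. The helper emulates
 * memzero_explicit() with volatile stores; struct and functions are
 * stand-ins, not the driver's types.
 */
#include <stddef.h>

static void memzero_explicit_sketch(void *p, size_t n)
{
	/* volatile writes keep the compiler from eliding the clear */
	volatile unsigned char *vp = p;

	while (n--)
		*vp++ = 0;
}

struct key_material {
	unsigned char key[16];
	unsigned char salt[4];
};

static int send_and_wipe(struct key_material *back)
{
	int err = 0;	/* stand-in for the mailbox round trip */

	/* wipe the secrets whether or not the send failed, as the driver does */
	memzero_explicit_sketch(back, sizeof(*back));
	return err;
}

int main(void)
{
	struct key_material km = { .key = { 0xaa }, .salt = { 0xbb } };

	return send_and_wipe(&km);
}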
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 0659756bf2bb..5d6c3738b494 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -583,6 +583,7 @@ struct nfp_net_dp {
  * @tlv_caps:		Parsed TLV capabilities
  * @ktls_tx_conn_cnt:	Number of offloaded kTLS TX connections
  * @ktls_rx_conn_cnt:	Number of offloaded kTLS RX connections
+ * @ktls_conn_id_gen:	Trivial generator for kTLS connection ids (for TX)
  * @ktls_no_space:	Counter of firmware rejecting kTLS connection due to
  *			lack of space
  * @mbox_cmsg:		Common Control Message via vNIC mailbox state
@@ -670,6 +671,8 @@ struct nfp_net {
 	unsigned int ktls_tx_conn_cnt;
 	unsigned int ktls_rx_conn_cnt;
 
+	atomic64_t ktls_conn_id_gen;
+
 	atomic_t ktls_no_space;
 
 	struct {
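The new ktls_conn_id_gen field is a bare monotonic counter read via atomic64_inc_return(). A userspace model with C11 atomics; atomic_fetch_add() returns the old value, so adding one gives the same semantics:

/*
 * Userspace model of the trivial id generator using C11 atomics.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t conn_id_gen;

static uint64_t next_conn_id(void)
{
	/* fetch_add returns the old value, so +1 mirrors inc_return */
	return atomic_fetch_add(&conn_id_gen, 1) + 1;
}

int main(void)
{
	printf("%llu %llu\n",
	       (unsigned long long)next_conn_id(),
	       (unsigned long long)next_conn_id());	/* 1 2 */
	return 0;
}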
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 270334427448..9903805717da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -822,11 +822,11 @@ static void nfp_net_tx_csum(struct nfp_net_dp *dp,
 	u64_stats_update_end(&r_vec->tx_sync);
 }
 
-#ifdef CONFIG_TLS_DEVICE
 static struct sk_buff *
 nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 	       struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
 {
+#ifdef CONFIG_TLS_DEVICE
 	struct nfp_net_tls_offload_ctx *ntls;
 	struct sk_buff *nskb;
 	bool resync_pending;
@@ -880,15 +880,40 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 
 	if (datalen) {
 		u64_stats_update_begin(&r_vec->tx_sync);
-		r_vec->hw_tls_tx++;
+		if (!skb_is_gso(skb))
+			r_vec->hw_tls_tx++;
+		else
+			r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
 		u64_stats_update_end(&r_vec->tx_sync);
 	}
 
 	memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
 	ntls->next_seq += datalen;
+#endif
 	return skb;
 }
 
+static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
+{
+#ifdef CONFIG_TLS_DEVICE
+	struct nfp_net_tls_offload_ctx *ntls;
+	u32 datalen, seq;
+
+	if (!tls_handle)
+		return;
+	if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
+		return;
+
+	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+	seq = ntohl(tcp_hdr(skb)->seq);
+
+	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+	if (ntls->next_seq == seq + datalen)
+		ntls->next_seq = seq;
+	else
+		WARN_ON_ONCE(1);
 #endif
+}
 
 static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
 {
@@ -982,13 +1007,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_BUSY;
 	}
 
-#ifdef CONFIG_TLS_DEVICE
 	skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
 	if (unlikely(!skb)) {
 		nfp_net_tx_xmit_more_flush(tx_ring);
 		return NETDEV_TX_OK;
 	}
-#endif
 
 	md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
 	if (unlikely(md_bytes < 0))
@@ -1101,6 +1124,7 @@ err_flush:
 	u64_stats_update_begin(&r_vec->tx_sync);
 	r_vec->tx_errors++;
 	u64_stats_update_end(&r_vec->tx_sync);
+	nfp_net_tls_tx_undo(skb, tls_handle);
 	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }
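Since nfp_net_tls_tx() advances next_seq before the frame is actually committed to the ring, the err_flush path now calls nfp_net_tls_tx_undo() to rewind the counter when the frame is dropped; the rewind is only valid if no later frame was accounted in the meantime, hence the equality check. A small standalone model of that check, with simplified stand-in types:

/*
 * Standalone model of the rollback check in nfp_net_tls_tx_undo().
 * Types and names are simplified stand-ins for illustration.
 */
#include <assert.h>
#include <stdint.h>

struct tls_tx_ctx {
	uint32_t next_seq;
};

/* accounting done by the TX path before the frame is committed */
static void tls_tx_account(struct tls_tx_ctx *c, uint32_t seq, uint32_t len)
{
	c->next_seq = seq + len;
}

/* only rewind if nothing was accounted after this frame */
static void tls_tx_undo(struct tls_tx_ctx *c, uint32_t seq, uint32_t len)
{
	if (c->next_seq == seq + len)
		c->next_seq = seq;
}

int main(void)
{
	struct tls_tx_ctx c = { .next_seq = 1000 };

	tls_tx_account(&c, 1000, 100);	/* frame queued: next_seq = 1100 */
	tls_tx_undo(&c, 1000, 100);	/* frame dropped: next_seq = 1000 */
	assert(c.next_seq == 1000);
	return 0;
}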