Diffstat (limited to 'net/tls')
-rw-r--r--	net/tls/tls.h			|  60
-rw-r--r--	net/tls/tls_device.c		|  58
-rw-r--r--	net/tls/tls_device_fallback.c	|  62
-rw-r--r--	net/tls/tls_main.c		| 274
-rw-r--r--	net/tls/tls_strp.c		|   3
-rw-r--r--	net/tls/tls_sw.c		| 318
6 files changed, 326 insertions(+), 449 deletions(-)
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 86cef1c68e03..28a8c0e80e3c 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -39,6 +39,7 @@
 #include <linux/types.h>
 #include <linux/skmsg.h>
 #include <net/tls.h>
+#include <net/tls_prot.h>
 
 #define TLS_PAGE_ORDER	(min_t(unsigned int, PAGE_ALLOC_COSTLY_ORDER,	\
 			       TLS_MAX_PAYLOAD_SIZE >> PAGE_SHIFT))
@@ -50,6 +51,59 @@
 #define TLS_DEC_STATS(net, field)				\
 	SNMP_DEC_STATS((net)->mib.tls_statistics, field)
 
+struct tls_cipher_desc {
+	unsigned int nonce;
+	unsigned int iv;
+	unsigned int key;
+	unsigned int salt;
+	unsigned int tag;
+	unsigned int rec_seq;
+	unsigned int iv_offset;
+	unsigned int key_offset;
+	unsigned int salt_offset;
+	unsigned int rec_seq_offset;
+	char *cipher_name;
+	bool offloadable;
+	size_t crypto_info;
+};
+
+#define TLS_CIPHER_MIN TLS_CIPHER_AES_GCM_128
+#define TLS_CIPHER_MAX TLS_CIPHER_ARIA_GCM_256
+extern const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN];
+
+static inline const struct tls_cipher_desc *get_cipher_desc(u16 cipher_type)
+{
+	if (cipher_type < TLS_CIPHER_MIN || cipher_type > TLS_CIPHER_MAX)
+		return NULL;
+
+	return &tls_cipher_desc[cipher_type - TLS_CIPHER_MIN];
+}
+
+static inline char *crypto_info_iv(struct tls_crypto_info *crypto_info,
+				   const struct tls_cipher_desc *cipher_desc)
+{
+	return (char *)crypto_info + cipher_desc->iv_offset;
+}
+
+static inline char *crypto_info_key(struct tls_crypto_info *crypto_info,
+				    const struct tls_cipher_desc *cipher_desc)
+{
+	return (char *)crypto_info + cipher_desc->key_offset;
+}
+
+static inline char *crypto_info_salt(struct tls_crypto_info *crypto_info,
+				     const struct tls_cipher_desc *cipher_desc)
+{
+	return (char *)crypto_info + cipher_desc->salt_offset;
+}
+
+static inline char *crypto_info_rec_seq(struct tls_crypto_info *crypto_info,
+					const struct tls_cipher_desc *cipher_desc)
+{
+	return (char *)crypto_info + cipher_desc->rec_seq_offset;
+}
+
 /* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
  * allocated or mapped for each TLS record. After encryption, the records are
  * stores in a linked list.
@@ -86,10 +140,6 @@ void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
 void update_sk_prot(struct sock *sk, struct tls_context *ctx);
 
 int wait_on_pending_writer(struct sock *sk, long *timeo);
-int tls_sk_query(struct sock *sk, int optname, char __user *optval,
-		 int __user *optlen);
-int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
-		  unsigned int optlen);
 
 void tls_err_abort(struct sock *sk, int err);
 
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
@@ -110,6 +160,8 @@ bool tls_sw_sock_is_readable(struct sock *sk);
 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);
+int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
+		     sk_read_actor_t read_actor);
 
 int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 void tls_device_splice_eof(struct socket *sock);
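The descriptor table replaces the per-cipher switch statements that used to be copy-pasted across tls_main.c, tls_device.c and tls_sw.c. A minimal sketch of the intended calling pattern (the caller and buffer names are invented for illustration; only get_cipher_desc() and the crypto_info_*() accessors come from this patch):

	static int example_extract_iv(struct tls_crypto_info *crypto_info)
	{
		const struct tls_cipher_desc *cipher_desc;
		char tmp_iv[MAX_IV_SIZE];

		cipher_desc = get_cipher_desc(crypto_info->cipher_type);
		if (!cipher_desc)
			return -EINVAL;	/* outside [TLS_CIPHER_MIN, TLS_CIPHER_MAX] */

		/* Plain pointer arithmetic: iv_offset was recorded with
		 * offsetof() against the cipher's tls12_crypto_info_* struct,
		 * so this is safe once optlen has been validated against
		 * cipher_desc->crypto_info (the per-cipher struct size).
		 */
		memcpy(tmp_iv, crypto_info_iv(crypto_info, cipher_desc),
		       cipher_desc->iv);

		return 0;
	}

The static_assert checks added in tls_main.c (further below) guarantee cipher_desc->iv <= MAX_IV_SIZE, so the stack buffer cannot overflow.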
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 529101eb20bd..8c94c926606a 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -440,9 +440,13 @@ static int tls_push_data(struct sock *sk,
 	long timeo;
 
 	if (flags &
-	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SPLICE_PAGES))
+	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+	      MSG_SPLICE_PAGES | MSG_EOR))
 		return -EOPNOTSUPP;
 
+	if ((flags & (MSG_MORE | MSG_EOR)) == (MSG_MORE | MSG_EOR))
+		return -EINVAL;
+
 	if (unlikely(sk->sk_err))
 		return -sk->sk_err;
 
@@ -880,7 +884,7 @@ static int
 tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
 {
 	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
-	const struct tls_cipher_size_desc *cipher_sz;
+	const struct tls_cipher_desc *cipher_desc;
 	int err, offset, copy, data_len, pos;
 	struct sk_buff *skb, *skb_iter;
 	struct scatterlist sg[1];
@@ -894,10 +898,10 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
 	default:
 		return -EINVAL;
 	}
-	cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_recv.info.cipher_type];
+	cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
 
 	rxm = strp_msg(tls_strp_msg(sw_ctx));
-	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv,
+	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
 			   sk->sk_allocation);
 	if (!orig_buf)
 		return -ENOMEM;
@@ -913,8 +917,8 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
 
 	sg_init_table(sg, 1);
 	sg_set_buf(&sg[0], buf,
-		   rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv);
-	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_sz->iv);
+		   rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv);
+	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv);
 	if (err)
 		goto free_buf;
 
@@ -925,7 +929,7 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
 	else
 		err = 0;
 
-	data_len = rxm->full_len - cipher_sz->tag;
+	data_len = rxm->full_len - cipher_desc->tag;
 
 	if (skb_pagelen(skb) > offset) {
 		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
@@ -1042,7 +1046,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_prot_info *prot = &tls_ctx->prot_info;
-	const struct tls_cipher_size_desc *cipher_sz;
+	const struct tls_cipher_desc *cipher_desc;
 	struct tls_record_info *start_marker_record;
 	struct tls_offload_context_tx *offload_ctx;
 	struct tls_crypto_info *crypto_info;
@@ -1075,46 +1079,32 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 		goto release_netdev;
 	}
 
-	switch (crypto_info->cipher_type) {
-	case TLS_CIPHER_AES_GCM_128:
-		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
-		rec_seq =
-		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
-		break;
-	case TLS_CIPHER_AES_GCM_256:
-		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
-		rec_seq =
-		 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
-		break;
-	default:
+	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+	if (!cipher_desc || !cipher_desc->offloadable) {
 		rc = -EINVAL;
 		goto release_netdev;
 	}
-	cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
 
-	/* Sanity-check the rec_seq_size for stack allocations */
-	if (cipher_sz->rec_seq > TLS_MAX_REC_SEQ_SIZE) {
-		rc = -EINVAL;
-		goto release_netdev;
-	}
+	iv = crypto_info_iv(crypto_info, cipher_desc);
+	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
 
 	prot->version = crypto_info->version;
 	prot->cipher_type = crypto_info->cipher_type;
-	prot->prepend_size = TLS_HEADER_SIZE + cipher_sz->iv;
-	prot->tag_size = cipher_sz->tag;
+	prot->prepend_size = TLS_HEADER_SIZE + cipher_desc->iv;
+	prot->tag_size = cipher_desc->tag;
 	prot->overhead_size = prot->prepend_size + prot->tag_size;
-	prot->iv_size = cipher_sz->iv;
-	prot->salt_size = cipher_sz->salt;
-	ctx->tx.iv = kmalloc(cipher_sz->iv + cipher_sz->salt, GFP_KERNEL);
+	prot->iv_size = cipher_desc->iv;
+	prot->salt_size = cipher_desc->salt;
+	ctx->tx.iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL);
 	if (!ctx->tx.iv) {
 		rc = -ENOMEM;
 		goto release_netdev;
 	}
 
-	memcpy(ctx->tx.iv + cipher_sz->salt, iv, cipher_sz->iv);
+	memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv);
 
-	prot->rec_seq_size = cipher_sz->rec_seq;
-	ctx->tx.rec_seq = kmemdup(rec_seq, cipher_sz->rec_seq, GFP_KERNEL);
+	prot->rec_seq_size = cipher_desc->rec_seq;
+	ctx->tx.rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL);
 	if (!ctx->tx.rec_seq) {
 		rc = -ENOMEM;
 		goto free_iv;
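On the device-offload TX path, tls_push_data() now accepts MSG_EOR and rejects the contradictory MSG_MORE | MSG_EOR combination; the software path gets the same treatment in tls_sw.c below. Seen from userspace (illustrative snippet; fd is assumed to be a TCP socket with kTLS TX configured):

	#include <sys/socket.h>

	/* MSG_MORE keeps the current TLS record open for the next send();
	 * MSG_EOR (newly accepted here) forces it closed. Combining the two
	 * now fails with EINVAL (previously MSG_EOR alone was rejected with
	 * EOPNOTSUPP).
	 */
	static ssize_t send_record(int fd, const void *buf, size_t len, int more)
	{
		return send(fd, buf, len, more ? MSG_MORE : MSG_EOR);
	}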
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index b28c5e296dfd..1d743f310f4f 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -55,7 +55,7 @@ static int tls_enc_record(struct aead_request *aead_req,
 			  struct tls_prot_info *prot)
 {
 	unsigned char buf[TLS_HEADER_SIZE + MAX_IV_SIZE];
-	const struct tls_cipher_size_desc *cipher_sz;
+	const struct tls_cipher_desc *cipher_desc;
 	struct scatterlist sg_in[3];
 	struct scatterlist sg_out[3];
 	unsigned int buf_size;
@@ -69,9 +69,9 @@ static int tls_enc_record(struct aead_request *aead_req,
 	default:
 		return -EINVAL;
 	}
-	cipher_sz = &tls_cipher_size_desc[prot->cipher_type];
+	cipher_desc = get_cipher_desc(prot->cipher_type);
 
-	buf_size = TLS_HEADER_SIZE + cipher_sz->iv;
+	buf_size = TLS_HEADER_SIZE + cipher_desc->iv;
 	len = min_t(int, *in_len, buf_size);
 
 	scatterwalk_copychunks(buf, in, len, 0);
@@ -85,11 +85,11 @@ static int tls_enc_record(struct aead_request *aead_req,
 	scatterwalk_pagedone(out, 1, 1);
 
 	len = buf[4] | (buf[3] << 8);
-	len -= cipher_sz->iv;
+	len -= cipher_desc->iv;
 
-	tls_make_aad(aad, len - cipher_sz->tag, (char *)&rcd_sn, buf[0], prot);
+	tls_make_aad(aad, len - cipher_desc->tag, (char *)&rcd_sn, buf[0], prot);
 
-	memcpy(iv + cipher_sz->salt, buf + TLS_HEADER_SIZE, cipher_sz->iv);
+	memcpy(iv + cipher_desc->salt, buf + TLS_HEADER_SIZE, cipher_desc->iv);
 
 	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
 	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
@@ -100,7 +100,7 @@ static int tls_enc_record(struct aead_request *aead_req,
 
 	*in_len -= len;
 	if (*in_len < 0) {
-		*in_len += cipher_sz->tag;
+		*in_len += cipher_desc->tag;
 		/* the input buffer doesn't contain the entire record.
 		 * trim len accordingly. The resulting authentication tag
 		 * will contain garbage, but we don't care, so we won't
@@ -121,7 +121,7 @@ static int tls_enc_record(struct aead_request *aead_req,
 		scatterwalk_pagedone(out, 1, 1);
 	}
 
-	len -= cipher_sz->tag;
+	len -= cipher_desc->tag;
 	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);
 
 	rc = crypto_aead_encrypt(aead_req);
@@ -309,14 +309,14 @@ static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
 			int sync_size,
 			void *dummy_buf)
 {
-	const struct tls_cipher_size_desc *cipher_sz =
-		&tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
+	const struct tls_cipher_desc *cipher_desc =
+		get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
 
 	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
 	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
 	/* Add room for authentication tag produced by crypto */
 	dummy_buf += sync_size;
-	sg_set_buf(&sg_out[2], dummy_buf, cipher_sz->tag);
+	sg_set_buf(&sg_out[2], dummy_buf, cipher_desc->tag);
 }
 
 static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
@@ -328,7 +328,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
 	int tcp_payload_offset = skb_tcp_all_headers(skb);
 	int payload_len = skb->len - tcp_payload_offset;
-	const struct tls_cipher_size_desc *cipher_sz;
+	const struct tls_cipher_desc *cipher_desc;
 	void *buf, *iv, *aad, *dummy_buf, *salt;
 	struct aead_request *aead_req;
 	struct sk_buff *nskb = NULL;
@@ -348,16 +348,16 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
 	default:
 		goto free_req;
 	}
-	cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
-	buf_len = cipher_sz->salt + cipher_sz->iv + TLS_AAD_SPACE_SIZE +
-		  sync_size + cipher_sz->tag;
+	cipher_desc = get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
+	buf_len = cipher_desc->salt + cipher_desc->iv + TLS_AAD_SPACE_SIZE +
+		  sync_size + cipher_desc->tag;
 	buf = kmalloc(buf_len, GFP_ATOMIC);
 	if (!buf)
 		goto free_req;
 
 	iv = buf;
-	memcpy(iv, salt, cipher_sz->salt);
-	aad = buf + cipher_sz->salt + cipher_sz->iv;
+	memcpy(iv, salt, cipher_desc->salt);
+	aad = buf + cipher_desc->salt + cipher_desc->iv;
 	dummy_buf = aad + TLS_AAD_SPACE_SIZE;
 
 	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
@@ -471,12 +471,15 @@ int tls_sw_fallback_init(struct sock *sk,
 			 struct tls_offload_context_tx *offload_ctx,
 			 struct tls_crypto_info *crypto_info)
 {
-	const struct tls_cipher_size_desc *cipher_sz;
-	const u8 *key;
+	const struct tls_cipher_desc *cipher_desc;
 	int rc;
 
+	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+	if (!cipher_desc || !cipher_desc->offloadable)
+		return -EINVAL;
+
 	offload_ctx->aead_send =
-	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+	    crypto_alloc_aead(cipher_desc->cipher_name, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(offload_ctx->aead_send)) {
 		rc = PTR_ERR(offload_ctx->aead_send);
 		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
 		offload_ctx->aead_send = NULL;
 		goto err_out;
 	}
 
-	switch (crypto_info->cipher_type) {
-	case TLS_CIPHER_AES_GCM_128:
-		key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;
-		break;
-	case TLS_CIPHER_AES_GCM_256:
-		key = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->key;
-		break;
-	default:
-		rc = -EINVAL;
-		goto free_aead;
-	}
-	cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
-
-	rc = crypto_aead_setkey(offload_ctx->aead_send, key, cipher_sz->key);
+	rc = crypto_aead_setkey(offload_ctx->aead_send,
+				crypto_info_key(crypto_info, cipher_desc),
+				cipher_desc->key);
 	if (rc)
 		goto free_aead;
 
-	rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_sz->tag);
+	rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_desc->tag);
 	if (rc)
 		goto free_aead;
 
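Since the fallback is only reachable for ciphers marked offloadable (currently the two AES-GCM variants), cipher_desc->cipher_name can safely replace the hard-coded "gcm(aes)" string. For orientation, the AES-GCM-128 entry this lookup returns carries the values below; this is an illustrative expansion of the table entry defined in tls_main.c (next file), using the UAPI size constants, not a quote of generated code:

	/* CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128,
	 *             "gcm(aes)", true) produces roughly:
	 */
	[TLS_CIPHER_AES_GCM_128 - TLS_CIPHER_MIN] = {
		.nonce		= 8,	/* TLS_CIPHER_AES_GCM_128_IV_SIZE */
		.iv		= 8,
		.key		= 16,	/* TLS_CIPHER_AES_GCM_128_KEY_SIZE */
		.salt		= 4,
		.tag		= 16,
		.rec_seq	= 8,
		.cipher_name	= "gcm(aes)",
		.offloadable	= true,
		.iv_offset	= offsetof(struct tls12_crypto_info_aes_gcm_128, iv),
		.key_offset	= offsetof(struct tls12_crypto_info_aes_gcm_128, key),
		.salt_offset	= offsetof(struct tls12_crypto_info_aes_gcm_128, salt),
		.rec_seq_offset	= offsetof(struct tls12_crypto_info_aes_gcm_128, rec_seq),
		.crypto_info	= sizeof(struct tls12_crypto_info_aes_gcm_128),
	},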
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 4a8ee2f6badb..02f583ff9239 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -58,23 +58,66 @@ enum {
 	TLS_NUM_PROTS,
 };
 
-#define CIPHER_SIZE_DESC(cipher) [cipher] = { \
+#define CHECK_CIPHER_DESC(cipher,ci)				\
+	static_assert(cipher ## _IV_SIZE <= MAX_IV_SIZE);		\
+	static_assert(cipher ## _REC_SEQ_SIZE <= TLS_MAX_REC_SEQ_SIZE);	\
+	static_assert(cipher ## _TAG_SIZE == TLS_TAG_SIZE);		\
+	static_assert(sizeof_field(struct ci, iv) == cipher ## _IV_SIZE);	\
+	static_assert(sizeof_field(struct ci, key) == cipher ## _KEY_SIZE);	\
+	static_assert(sizeof_field(struct ci, salt) == cipher ## _SALT_SIZE);	\
+	static_assert(sizeof_field(struct ci, rec_seq) == cipher ## _REC_SEQ_SIZE);
+
+#define __CIPHER_DESC(ci) \
+	.iv_offset = offsetof(struct ci, iv), \
+	.key_offset = offsetof(struct ci, key), \
+	.salt_offset = offsetof(struct ci, salt), \
+	.rec_seq_offset = offsetof(struct ci, rec_seq), \
+	.crypto_info = sizeof(struct ci)
+
+#define CIPHER_DESC(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = { \
+	.nonce = cipher ## _IV_SIZE, \
 	.iv = cipher ## _IV_SIZE, \
 	.key = cipher ## _KEY_SIZE, \
 	.salt = cipher ## _SALT_SIZE, \
 	.tag = cipher ## _TAG_SIZE, \
 	.rec_seq = cipher ## _REC_SEQ_SIZE, \
+	.cipher_name = algname, \
+	.offloadable = _offloadable, \
+	__CIPHER_DESC(ci), \
 }
 
-const struct tls_cipher_size_desc tls_cipher_size_desc[] = {
-	CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_128),
-	CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_256),
-	CIPHER_SIZE_DESC(TLS_CIPHER_AES_CCM_128),
-	CIPHER_SIZE_DESC(TLS_CIPHER_CHACHA20_POLY1305),
-	CIPHER_SIZE_DESC(TLS_CIPHER_SM4_GCM),
-	CIPHER_SIZE_DESC(TLS_CIPHER_SM4_CCM),
+#define CIPHER_DESC_NONCE0(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = { \
+	.nonce = 0, \
+	.iv = cipher ## _IV_SIZE, \
+	.key = cipher ## _KEY_SIZE, \
+	.salt = cipher ## _SALT_SIZE, \
+	.tag = cipher ## _TAG_SIZE, \
+	.rec_seq = cipher ## _REC_SEQ_SIZE, \
+	.cipher_name = algname, \
+	.offloadable = _offloadable, \
+	__CIPHER_DESC(ci), \
+}
+
+const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN] = {
+	CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128, "gcm(aes)", true),
+	CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256, "gcm(aes)", true),
+	CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128, "ccm(aes)", false),
+	CIPHER_DESC_NONCE0(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305, "rfc7539(chacha20,poly1305)", false),
+	CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm, "gcm(sm4)", false),
+	CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm, "ccm(sm4)", false),
+	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128, "gcm(aria)", false),
+	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256, "gcm(aria)", false),
 };
 
+CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128);
+CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256);
+CHECK_CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128);
+CHECK_CIPHER_DESC(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305);
+CHECK_CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm);
+CHECK_CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm);
+CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128);
+CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256);
+
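Each CHECK_CIPHER_DESC() invocation above expands to a block of compile-time assertions tying the UAPI size constants to the actual struct layout. For AES-GCM-128 the expansion is roughly:

	static_assert(TLS_CIPHER_AES_GCM_128_IV_SIZE <= MAX_IV_SIZE);
	static_assert(TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE <= TLS_MAX_REC_SEQ_SIZE);
	static_assert(TLS_CIPHER_AES_GCM_128_TAG_SIZE == TLS_TAG_SIZE);
	static_assert(sizeof_field(struct tls12_crypto_info_aes_gcm_128, iv) ==
		      TLS_CIPHER_AES_GCM_128_IV_SIZE);
	static_assert(sizeof_field(struct tls12_crypto_info_aes_gcm_128, key) ==
		      TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	static_assert(sizeof_field(struct tls12_crypto_info_aes_gcm_128, salt) ==
		      TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	static_assert(sizeof_field(struct tls12_crypto_info_aes_gcm_128, rec_seq) ==
		      TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

This is what allows the patch to drop the runtime rec_seq/tag/iv bound checks in tls_set_device_offload() and tls_set_sw_offload(): a mismatch now fails the build instead of returning -EINVAL at setsockopt time.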
 static const struct proto *saved_tcpv6_prot;
 static DEFINE_MUTEX(tcpv6_prot_mutex);
 static const struct proto *saved_tcpv4_prot;
@@ -392,6 +435,7 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
 				  int __user *optlen, int tx)
 {
 	int rc = 0;
+	const struct tls_cipher_desc *cipher_desc;
 	struct tls_context *ctx = tls_get_ctx(sk);
 	struct tls_crypto_info *crypto_info;
 	struct cipher_context *cctx;
@@ -430,172 +474,19 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
 		goto out;
 	}
 
-	switch (crypto_info->cipher_type) {
-	case TLS_CIPHER_AES_GCM_128: {
-		struct tls12_crypto_info_aes_gcm_128 *
-		  crypto_info_aes_gcm_128 =
-		  container_of(crypto_info,
-			       struct tls12_crypto_info_aes_gcm_128,
-			       info);
-
-		if (len != sizeof(*crypto_info_aes_gcm_128)) {
-			rc = -EINVAL;
-			goto out;
-		}
-		memcpy(crypto_info_aes_gcm_128->iv,
-		       cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
-		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
-		memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
-		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
-		if (copy_to_user(optval,
-				 crypto_info_aes_gcm_128,
-				 sizeof(*crypto_info_aes_gcm_128)))
-			rc = -EFAULT;
-		break;
-	}
-	case TLS_CIPHER_AES_GCM_256: {
-		struct tls12_crypto_info_aes_gcm_256 *
-		  crypto_info_aes_gcm_256 =
-		  container_of(crypto_info,
-			       struct tls12_crypto_info_aes_gcm_256,
-			       info);
-
-		if (len != sizeof(*crypto_info_aes_gcm_256)) {
-			rc = -EINVAL;
-			goto out;
-		}
-		memcpy(crypto_info_aes_gcm_256->iv,
-		       cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
-		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
-		memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
-		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
-		if (copy_to_user(optval,
-				 crypto_info_aes_gcm_256,
-				 sizeof(*crypto_info_aes_gcm_256)))
-			rc = -EFAULT;
-		break;
-	}
-	case TLS_CIPHER_AES_CCM_128: {
-		struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 =
-			container_of(crypto_info,
-				     struct tls12_crypto_info_aes_ccm_128, info);
-
-		if (len != sizeof(*aes_ccm_128)) {
-			rc = -EINVAL;
-			goto out;
-		}
-		memcpy(aes_ccm_128->iv,
-		       cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
-		       TLS_CIPHER_AES_CCM_128_IV_SIZE);
-		memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
-		       TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
-		if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
-			rc = -EFAULT;
-		break;
-	}
-	case TLS_CIPHER_CHACHA20_POLY1305: {
-		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 =
-			container_of(crypto_info,
-				     struct tls12_crypto_info_chacha20_poly1305,
-				     info);
-
-		if (len != sizeof(*chacha20_poly1305)) {
-			rc = -EINVAL;
-			goto out;
-		}
-		memcpy(chacha20_poly1305->iv,
-		       cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
-		       TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
-		memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
-		       TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
-		if (copy_to_user(optval, chacha20_poly1305,
-				 sizeof(*chacha20_poly1305)))
-			rc = -EFAULT;
-		break;
+	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+	if (!cipher_desc || len != cipher_desc->crypto_info) {
+		rc = -EINVAL;
+		goto out;
 	}
-	case TLS_CIPHER_SM4_GCM: {
-		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
-			container_of(crypto_info,
-				     struct tls12_crypto_info_sm4_gcm, info);
-		if (len != sizeof(*sm4_gcm_info)) {
-			rc = -EINVAL;
-			goto out;
-		}
-		memcpy(sm4_gcm_info->iv,
-		       cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
-		       TLS_CIPHER_SM4_GCM_IV_SIZE);
-		memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
-		       TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
-		if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
-			rc = -EFAULT;
-		break;
-	}
-	case TLS_CIPHER_SM4_CCM: {
-		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
-			container_of(crypto_info,
-				     struct tls12_crypto_info_sm4_ccm, info);
 
+	memcpy(crypto_info_iv(crypto_info, cipher_desc),
+	       cctx->iv + cipher_desc->salt, cipher_desc->iv);
+	memcpy(crypto_info_rec_seq(crypto_info, cipher_desc),
+	       cctx->rec_seq, cipher_desc->rec_seq);
 
-		if (len != sizeof(*sm4_ccm_info)) {
-			rc = -EINVAL;
-			goto out;
-		}
-		memcpy(sm4_ccm_info->iv,
-		       cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
-		       TLS_CIPHER_SM4_CCM_IV_SIZE);
-		memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
-		       TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
-		if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
-			rc = -EFAULT;
-		break;
-	}
-	case TLS_CIPHER_ARIA_GCM_128: {
-		struct tls12_crypto_info_aria_gcm_128 *
-		  crypto_info_aria_gcm_128 =
-		  container_of(crypto_info,
-			       struct tls12_crypto_info_aria_gcm_128,
-			       info);
-
-		if (len != sizeof(*crypto_info_aria_gcm_128)) {
-			rc = -EINVAL;
-			goto out;
-		}
-		memcpy(crypto_info_aria_gcm_128->iv,
-		       cctx->iv + TLS_CIPHER_ARIA_GCM_128_SALT_SIZE,
-		       TLS_CIPHER_ARIA_GCM_128_IV_SIZE);
-		memcpy(crypto_info_aria_gcm_128->rec_seq, cctx->rec_seq,
-		       TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE);
-		if (copy_to_user(optval,
-				 crypto_info_aria_gcm_128,
-				 sizeof(*crypto_info_aria_gcm_128)))
-			rc = -EFAULT;
-		break;
-	}
-	case TLS_CIPHER_ARIA_GCM_256: {
-		struct tls12_crypto_info_aria_gcm_256 *
-		  crypto_info_aria_gcm_256 =
-		  container_of(crypto_info,
-			       struct tls12_crypto_info_aria_gcm_256,
-			       info);
-
-		if (len != sizeof(*crypto_info_aria_gcm_256)) {
-			rc = -EINVAL;
-			goto out;
-		}
-		memcpy(crypto_info_aria_gcm_256->iv,
-		       cctx->iv + TLS_CIPHER_ARIA_GCM_256_SALT_SIZE,
-		       TLS_CIPHER_ARIA_GCM_256_IV_SIZE);
-		memcpy(crypto_info_aria_gcm_256->rec_seq, cctx->rec_seq,
-		       TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE);
-		if (copy_to_user(optval,
-				 crypto_info_aria_gcm_256,
-				 sizeof(*crypto_info_aria_gcm_256)))
-			rc = -EFAULT;
-		break;
-	}
-	default:
-		rc = -EINVAL;
-	}
+	if (copy_to_user(optval, crypto_info, cipher_desc->crypto_info))
+		rc = -EFAULT;
 
 out:
 	return rc;
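From userspace the simplification is invisible except for uniform error handling: whatever the cipher, optlen must be exactly the size of the per-cipher tls12_crypto_info_* struct. An illustrative caller for AES-GCM-128 (SOL_TLS is 282 in include/linux/socket.h but is not exported by every libc, hence the guard):

	#include <linux/tls.h>
	#include <sys/socket.h>

	#ifndef SOL_TLS
	#define SOL_TLS 282	/* include/linux/socket.h */
	#endif

	static int get_tx_state(int fd, struct tls12_crypto_info_aes_gcm_128 *info)
	{
		/* must equal cipher_desc->crypto_info on the kernel side */
		socklen_t len = sizeof(*info);

		/* On success the kernel copies back the whole crypto_info,
		 * with iv and rec_seq refreshed from the live TX state,
		 * exactly as the memcpy pair in the patch does.
		 */
		return getsockopt(fd, SOL_TLS, TLS_TX, info, &len);
	}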
@@ -696,7 +587,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
 	struct tls_crypto_info *crypto_info;
 	struct tls_crypto_info *alt_crypto_info;
 	struct tls_context *ctx = tls_get_ctx(sk);
-	size_t optsize;
+	const struct tls_cipher_desc *cipher_desc;
 	int rc = 0;
 	int conf;
 
@@ -737,46 +628,23 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
 		}
 	}
 
-	switch (crypto_info->cipher_type) {
-	case TLS_CIPHER_AES_GCM_128:
-		optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
-		break;
-	case TLS_CIPHER_AES_GCM_256: {
-		optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
-		break;
+	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+	if (!cipher_desc) {
+		rc = -EINVAL;
+		goto err_crypto_info;
 	}
-	case TLS_CIPHER_AES_CCM_128:
-		optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
-		break;
-	case TLS_CIPHER_CHACHA20_POLY1305:
-		optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
-		break;
-	case TLS_CIPHER_SM4_GCM:
-		optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
-		break;
-	case TLS_CIPHER_SM4_CCM:
-		optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
-		break;
+
+	switch (crypto_info->cipher_type) {
 	case TLS_CIPHER_ARIA_GCM_128:
-		if (crypto_info->version != TLS_1_2_VERSION) {
-			rc = -EINVAL;
-			goto err_crypto_info;
-		}
-		optsize = sizeof(struct tls12_crypto_info_aria_gcm_128);
-		break;
 	case TLS_CIPHER_ARIA_GCM_256:
 		if (crypto_info->version != TLS_1_2_VERSION) {
 			rc = -EINVAL;
 			goto err_crypto_info;
 		}
-		optsize = sizeof(struct tls12_crypto_info_aria_gcm_256);
 		break;
-	default:
-		rc = -EINVAL;
-		goto err_crypto_info;
 	}
 
-	if (optlen != optsize) {
+	if (optlen != cipher_desc->crypto_info) {
 		rc = -EINVAL;
 		goto err_crypto_info;
 	}
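The setsockopt side keeps only the ARIA TLS-1.2-only restriction as a special case; cipher validity and struct size now come from the descriptor. A minimal kTLS TX setup from userspace, assuming the TLS handshake already happened and the key material was extracted (illustrative, error handling trimmed):

	#include <linux/tls.h>
	#include <netinet/tcp.h>
	#include <string.h>
	#include <sys/socket.h>

	#ifndef SOL_TLS
	#define SOL_TLS 282
	#endif

	static int enable_ktls_tx(int fd, const unsigned char key[16],
				  const unsigned char iv[8],
				  const unsigned char salt[4],
				  const unsigned char rec_seq[8])
	{
		struct tls12_crypto_info_aes_gcm_128 ci = {
			.info.version		= TLS_1_2_VERSION,
			.info.cipher_type	= TLS_CIPHER_AES_GCM_128,
		};

		/* attach the TLS ULP first (kernel >= 4.13) */
		if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
			return -1;

		memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
		memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
		memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

		/* optlen must be sizeof(ci) exactly, or the kernel's
		 * optlen != cipher_desc->crypto_info check fails with EINVAL.
		 */
		return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
	}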
@@ -959,10 +827,12 @@ static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG]
 	ops[TLS_BASE][TLS_SW  ] = ops[TLS_BASE][TLS_BASE];
 	ops[TLS_BASE][TLS_SW  ].splice_read	= tls_sw_splice_read;
 	ops[TLS_BASE][TLS_SW  ].poll		= tls_sk_poll;
+	ops[TLS_BASE][TLS_SW  ].read_sock	= tls_sw_read_sock;
 
 	ops[TLS_SW  ][TLS_SW  ] = ops[TLS_SW  ][TLS_BASE];
 	ops[TLS_SW  ][TLS_SW  ].splice_read	= tls_sw_splice_read;
 	ops[TLS_SW  ][TLS_SW  ].poll		= tls_sk_poll;
+	ops[TLS_SW  ][TLS_SW  ].read_sock	= tls_sw_read_sock;
 
 #ifdef CONFIG_TLS_DEVICE
 	ops[TLS_HW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index f37f4a0fcd3c..ca1e0e198ceb 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -369,7 +369,6 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
 
 static int tls_strp_read_copyin(struct tls_strparser *strp)
 {
-	struct socket *sock = strp->sk->sk_socket;
 	read_descriptor_t desc;
 
 	desc.arg.data = strp;
@@ -377,7 +376,7 @@ static int tls_strp_read_copyin(struct tls_strparser *strp)
 	desc.count = 1; /* give more than one skb per call */
 
 	/* sk should be locked here, so okay to do read_sock */
-	sock->ops->read_sock(strp->sk, &desc, tls_strp_copyin);
+	tcp_read_sock(strp->sk, &desc, tls_strp_copyin);
 
 	return desc.error;
 }
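Two wiring notes on the hunks above: read_sock is only installed for the software-RX configurations (TLS_BASE/TLS_SW and TLS_SW/TLS_SW); the device-offload rows are left alone. And the tls_strp change is presumably a prerequisite rather than a cleanup: once sk_socket->ops->read_sock points at tls_sw_read_sock, the record-layer strparser can no longer go through it to pull ciphertext out of TCP, since it would re-enter the TLS layer it is feeding. Calling tcp_read_sock() directly keeps the strparser reading raw TCP data.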
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 53f944e6d8ef..1ed4a611631f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -984,6 +984,9 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
 	int ret = 0;
 	int pending;
 
+	if (!eor && (msg->msg_flags & MSG_EOR))
+		return -EINVAL;
+
 	if (unlikely(msg->msg_controllen)) {
 		ret = tls_process_cmsg(sk, msg, &record_type);
 		if (ret) {
@@ -1193,7 +1196,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	int ret;
 
 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
-			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES |
+			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
 			       MSG_SENDPAGE_NOPOLICY))
 		return -EOPNOTSUPP;
 
@@ -1845,13 +1848,10 @@ tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
 	return sk_flush_backlog(sk);
 }
 
-static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
-			      bool nonblock)
+static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
+				 bool nonblock)
 {
 	long timeo;
-	int err;
-
-	lock_sock(sk);
 
 	timeo = sock_rcvtimeo(sk, nonblock);
 
@@ -1865,26 +1865,30 @@ static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
 			      !READ_ONCE(ctx->reader_present), &wait);
 		remove_wait_queue(&ctx->wq, &wait);
 
-		if (timeo <= 0) {
-			err = -EAGAIN;
-			goto err_unlock;
-		}
-		if (signal_pending(current)) {
-			err = sock_intr_errno(timeo);
-			goto err_unlock;
-		}
+		if (timeo <= 0)
+			return -EAGAIN;
+		if (signal_pending(current))
+			return sock_intr_errno(timeo);
 	}
 
 	WRITE_ONCE(ctx->reader_present, 1);
 
 	return 0;
+}
 
-err_unlock:
-	release_sock(sk);
+static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+			      bool nonblock)
+{
+	int err;
+
+	lock_sock(sk);
+	err = tls_rx_reader_acquire(sk, ctx, nonblock);
+	if (err)
+		release_sock(sk);
 	return err;
 }
 
-static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
+static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
 {
 	if (unlikely(ctx->reader_contended)) {
 		if (wq_has_sleeper(&ctx->wq))
@@ -1896,6 +1900,11 @@ static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
 	}
 
 	WRITE_ONCE(ctx->reader_present, 0);
+}
+
+static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
+{
+	tls_rx_reader_release(sk, ctx);
 	release_sock(sk);
 }
 
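The lock/unlock pair is split so that a path whose caller already holds the socket lock can take just the reader slot. In sketch form (the functions are from this patch; the pairing shown is the intended usage, not new code):

	/* recvmsg/splice_read: caller does not hold the socket lock */
	err = tls_rx_reader_lock(sk, ctx, nonblock);	/* lock_sock() + acquire */
	if (err < 0)
		return err;
	/* ... consume records ... */
	tls_rx_reader_unlock(sk, ctx);			/* release + release_sock() */

	/* read_sock: tcp_read_sock()-style callers already hold the socket
	 * lock, so only the reader slot is taken and released.
	 */
	err = tls_rx_reader_acquire(sk, ctx, true);
	if (err < 0)
		return err;
	/* ... */
	tls_rx_reader_release(sk, ctx);

tls_sw_read_sock() below is the first user of the acquire/release halves.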
@@ -2193,6 +2202,102 @@ splice_requeue:
 	goto splice_read_end;
 }
 
+int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
+		     sk_read_actor_t read_actor)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+	struct tls_prot_info *prot = &tls_ctx->prot_info;
+	struct strp_msg *rxm = NULL;
+	struct sk_buff *skb = NULL;
+	struct sk_psock *psock;
+	size_t flushed_at = 0;
+	bool released = true;
+	struct tls_msg *tlm;
+	ssize_t copied = 0;
+	ssize_t decrypted;
+	int err, used;
+
+	psock = sk_psock_get(sk);
+	if (psock) {
+		sk_psock_put(sk, psock);
+		return -EINVAL;
+	}
+	err = tls_rx_reader_acquire(sk, ctx, true);
+	if (err < 0)
+		return err;
+
+	/* If crypto failed the connection is broken */
+	err = ctx->async_wait.err;
+	if (err)
+		goto read_sock_end;
+
+	decrypted = 0;
+	do {
+		if (!skb_queue_empty(&ctx->rx_list)) {
+			skb = __skb_dequeue(&ctx->rx_list);
+			rxm = strp_msg(skb);
+			tlm = tls_msg(skb);
+		} else {
+			struct tls_decrypt_arg darg;
+
+			err = tls_rx_rec_wait(sk, NULL, true, released);
+			if (err <= 0)
+				goto read_sock_end;
+
+			memset(&darg.inargs, 0, sizeof(darg.inargs));
+
+			err = tls_rx_one_record(sk, NULL, &darg);
+			if (err < 0) {
+				tls_err_abort(sk, -EBADMSG);
+				goto read_sock_end;
+			}
+
+			released = tls_read_flush_backlog(sk, prot, INT_MAX,
+							  0, decrypted,
+							  &flushed_at);
+			skb = darg.skb;
+			rxm = strp_msg(skb);
+			tlm = tls_msg(skb);
+			decrypted += rxm->full_len;
+
+			tls_rx_rec_done(ctx);
+		}
+
+		/* read_sock does not support reading control messages */
+		if (tlm->control != TLS_RECORD_TYPE_DATA) {
+			err = -EINVAL;
+			goto read_sock_requeue;
+		}
+
+		used = read_actor(desc, skb, rxm->offset, rxm->full_len);
+		if (used <= 0) {
+			if (!copied)
+				err = used;
+			goto read_sock_requeue;
+		}
+		copied += used;
+		if (used < rxm->full_len) {
+			rxm->offset += used;
+			rxm->full_len -= used;
+			if (!desc->count)
+				goto read_sock_requeue;
+		} else {
+			consume_skb(skb);
+			if (!desc->count)
+				skb = NULL;
+		}
+	} while (skb);
+
+read_sock_end:
+	tls_rx_reader_release(sk, ctx);
+	return copied ? : err;
+
+read_sock_requeue:
+	__skb_queue_head(&ctx->rx_list, skb);
+	goto read_sock_end;
+}
+
 bool tls_sw_sock_is_readable(struct sock *sk)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
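For context, a consumer of the new hook supplies an actor with the sk_read_actor_t signature, and tls_sw_read_sock() invokes it once per decrypted DATA record (possibly consuming a record across several calls). A sketch of a hypothetical in-kernel user; all names here are invented, only the read_sock calling convention comes from the patch:

	static int demo_actor(read_descriptor_t *desc, struct sk_buff *skb,
			      unsigned int offset, size_t len)
	{
		char *dst = desc->arg.data;
		size_t want = min_t(size_t, len, desc->count);

		/* Copy out at most desc->count bytes of decrypted payload. */
		if (skb_copy_bits(skb, offset, dst, want))
			return -EFAULT;

		desc->arg.data = dst + want;
		desc->count -= want;

		/* Return bytes consumed; 0 or an error stops the loop and
		 * requeues the unread part of the record on rx_list.
		 */
		return want;
	}

	static int demo_read(struct socket *sock, void *buf, size_t size)
	{
		read_descriptor_t desc = {
			.arg.data	= buf,
			.count		= size,
		};

		/* For a kTLS socket this dispatches to tls_sw_read_sock();
		 * read_sock callers must already hold the socket lock.
		 */
		return sock->ops->read_sock(sock->sk, &desc, demo_actor);
	}

A zero or negative actor return maps onto the "return copied ? : err" convention above: partial progress reports the byte count, an immediate failure reports the error.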
@@ -2485,10 +2590,10 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 	struct tls_sw_context_rx *sw_ctx_rx = NULL;
 	struct cipher_context *cctx;
 	struct crypto_aead **aead;
-	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
 	struct crypto_tfm *tfm;
-	char *iv, *rec_seq, *key, *salt, *cipher_name;
-	size_t keysize;
+	char *iv, *rec_seq, *key, *salt;
+	const struct tls_cipher_desc *cipher_desc;
+	u16 nonce_size;
 	int rc = 0;
 
 	if (!ctx) {
@@ -2542,148 +2647,19 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 		aead = &sw_ctx_rx->aead_recv;
 	}
 
-	switch (crypto_info->cipher_type) {
-	case TLS_CIPHER_AES_GCM_128: {
-		struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
-
-		gcm_128_info = (void *)crypto_info;
-		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
-		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
-		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
-		iv = gcm_128_info->iv;
-		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
-		rec_seq = gcm_128_info->rec_seq;
-		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
-		key = gcm_128_info->key;
-		salt = gcm_128_info->salt;
-		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
-		cipher_name = "gcm(aes)";
-		break;
-	}
-	case TLS_CIPHER_AES_GCM_256: {
-		struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
-
-		gcm_256_info = (void *)crypto_info;
-		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
-		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
-		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
-		iv = gcm_256_info->iv;
-		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
-		rec_seq = gcm_256_info->rec_seq;
-		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
-		key = gcm_256_info->key;
-		salt = gcm_256_info->salt;
-		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
-		cipher_name = "gcm(aes)";
-		break;
-	}
-	case TLS_CIPHER_AES_CCM_128: {
-		struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
-
-		ccm_128_info = (void *)crypto_info;
-		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
-		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
-		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
-		iv = ccm_128_info->iv;
-		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
-		rec_seq = ccm_128_info->rec_seq;
-		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
-		key = ccm_128_info->key;
-		salt = ccm_128_info->salt;
-		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
-		cipher_name = "ccm(aes)";
-		break;
-	}
-	case TLS_CIPHER_CHACHA20_POLY1305: {
-		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
-
-		chacha20_poly1305_info = (void *)crypto_info;
-		nonce_size = 0;
-		tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
-		iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
-		iv = chacha20_poly1305_info->iv;
-		rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
-		rec_seq = chacha20_poly1305_info->rec_seq;
-		keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
-		key = chacha20_poly1305_info->key;
-		salt = chacha20_poly1305_info->salt;
-		salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
-		cipher_name = "rfc7539(chacha20,poly1305)";
-		break;
-	}
-	case TLS_CIPHER_SM4_GCM: {
-		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
-
-		sm4_gcm_info = (void *)crypto_info;
-		nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
-		tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
-		iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
-		iv = sm4_gcm_info->iv;
-		rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
-		rec_seq = sm4_gcm_info->rec_seq;
-		keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
-		key = sm4_gcm_info->key;
-		salt = sm4_gcm_info->salt;
-		salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
-		cipher_name = "gcm(sm4)";
-		break;
-	}
-	case TLS_CIPHER_SM4_CCM: {
-		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
-
-		sm4_ccm_info = (void *)crypto_info;
-		nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
-		tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
-		iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
-		iv = sm4_ccm_info->iv;
-		rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
-		rec_seq = sm4_ccm_info->rec_seq;
-		keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
-		key = sm4_ccm_info->key;
-		salt = sm4_ccm_info->salt;
-		salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
-		cipher_name = "ccm(sm4)";
-		break;
-	}
-	case TLS_CIPHER_ARIA_GCM_128: {
-		struct tls12_crypto_info_aria_gcm_128 *aria_gcm_128_info;
-
-		aria_gcm_128_info = (void *)crypto_info;
-		nonce_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
-		tag_size = TLS_CIPHER_ARIA_GCM_128_TAG_SIZE;
-		iv_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
-		iv = aria_gcm_128_info->iv;
-		rec_seq_size = TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE;
-		rec_seq = aria_gcm_128_info->rec_seq;
-		keysize = TLS_CIPHER_ARIA_GCM_128_KEY_SIZE;
-		key = aria_gcm_128_info->key;
-		salt = aria_gcm_128_info->salt;
-		salt_size = TLS_CIPHER_ARIA_GCM_128_SALT_SIZE;
-		cipher_name = "gcm(aria)";
-		break;
-	}
-	case TLS_CIPHER_ARIA_GCM_256: {
-		struct tls12_crypto_info_aria_gcm_256 *gcm_256_info;
-
-		gcm_256_info = (void *)crypto_info;
-		nonce_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
-		tag_size = TLS_CIPHER_ARIA_GCM_256_TAG_SIZE;
-		iv_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
-		iv = gcm_256_info->iv;
-		rec_seq_size = TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE;
-		rec_seq = gcm_256_info->rec_seq;
-		keysize = TLS_CIPHER_ARIA_GCM_256_KEY_SIZE;
-		key = gcm_256_info->key;
-		salt = gcm_256_info->salt;
-		salt_size = TLS_CIPHER_ARIA_GCM_256_SALT_SIZE;
-		cipher_name = "gcm(aria)";
-		break;
-	}
-	default:
+	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+	if (!cipher_desc) {
 		rc = -EINVAL;
 		goto free_priv;
 	}
 
+	nonce_size = cipher_desc->nonce;
+
+	iv = crypto_info_iv(crypto_info, cipher_desc);
+	key = crypto_info_key(crypto_info, cipher_desc);
+	salt = crypto_info_salt(crypto_info, cipher_desc);
+	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
+
 	if (crypto_info->version == TLS_1_3_VERSION) {
 		nonce_size = 0;
 		prot->aad_size = TLS_HEADER_SIZE;
@@ -2694,9 +2670,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 	}
 
 	/* Sanity-check the sizes for stack allocations. */
-	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
-	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE ||
-	    prot->aad_size > TLS_MAX_AAD_SIZE) {
+	if (nonce_size > MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE) {
 		rc = -EINVAL;
 		goto free_priv;
 	}
@@ -2704,28 +2678,29 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 	prot->version = crypto_info->version;
 	prot->cipher_type = crypto_info->cipher_type;
 	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
-	prot->tag_size = tag_size;
+	prot->tag_size = cipher_desc->tag;
 	prot->overhead_size = prot->prepend_size +
 			      prot->tag_size + prot->tail_size;
-	prot->iv_size = iv_size;
-	prot->salt_size = salt_size;
-	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
+	prot->iv_size = cipher_desc->iv;
+	prot->salt_size = cipher_desc->salt;
+	cctx->iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL);
 	if (!cctx->iv) {
 		rc = -ENOMEM;
 		goto free_priv;
 	}
 	/* Note: 128 & 256 bit salt are the same size */
-	prot->rec_seq_size = rec_seq_size;
-	memcpy(cctx->iv, salt, salt_size);
-	memcpy(cctx->iv + salt_size, iv, iv_size);
-	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
+	prot->rec_seq_size = cipher_desc->rec_seq;
+	memcpy(cctx->iv, salt, cipher_desc->salt);
+	memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
+
+	cctx->rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL);
 	if (!cctx->rec_seq) {
 		rc = -ENOMEM;
 		goto free_iv;
 	}
 
 	if (!*aead) {
-		*aead = crypto_alloc_aead(cipher_name, 0, 0);
+		*aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
 		if (IS_ERR(*aead)) {
 			rc = PTR_ERR(*aead);
 			*aead = NULL;
@@ -2735,8 +2710,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 
 	ctx->push_pending_record = tls_sw_push_pending_record;
 
-	rc = crypto_aead_setkey(*aead, key, keysize);
-
+	rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
 	if (rc)
 		goto free_aead;