author     Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:10:50 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:10:50 -0800
commit     36289a03bcd3aabdf66de75cb6d1b4ee15726438
tree       1230c6391678f9255f74d7a4f65e95ea8a39d452 /arch/arm64
parent     69308402ca6f5b80a5a090ade0b13bd146891420
parent     8b84475318641c2b89320859332544cf187e1cbd
Merge tag 'v6.3-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"API:
- Use kmap_local instead of kmap_atomic
- Change request callback to take void pointer
- Print FIPS status in /proc/crypto (when enabled)
Algorithms:
- Add rfc4106/gcm support on arm64
- Add ARIA AVX2/512 support on x86
Drivers:
- Add TRNG driver for StarFive SoC
- Delete ux500/hash driver (subsumed by stm32/hash)
- Add zlib support in qat
- Add RSA support in aspeed"
* tag 'v6.3-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (156 commits)
crypto: x86/aria-avx - Do not use avx2 instructions
crypto: aspeed - Fix modular aspeed-acry
crypto: hisilicon/qm - fix coding style issues
crypto: hisilicon/qm - update comments to match function
crypto: hisilicon/qm - change function names
crypto: hisilicon/qm - use min() instead of min_t()
crypto: hisilicon/qm - remove some unused defines
crypto: proc - Print fips status
crypto: crypto4xx - Call dma_unmap_page when done
crypto: octeontx2 - Fix objects shared between several modules
crypto: nx - Fix sparse warnings
crypto: ecc - Silence sparse warning
tls: Pass rec instead of aead_req into tls_encrypt_done
crypto: api - Remove completion function scaffolding
tls: Remove completion function scaffolding
tipc: Remove completion function scaffolding
net: ipv6: Remove completion function scaffolding
net: ipv4: Remove completion function scaffolding
net: macsec: Remove completion function scaffolding
dm: Remove completion function scaffolding
...
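
Editor's note: the API change called out above ("Change request callback to take void pointer") is what the "Remove completion function scaffolding" commits in this shortlog finish off: crypto completion callbacks now receive the caller's context pointer directly instead of a struct crypto_async_request. The following is a minimal sketch of the new callback shape only; the callback, context, and setup names are hypothetical and not taken from this merge.

#include <crypto/aead.h>
#include <linux/completion.h>

struct my_req_ctx {
	struct completion done;
	int err;
};

/* New-style completion: the void * is exactly the data pointer registered below. */
static void my_aead_done(void *data, int err)
{
	struct my_req_ctx *rctx = data;	/* previously: callback got a crypto_async_request and used req->data */

	rctx->err = err;
	complete(&rctx->done);
}

static void my_aead_setup(struct aead_request *req, struct my_req_ctx *rctx)
{
	init_completion(&rctx->done);
	/* Registration is unchanged: the last argument is what the callback receives. */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  my_aead_done, rctx);
}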
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-glue.c  |  57
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c    | 145
-rw-r--r--  arch/arm64/crypto/sm4-ce-ccm-glue.c  |  44
-rw-r--r--  arch/arm64/crypto/sm4-ce-gcm-glue.c  |  51
4 files changed, 182 insertions, 115 deletions
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index c4f14415f5f0..25cd3808ecbe 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -161,43 +161,39 @@ static int ccm_encrypt(struct aead_request *req)
 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
 	err = skcipher_walk_aead_encrypt(&walk, req, false);
-	if (unlikely(err))
-		return err;
 
 	kernel_neon_begin();
 
 	if (req->assoclen)
 		ccm_calculate_auth_mac(req, mac);
 
-	do {
+	while (walk.nbytes) {
 		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+		bool final = walk.nbytes == walk.total;
 
-		if (walk.nbytes == walk.total)
+		if (final)
 			tail = 0;
 
 		ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   walk.nbytes - tail, ctx->key_enc,
 				   num_rounds(ctx), mac, walk.iv);
 
-		if (walk.nbytes == walk.total)
-			ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+		if (!final)
+			kernel_neon_end();
+		err = skcipher_walk_done(&walk, tail);
+		if (!final)
+			kernel_neon_begin();
+	}
 
-		kernel_neon_end();
+	ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
 
-		if (walk.nbytes) {
-			err = skcipher_walk_done(&walk, tail);
-			if (unlikely(err))
-				return err;
-			if (unlikely(walk.nbytes))
-				kernel_neon_begin();
-		}
-	} while (walk.nbytes);
+	kernel_neon_end();
 
 	/* copy authtag to end of dst */
 	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
 				 crypto_aead_authsize(aead), 1);
 
-	return 0;
+	return err;
 }
 
 static int ccm_decrypt(struct aead_request *req)
@@ -219,37 +215,36 @@ static int ccm_decrypt(struct aead_request *req)
 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
 	err = skcipher_walk_aead_decrypt(&walk, req, false);
-	if (unlikely(err))
-		return err;
 
 	kernel_neon_begin();
 
 	if (req->assoclen)
 		ccm_calculate_auth_mac(req, mac);
 
-	do {
+	while (walk.nbytes) {
 		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+		bool final = walk.nbytes == walk.total;
 
-		if (walk.nbytes == walk.total)
+		if (final)
 			tail = 0;
 
 		ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   walk.nbytes - tail, ctx->key_enc,
 				   num_rounds(ctx), mac, walk.iv);
 
-		if (walk.nbytes == walk.total)
-			ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+		if (!final)
+			kernel_neon_end();
+		err = skcipher_walk_done(&walk, tail);
+		if (!final)
+			kernel_neon_begin();
+	}
 
-		kernel_neon_end();
+	ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
 
-		if (walk.nbytes) {
-			err = skcipher_walk_done(&walk, tail);
-			if (unlikely(err))
-				return err;
-			if (unlikely(walk.nbytes))
-				kernel_neon_begin();
-		}
-	} while (walk.nbytes);
+	kernel_neon_end();
+
+	if (unlikely(err))
+		return err;
 
 	/* compare calculated auth tag with the stored one */
 	scatterwalk_map_and_copy(buf, req->src,
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index e5e9adc1fcf4..97331b454ea8 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -9,6 +9,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
+#include <crypto/gcm.h>
 #include <crypto/algapi.h>
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
@@ -28,7 +29,8 @@ MODULE_ALIAS_CRYPTO("ghash");
 
 #define GHASH_BLOCK_SIZE	16
 #define GHASH_DIGEST_SIZE	16
-#define GCM_IV_SIZE		12
+
+#define RFC4106_NONCE_SIZE	4
 
 struct ghash_key {
 	be128			k;
@@ -43,6 +45,7 @@ struct ghash_desc_ctx {
 
 struct gcm_aes_ctx {
 	struct crypto_aes_ctx	aes_key;
+	u8			nonce[RFC4106_NONCE_SIZE];
 	struct ghash_key	ghash_key;
 };
 
@@ -226,8 +229,8 @@ static int num_rounds(struct crypto_aes_ctx *ctx)
 	return 6 + ctx->key_length / 4;
 }
 
-static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
-		      unsigned int keylen)
+static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *inkey,
+			  unsigned int keylen)
 {
 	struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
 	u8 key[GHASH_BLOCK_SIZE];
@@ -258,17 +261,9 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
 	return 0;
 }
 
-static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 {
-	switch (authsize) {
-	case 4:
-	case 8:
-	case 12 ... 16:
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
+	return crypto_gcm_check_authsize(authsize);
 }
 
 static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
@@ -302,13 +297,12 @@ static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
 	}
 }
 
-static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
+static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
 	u8 buf[GHASH_BLOCK_SIZE];
 	struct scatter_walk walk;
-	u32 len = req->assoclen;
 	int buf_count = 0;
 
 	scatterwalk_start(&walk, req->src);
@@ -338,27 +332,25 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
 	}
 }
 
-static int gcm_encrypt(struct aead_request *req)
+static int gcm_encrypt(struct aead_request *req, char *iv, int assoclen)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
 	int nrounds = num_rounds(&ctx->aes_key);
 	struct skcipher_walk walk;
 	u8 buf[AES_BLOCK_SIZE];
-	u8 iv[AES_BLOCK_SIZE];
 	u64 dg[2] = {};
 	be128 lengths;
 	u8 *tag;
 	int err;
 
-	lengths.a = cpu_to_be64(req->assoclen * 8);
+	lengths.a = cpu_to_be64(assoclen * 8);
 	lengths.b = cpu_to_be64(req->cryptlen * 8);
 
-	if (req->assoclen)
-		gcm_calculate_auth_mac(req, dg);
+	if (assoclen)
+		gcm_calculate_auth_mac(req, dg, assoclen);
 
-	memcpy(iv, req->iv, GCM_IV_SIZE);
-	put_unaligned_be32(2, iv + GCM_IV_SIZE);
+	put_unaligned_be32(2, iv + GCM_AES_IV_SIZE);
 
 	err = skcipher_walk_aead_encrypt(&walk, req, false);
 
@@ -403,7 +395,7 @@ static int gcm_encrypt(struct aead_request *req)
 	return 0;
 }
 
-static int gcm_decrypt(struct aead_request *req)
+static int gcm_decrypt(struct aead_request *req, char *iv, int assoclen)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
@@ -412,21 +404,19 @@ static int gcm_decrypt(struct aead_request *req)
 	struct skcipher_walk walk;
 	u8 otag[AES_BLOCK_SIZE];
 	u8 buf[AES_BLOCK_SIZE];
-	u8 iv[AES_BLOCK_SIZE];
 	u64 dg[2] = {};
 	be128 lengths;
 	u8 *tag;
 	int ret;
 	int err;
 
-	lengths.a = cpu_to_be64(req->assoclen * 8);
+	lengths.a = cpu_to_be64(assoclen * 8);
 	lengths.b = cpu_to_be64((req->cryptlen - authsize) * 8);
 
-	if (req->assoclen)
-		gcm_calculate_auth_mac(req, dg);
+	if (assoclen)
+		gcm_calculate_auth_mac(req, dg, assoclen);
 
-	memcpy(iv, req->iv, GCM_IV_SIZE);
-	put_unaligned_be32(2, iv + GCM_IV_SIZE);
+	put_unaligned_be32(2, iv + GCM_AES_IV_SIZE);
 
 	scatterwalk_map_and_copy(otag, req->src,
 				 req->assoclen + req->cryptlen - authsize,
@@ -471,14 +461,76 @@ static int gcm_decrypt(struct aead_request *req)
 	return ret ? -EBADMSG : 0;
 }
 
-static struct aead_alg gcm_aes_alg = {
-	.ivsize			= GCM_IV_SIZE,
+static int gcm_aes_encrypt(struct aead_request *req)
+{
+	u8 iv[AES_BLOCK_SIZE];
+
+	memcpy(iv, req->iv, GCM_AES_IV_SIZE);
+	return gcm_encrypt(req, iv, req->assoclen);
+}
+
+static int gcm_aes_decrypt(struct aead_request *req)
+{
+	u8 iv[AES_BLOCK_SIZE];
+
+	memcpy(iv, req->iv, GCM_AES_IV_SIZE);
+	return gcm_decrypt(req, iv, req->assoclen);
+}
+
+static int rfc4106_setkey(struct crypto_aead *tfm, const u8 *inkey,
+			  unsigned int keylen)
+{
+	struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
+	int err;
+
+	keylen -= RFC4106_NONCE_SIZE;
+	err = gcm_aes_setkey(tfm, inkey, keylen);
+	if (err)
+		return err;
+
+	memcpy(ctx->nonce, inkey + keylen, RFC4106_NONCE_SIZE);
+	return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+	return crypto_rfc4106_check_authsize(authsize);
+}
+
+static int rfc4106_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
+	u8 iv[AES_BLOCK_SIZE];
+
+	memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
+	memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);
+
+	return crypto_ipsec_check_assoclen(req->assoclen) ?:
+	       gcm_encrypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE);
+}
+
+static int rfc4106_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
+	u8 iv[AES_BLOCK_SIZE];
+
+	memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
+	memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);
+
+	return crypto_ipsec_check_assoclen(req->assoclen) ?:
+	       gcm_decrypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE);
+}
+
+static struct aead_alg gcm_aes_algs[] = {{
+	.ivsize			= GCM_AES_IV_SIZE,
 	.chunksize		= AES_BLOCK_SIZE,
 	.maxauthsize		= AES_BLOCK_SIZE,
-	.setkey			= gcm_setkey,
-	.setauthsize		= gcm_setauthsize,
-	.encrypt		= gcm_encrypt,
-	.decrypt		= gcm_decrypt,
+	.setkey			= gcm_aes_setkey,
+	.setauthsize		= gcm_aes_setauthsize,
+	.encrypt		= gcm_aes_encrypt,
+	.decrypt		= gcm_aes_decrypt,
 
 	.base.cra_name		= "gcm(aes)",
 	.base.cra_driver_name	= "gcm-aes-ce",
@@ -487,7 +539,23 @@ static struct aead_alg gcm_aes_alg = {
 	.base.cra_ctxsize	= sizeof(struct gcm_aes_ctx) +
 				  4 * sizeof(u64[2]),
 	.base.cra_module	= THIS_MODULE,
-};
+}, {
+	.ivsize			= GCM_RFC4106_IV_SIZE,
+	.chunksize		= AES_BLOCK_SIZE,
+	.maxauthsize		= AES_BLOCK_SIZE,
+	.setkey			= rfc4106_setkey,
+	.setauthsize		= rfc4106_setauthsize,
+	.encrypt		= rfc4106_encrypt,
+	.decrypt		= rfc4106_decrypt,
+
+	.base.cra_name		= "rfc4106(gcm(aes))",
+	.base.cra_driver_name	= "rfc4106-gcm-aes-ce",
+	.base.cra_priority	= 300,
+	.base.cra_blocksize	= 1,
+	.base.cra_ctxsize	= sizeof(struct gcm_aes_ctx) +
+				  4 * sizeof(u64[2]),
+	.base.cra_module	= THIS_MODULE,
+}};
 
 static int __init ghash_ce_mod_init(void)
 {
@@ -495,7 +563,8 @@ static int __init ghash_ce_mod_init(void)
 		return -ENODEV;
 
 	if (cpu_have_named_feature(PMULL))
-		return crypto_register_aead(&gcm_aes_alg);
+		return crypto_register_aeads(gcm_aes_algs,
+					     ARRAY_SIZE(gcm_aes_algs));
 
 	return crypto_register_shash(&ghash_alg);
 }
@@ -503,7 +572,7 @@ static int __init ghash_ce_mod_init(void)
 static void __exit ghash_ce_mod_exit(void)
 {
 	if (cpu_have_named_feature(PMULL))
-		crypto_unregister_aead(&gcm_aes_alg);
+		crypto_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs));
 	else
 		crypto_unregister_shash(&ghash_alg);
 }
diff --git a/arch/arm64/crypto/sm4-ce-ccm-glue.c b/arch/arm64/crypto/sm4-ce-ccm-glue.c
index f2cec7b52efc..5e7e17bbec81 100644
--- a/arch/arm64/crypto/sm4-ce-ccm-glue.c
+++ b/arch/arm64/crypto/sm4-ce-ccm-glue.c
@@ -166,7 +166,7 @@ static int ccm_crypt(struct aead_request *req, struct skcipher_walk *walk,
 			     unsigned int nbytes, u8 *mac))
 {
 	u8 __aligned(8) ctr0[SM4_BLOCK_SIZE];
-	int err;
+	int err = 0;
 
 	/* preserve the initial ctr0 for the TAG */
 	memcpy(ctr0, walk->iv, SM4_BLOCK_SIZE);
@@ -177,33 +177,37 @@ static int ccm_crypt(struct aead_request *req, struct skcipher_walk *walk,
 	if (req->assoclen)
 		ccm_calculate_auth_mac(req, mac);
 
-	do {
+	while (walk->nbytes && walk->nbytes != walk->total) {
 		unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
-		const u8 *src = walk->src.virt.addr;
-		u8 *dst = walk->dst.virt.addr;
 
-		if (walk->nbytes == walk->total)
-			tail = 0;
+		sm4_ce_ccm_crypt(rkey_enc, walk->dst.virt.addr,
+				 walk->src.virt.addr, walk->iv,
+				 walk->nbytes - tail, mac);
+
+		kernel_neon_end();
+
+		err = skcipher_walk_done(walk, tail);
+
+		kernel_neon_begin();
+	}
 
-		if (walk->nbytes - tail)
-			sm4_ce_ccm_crypt(rkey_enc, dst, src, walk->iv,
-					 walk->nbytes - tail, mac);
+	if (walk->nbytes) {
+		sm4_ce_ccm_crypt(rkey_enc, walk->dst.virt.addr,
+				 walk->src.virt.addr, walk->iv,
+				 walk->nbytes, mac);
 
-		if (walk->nbytes == walk->total)
-			sm4_ce_ccm_final(rkey_enc, ctr0, mac);
+		sm4_ce_ccm_final(rkey_enc, ctr0, mac);
 
 		kernel_neon_end();
 
-		if (walk->nbytes) {
-			err = skcipher_walk_done(walk, tail);
-			if (err)
-				return err;
-			if (walk->nbytes)
-				kernel_neon_begin();
-		}
-	} while (walk->nbytes > 0);
+		err = skcipher_walk_done(walk, 0);
+	} else {
+		sm4_ce_ccm_final(rkey_enc, ctr0, mac);
 
-	return 0;
+		kernel_neon_end();
+	}
+
+	return err;
 }
 
 static int ccm_encrypt(struct aead_request *req)
diff --git a/arch/arm64/crypto/sm4-ce-gcm-glue.c b/arch/arm64/crypto/sm4-ce-gcm-glue.c
index c450a2025ca9..73bfb6972d3a 100644
--- a/arch/arm64/crypto/sm4-ce-gcm-glue.c
+++ b/arch/arm64/crypto/sm4-ce-gcm-glue.c
@@ -135,22 +135,23 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[])
 }
 
 static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
-		     struct sm4_gcm_ctx *ctx, u8 ghash[],
+		     u8 ghash[], int err,
 		     void (*sm4_ce_pmull_gcm_crypt)(const u32 *rkey_enc,
 				u8 *dst, const u8 *src, u8 *iv,
 				unsigned int nbytes, u8 *ghash,
 				const u8 *ghash_table, const u8 *lengths))
 {
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
 	u8 __aligned(8) iv[SM4_BLOCK_SIZE];
 	be128 __aligned(8) lengths;
-	int err;
 
 	memset(ghash, 0, SM4_BLOCK_SIZE);
 
 	lengths.a = cpu_to_be64(req->assoclen * 8);
 	lengths.b = cpu_to_be64(walk->total * 8);
 
-	memcpy(iv, walk->iv, GCM_IV_SIZE);
+	memcpy(iv, req->iv, GCM_IV_SIZE);
 	put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 	kernel_neon_begin();
@@ -158,49 +159,51 @@ static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
 	if (req->assoclen)
 		gcm_calculate_auth_mac(req, ghash);
 
-	do {
+	while (walk->nbytes) {
 		unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
 		const u8 *src = walk->src.virt.addr;
 		u8 *dst = walk->dst.virt.addr;
 
 		if (walk->nbytes == walk->total) {
-			tail = 0;
-
 			sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
 					       walk->nbytes, ghash,
 					       ctx->ghash_table,
 					       (const u8 *)&lengths);
-		} else if (walk->nbytes - tail) {
-			sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
-					       walk->nbytes - tail, ghash,
-					       ctx->ghash_table, NULL);
+
+			kernel_neon_end();
+
+			return skcipher_walk_done(walk, 0);
 		}
 
+		sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
+				       walk->nbytes - tail, ghash,
+				       ctx->ghash_table, NULL);
+
 		kernel_neon_end();
 
 		err = skcipher_walk_done(walk, tail);
-		if (err)
-			return err;
-		if (walk->nbytes)
-			kernel_neon_begin();
-	} while (walk->nbytes > 0);
 
-	return 0;
+		kernel_neon_begin();
+	}
+
+	sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, NULL, NULL, iv,
+			       walk->nbytes, ghash, ctx->ghash_table,
+			       (const u8 *)&lengths);
+
+	kernel_neon_end();
+
+	return err;
 }
 
 static int gcm_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
 	u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
 	struct skcipher_walk walk;
 	int err;
 
 	err = skcipher_walk_aead_encrypt(&walk, req, false);
-	if (err)
-		return err;
-
-	err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_enc);
+	err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_enc);
 	if (err)
 		return err;
 
@@ -215,17 +218,13 @@ static int gcm_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	unsigned int authsize = crypto_aead_authsize(aead);
-	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
 	u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
 	u8 authtag[SM4_BLOCK_SIZE];
 	struct skcipher_walk walk;
 	int err;
 
 	err = skcipher_walk_aead_decrypt(&walk, req, false);
-	if (err)
-		return err;
-
-	err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_dec);
+	err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_dec);
 	if (err)
 		return err;
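
Editor's note: the "rfc4106(gcm(aes))" AEAD registered by the ghash-ce-glue.c changes above follows the usual rfc4106 keying convention: the last four key bytes are the nonce (salt) that rfc4106_setkey() copies into ctx->nonce, and the per-request IV is the 8-byte explicit part. The sketch below only exercises allocation, setkey and setauthsize under that assumption; the function name and the all-zero key bytes are hypothetical, and error handling is abbreviated.

#include <crypto/aead.h>
#include <linux/err.h>

static int rfc4106_gcm_aes_smoke_test(void)
{
	/* 16-byte AES key followed by the 4-byte rfc4106 nonce (salt). */
	static const u8 key[16 + 4] = { };
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Dispatches to rfc4106_setkey(): strips the trailing nonce bytes. */
	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		/* Validated by crypto_rfc4106_check_authsize(). */
		err = crypto_aead_setauthsize(tfm, 16);

	crypto_free_aead(tfm);
	return err;
}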