From eed1e1afd8d542d9644534c1b712599b5d680007 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Mon, 16 May 2016 02:53:36 +0200 Subject: crypto: user - no parsing of CRYPTO_MSG_GETALG The CRYPTO_MSG_GETALG netlink message type provides a buffer to the kernel to retrieve information from the kernel. The data buffer will not provide any input and will not be read. Hence the nlmsg_parse is not applicable to this netlink message type. This patch fixes the following kernel log message when using this netlink interface: netlink: 208 bytes leftover after parsing attributes in process `XXX'. Patch successfully tested with libkcapi from [1] which uses CRYPTO_MSG_GETALG to obtain cipher-specific information from the kernel. [1] http://www.chronox.de/libkcapi.html Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/crypto_user.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 43fe85f20d57..f71960dea882 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -516,10 +516,12 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return err; } - err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX, - crypto_policy); - if (err < 0) - return err; + if (type != (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE)) { + err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, + CRYPTOCFGA_MAX, crypto_policy); + if (err < 0) + return err; + } if (link->doit == NULL) return -EINVAL; -- cgit v1.2.3-70-g09d2 From ed494d4fa234443a005489afb6b9583415ad89ff Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Tue, 31 May 2016 13:11:57 +0200 Subject: crypto: drbg - reduce number of setkey calls The CTR DRBG code always set the key for each sym cipher invocation even though the key has not been changed. The patch ensures that the setkey is only invoked when a new key is generated by the DRBG. With this patch, the CTR DRBG performance increases by more than 150%. 
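The saving comes from hoisting the expensive key schedule out of the per-block loop. A minimal standalone C sketch of the pattern (a toy stand-in, not the kernel crypto API; toy_setkey/toy_encrypt and the iteration count are purely illustrative):

#include <stdio.h>
#include <string.h>

struct toy_cipher {
	unsigned char round_keys[16];
	unsigned long setkey_calls;
};

static void toy_setkey(struct toy_cipher *c, const unsigned char *key)
{
	memcpy(c->round_keys, key, 16);	/* stand-in for the real key schedule */
	c->setkey_calls++;
}

static void toy_encrypt(struct toy_cipher *c, unsigned char *blk)
{
	for (int i = 0; i < 16; i++)	/* stand-in for the block operation */
		blk[i] ^= c->round_keys[i];
}

int main(void)
{
	struct toy_cipher c = { .setkey_calls = 0 };
	unsigned char key[16] = { 0 }, blk[16] = { 0 };

	/* old pattern: setkey on every sym cipher invocation */
	for (int i = 0; i < 1000; i++) {
		toy_setkey(&c, key);
		toy_encrypt(&c, blk);
	}
	printf("per-block setkey: %lu key schedules\n", c.setkey_calls);

	/* patched pattern: setkey only when the key actually changes */
	c.setkey_calls = 0;
	toy_setkey(&c, key);
	for (int i = 0; i < 1000; i++)
		toy_encrypt(&c, blk);
	printf("hoisted setkey: %lu key schedules\n", c.setkey_calls);
	return 0;
}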
Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/drbg.c | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) (limited to 'crypto') diff --git a/crypto/drbg.c b/crypto/drbg.c index 0a3538f6cf22..0aca2b908c76 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -252,8 +252,10 @@ MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192"); MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128"); MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128"); -static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key, - unsigned char *outval, const struct drbg_string *in); +static void drbg_kcapi_symsetkey(struct drbg_state *drbg, + const unsigned char *key); +static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, + const struct drbg_string *in); static int drbg_init_sym_kernel(struct drbg_state *drbg); static int drbg_fini_sym_kernel(struct drbg_state *drbg); @@ -270,6 +272,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg, drbg_string_fill(&data, out, drbg_blocklen(drbg)); /* 10.4.3 step 2 / 4 */ + drbg_kcapi_symsetkey(drbg, key); list_for_each_entry(curr, in, list) { const unsigned char *pos = curr->buf; size_t len = curr->len; @@ -278,7 +281,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg, /* 10.4.3 step 4.2 */ if (drbg_blocklen(drbg) == cnt) { cnt = 0; - ret = drbg_kcapi_sym(drbg, key, out, &data); + ret = drbg_kcapi_sym(drbg, out, &data); if (ret) return ret; } @@ -290,7 +293,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg, } /* 10.4.3 step 4.2 for last block */ if (cnt) - ret = drbg_kcapi_sym(drbg, key, out, &data); + ret = drbg_kcapi_sym(drbg, out, &data); return ret; } @@ -425,6 +428,7 @@ static int drbg_ctr_df(struct drbg_state *drbg, /* 10.4.2 step 12: overwriting of outval is implemented in next step */ /* 10.4.2 step 13 */ + drbg_kcapi_symsetkey(drbg, temp); while (generated_len < bytes_to_return) { short blocklen = 0; /* @@ -432,7 +436,7 @@ static int drbg_ctr_df(struct drbg_state *drbg, * implicit as the key is only drbg_blocklen in size based on * the implementation of the cipher function callback */ - ret = drbg_kcapi_sym(drbg, temp, X, &cipherin); + ret = drbg_kcapi_sym(drbg, X, &cipherin); if (ret) goto out; blocklen = (drbg_blocklen(drbg) < @@ -488,6 +492,7 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed, ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed); if (ret) goto out; + drbg_kcapi_symsetkey(drbg, drbg->C); } drbg_string_fill(&cipherin, drbg->V, drbg_blocklen(drbg)); @@ -500,7 +505,7 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed, crypto_inc(drbg->V, drbg_blocklen(drbg)); /* * 10.2.1.2 step 2.2 */ - ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin); + ret = drbg_kcapi_sym(drbg, temp + len, &cipherin); if (ret) goto out; /* 10.2.1.2 step 2.3 and 3 */ @@ -517,6 +522,7 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed, /* 10.2.1.2 step 5 */ memcpy(drbg->C, temp, drbg_keylen(drbg)); + drbg_kcapi_symsetkey(drbg, drbg->C); /* 10.2.1.2 step 6 */ memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg)); ret = 0; @@ -546,6 +552,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg, ret = drbg_ctr_update(drbg, addtl, 2); if (ret) return 0; + drbg_kcapi_symsetkey(drbg, drbg->C); } /* 10.2.1.5.2 step 4.1 */ @@ -554,7 +561,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg, while (len < buflen) { int outlen = 0; /* 10.2.1.5.2 step 4.2 */ - ret = drbg_kcapi_sym(drbg, drbg->C, drbg->scratchpad, &data); + ret = 
drbg_kcapi_sym(drbg, drbg->scratchpad, &data); if (ret) { len = ret; goto out; } @@ -1653,13 +1660,21 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg) return 0; } -static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key, - unsigned char *outval, const struct drbg_string *in) +static void drbg_kcapi_symsetkey(struct drbg_state *drbg, + const unsigned char *key) { struct crypto_cipher *tfm = (struct crypto_cipher *)drbg->priv_data; crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg))); +} + +static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, + const struct drbg_string *in) +{ + struct crypto_cipher *tfm = + (struct crypto_cipher *)drbg->priv_data; + /* there is only component in *in */ BUG_ON(in->len < drbg_blocklen(drbg)); crypto_cipher_encrypt_one(tfm, outval, in->buf); -- cgit v1.2.3-70-g09d2 From 88aff46040a8a13ade85e8e450d781f89899e9fb Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Thu, 2 Jun 2016 13:48:38 +0800 Subject: crypto: skcipher - remove unused header cpumask.h Remove the unused header cpumask.h from crypto/ablkcipher.c. Signed-off-by: Geliang Tang Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 1 - 1 file changed, 1 deletion(-) (limited to 'crypto') diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index e5b5721809e2..6b80516778c6 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -14,7 +14,6 @@ */ #include -#include #include #include #include -- cgit v1.2.3-70-g09d2 From 355912852115cd8aa4ad02c25182ae615ce925fb Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Tue, 14 Jun 2016 07:34:13 +0200 Subject: crypto: drbg - use CTR AES instead of ECB AES The CTR DRBG derives its random data from the CTR that is encrypted with AES. This patch changes the CTR DRBG implementation such that the CTR AES mode is employed. This allows the use of streamlined CTR AES implementations such as ctr-aes-aesni. Unfortunately there are the following subtle changes we need to apply when using the CTR AES mode: - the CTR mode increments the counter after the cipher operation, but the CTR DRBG requires the increment before the cipher op. Hence, the crypto_inc is applied to the counter (drbg->V) once it is recalculated. - the CTR mode wants to encrypt data, but the CTR DRBG is interested in the encrypted counter only. The full CTR mode is the XOR of the encrypted counter with the plaintext data. To access the encrypted counter, the patch uses a NULL data vector as plaintext to be "encrypted". Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + crypto/drbg.c | 193 ++++++++++++++++++++++++++++++++++++-------- include/crypto/drbg.h | 9 +++ 3 files changed, 149 insertions(+), 54 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 1d33beb6a1ae..c903f1832f2c 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1567,6 +1567,7 @@ config CRYPTO_DRBG_HASH config CRYPTO_DRBG_CTR bool "Enable CTR DRBG" select CRYPTO_AES + depends on CRYPTO_CTR help Enable the CTR DRBG variant as defined in NIST SP800-90A.
diff --git a/crypto/drbg.c b/crypto/drbg.c index 0aca2b908c76..4ee1a9c79420 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -258,6 +258,7 @@ static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, const struct drbg_string *in); static int drbg_init_sym_kernel(struct drbg_state *drbg); static int drbg_fini_sym_kernel(struct drbg_state *drbg); +static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, u8 *outbuf, u32 outlen); /* BCC function for CTR DRBG as defined in 10.4.3 */ static int drbg_ctr_bcc(struct drbg_state *drbg, @@ -482,36 +483,37 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed, drbg_blocklen(drbg); unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */ unsigned int len = 0; - struct drbg_string cipherin; if (3 > reseed) memset(df_data, 0, drbg_statelen(drbg)); - /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */ - if (seed) { - ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed); + if (!reseed) { + /* + * The DRBG uses the CTR mode of the underlying AES cipher. The + * CTR mode increments the counter value after the AES operation + * but SP800-90A requires that the counter is incremented before + * the AES operation. Hence, we increment it at the time we set + * it by one. + */ + crypto_inc(drbg->V, drbg_blocklen(drbg)); + + ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C, + drbg_keylen(drbg)); if (ret) goto out; - drbg_kcapi_symsetkey(drbg, drbg->C); } - drbg_string_fill(&cipherin, drbg->V, drbg_blocklen(drbg)); - /* - * 10.2.1.3.2 steps 2 and 3 are already covered as the allocation - * zeroizes all memory during initialization - */ - while (len < (drbg_statelen(drbg))) { - /* 10.2.1.2 step 2.1 */ - crypto_inc(drbg->V, drbg_blocklen(drbg)); - /* - * 10.2.1.2 step 2.2 */ - ret = drbg_kcapi_sym(drbg, temp + len, &cipherin); + /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */ + if (seed) { + ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed); if (ret) goto out; - /* 10.2.1.2 step 2.3 and 3 */ - len += drbg_blocklen(drbg); } + ret = drbg_kcapi_sym_ctr(drbg, temp, drbg_statelen(drbg)); + if (ret) + return ret; + /* 10.2.1.2 step 4 */ temp_p = temp; df_data_p = df_data; @@ -522,9 +524,14 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed, /* 10.2.1.2 step 5 */ memcpy(drbg->C, temp, drbg_keylen(drbg)); - drbg_kcapi_symsetkey(drbg, drbg->C); + ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C, + drbg_keylen(drbg)); + if (ret) + goto out; /* 10.2.1.2 step 6 */ memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg)); + /* See above: increment counter by one to compensate timing of CTR op */ + crypto_inc(drbg->V, drbg_blocklen(drbg)); ret = 0; out: @@ -543,46 +550,26 @@ static int drbg_ctr_generate(struct drbg_state *drbg, unsigned char *buf, unsigned int buflen, struct list_head *addtl) { - int len = 0; - int ret = 0; - struct drbg_string data; + int ret; + int len = min_t(int, buflen, INT_MAX); /* 10.2.1.5.2 step 2 */ if (addtl && !list_empty(addtl)) { ret = drbg_ctr_update(drbg, addtl, 2); if (ret) return 0; - drbg_kcapi_symsetkey(drbg, drbg->C); } /* 10.2.1.5.2 step 4.1 */ - crypto_inc(drbg->V, drbg_blocklen(drbg)); - drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg)); - while (len < buflen) { - int outlen = 0; - /* 10.2.1.5.2 step 4.2 */ - ret = drbg_kcapi_sym(drbg, drbg->scratchpad, &data); - if (ret) { - len = ret; - goto out; - } - outlen = (drbg_blocklen(drbg) < (buflen - len)) ? 
- drbg_blocklen(drbg) : (buflen - len); - /* 10.2.1.5.2 step 4.3 */ - memcpy(buf + len, drbg->scratchpad, outlen); - len += outlen; - /* 10.2.1.5.2 step 6 */ - if (len < buflen) - crypto_inc(drbg->V, drbg_blocklen(drbg)); - } + ret = drbg_kcapi_sym_ctr(drbg, buf, len); + if (ret) + return ret; /* 10.2.1.5.2 step 6 */ ret = drbg_ctr_update(drbg, NULL, 3); if (ret) len = ret; -out: - memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); return len; } @@ -1634,10 +1621,46 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval, #endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */ #ifdef CONFIG_CRYPTO_DRBG_CTR +static int drbg_fini_sym_kernel(struct drbg_state *drbg) +{ + struct crypto_cipher *tfm = + (struct crypto_cipher *)drbg->priv_data; + if (tfm) + crypto_free_cipher(tfm); + drbg->priv_data = NULL; + + if (drbg->ctr_handle) + crypto_free_skcipher(drbg->ctr_handle); + drbg->ctr_handle = NULL; + + if (drbg->ctr_req) + skcipher_request_free(drbg->ctr_req);; + drbg->ctr_req = NULL; + + kfree(drbg->ctr_null_value_buf); + drbg->ctr_null_value = NULL; + + return 0; +} + +static void drbg_skcipher_cb(struct crypto_async_request *req, int error) +{ + struct drbg_state *drbg = req->data; + + if (error == -EINPROGRESS) + return; + drbg->ctr_async_err = error; + complete(&drbg->ctr_completion); +} + +#define DRBG_CTR_NULL_LEN 128 static int drbg_init_sym_kernel(struct drbg_state *drbg) { - int ret = 0; struct crypto_cipher *tfm; + struct crypto_skcipher *sk_tfm; + struct skcipher_request *req; + unsigned int alignmask; + char ctr_name[CRYPTO_MAX_ALG_NAME]; tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0); if (IS_ERR(tfm)) { @@ -1647,16 +1670,41 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) } BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm)); drbg->priv_data = tfm; - return ret; -} -static int drbg_fini_sym_kernel(struct drbg_state *drbg) -{ - struct crypto_cipher *tfm = - (struct crypto_cipher *)drbg->priv_data; - if (tfm) - crypto_free_cipher(tfm); - drbg->priv_data = NULL; + if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", + drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) { + drbg_fini_sym_kernel(drbg); + return -EINVAL; + } + sk_tfm = crypto_alloc_skcipher(ctr_name, 0, 0); + if (IS_ERR(sk_tfm)) { + pr_info("DRBG: could not allocate CTR cipher TFM handle: %s\n", + ctr_name); + drbg_fini_sym_kernel(drbg); + return PTR_ERR(sk_tfm); + } + drbg->ctr_handle = sk_tfm; + + req = skcipher_request_alloc(sk_tfm, GFP_KERNEL); + if (!req) { + pr_info("DRBG: could not allocate request queue\n"); + drbg_fini_sym_kernel(drbg); + return PTR_ERR(req); + } + drbg->ctr_req = req; + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + drbg_skcipher_cb, drbg); + + alignmask = crypto_skcipher_alignmask(sk_tfm); + drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask, + GFP_KERNEL); + if (!drbg->ctr_null_value_buf) { + drbg_fini_sym_kernel(drbg); + return -ENOMEM; + } + drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf, + alignmask + 1); + return 0; } @@ -1680,6 +1728,43 @@ static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, crypto_cipher_encrypt_one(tfm, outval, in->buf); return 0; } + +static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, u8 *outbuf, u32 outlen) +{ + struct scatterlist sg_in; + + sg_init_one(&sg_in, drbg->ctr_null_value, DRBG_CTR_NULL_LEN); + + while (outlen) { + u32 cryptlen = min_t(u32, outlen, DRBG_CTR_NULL_LEN); + struct scatterlist sg_out; + int ret; + + 
sg_init_one(&sg_out, outbuf, cryptlen); + skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out, + cryptlen, drbg->V); + ret = crypto_skcipher_encrypt(drbg->ctr_req); + switch (ret) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + ret = wait_for_completion_interruptible( + &drbg->ctr_completion); + if (!ret && !drbg->ctr_async_err) { + reinit_completion(&drbg->ctr_completion); + break; + } + default: + return ret; + } + init_completion(&drbg->ctr_completion); + + outlen -= cryptlen; + } + + return 0; +} #endif /* CONFIG_CRYPTO_DRBG_CTR */ /*************************************************************** diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index d961b2b16f55..b2fe15d1ceba 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -115,6 +116,14 @@ struct drbg_state { /* some memory the DRBG can use for its operation */ unsigned char *scratchpad; void *priv_data; /* Cipher handle */ + + struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */ + struct skcipher_request *ctr_req; /* CTR mode request handle */ + __u8 *ctr_null_value_buf; /* CTR mode unaligned buffer */ + __u8 *ctr_null_value; /* CTR mode aligned zero buf */ + struct completion ctr_completion; /* CTR mode async handler */ + int ctr_async_err; /* CTR mode async error */ + bool seeded; /* DRBG fully seeded? */ bool pr; /* Prediction resistance enabled? */ struct work_struct seed_work; /* asynchronous seeding support */ -- cgit v1.2.3-70-g09d2 From 3cfc3b97211238ffc1a7885ebe62f899180fe043 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Tue, 14 Jun 2016 07:35:13 +0200 Subject: crypto: drbg - use aligned buffers Hardware cipher implementations may require aligned buffers. All buffers that are potentially processed by a cipher are now aligned.
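The pattern the patch applies (kmalloc a little extra, then round the pointer up) can be sketched in standalone userspace C; PTR_ALIGN is open-coded below, and the 64-byte alignmask and buffer length are just example values:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* open-coded PTR_ALIGN: round p up to the next multiple of a (a power of two) */
static void *ptr_align(void *p, uintptr_t a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

int main(void)
{
	uintptr_t mask = 63;	/* example alignmask for 64-byte alignment */
	size_t len = 48;	/* e.g. a drbg_statelen()-sized buffer */
	unsigned char *buf, *aligned;

	/* allocate len + mask so an aligned window of len bytes always fits */
	buf = malloc(len + mask);
	if (!buf)
		return 1;
	aligned = ptr_align(buf, mask + 1);

	printf("raw %p -> aligned %p (offset into overallocation: %zu)\n",
	       (void *)buf, (void *)aligned, (size_t)(aligned - buf));
	free(buf);	/* free the original pointer, never the aligned one */
	return 0;
}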
Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/drbg.c | 44 ++++++++++++++++++++++++-------------------- include/crypto/drbg.h | 3 +++ 2 files changed, 27 insertions(+), 20 deletions(-) (limited to 'crypto') diff --git a/crypto/drbg.c b/crypto/drbg.c index 4ee1a9c79420..8ac3ea11d437 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1139,11 +1139,11 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) if (!drbg) return; kzfree(drbg->V); - drbg->V = NULL; + drbg->Vbuf = NULL; kzfree(drbg->C); - drbg->C = NULL; - kzfree(drbg->scratchpad); - drbg->scratchpad = NULL; + drbg->Cbuf = NULL; + kzfree(drbg->scratchpadbuf); + drbg->scratchpadbuf = NULL; drbg->reseed_ctr = 0; drbg->d_ops = NULL; drbg->core = NULL; @@ -1179,12 +1179,18 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) goto err; } - drbg->V = kmalloc(drbg_statelen(drbg), GFP_KERNEL); - if (!drbg->V) - goto err; - drbg->C = kmalloc(drbg_statelen(drbg), GFP_KERNEL); - if (!drbg->C) + ret = drbg->d_ops->crypto_init(drbg); + if (ret < 0) goto err; + + drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL); + if (!drbg->Vbuf) + goto fini; + drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1); + drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL); + if (!drbg->Cbuf) + goto fini; + drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1); /* scratchpad is only generated for CTR and Hash */ if (drbg->core->flags & DRBG_HMAC) sb_size = 0; @@ -1198,13 +1204,16 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg); if (0 < sb_size) { - drbg->scratchpad = kzalloc(sb_size, GFP_KERNEL); - if (!drbg->scratchpad) - goto err; + drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL); + if (!drbg->scratchpadbuf) + goto fini; + drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1); } return 0; +fini: + drbg->d_ops->crypto_fini(drbg); err: drbg_dealloc_state(drbg); return ret; @@ -1472,10 +1481,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, if (ret) goto unlock; - ret = -EFAULT; - if (drbg->d_ops->crypto_init(drbg)) - goto err; - ret = drbg_prepare_hrng(drbg); if (ret) goto free_everything; @@ -1499,8 +1504,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, mutex_unlock(&drbg->drbg_mutex); return ret; -err: - drbg_dealloc_state(drbg); unlock: mutex_unlock(&drbg->drbg_mutex); return ret; @@ -1585,7 +1588,8 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg) sdesc->shash.tfm = tfm; sdesc->shash.flags = 0; drbg->priv_data = sdesc; - return 0; + + return crypto_shash_alignmask(tfm); } static int drbg_fini_hash_kernel(struct drbg_state *drbg) @@ -1705,7 +1709,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf, alignmask + 1); - return 0; + return alignmask; } static void drbg_kcapi_symsetkey(struct drbg_state *drbg, diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index b2fe15d1ceba..61580b19f9f6 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -108,13 +108,16 @@ struct drbg_test_data { struct drbg_state { struct mutex drbg_mutex; /* lock around DRBG */ unsigned char *V; /* internal state 10.1.1.1 1a) */ + unsigned char *Vbuf; /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ unsigned char *C; + unsigned char *Cbuf; /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ size_t reseed_ctr; size_t reseed_threshold; /* some memory the DRBG can use for its operation */ unsigned 
char *scratchpad; + unsigned char *scratchpadbuf; void *priv_data; /* Cipher handle */ struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */ -- cgit v1.2.3-70-g09d2 From a07203fbfcf146b737781ee4658ed198956036ca Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Tue, 14 Jun 2016 07:35:37 +0200 Subject: crypto: drbg - use full CTR AES for update The CTR DRBG update function performs a full CTR AES operation including the XOR with "plaintext" data. Hence, remove the XOR from the code and use the CTR mode to do the XOR. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/drbg.c | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) (limited to 'crypto') diff --git a/crypto/drbg.c b/crypto/drbg.c index 8ac3ea11d437..8ceb71699dea 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -258,7 +258,10 @@ static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, const struct drbg_string *in); static int drbg_init_sym_kernel(struct drbg_state *drbg); static int drbg_fini_sym_kernel(struct drbg_state *drbg); -static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, u8 *outbuf, u32 outlen); +static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, + u8 *inbuf, u32 inbuflen, + u8 *outbuf, u32 outlen); +#define DRBG_CTR_NULL_LEN 128 /* BCC function for CTR DRBG as defined in 10.4.3 */ static int drbg_ctr_bcc(struct drbg_state *drbg, @@ -481,8 +484,6 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed, unsigned char *temp = drbg->scratchpad; unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) + drbg_blocklen(drbg); - unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */ - unsigned int len = 0; if (3 > reseed) memset(df_data, 0, drbg_statelen(drbg)); @@ -510,18 +511,11 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed, goto out; } - ret = drbg_kcapi_sym_ctr(drbg, temp, drbg_statelen(drbg)); + ret = drbg_kcapi_sym_ctr(drbg, df_data, drbg_statelen(drbg), + temp, drbg_statelen(drbg)); if (ret) return ret; - /* 10.2.1.2 step 4 */ - temp_p = temp; - df_data_p = df_data; - for (len = 0; len < drbg_statelen(drbg); len++) { - *temp_p ^= *df_data_p; - df_data_p++; temp_p++; - } - /* 10.2.1.2 step 5 */ memcpy(drbg->C, temp, drbg_keylen(drbg)); ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C, @@ -561,7 +555,8 @@ static int drbg_ctr_generate(struct drbg_state *drbg, } /* 10.2.1.5.2 step 4.1 */ - ret = drbg_kcapi_sym_ctr(drbg, buf, len); + ret = drbg_kcapi_sym_ctr(drbg, drbg->ctr_null_value, DRBG_CTR_NULL_LEN, + buf, len); if (ret) return ret; @@ -1657,7 +1652,6 @@ static void drbg_skcipher_cb(struct crypto_async_request *req, int error) complete(&drbg->ctr_completion); } -#define DRBG_CTR_NULL_LEN 128 static int drbg_init_sym_kernel(struct drbg_state *drbg) { struct crypto_cipher *tfm; @@ -1733,14 +1727,16 @@ static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, return 0; } -static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, u8 *outbuf, u32 outlen) +static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, + u8 *inbuf, u32 inlen, + u8 *outbuf, u32 outlen) { struct scatterlist sg_in; - sg_init_one(&sg_in, drbg->ctr_null_value, DRBG_CTR_NULL_LEN); + sg_init_one(&sg_in, inbuf, inlen); while (outlen) { - u32 cryptlen = min_t(u32, outlen, DRBG_CTR_NULL_LEN); + u32 cryptlen = min_t(u32, inlen, outlen); struct scatterlist sg_out; int ret; -- cgit v1.2.3-70-g09d2 From 103eb3f7bfb4fce0e299afbf50fef8ffa8d9d38c Mon Sep 17 00:00:00 2001 From: Stephan 
Mueller Date: Tue, 14 Jun 2016 07:36:06 +0200 Subject: crypto: drbg - avoid duplicate maintenance of key The TFM object maintains the key for the CTR DRBG. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/drbg.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/drbg.c b/crypto/drbg.c index 8ceb71699dea..ded86385ab6e 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -517,8 +517,7 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed, return ret; /* 10.2.1.2 step 5 */ - memcpy(drbg->C, temp, drbg_keylen(drbg)); - ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C, + ret = crypto_skcipher_setkey(drbg->ctr_handle, temp, drbg_keylen(drbg)); if (ret) goto out; -- cgit v1.2.3-70-g09d2 From 5a7de97309f5af4458b1a25a2a529a1a893c5269 Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Tue, 14 Jun 2016 16:14:58 +0300 Subject: crypto: rsa - return raw integers for the ASN.1 parser Return the raw key with no other processing so that the caller can copy it or MPI parse it, etc. The scope is to have only one ASN.1 parser for all RSA implementations. Update the RSA software implementation so that it does the MPI conversion on top. Signed-off-by: Tudor Ambarus Signed-off-by: Herbert Xu --- crypto/rsa.c | 105 +++++++++++++++++++++++++++++---------- crypto/rsa_helper.c | 111 +++++++++++++++--------------------------- include/crypto/internal/rsa.h | 22 ++++++--- 3 files changed, 135 insertions(+), 103 deletions(-) (limited to 'crypto') diff --git a/crypto/rsa.c b/crypto/rsa.c index 77d737f52147..dc692d43b666 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -10,16 +10,23 @@ */ #include +#include #include #include #include #include +struct rsa_mpi_key { + MPI n; + MPI e; + MPI d; +}; + /* * RSAEP function [RFC3447 sec 5.1.1] * c = m^e mod n; */ -static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m) +static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m) { /* (1) Validate 0 <= m < n */ if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0) @@ -33,7 +40,7 @@ static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m) * RSADP function [RFC3447 sec 5.1.2] * m = c^d mod n; */ -static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c) +static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c) { /* (1) Validate 0 <= c < n */ if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0) @@ -47,7 +54,7 @@ static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c) * RSASP1 function [RFC3447 sec 5.2.1] * s = m^d mod n */ -static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m) +static int _rsa_sign(const struct rsa_mpi_key *key, MPI s, MPI m) { /* (1) Validate 0 <= m < n */ if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0) @@ -61,7 +68,7 @@ static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m) * RSAVP1 function [RFC3447 sec 5.2.2] * m = s^e mod n; */ -static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s) +static int _rsa_verify(const struct rsa_mpi_key *key, MPI m, MPI s) { /* (1) Validate 0 <= s < n */ if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0) @@ -71,7 +78,7 @@ static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s) return mpi_powm(m, s, key->e, key->n); } -static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm) +static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm) { return akcipher_tfm_ctx(tfm); } @@ -79,7 +86,7 @@ static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm) static int rsa_enc(struct akcipher_request
*req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - const struct rsa_key *pkey = rsa_get_key(tfm); + const struct rsa_mpi_key *pkey = rsa_get_key(tfm); MPI m, c = mpi_alloc(0); int ret = 0; int sign; @@ -118,7 +125,7 @@ err_free_c: static int rsa_dec(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - const struct rsa_key *pkey = rsa_get_key(tfm); + const struct rsa_mpi_key *pkey = rsa_get_key(tfm); MPI c, m = mpi_alloc(0); int ret = 0; int sign; @@ -156,7 +163,7 @@ err_free_m: static int rsa_sign(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - const struct rsa_key *pkey = rsa_get_key(tfm); + const struct rsa_mpi_key *pkey = rsa_get_key(tfm); MPI m, s = mpi_alloc(0); int ret = 0; int sign; @@ -195,7 +202,7 @@ err_free_s: static int rsa_verify(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - const struct rsa_key *pkey = rsa_get_key(tfm); + const struct rsa_mpi_key *pkey = rsa_get_key(tfm); MPI s, m = mpi_alloc(0); int ret = 0; int sign; @@ -233,6 +240,16 @@ err_free_m: return ret; } +static void rsa_free_mpi_key(struct rsa_mpi_key *key) +{ + mpi_free(key->d); + mpi_free(key->e); + mpi_free(key->n); + key->d = NULL; + key->e = NULL; + key->n = NULL; +} + static int rsa_check_key_length(unsigned int len) { switch (len) { @@ -251,49 +268,87 @@ static int rsa_check_key_length(unsigned int len) static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { - struct rsa_key *pkey = akcipher_tfm_ctx(tfm); + struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm); + struct rsa_key raw_key = {0}; int ret; - ret = rsa_parse_pub_key(pkey, key, keylen); + /* Free the old MPI key if any */ + rsa_free_mpi_key(mpi_key); + + ret = rsa_parse_pub_key(&raw_key, key, keylen); if (ret) return ret; - if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) { - rsa_free_key(pkey); - ret = -EINVAL; + mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz); + if (!mpi_key->e) + goto err; + + mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz); + if (!mpi_key->n) + goto err; + + if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) { + rsa_free_mpi_key(mpi_key); + return -EINVAL; } - return ret; + + return 0; + +err: + rsa_free_mpi_key(mpi_key); + return -ENOMEM; } static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { - struct rsa_key *pkey = akcipher_tfm_ctx(tfm); + struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm); + struct rsa_key raw_key = {0}; int ret; - ret = rsa_parse_priv_key(pkey, key, keylen); + /* Free the old MPI key if any */ + rsa_free_mpi_key(mpi_key); + + ret = rsa_parse_priv_key(&raw_key, key, keylen); if (ret) return ret; - if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) { - rsa_free_key(pkey); - ret = -EINVAL; + mpi_key->d = mpi_read_raw_data(raw_key.d, raw_key.d_sz); + if (!mpi_key->d) + goto err; + + mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz); + if (!mpi_key->e) + goto err; + + mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz); + if (!mpi_key->n) + goto err; + + if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) { + rsa_free_mpi_key(mpi_key); + return -EINVAL; } - return ret; + + return 0; + +err: + rsa_free_mpi_key(mpi_key); + return -ENOMEM; } static int rsa_max_size(struct crypto_akcipher *tfm) { - struct rsa_key *pkey = akcipher_tfm_ctx(tfm); + struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm); return pkey->n ? 
mpi_get_size(pkey->n) : -EINVAL; } static void rsa_exit_tfm(struct crypto_akcipher *tfm) { - struct rsa_key *pkey = akcipher_tfm_ctx(tfm); + struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm); - rsa_free_key(pkey); + rsa_free_mpi_key(pkey); } static struct akcipher_alg rsa = { @@ -310,7 +365,7 @@ static struct akcipher_alg rsa = { .cra_driver_name = "rsa-generic", .cra_priority = 100, .cra_module = THIS_MODULE, - .cra_ctxsize = sizeof(struct rsa_key), + .cra_ctxsize = sizeof(struct rsa_mpi_key), }, }; diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c index d226f48d0907..583656af4fe2 100644 --- a/crypto/rsa_helper.c +++ b/crypto/rsa_helper.c @@ -22,20 +22,29 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct rsa_key *key = context; + const u8 *ptr = value; + size_t n_sz = vlen; - key->n = mpi_read_raw_data(value, vlen); - - if (!key->n) - return -ENOMEM; - - /* In FIPS mode only allow key size 2K & 3K */ - if (fips_enabled && (mpi_get_size(key->n) != 256 && - mpi_get_size(key->n) != 384)) { - pr_err("RSA: key size not allowed in FIPS mode\n"); - mpi_free(key->n); - key->n = NULL; + /* invalid key provided */ + if (!value || !vlen) return -EINVAL; + + if (fips_enabled) { + while (!*ptr && n_sz) { + ptr++; + n_sz--; + } + + /* In FIPS mode only allow key size 2K & 3K */ + if (n_sz != 256 && n_sz != 384) { + pr_err("RSA: key size not allowed in FIPS mode\n"); + return -EINVAL; + } } + + key->n = value; + key->n_sz = vlen; + return 0; } @@ -44,10 +53,12 @@ int rsa_get_e(void *context, size_t hdrlen, unsigned char tag, { struct rsa_key *key = context; - key->e = mpi_read_raw_data(value, vlen); + /* invalid key provided */ + if (!value || !key->n_sz || !vlen || vlen > key->n_sz) + return -EINVAL; - if (!key->e) - return -ENOMEM; + key->e = value; + key->e_sz = vlen; return 0; } @@ -57,46 +68,20 @@ int rsa_get_d(void *context, size_t hdrlen, unsigned char tag, { struct rsa_key *key = context; - key->d = mpi_read_raw_data(value, vlen); - - if (!key->d) - return -ENOMEM; - - /* In FIPS mode only allow key size 2K & 3K */ - if (fips_enabled && (mpi_get_size(key->d) != 256 && - mpi_get_size(key->d) != 384)) { - pr_err("RSA: key size not allowed in FIPS mode\n"); - mpi_free(key->d); - key->d = NULL; + /* invalid key provided */ + if (!value || !key->n_sz || !vlen || vlen > key->n_sz) return -EINVAL; - } - return 0; -} -static void free_mpis(struct rsa_key *key) -{ - mpi_free(key->n); - mpi_free(key->e); - mpi_free(key->d); - key->n = NULL; - key->e = NULL; - key->d = NULL; -} + key->d = value; + key->d_sz = vlen; -/** - * rsa_free_key() - frees rsa key allocated by rsa_parse_key() - * - * @rsa_key: struct rsa_key key representation - */ -void rsa_free_key(struct rsa_key *key) -{ - free_mpis(key); + return 0; } -EXPORT_SYMBOL_GPL(rsa_free_key); /** - * rsa_parse_pub_key() - extracts an rsa public key from BER encoded buffer - * and stores it in the provided struct rsa_key + * rsa_parse_pub_key() - decodes the BER encoded buffer and stores in the + * provided struct rsa_key, pointers to the raw key as is, + * so that the caller can copy it or MPI parse it, etc. 
* * @rsa_key: struct rsa_key key representation * @key: key in BER format @@ -107,23 +92,15 @@ EXPORT_SYMBOL_GPL(rsa_free_key); int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, unsigned int key_len) { - int ret; - - free_mpis(rsa_key); - ret = asn1_ber_decoder(&rsapubkey_decoder, rsa_key, key, key_len); - if (ret < 0) - goto error; - - return 0; -error: - free_mpis(rsa_key); - return ret; + return asn1_ber_decoder(&rsapubkey_decoder, rsa_key, key, key_len); } EXPORT_SYMBOL_GPL(rsa_parse_pub_key); /** - * rsa_parse_pub_key() - extracts an rsa private key from BER encoded buffer - * and stores it in the provided struct rsa_key + * rsa_parse_priv_key() - decodes the BER encoded buffer and stores in the + * provided struct rsa_key, pointers to the raw key + * as is, so that the caller can copy it or MPI parse it, + * etc. * * @rsa_key: struct rsa_key key representation * @key: key in BER format @@ -134,16 +111,6 @@ EXPORT_SYMBOL_GPL(rsa_parse_pub_key); int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, unsigned int key_len) { - int ret; - - free_mpis(rsa_key); - ret = asn1_ber_decoder(&rsaprivkey_decoder, rsa_key, key, key_len); - if (ret < 0) - goto error; - - return 0; -error: - free_mpis(rsa_key); - return ret; + return asn1_ber_decoder(&rsaprivkey_decoder, rsa_key, key, key_len); } EXPORT_SYMBOL_GPL(rsa_parse_priv_key); diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h index c7585bdecbc2..d6c042a2ee52 100644 --- a/include/crypto/internal/rsa.h +++ b/include/crypto/internal/rsa.h @@ -12,12 +12,24 @@ */ #ifndef _RSA_HELPER_ #define _RSA_HELPER_ -#include +#include +/** + * rsa_key - RSA key structure + * @n : RSA modulus raw byte stream + * @e : RSA public exponent raw byte stream + * @d : RSA private exponent raw byte stream + * @n_sz : length in bytes of RSA modulus n + * @e_sz : length in bytes of RSA public exponent + * @d_sz : length in bytes of RSA private exponent + */ struct rsa_key { - MPI n; - MPI e; - MPI d; + const u8 *n; + const u8 *e; + const u8 *d; + size_t n_sz; + size_t e_sz; + size_t d_sz; }; int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, @@ -26,7 +38,5 @@ int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, unsigned int key_len); -void rsa_free_key(struct rsa_key *rsa_key); - extern struct crypto_template rsa_pkcs1pad_tmpl; #endif -- cgit v1.2.3-70-g09d2 From 88f1d316b9d0a213a9590fb5da6a72c0ec7012de Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 15 Jun 2016 19:13:25 +0800 Subject: crypto: drbg - fix semicolon.cocci warnings crypto/drbg.c:1637:39-40: Unneeded semicolon Remove unneeded semicolon. 
Generated by: scripts/coccinelle/misc/semicolon.cocci CC: Stephan Mueller Signed-off-by: Fengguang Wu Acked-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/drbg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/drbg.c b/crypto/drbg.c index ded86385ab6e..8b39f509f0a6 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1632,7 +1632,7 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg) drbg->ctr_handle = NULL; if (drbg->ctr_req) - skcipher_request_free(drbg->ctr_req);; + skcipher_request_free(drbg->ctr_req); drbg->ctr_req = NULL; kfree(drbg->ctr_null_value_buf); -- cgit v1.2.3-70-g09d2 From b30bdfa86431afbafe15284a3ad5ac19b49b88e3 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 15 Jun 2016 22:27:05 +0800 Subject: crypto: gcm - Filter out async ghash if necessary As it is, if you ask for a sync gcm you may actually end up with an async one, because it does not filter out async implementations of ghash. This patch fixes this by adding the necessary filter when looking for ghash. Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/gcm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/gcm.c b/crypto/gcm.c index bec329b3de8d..d9ea5f9c0574 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -639,7 +639,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK); + CRYPTO_ALG_TYPE_AHASH_MASK | + crypto_requires_sync(algt->type, + algt->mask)); if (IS_ERR(ghash_alg)) return PTR_ERR(ghash_alg); -- cgit v1.2.3-70-g09d2 From 53964b9ee63b7075931b8df85307c449da564b50 Mon Sep 17 00:00:00 2001 From: Jeff Garzik Date: Fri, 17 Jun 2016 10:30:35 +0530 Subject: crypto: sha3 - Add SHA-3 hash algorithm This patch adds a software implementation of the SHA-3 algorithm. It is based on the original implementation posted in https://lwn.net/Articles/518415/, with additional changes to match the padding rules specified in the SHA-3 specification. Signed-off-by: Jeff Garzik Signed-off-by: Raveendra Padasalagi Signed-off-by: Herbert Xu --- crypto/Kconfig | 10 ++ crypto/Makefile | 1 + crypto/sha3_generic.c | 300 ++++++++++++++++++++++++++++++++++++++++++++++++++ include/crypto/sha3.h | 29 +++++ 4 files changed, 340 insertions(+) create mode 100644 crypto/sha3_generic.c create mode 100644 include/crypto/sha3.h (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index c903f1832f2c..6881d1a5f859 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -750,6 +750,16 @@ config CRYPTO_SHA512_SPARC64 SHA-512 secure hash standard (DFIPS 180-2) implemented using sparc64 crypto instructions, when available. +config CRYPTO_SHA3 + tristate "SHA3 digest algorithm" + select CRYPTO_HASH + help + SHA-3 secure hash standard (DFIPS 202). It's based on + cryptographic sponge function family called Keccak.
+ + References: + http://keccak.noekeon.org/ + config CRYPTO_TGR192 tristate "Tiger digest algorithms" select CRYPTO_HASH diff --git a/crypto/Makefile b/crypto/Makefile index 4f4ef7eaae3f..0b82c4753743 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -61,6 +61,7 @@ obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o +obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o obj-$(CONFIG_CRYPTO_WP512) += wp512.o obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c new file mode 100644 index 000000000000..62264397a2d2 --- /dev/null +++ b/crypto/sha3_generic.c @@ -0,0 +1,300 @@ +/* + * Cryptographic API. + * + * SHA-3, as specified in + * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf + * + * SHA-3 code by Jeff Garzik + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option)• + * any later version. + * + */ +#include +#include +#include +#include +#include +#include + +#define KECCAK_ROUNDS 24 + +#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) + +static const u64 keccakf_rndc[24] = { + 0x0000000000000001, 0x0000000000008082, 0x800000000000808a, + 0x8000000080008000, 0x000000000000808b, 0x0000000080000001, + 0x8000000080008081, 0x8000000000008009, 0x000000000000008a, + 0x0000000000000088, 0x0000000080008009, 0x000000008000000a, + 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, + 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, + 0x000000000000800a, 0x800000008000000a, 0x8000000080008081, + 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 +}; + +static const int keccakf_rotc[24] = { + 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, + 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 +}; + +static const int keccakf_piln[24] = { + 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, + 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 +}; + +/* update the state with given number of rounds */ + +static void keccakf(u64 st[25]) +{ + int i, j, round; + u64 t, bc[5]; + + for (round = 0; round < KECCAK_ROUNDS; round++) { + + /* Theta */ + for (i = 0; i < 5; i++) + bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] + ^ st[i + 20]; + + for (i = 0; i < 5; i++) { + t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1); + for (j = 0; j < 25; j += 5) + st[j + i] ^= t; + } + + /* Rho Pi */ + t = st[1]; + for (i = 0; i < 24; i++) { + j = keccakf_piln[i]; + bc[0] = st[j]; + st[j] = ROTL64(t, keccakf_rotc[i]); + t = bc[0]; + } + + /* Chi */ + for (j = 0; j < 25; j += 5) { + for (i = 0; i < 5; i++) + bc[i] = st[j + i]; + for (i = 0; i < 5; i++) + st[j + i] ^= (~bc[(i + 1) % 5]) & + bc[(i + 2) % 5]; + } + + /* Iota */ + st[0] ^= keccakf_rndc[round]; + } +} + +static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz) +{ + memset(sctx, 0, sizeof(*sctx)); + sctx->md_len = digest_sz; + sctx->rsiz = 200 - 2 * digest_sz; + sctx->rsizw = sctx->rsiz / 8; +} + +static int sha3_224_init(struct shash_desc *desc) +{ + struct sha3_state *sctx = shash_desc_ctx(desc); + + sha3_init(sctx, SHA3_224_DIGEST_SIZE); + return 0; +} + +static int sha3_256_init(struct shash_desc *desc) +{ + struct sha3_state *sctx = shash_desc_ctx(desc); + + sha3_init(sctx, SHA3_256_DIGEST_SIZE); + return 0; +} + +static int 
sha3_384_init(struct shash_desc *desc) +{ + struct sha3_state *sctx = shash_desc_ctx(desc); + + sha3_init(sctx, SHA3_384_DIGEST_SIZE); + return 0; +} + +static int sha3_512_init(struct shash_desc *desc) +{ + struct sha3_state *sctx = shash_desc_ctx(desc); + + sha3_init(sctx, SHA3_512_DIGEST_SIZE); + return 0; +} + +static int sha3_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha3_state *sctx = shash_desc_ctx(desc); + unsigned int done; + const u8 *src; + + done = 0; + src = data; + + if ((sctx->partial + len) > (sctx->rsiz - 1)) { + if (sctx->partial) { + done = -sctx->partial; + memcpy(sctx->buf + sctx->partial, data, + done + sctx->rsiz); + src = sctx->buf; + } + + do { + unsigned int i; + + for (i = 0; i < sctx->rsizw; i++) + sctx->st[i] ^= ((u64 *) src)[i]; + keccakf(sctx->st); + + done += sctx->rsiz; + src = data + done; + } while (done + (sctx->rsiz - 1) < len); + + sctx->partial = 0; + } + memcpy(sctx->buf + sctx->partial, src, len - done); + sctx->partial += (len - done); + + return 0; +} + +static int sha3_final(struct shash_desc *desc, u8 *out) +{ + struct sha3_state *sctx = shash_desc_ctx(desc); + unsigned int i, inlen = sctx->partial; + + sctx->buf[inlen++] = 0x06; + memset(sctx->buf + inlen, 0, sctx->rsiz - inlen); + sctx->buf[sctx->rsiz - 1] |= 0x80; + + for (i = 0; i < sctx->rsizw; i++) + sctx->st[i] ^= ((u64 *) sctx->buf)[i]; + + keccakf(sctx->st); + + for (i = 0; i < sctx->rsizw; i++) + sctx->st[i] = cpu_to_le64(sctx->st[i]); + + memcpy(out, sctx->st, sctx->md_len); + + memset(sctx, 0, sizeof(*sctx)); + return 0; +} + +static struct shash_alg sha3_224 = { + .digestsize = SHA3_224_DIGEST_SIZE, + .init = sha3_224_init, + .update = sha3_update, + .final = sha3_final, + .descsize = sizeof(struct sha3_state), + .base = { + .cra_name = "sha3-224", + .cra_driver_name = "sha3-224-generic", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA3_224_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg sha3_256 = { + .digestsize = SHA3_256_DIGEST_SIZE, + .init = sha3_256_init, + .update = sha3_update, + .final = sha3_final, + .descsize = sizeof(struct sha3_state), + .base = { + .cra_name = "sha3-256", + .cra_driver_name = "sha3-256-generic", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA3_256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg sha3_384 = { + .digestsize = SHA3_384_DIGEST_SIZE, + .init = sha3_384_init, + .update = sha3_update, + .final = sha3_final, + .descsize = sizeof(struct sha3_state), + .base = { + .cra_name = "sha3-384", + .cra_driver_name = "sha3-384-generic", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA3_384_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg sha3_512 = { + .digestsize = SHA3_512_DIGEST_SIZE, + .init = sha3_512_init, + .update = sha3_update, + .final = sha3_final, + .descsize = sizeof(struct sha3_state), + .base = { + .cra_name = "sha3-512", + .cra_driver_name = "sha3-512-generic", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA3_512_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static int __init sha3_generic_mod_init(void) +{ + int ret; + + ret = crypto_register_shash(&sha3_224); + if (ret < 0) + goto err_out; + ret = crypto_register_shash(&sha3_256); + if (ret < 0) + goto err_out_224; + ret = crypto_register_shash(&sha3_384); + if (ret < 0) + goto err_out_256; + ret = crypto_register_shash(&sha3_512); + if (ret < 0) + goto err_out_384; + + return 0; + +err_out_384: + 
crypto_unregister_shash(&sha3_384); +err_out_256: + crypto_unregister_shash(&sha3_256); +err_out_224: + crypto_unregister_shash(&sha3_224); +err_out: + return ret; +} + +static void __exit sha3_generic_mod_fini(void) +{ + crypto_unregister_shash(&sha3_224); + crypto_unregister_shash(&sha3_256); + crypto_unregister_shash(&sha3_384); + crypto_unregister_shash(&sha3_512); +} + +module_init(sha3_generic_mod_init); +module_exit(sha3_generic_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA-3 Secure Hash Algorithm"); + +MODULE_ALIAS_CRYPTO("sha3-224"); +MODULE_ALIAS_CRYPTO("sha3-224-generic"); +MODULE_ALIAS_CRYPTO("sha3-256"); +MODULE_ALIAS_CRYPTO("sha3-256-generic"); +MODULE_ALIAS_CRYPTO("sha3-384"); +MODULE_ALIAS_CRYPTO("sha3-384-generic"); +MODULE_ALIAS_CRYPTO("sha3-512"); +MODULE_ALIAS_CRYPTO("sha3-512-generic"); diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h new file mode 100644 index 000000000000..f4c9f68f5ffe --- /dev/null +++ b/include/crypto/sha3.h @@ -0,0 +1,29 @@ +/* + * Common values for SHA-3 algorithms + */ +#ifndef __CRYPTO_SHA3_H__ +#define __CRYPTO_SHA3_H__ + +#define SHA3_224_DIGEST_SIZE (224 / 8) +#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE) + +#define SHA3_256_DIGEST_SIZE (256 / 8) +#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE) + +#define SHA3_384_DIGEST_SIZE (384 / 8) +#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE) + +#define SHA3_512_DIGEST_SIZE (512 / 8) +#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE) + +struct sha3_state { + u64 st[25]; + unsigned int md_len; + unsigned int rsiz; + unsigned int rsizw; + + unsigned int partial; + u8 buf[SHA3_224_BLOCK_SIZE]; +}; + +#endif -- cgit v1.2.3-70-g09d2 From 79cc6ab8947bd238b64afddc56ed84ee65f012ef Mon Sep 17 00:00:00 2001 From: raveendra padasalagi Date: Fri, 17 Jun 2016 10:30:36 +0530 Subject: crypto: sha3 - Add SHA-3 tests in tcrypt Added support for SHA-3 algorithm tests in the tcrypt module, along with the related test vectors.
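For reference, the SHA3_*_BLOCK_SIZE macros above follow from the sponge construction: the Keccak state is 1600 bits (200 bytes), the capacity is twice the digest size, and the rate (block size) is what remains. A standalone sanity check of those numbers (illustrative only):

#include <stdio.h>

int main(void)
{
	/* Keccak state is 200 bytes; capacity = 2 * digest; rate = 200 - capacity */
	static const int bits[] = { 224, 256, 384, 512 };

	for (int i = 0; i < 4; i++) {
		int digest = bits[i] / 8;
		int rate = 200 - 2 * digest;

		printf("sha3-%d: digest %d bytes, block (rate) %d bytes\n",
		       bits[i], digest, rate);
	}
	return 0;
}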
Signed-off-by: Raveendra Padasalagi Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 53 ++++++++++++++++++++++- crypto/testmgr.c | 40 ++++++++++++++++++ crypto/testmgr.h | 125 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 217 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 579dce071463..4675459e82da 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -72,7 +72,8 @@ static char *check[] = { "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320", - "lzo", "cts", "zlib", NULL + "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512", + NULL }; struct tcrypt_result { @@ -1284,6 +1285,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) ret += tcrypt_test("crct10dif"); break; + case 48: + ret += tcrypt_test("sha3-224"); + break; + + case 49: + ret += tcrypt_test("sha3-256"); + break; + + case 50: + ret += tcrypt_test("sha3-384"); + break; + + case 51: + ret += tcrypt_test("sha3-512"); + break; + case 100: ret += tcrypt_test("hmac(md5)"); break; @@ -1691,6 +1708,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) test_hash_speed("poly1305", sec, poly1305_speed_template); if (mode > 300 && mode < 400) break; + case 322: + test_hash_speed("sha3-224", sec, generic_hash_speed_template); + if (mode > 300 && mode < 400) break; + + case 323: + test_hash_speed("sha3-256", sec, generic_hash_speed_template); + if (mode > 300 && mode < 400) break; + + case 324: + test_hash_speed("sha3-384", sec, generic_hash_speed_template); + if (mode > 300 && mode < 400) break; + + case 325: + test_hash_speed("sha3-512", sec, generic_hash_speed_template); + if (mode > 300 && mode < 400) break; + case 399: break; @@ -1770,6 +1803,24 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) test_ahash_speed("rmd320", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; + case 418: + test_ahash_speed("sha3-224", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 419: + test_ahash_speed("sha3-256", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 420: + test_ahash_speed("sha3-384", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + + case 421: + test_ahash_speed("sha3-512", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 499: break; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index c727fb0cb021..b773a563809b 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3658,6 +3658,46 @@ static const struct alg_test_desc alg_test_descs[] = { .count = SHA256_TEST_VECTORS } } + }, { + .alg = "sha3-224", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = { + .vecs = sha3_224_tv_template, + .count = SHA3_224_TEST_VECTORS + } + } + }, { + .alg = "sha3-256", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = { + .vecs = sha3_256_tv_template, + .count = SHA3_256_TEST_VECTORS + } + } + }, { + .alg = "sha3-384", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = { + .vecs = sha3_384_tv_template, + .count = SHA3_384_TEST_VECTORS + } + } + }, { + .alg = "sha3-512", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = { + .vecs = sha3_512_tv_template, + .count = SHA3_512_TEST_VECTORS + } + } }, { .alg = "sha384", .test = alg_test_hash, diff 
--git a/crypto/testmgr.h b/crypto/testmgr.h index 487ec880e889..b70e3c92bedd 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -376,6 +376,131 @@ static struct hash_testvec md4_tv_template [] = { }, }; +#define SHA3_224_TEST_VECTORS 3 +static struct hash_testvec sha3_224_tv_template[] = { + { + .plaintext = "", + .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7" + "\x3b\x6e\x15\x45\x4f\x0e\xb1\xab" + "\xd4\x59\x7f\x9a\x1b\x07\x8e\x3f" + "\x5b\x5a\x6b\xc7", + }, { + .plaintext = "a", + .psize = 1, + .digest = "\x9e\x86\xff\x69\x55\x7c\xa9\x5f" + "\x40\x5f\x08\x12\x69\x68\x5b\x38" + "\xe3\xa8\x19\xb3\x09\xee\x94\x2f" + "\x48\x2b\x6a\x8b", + }, { + .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl" + "jklmklmnlmnomnopnopq", + .psize = 56, + .digest = "\x8a\x24\x10\x8b\x15\x4a\xda\x21" + "\xc9\xfd\x55\x74\x49\x44\x79\xba" + "\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea" + "\xd0\xfc\xce\x33", + }, +}; + +#define SHA3_256_TEST_VECTORS 3 +static struct hash_testvec sha3_256_tv_template[] = { + { + .plaintext = "", + .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66" + "\x51\xc1\x47\x56\xa0\x61\xd6\x62" + "\xf5\x80\xff\x4d\xe4\x3b\x49\xfa" + "\x82\xd8\x0a\x4b\x80\xf8\x43\x4a", + }, { + .plaintext = "a", + .psize = 1, + .digest = "\x80\x08\x4b\xf2\xfb\xa0\x24\x75" + "\x72\x6f\xeb\x2c\xab\x2d\x82\x15" + "\xea\xb1\x4b\xc6\xbd\xd8\xbf\xb2" + "\xc8\x15\x12\x57\x03\x2e\xcd\x8b", + }, { + .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl" + "jklmklmnlmnomnopnopq", + .psize = 56, + .digest = "\x41\xc0\xdb\xa2\xa9\xd6\x24\x08" + "\x49\x10\x03\x76\xa8\x23\x5e\x2c" + "\x82\xe1\xb9\x99\x8a\x99\x9e\x21" + "\xdb\x32\xdd\x97\x49\x6d\x33\x76", + }, +}; + + +#define SHA3_384_TEST_VECTORS 3 +static struct hash_testvec sha3_384_tv_template[] = { + { + .plaintext = "", + .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d" + "\x01\x10\x7d\x85\x2e\x4c\x24\x85" + "\xc5\x1a\x50\xaa\xaa\x94\xfc\x61" + "\x99\x5e\x71\xbb\xee\x98\x3a\x2a" + "\xc3\x71\x38\x31\x26\x4a\xdb\x47" + "\xfb\x6b\xd1\xe0\x58\xd5\xf0\x04", + }, { + .plaintext = "a", + .psize = 1, + .digest = "\x18\x15\xf7\x74\xf3\x20\x49\x1b" + "\x48\x56\x9e\xfe\xc7\x94\xd2\x49" + "\xee\xb5\x9a\xae\x46\xd2\x2b\xf7" + "\x7d\xaf\xe2\x5c\x5e\xdc\x28\xd7" + "\xea\x44\xf9\x3e\xe1\x23\x4a\xa8" + "\x8f\x61\xc9\x19\x12\xa4\xcc\xd9", + }, { + .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl" + "jklmklmnlmnomnopnopq", + .psize = 56, + .digest = "\x99\x1c\x66\x57\x55\xeb\x3a\x4b" + "\x6b\xbd\xfb\x75\xc7\x8a\x49\x2e" + "\x8c\x56\xa2\x2c\x5c\x4d\x7e\x42" + "\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a" + "\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1" + "\x9e\xef\x51\xac\xd0\x65\x7c\x22", + }, +}; + + +#define SHA3_512_TEST_VECTORS 3 +static struct hash_testvec sha3_512_tv_template[] = { + { + .plaintext = "", + .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5" + "\xc8\xb5\x67\xdc\x18\x5a\x75\x6e" + "\x97\xc9\x82\x16\x4f\xe2\x58\x59" + "\xe0\xd1\xdc\xc1\x47\x5c\x80\xa6" + "\x15\xb2\x12\x3a\xf1\xf5\xf9\x4c" + "\x11\xe3\xe9\x40\x2c\x3a\xc5\x58" + "\xf5\x00\x19\x9d\x95\xb6\xd3\xe3" + "\x01\x75\x85\x86\x28\x1d\xcd\x26", + }, { + .plaintext = "a", + .psize = 1, + .digest = "\x69\x7f\x2d\x85\x61\x72\xcb\x83" + "\x09\xd6\xb8\xb9\x7d\xac\x4d\xe3" + "\x44\xb5\x49\xd4\xde\xe6\x1e\xdf" + "\xb4\x96\x2d\x86\x98\xb7\xfa\x80" + "\x3f\x4f\x93\xff\x24\x39\x35\x86" + "\xe2\x8b\x5b\x95\x7a\xc3\xd1\xd3" + "\x69\x42\x0c\xe5\x33\x32\x71\x2f" + "\x99\x7b\xd3\x36\xd0\x9a\xb0\x2a", + }, { + .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl" + "jklmklmnlmnomnopnopq", + .psize = 56, + .digest = "\x04\xa3\x71\xe8\x4e\xcf\xb5\xb8" + 
"\xb7\x7c\xb4\x86\x10\xfc\xa8\x18" + "\x2d\xd4\x57\xce\x6f\x32\x6a\x0f" + "\xd3\xd7\xec\x2f\x1e\x91\x63\x6d" + "\xee\x69\x1f\xbe\x0c\x98\x53\x02" + "\xba\x1b\x0d\x8d\xc7\x8c\x08\x63" + "\x46\xb5\x33\xb4\x9c\x03\x0d\x99" + "\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e", + }, +}; + + /* * MD5 test vectors from RFC1321 */ -- cgit v1.2.3-70-g09d2 From 01ac94580ac2f7076394212ca963da7409016796 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 17 Jun 2016 12:16:19 +0300 Subject: crypto: drbg - fix an error code in drbg_init_sym_kernel() We accidentally return PTR_ERR(NULL) which is success but we should return -ENOMEM. Fixes: 355912852115 ('crypto: drbg - use CTR AES instead of ECB AES') Signed-off-by: Dan Carpenter Acked-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/drbg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/drbg.c b/crypto/drbg.c index 8b39f509f0a6..f752da3a7c75 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1686,7 +1686,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) if (!req) { pr_info("DRBG: could not allocate request queue\n"); drbg_fini_sym_kernel(drbg); - return PTR_ERR(req); + return -ENOMEM; } drbg->ctr_req = req; skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, -- cgit v1.2.3-70-g09d2 From fd2efd93b6fcd981263477298cf1544b46683378 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 23 Jun 2016 18:06:02 +0800 Subject: Revert "crypto: user - no parsing of CRYPTO_MSG_GETALG" This patch commit eed1e1afd8d542d9644534c1b712599b5d680007 as it is only a workaround for the real bug and the proper fix has now been applied as 055ddaace03580455a7b7dbea8e93d62acee61fc ("crypto: user - re-add size check for CRYPTO_MSG_GETALG"). Signed-off-by: Herbert Xu --- crypto/crypto_user.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index f71960dea882..43fe85f20d57 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -516,12 +516,10 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return err; } - if (type != (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE)) { - err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, - CRYPTOCFGA_MAX, crypto_policy); - if (err < 0) - return err; - } + err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX, + crypto_policy); + if (err < 0) + return err; if (link->doit == NULL) return -EINVAL; -- cgit v1.2.3-70-g09d2 From 81760ea6a95ad4c41273a71052f61b9f087b5753 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Jun 2016 16:55:13 +0800 Subject: crypto: cryptd - Add helpers to check whether a tfm is queued This patch adds helpers to check whether a given tfm is currently queued. This is meant to be used by ablk_helper and similar entities to ensure that no reordering is introduced because of requests queued in cryptd with respect to requests being processed in softirq context. The per-cpu queue length limit is also increased to 1000 in line with network limits. 
Signed-off-by: Herbert Xu --- crypto/cryptd.c | 132 +++++++++++++++++++++++++++++++++++++++++------- include/crypto/cryptd.h | 5 ++ 2 files changed, 118 insertions(+), 19 deletions(-) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 7921251cdb13..cf8037a87b2d 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -31,7 +32,7 @@ #include #include -#define CRYPTD_MAX_CPU_QLEN 100 +#define CRYPTD_MAX_CPU_QLEN 1000 struct cryptd_cpu_queue { struct crypto_queue queue; @@ -58,6 +59,7 @@ struct aead_instance_ctx { }; struct cryptd_blkcipher_ctx { + atomic_t refcnt; struct crypto_blkcipher *child; }; @@ -66,6 +68,7 @@ struct cryptd_blkcipher_request_ctx { }; struct cryptd_hash_ctx { + atomic_t refcnt; struct crypto_shash *child; }; @@ -75,6 +78,7 @@ struct cryptd_hash_request_ctx { }; struct cryptd_aead_ctx { + atomic_t refcnt; struct crypto_aead *child; }; @@ -118,11 +122,29 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, { int cpu, err; struct cryptd_cpu_queue *cpu_queue; + struct crypto_tfm *tfm; + atomic_t *refcnt; + bool may_backlog; cpu = get_cpu(); cpu_queue = this_cpu_ptr(queue->cpu_queue); err = crypto_enqueue_request(&cpu_queue->queue, request); + + refcnt = crypto_tfm_ctx(request->tfm); + may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; + + if (err == -EBUSY && !may_backlog) + goto out_put_cpu; + queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); + + if (!atomic_read(refcnt)) + goto out_put_cpu; + + tfm = request->tfm; + atomic_inc(refcnt); + +out_put_cpu: put_cpu(); return err; @@ -206,7 +228,10 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, unsigned int len)) { struct cryptd_blkcipher_request_ctx *rctx; + struct cryptd_blkcipher_ctx *ctx; + struct crypto_ablkcipher *tfm; struct blkcipher_desc desc; + int refcnt; rctx = ablkcipher_request_ctx(req); @@ -222,9 +247,16 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, req->base.complete = rctx->complete; out: + tfm = crypto_ablkcipher_reqtfm(req); + ctx = crypto_ablkcipher_ctx(tfm); + refcnt = atomic_read(&ctx->refcnt); + local_bh_disable(); rctx->complete(&req->base, err); local_bh_enable(); + + if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) + crypto_free_ablkcipher(tfm); } static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) @@ -456,6 +488,21 @@ static int cryptd_hash_enqueue(struct ahash_request *req, return cryptd_enqueue_request(queue, &req->base); } +static void cryptd_hash_complete(struct ahash_request *req, int err) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + int refcnt = atomic_read(&ctx->refcnt); + + local_bh_disable(); + rctx->complete(&req->base, err); + local_bh_enable(); + + if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) + crypto_free_ahash(tfm); +} + static void cryptd_hash_init(struct crypto_async_request *req_async, int err) { struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); @@ -475,9 +522,7 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err) req->base.complete = rctx->complete; out: - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); + cryptd_hash_complete(req, err); } static int cryptd_hash_init_enqueue(struct ahash_request *req) @@ -500,9 +545,7 @@ static void 
cryptd_hash_update(struct crypto_async_request *req_async, int err) req->base.complete = rctx->complete; out: - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); + cryptd_hash_complete(req, err); } static int cryptd_hash_update_enqueue(struct ahash_request *req) @@ -523,9 +566,7 @@ static void cryptd_hash_final(struct crypto_async_request *req_async, int err) req->base.complete = rctx->complete; out: - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); + cryptd_hash_complete(req, err); } static int cryptd_hash_final_enqueue(struct ahash_request *req) @@ -546,9 +587,7 @@ static void cryptd_hash_finup(struct crypto_async_request *req_async, int err) req->base.complete = rctx->complete; out: - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); + cryptd_hash_complete(req, err); } static int cryptd_hash_finup_enqueue(struct ahash_request *req) @@ -575,9 +614,7 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) req->base.complete = rctx->complete; out: - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); + cryptd_hash_complete(req, err); } static int cryptd_hash_digest_enqueue(struct ahash_request *req) @@ -688,7 +725,10 @@ static void cryptd_aead_crypt(struct aead_request *req, int (*crypt)(struct aead_request *req)) { struct cryptd_aead_request_ctx *rctx; + struct cryptd_aead_ctx *ctx; crypto_completion_t compl; + struct crypto_aead *tfm; + int refcnt; rctx = aead_request_ctx(req); compl = rctx->complete; @@ -697,10 +737,18 @@ static void cryptd_aead_crypt(struct aead_request *req, goto out; aead_request_set_tfm(req, child); err = crypt( req ); + out: + tfm = crypto_aead_reqtfm(req); + ctx = crypto_aead_ctx(tfm); + refcnt = atomic_read(&ctx->refcnt); + local_bh_disable(); compl(&req->base, err); local_bh_enable(); + + if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) + crypto_free_aead(tfm); } static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err) @@ -883,6 +931,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask) { char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; + struct cryptd_blkcipher_ctx *ctx; struct crypto_tfm *tfm; if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, @@ -899,6 +948,9 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, return ERR_PTR(-EINVAL); } + ctx = crypto_tfm_ctx(tfm); + atomic_set(&ctx->refcnt, 1); + return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm)); } EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher); @@ -910,9 +962,20 @@ struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm) } EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child); +bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm) +{ + struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); + + return atomic_read(&ctx->refcnt) - 1; +} +EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued); + void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) { - crypto_free_ablkcipher(&tfm->base); + struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); + + if (atomic_dec_and_test(&ctx->refcnt)) + crypto_free_ablkcipher(&tfm->base); } EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); @@ -920,6 +983,7 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, u32 type, u32 mask) { char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; + struct cryptd_hash_ctx *ctx; struct crypto_ahash *tfm; if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, @@ -933,6 
+997,9 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, return ERR_PTR(-EINVAL); } + ctx = crypto_ahash_ctx(tfm); + atomic_set(&ctx->refcnt, 1); + return __cryptd_ahash_cast(tfm); } EXPORT_SYMBOL_GPL(cryptd_alloc_ahash); @@ -952,9 +1019,20 @@ struct shash_desc *cryptd_shash_desc(struct ahash_request *req) } EXPORT_SYMBOL_GPL(cryptd_shash_desc); +bool cryptd_ahash_queued(struct cryptd_ahash *tfm) +{ + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); + + return atomic_read(&ctx->refcnt) - 1; +} +EXPORT_SYMBOL_GPL(cryptd_ahash_queued); + void cryptd_free_ahash(struct cryptd_ahash *tfm) { - crypto_free_ahash(&tfm->base); + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); + + if (atomic_dec_and_test(&ctx->refcnt)) + crypto_free_ahash(&tfm->base); } EXPORT_SYMBOL_GPL(cryptd_free_ahash); @@ -962,6 +1040,7 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, u32 type, u32 mask) { char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; + struct cryptd_aead_ctx *ctx; struct crypto_aead *tfm; if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, @@ -974,6 +1053,10 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, crypto_free_aead(tfm); return ERR_PTR(-EINVAL); } + + ctx = crypto_aead_ctx(tfm); + atomic_set(&ctx->refcnt, 1); + return __cryptd_aead_cast(tfm); } EXPORT_SYMBOL_GPL(cryptd_alloc_aead); @@ -986,9 +1069,20 @@ struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm) } EXPORT_SYMBOL_GPL(cryptd_aead_child); +bool cryptd_aead_queued(struct cryptd_aead *tfm) +{ + struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); + + return atomic_read(&ctx->refcnt) - 1; +} +EXPORT_SYMBOL_GPL(cryptd_aead_queued); + void cryptd_free_aead(struct cryptd_aead *tfm) { - crypto_free_aead(&tfm->base); + struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); + + if (atomic_dec_and_test(&ctx->refcnt)) + crypto_free_aead(&tfm->base); } EXPORT_SYMBOL_GPL(cryptd_free_aead); diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h index 1547f540c920..bc792d5a9e88 100644 --- a/include/crypto/cryptd.h +++ b/include/crypto/cryptd.h @@ -31,6 +31,7 @@ static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast( struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask); struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); +bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm); void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); struct cryptd_ahash { @@ -48,6 +49,8 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, u32 type, u32 mask); struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); struct shash_desc *cryptd_shash_desc(struct ahash_request *req); +/* Must be called without moving CPUs. */ +bool cryptd_ahash_queued(struct cryptd_ahash *tfm); void cryptd_free_ahash(struct cryptd_ahash *tfm); struct cryptd_aead { @@ -64,6 +67,8 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, u32 type, u32 mask); struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm); +/* Must be called without moving CPUs. 
*/ +bool cryptd_aead_queued(struct cryptd_aead *tfm); void cryptd_free_aead(struct cryptd_aead *tfm); -- cgit v1.2.3-70-g09d2 From 88407a39b5169889fe95a9355891b98437aa3e50 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Jun 2016 16:55:15 +0800 Subject: crypto: ablk_helper - Fix cryptd reordering This patch fixes an old bug where requests can be reordered because some are processed by cryptd while others are processed directly in softirq context. The fix is to always postpone to cryptd if there are currently requests outstanding from the same tfm. Signed-off-by: Herbert Xu --- crypto/ablk_helper.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c index e1fcf53bb931..1441f07d0a19 100644 --- a/crypto/ablk_helper.c +++ b/crypto/ablk_helper.c @@ -71,7 +71,8 @@ int ablk_encrypt(struct ablkcipher_request *req) struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - if (!may_use_simd()) { + if (!may_use_simd() || + (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) { struct ablkcipher_request *cryptd_req = ablkcipher_request_ctx(req); @@ -90,7 +91,8 @@ int ablk_decrypt(struct ablkcipher_request *req) struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); - if (!may_use_simd()) { + if (!may_use_simd() || + (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) { struct ablkcipher_request *cryptd_req = ablkcipher_request_ctx(req); -- cgit v1.2.3-70-g09d2 From 331bf739c4f9992a73547d20bd8f2378b97d386a Mon Sep 17 00:00:00 2001 From: Megha Dey Date: Tue, 21 Jun 2016 18:21:46 -0700 Subject: crypto: sha1-mb - async implementation for sha1-mb Herbert wants the sha1-mb algorithm to have an async implementation: https://lkml.org/lkml/2016/4/5/286. Currently, sha1-mb uses an async interface for the outer algorithm and a sync interface for the inner algorithm. This patch introduces an async interface for the inner algorithm as well.
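Since the outer algorithm remains an ahash, callers drive both the old and new code through the standard asynchronous hash flow. A minimal sketch of that flow (an illustration only, not from the patch: the "sha1_mb" name and all example_* identifiers are assumptions, and the input must be in linearly mapped memory for sg_init_one()):

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_wait {
	struct completion done;
	int err;
};

static void example_complete(struct crypto_async_request *areq, int err)
{
	struct example_wait *w = areq->data;

	if (err == -EINPROGRESS)	/* backlogged request now running */
		return;
	w->err = err;
	complete(&w->done);
}

static int example_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct example_wait wait;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("sha1_mb", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&wait.done);
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_complete, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&wait.done);
		ret = wait.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}

The substantive change in the patch below is that the inner "__sha1-mb" algorithm is now registered and driven as an ahash too, instead of through a private shash path.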
Signed-off-by: Megha Dey Signed-off-by: Tim Chen Signed-off-by: Herbert Xu --- arch/x86/crypto/sha-mb/sha1_mb.c | 182 ++++++++++++++++++++++----------------- crypto/mcryptd.c | 132 ++++++++++++---------------- include/crypto/internal/hash.h | 12 +-- include/crypto/mcryptd.h | 8 +- 4 files changed, 165 insertions(+), 169 deletions(-) (limited to 'crypto') diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c index 0a464919542c..669cc37268e1 100644 --- a/arch/x86/crypto/sha-mb/sha1_mb.c +++ b/arch/x86/crypto/sha-mb/sha1_mb.c @@ -80,10 +80,10 @@ struct sha1_mb_ctx { static inline struct mcryptd_hash_request_ctx *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx) { - struct shash_desc *desc; + struct ahash_request *areq; - desc = container_of((void *) hash_ctx, struct shash_desc, __ctx); - return container_of(desc, struct mcryptd_hash_request_ctx, desc); + areq = container_of((void *) hash_ctx, struct ahash_request, __ctx); + return container_of(areq, struct mcryptd_hash_request_ctx, areq); } static inline struct ahash_request @@ -93,7 +93,7 @@ static inline struct ahash_request } static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, - struct shash_desc *desc) + struct ahash_request *areq) { rctx->flag = HASH_UPDATE; } @@ -375,9 +375,9 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr) } } -static int sha1_mb_init(struct shash_desc *desc) +static int sha1_mb_init(struct ahash_request *areq) { - struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); + struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); hash_ctx_init(sctx); sctx->job.result_digest[0] = SHA1_H0; @@ -395,7 +395,7 @@ static int sha1_mb_init(struct shash_desc *desc) static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx) { int i; - struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc); + struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq); __be32 *dst = (__be32 *) rctx->out; for (i = 0; i < 5; ++i) @@ -427,7 +427,7 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx, } sha_ctx = (struct sha1_hash_ctx *) - shash_desc_ctx(&rctx->desc); + ahash_request_ctx(&rctx->areq); kernel_fpu_begin(); sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); @@ -519,11 +519,10 @@ static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx, mcryptd_arm_flusher(cstate, delay); } -static int sha1_mb_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int sha1_mb_update(struct ahash_request *areq) { struct mcryptd_hash_request_ctx *rctx = - container_of(desc, struct mcryptd_hash_request_ctx, desc); + container_of(areq, struct mcryptd_hash_request_ctx, areq); struct mcryptd_alg_cstate *cstate = this_cpu_ptr(sha1_mb_alg_state.alg_cstate); @@ -539,7 +538,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data, } /* need to init context */ - req_ctx_init(rctx, desc); + req_ctx_init(rctx, areq); nbytes = crypto_ahash_walk_first(req, &rctx->walk); @@ -552,7 +551,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data, rctx->flag |= HASH_DONE; /* submit */ - sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); + sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); sha1_mb_add_list(rctx, cstate); kernel_fpu_begin(); sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, @@ -579,11 +578,10 @@ done: return ret; } -static int sha1_mb_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) +static int sha1_mb_finup(struct 
ahash_request *areq) { struct mcryptd_hash_request_ctx *rctx = - container_of(desc, struct mcryptd_hash_request_ctx, desc); + container_of(areq, struct mcryptd_hash_request_ctx, areq); struct mcryptd_alg_cstate *cstate = this_cpu_ptr(sha1_mb_alg_state.alg_cstate); @@ -598,7 +596,7 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data, } /* need to init context */ - req_ctx_init(rctx, desc); + req_ctx_init(rctx, areq); nbytes = crypto_ahash_walk_first(req, &rctx->walk); @@ -611,11 +609,10 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data, rctx->flag |= HASH_DONE; flag = HASH_LAST; } - rctx->out = out; /* submit */ rctx->flag |= HASH_FINAL; - sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); + sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); sha1_mb_add_list(rctx, cstate); kernel_fpu_begin(); @@ -641,10 +638,10 @@ done: return ret; } -static int sha1_mb_final(struct shash_desc *desc, u8 *out) +static int sha1_mb_final(struct ahash_request *areq) { struct mcryptd_hash_request_ctx *rctx = - container_of(desc, struct mcryptd_hash_request_ctx, desc); + container_of(areq, struct mcryptd_hash_request_ctx, areq); struct mcryptd_alg_cstate *cstate = this_cpu_ptr(sha1_mb_alg_state.alg_cstate); @@ -659,12 +656,11 @@ static int sha1_mb_final(struct shash_desc *desc, u8 *out) } /* need to init context */ - req_ctx_init(rctx, desc); + req_ctx_init(rctx, areq); - rctx->out = out; rctx->flag |= HASH_DONE | HASH_FINAL; - sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); + sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); /* flag HASH_FINAL and 0 data size */ sha1_mb_add_list(rctx, cstate); kernel_fpu_begin(); @@ -691,48 +687,98 @@ done: return ret; } -static int sha1_mb_export(struct shash_desc *desc, void *out) +static int sha1_mb_export(struct ahash_request *areq, void *out) { - struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); + struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); memcpy(out, sctx, sizeof(*sctx)); return 0; } -static int sha1_mb_import(struct shash_desc *desc, const void *in) +static int sha1_mb_import(struct ahash_request *areq, const void *in) { - struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); + struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); memcpy(sctx, in, sizeof(*sctx)); return 0; } +static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm) +{ + struct mcryptd_ahash *mcryptd_tfm; + struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); + struct mcryptd_hash_ctx *mctx; -static struct shash_alg sha1_mb_shash_alg = { - .digestsize = SHA1_DIGEST_SIZE, + mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", + CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(mcryptd_tfm)) + return PTR_ERR(mcryptd_tfm); + mctx = crypto_ahash_ctx(&mcryptd_tfm->base); + mctx->alg_state = &sha1_mb_alg_state; + ctx->mcryptd_tfm = mcryptd_tfm; + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct ahash_request) + + crypto_ahash_reqsize(&mcryptd_tfm->base)); + + return 0; +} + +static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm) +{ + struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); + + mcryptd_free_ahash(ctx->mcryptd_tfm); +} + +static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm) +{ + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct ahash_request) + + sizeof(struct sha1_hash_ctx)); + + return 0; +} + +static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm) +{ + struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); + + mcryptd_free_ahash(ctx->mcryptd_tfm); +} + +static struct ahash_alg 
sha1_mb_areq_alg = { .init = sha1_mb_init, .update = sha1_mb_update, .final = sha1_mb_final, .finup = sha1_mb_finup, .export = sha1_mb_export, .import = sha1_mb_import, - .descsize = sizeof(struct sha1_hash_ctx), - .statesize = sizeof(struct sha1_hash_ctx), - .base = { - .cra_name = "__sha1-mb", - .cra_driver_name = "__intel_sha1-mb", - .cra_priority = 100, - /* - * use ASYNC flag as some buffers in multi-buffer - * algo may not have completed before hashing thread sleep - */ - .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC | - CRYPTO_ALG_INTERNAL, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list), + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_hash_ctx), + .base = { + .cra_name = "__sha1-mb", + .cra_driver_name = "__intel_sha1-mb", + .cra_priority = 100, + /* + * use ASYNC flag as some buffers in multi-buffer + * algo may not have completed before hashing thread + * sleep + */ + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_INTERNAL, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT + (sha1_mb_areq_alg.halg.base.cra_list), + .cra_init = sha1_mb_areq_init_tfm, + .cra_exit = sha1_mb_areq_exit_tfm, + .cra_ctxsize = sizeof(struct sha1_hash_ctx), + } } }; @@ -817,46 +863,20 @@ static int sha1_mb_async_import(struct ahash_request *req, const void *in) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - struct crypto_shash *child = mcryptd_ahash_child(mcryptd_tfm); + struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm); struct mcryptd_hash_request_ctx *rctx; - struct shash_desc *desc; + struct ahash_request *areq; memcpy(mcryptd_req, req, sizeof(*req)); ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); rctx = ahash_request_ctx(mcryptd_req); - desc = &rctx->desc; - desc->tfm = child; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; - - return crypto_ahash_import(mcryptd_req, in); -} - -static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm) -{ - struct mcryptd_ahash *mcryptd_tfm; - struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); - struct mcryptd_hash_ctx *mctx; + areq = &rctx->areq; - mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", - CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL); - if (IS_ERR(mcryptd_tfm)) - return PTR_ERR(mcryptd_tfm); - mctx = crypto_ahash_ctx(&mcryptd_tfm->base); - mctx->alg_state = &sha1_mb_alg_state; - ctx->mcryptd_tfm = mcryptd_tfm; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - crypto_ahash_reqsize(&mcryptd_tfm->base)); - - return 0; -} - -static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); + ahash_request_set_tfm(areq, child); + ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP, + rctx->complete, req); - mcryptd_free_ahash(ctx->mcryptd_tfm); + return crypto_ahash_import(mcryptd_req, in); } static struct ahash_alg sha1_mb_async_alg = { @@ -965,7 +985,7 @@ static int __init sha1_mb_mod_init(void) } sha1_mb_alg_state.flusher = &sha1_mb_flusher; - err = crypto_register_shash(&sha1_mb_shash_alg); + err = crypto_register_ahash(&sha1_mb_areq_alg); if (err) goto err2; err = crypto_register_ahash(&sha1_mb_async_alg); @@ -975,7 +995,7 @@ static int __init sha1_mb_mod_init(void) return 0; err1: - crypto_unregister_shash(&sha1_mb_shash_alg); + 
crypto_unregister_ahash(&sha1_mb_areq_alg); err2: for_each_possible_cpu(cpu) { cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); @@ -991,7 +1011,7 @@ static void __exit sha1_mb_mod_fini(void) struct mcryptd_alg_cstate *cpu_state; crypto_unregister_ahash(&sha1_mb_async_alg); - crypto_unregister_shash(&sha1_mb_shash_alg); + crypto_unregister_ahash(&sha1_mb_areq_alg); for_each_possible_cpu(cpu) { cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); kfree(cpu_state->mgr); diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index c4eb9da49d4f..86fb59b109a9 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -41,7 +41,7 @@ struct mcryptd_flush_list { static struct mcryptd_flush_list __percpu *mcryptd_flist; struct hashd_instance_ctx { - struct crypto_shash_spawn spawn; + struct crypto_ahash_spawn spawn; struct mcryptd_queue *queue; }; @@ -272,18 +272,18 @@ static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); - struct crypto_shash_spawn *spawn = &ictx->spawn; + struct crypto_ahash_spawn *spawn = &ictx->spawn; struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_shash *hash; + struct crypto_ahash *hash; - hash = crypto_spawn_shash(spawn); + hash = crypto_spawn_ahash(spawn); if (IS_ERR(hash)) return PTR_ERR(hash); ctx->child = hash; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct mcryptd_hash_request_ctx) + - crypto_shash_descsize(hash)); + crypto_ahash_reqsize(hash)); return 0; } @@ -291,21 +291,21 @@ static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm) { struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); - crypto_free_shash(ctx->child); + crypto_free_ahash(ctx->child); } static int mcryptd_hash_setkey(struct crypto_ahash *parent, const u8 *key, unsigned int keylen) { struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); - struct crypto_shash *child = ctx->child; + struct crypto_ahash *child = ctx->child; int err; - crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & + crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - err = crypto_shash_setkey(child, key, keylen); - crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & + err = crypto_ahash_setkey(child, key, keylen); + crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) & CRYPTO_TFM_RES_MASK); return err; } @@ -331,20 +331,20 @@ static int mcryptd_hash_enqueue(struct ahash_request *req, static void mcryptd_hash_init(struct crypto_async_request *req_async, int err) { struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); - struct crypto_shash *child = ctx->child; + struct crypto_ahash *child = ctx->child; struct ahash_request *req = ahash_request_cast(req_async); struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - struct shash_desc *desc = &rctx->desc; + struct ahash_request *desc = &rctx->areq; if (unlikely(err == -EINPROGRESS)) goto out; - desc->tfm = child; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_tfm(desc, child); + ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP, + rctx->complete, req_async); - err = crypto_shash_init(desc); - - req->base.complete = rctx->complete; + rctx->out = req->result; + err = crypto_ahash_init(desc); out: local_bh_disable(); @@ -365,7 +365,8 @@ static void mcryptd_hash_update(struct crypto_async_request 
*req_async, int err) if (unlikely(err == -EINPROGRESS)) goto out; - err = shash_ahash_mcryptd_update(req, &rctx->desc); + rctx->out = req->result; + err = ahash_mcryptd_update(&rctx->areq); if (err) { req->base.complete = rctx->complete; goto out; @@ -391,7 +392,8 @@ static void mcryptd_hash_final(struct crypto_async_request *req_async, int err) if (unlikely(err == -EINPROGRESS)) goto out; - err = shash_ahash_mcryptd_final(req, &rctx->desc); + rctx->out = req->result; + err = ahash_mcryptd_final(&rctx->areq); if (err) { req->base.complete = rctx->complete; goto out; @@ -416,8 +418,8 @@ static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err) if (unlikely(err == -EINPROGRESS)) goto out; - - err = shash_ahash_mcryptd_finup(req, &rctx->desc); + rctx->out = req->result; + err = ahash_mcryptd_finup(&rctx->areq); if (err) { req->base.complete = rctx->complete; @@ -439,25 +441,21 @@ static int mcryptd_hash_finup_enqueue(struct ahash_request *req) static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err) { struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); - struct crypto_shash *child = ctx->child; + struct crypto_ahash *child = ctx->child; struct ahash_request *req = ahash_request_cast(req_async); struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - struct shash_desc *desc = &rctx->desc; + struct ahash_request *desc = &rctx->areq; if (unlikely(err == -EINPROGRESS)) goto out; - desc->tfm = child; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */ - - err = shash_ahash_mcryptd_digest(req, desc); + ahash_request_set_tfm(desc, child); + ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP, + rctx->complete, req_async); - if (err) { - req->base.complete = rctx->complete; - goto out; - } + rctx->out = req->result; + err = ahash_mcryptd_digest(desc); - return; out: local_bh_disable(); rctx->complete(&req->base, err); @@ -473,14 +471,14 @@ static int mcryptd_hash_export(struct ahash_request *req, void *out) { struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - return crypto_shash_export(&rctx->desc, out); + return crypto_ahash_export(&rctx->areq, out); } static int mcryptd_hash_import(struct ahash_request *req, const void *in) { struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - return crypto_shash_import(&rctx->desc, in); + return crypto_ahash_import(&rctx->areq, in); } static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, @@ -488,7 +486,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, { struct hashd_instance_ctx *ctx; struct ahash_instance *inst; - struct shash_alg *salg; + struct hash_alg_common *halg; struct crypto_alg *alg; u32 type = 0; u32 mask = 0; @@ -496,11 +494,11 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, mcryptd_check_internal(tb, &type, &mask); - salg = shash_attr_alg(tb[1], type, mask); - if (IS_ERR(salg)) - return PTR_ERR(salg); + halg = ahash_attr_alg(tb[1], type, mask); + if (IS_ERR(halg)) + return PTR_ERR(halg); - alg = &salg->base; + alg = &halg->base; pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name); inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(), sizeof(*ctx)); @@ -511,7 +509,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, ctx = ahash_instance_ctx(inst); ctx->queue = queue; - err = crypto_init_shash_spawn(&ctx->spawn, salg, + err = crypto_init_ahash_spawn(&ctx->spawn, halg, ahash_crypto_instance(inst)); 
if (err) goto out_free_inst; @@ -521,8 +519,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, type |= CRYPTO_ALG_INTERNAL; inst->alg.halg.base.cra_flags = type; - inst->alg.halg.digestsize = salg->digestsize; - inst->alg.halg.statesize = salg->statesize; + inst->alg.halg.digestsize = halg->digestsize; + inst->alg.halg.statesize = halg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; @@ -539,7 +537,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, err = ahash_register_instance(tmpl, inst); if (err) { - crypto_drop_shash(&ctx->spawn); + crypto_drop_ahash(&ctx->spawn); out_free_inst: kfree(inst); } @@ -575,7 +573,7 @@ static void mcryptd_free(struct crypto_instance *inst) switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AHASH: - crypto_drop_shash(&hctx->spawn); + crypto_drop_ahash(&hctx->spawn); kfree(ahash_instance(inst)); return; default: @@ -612,55 +610,38 @@ struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, } EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash); -int shash_ahash_mcryptd_digest(struct ahash_request *req, - struct shash_desc *desc) +int ahash_mcryptd_digest(struct ahash_request *desc) { int err; - err = crypto_shash_init(desc) ?: - shash_ahash_mcryptd_finup(req, desc); + err = crypto_ahash_init(desc) ?: + ahash_mcryptd_finup(desc); return err; } -EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest); -int shash_ahash_mcryptd_update(struct ahash_request *req, - struct shash_desc *desc) +int ahash_mcryptd_update(struct ahash_request *desc) { - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - /* alignment is to be done by multi-buffer crypto algorithm if needed */ - return shash->update(desc, NULL, 0); + return crypto_ahash_update(desc); } -EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update); -int shash_ahash_mcryptd_finup(struct ahash_request *req, - struct shash_desc *desc) +int ahash_mcryptd_finup(struct ahash_request *desc) { - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - /* alignment is to be done by multi-buffer crypto algorithm if needed */ - return shash->finup(desc, NULL, 0, req->result); + return crypto_ahash_finup(desc); } -EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup); -int shash_ahash_mcryptd_final(struct ahash_request *req, - struct shash_desc *desc) +int ahash_mcryptd_final(struct ahash_request *desc) { - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - /* alignment is to be done by multi-buffer crypto algorithm if needed */ - return shash->final(desc, req->result); + return crypto_ahash_final(desc); } -EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final); -struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) +struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) { struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); @@ -668,12 +649,12 @@ struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) } EXPORT_SYMBOL_GPL(mcryptd_ahash_child); -struct shash_desc *mcryptd_shash_desc(struct ahash_request *req) +struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req) { struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - return &rctx->desc; + return &rctx->areq; } -EXPORT_SYMBOL_GPL(mcryptd_shash_desc); +EXPORT_SYMBOL_GPL(mcryptd_ahash_desc); void mcryptd_free_ahash(struct mcryptd_ahash *tfm) { @@ -681,7 +662,6 @@ 
void mcryptd_free_ahash(struct mcryptd_ahash *tfm) } EXPORT_SYMBOL_GPL(mcryptd_free_ahash); - static int __init mcryptd_init(void) { int err, cpu; diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 49dae16f8929..1d4f365d8f03 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -114,14 +114,10 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); -int shash_ahash_mcryptd_update(struct ahash_request *req, - struct shash_desc *desc); -int shash_ahash_mcryptd_final(struct ahash_request *req, - struct shash_desc *desc); -int shash_ahash_mcryptd_finup(struct ahash_request *req, - struct shash_desc *desc); -int shash_ahash_mcryptd_digest(struct ahash_request *req, - struct shash_desc *desc); +int ahash_mcryptd_update(struct ahash_request *desc); +int ahash_mcryptd_final(struct ahash_request *desc); +int ahash_mcryptd_finup(struct ahash_request *desc); +int ahash_mcryptd_digest(struct ahash_request *desc); int crypto_init_shash_ops_async(struct crypto_tfm *tfm); diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h index c23ee1f7ee80..4a53c0d38cd2 100644 --- a/include/crypto/mcryptd.h +++ b/include/crypto/mcryptd.h @@ -39,7 +39,7 @@ struct mcryptd_instance_ctx { }; struct mcryptd_hash_ctx { - struct crypto_shash *child; + struct crypto_ahash *child; struct mcryptd_alg_state *alg_state; }; @@ -59,13 +59,13 @@ struct mcryptd_hash_request_ctx { struct crypto_hash_walk walk; u8 *out; int flag; - struct shash_desc desc; + struct ahash_request areq; }; struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, u32 type, u32 mask); -struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); -struct shash_desc *mcryptd_shash_desc(struct ahash_request *req); +struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); +struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req); void mcryptd_free_ahash(struct mcryptd_ahash *tfm); void mcryptd_flusher(struct work_struct *work); -- cgit v1.2.3-70-g09d2 From 4e5f2c400765e3a3ce512dc1ae890bac53401798 Mon Sep 17 00:00:00 2001 From: Salvatore Benedetto Date: Wed, 22 Jun 2016 17:49:13 +0100 Subject: crypto: kpp - Key-agreement Protocol Primitives API (KPP) Add key-agreement protocol primitives (kpp) API which allows implementing primitives required by protocols such as DH and ECDH. The API is composed mainly of the following functions * set_secret() - It allows the user to set his secret, also referred to as his private key, along with the parameters known to both parties involved in the key-agreement session. * generate_public_key() - It generates the public key to be sent to the other counterpart involved in the key-agreement session.
The function has to be called after set_secret(). * compute_shared_secret() - It generates the shared secret for the session. Other functions such as init() and exit() are provided to allow cryptographic hardware to be initialized properly before use Signed-off-by: Salvatore Benedetto Signed-off-by: Herbert Xu --- crypto/Kconfig | 10 ++ crypto/Makefile | 1 + crypto/crypto_user.c | 20 +++ crypto/kpp.c | 123 +++++++++++++++ include/crypto/internal/kpp.h | 64 ++++++++ include/crypto/kpp.h | 328 ++++++++++++++++++++++++++++++++++++++++ include/linux/crypto.h | 1 + include/uapi/linux/cryptouser.h | 5 + 8 files changed, 552 insertions(+) create mode 100644 crypto/kpp.c create mode 100644 include/crypto/internal/kpp.h create mode 100644 include/crypto/kpp.h (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 6881d1a5f859..e72c4270173d 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -93,6 +93,15 @@ config CRYPTO_AKCIPHER select CRYPTO_AKCIPHER2 select CRYPTO_ALGAPI +config CRYPTO_KPP2 + tristate + select CRYPTO_ALGAPI2 + +config CRYPTO_KPP + tristate + select CRYPTO_ALGAPI + select CRYPTO_KPP2 + config CRYPTO_RSA tristate "RSA algorithm" select CRYPTO_AKCIPHER @@ -115,6 +124,7 @@ config CRYPTO_MANAGER2 select CRYPTO_HASH2 select CRYPTO_BLKCIPHER2 select CRYPTO_AKCIPHER2 + select CRYPTO_KPP2 config CRYPTO_USER tristate "Userspace cryptographic algorithm configuration" diff --git a/crypto/Makefile b/crypto/Makefile index 0b82c4753743..07b0f51bd645 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -30,6 +30,7 @@ crypto_hash-y += shash.o obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o +obj-$(CONFIG_CRYPTO_KPP2) += kpp.o $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 43fe85f20d57..d28513fb5a90 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "internal.h" @@ -126,6 +127,21 @@ nla_put_failure: return -EMSGSIZE; } +static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_kpp rkpp; + + strncpy(rkpp.type, "kpp", sizeof(rkpp.type)); + + if (nla_put(skb, CRYPTOCFGA_REPORT_KPP, + sizeof(struct crypto_report_kpp), &rkpp)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { @@ -176,6 +192,10 @@ static int crypto_report_one(struct crypto_alg *alg, goto nla_put_failure; break; + case CRYPTO_ALG_TYPE_KPP: + if (crypto_report_kpp(skb, alg)) + goto nla_put_failure; + break; } out: diff --git a/crypto/kpp.c b/crypto/kpp.c new file mode 100644 index 000000000000..d36ce05eee43 --- /dev/null +++ b/crypto/kpp.c @@ -0,0 +1,123 @@ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version.
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +#ifdef CONFIG_NET +static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_kpp rkpp; + + strncpy(rkpp.type, "kpp", sizeof(rkpp.type)); + + if (nla_put(skb, CRYPTOCFGA_REPORT_KPP, + sizeof(struct crypto_report_kpp), &rkpp)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} +#else +static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + return -ENOSYS; +} +#endif + +static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); + +static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg) +{ + seq_puts(m, "type : kpp\n"); +} + +static void crypto_kpp_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm); + struct kpp_alg *alg = crypto_kpp_alg(kpp); + + alg->exit(kpp); +} + +static int crypto_kpp_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm); + struct kpp_alg *alg = crypto_kpp_alg(kpp); + + if (alg->exit) + kpp->base.exit = crypto_kpp_exit_tfm; + + if (alg->init) + return alg->init(kpp); + + return 0; +} + +static const struct crypto_type crypto_kpp_type = { + .extsize = crypto_alg_extsize, + .init_tfm = crypto_kpp_init_tfm, +#ifdef CONFIG_PROC_FS + .show = crypto_kpp_show, +#endif + .report = crypto_kpp_report, + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_KPP, + .tfmsize = offsetof(struct crypto_kpp, base), +}; + +struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_kpp_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_kpp); + +static void kpp_prepare_alg(struct kpp_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + base->cra_type = &crypto_kpp_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_KPP; +} + +int crypto_register_kpp(struct kpp_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + kpp_prepare_alg(alg); + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_kpp); + +void crypto_unregister_kpp(struct kpp_alg *alg) +{ + crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_kpp); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Key-agreement Protocol Primitives"); diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h new file mode 100644 index 000000000000..ad3acf3649be --- /dev/null +++ b/include/crypto/internal/kpp.h @@ -0,0 +1,64 @@ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_KPP_INT_H +#define _CRYPTO_KPP_INT_H +#include +#include + +/* + * Transform internal helpers. 
+ */ +static inline void *kpp_request_ctx(struct kpp_request *req) +{ + return req->__ctx; +} + +static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void kpp_request_complete(struct kpp_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *kpp_alg_name(struct crypto_kpp *tfm) +{ + return crypto_kpp_tfm(tfm)->__crt_alg->cra_name; +} + +/** + * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm + * + * Function registers an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_kpp(struct kpp_alg *alg); + +/** + * crypto_unregister_kpp() -- Unregister key-agreement protocol primitive + * algorithm + * + * Function unregisters an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_kpp(struct kpp_alg *alg); + +#endif diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h new file mode 100644 index 000000000000..4fa897f3366b --- /dev/null +++ b/include/crypto/kpp.h @@ -0,0 +1,328 @@ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_KPP_ +#define _CRYPTO_KPP_ +#include + +/** + * struct kpp_request + * + * @base: Common attributes for async crypto requests + * @src: Source data + * @dst: Destination data + * @src_len: Size of the input buffer + * @dst_len: Size of the output buffer. It needs to be at least + * as big as the expected result depending on the operation + * After operation it will be updated with the actual size of the + * result. In case of error where the dst sgl size was insufficient, + * it will be updated to the size required for the operation. + * @__ctx: Start of private context data + */ +struct kpp_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_kpp - user-instantiated object which encapsulate + * algorithms and core processing logic + * + * @base: Common crypto API algorithm data structure + */ +struct crypto_kpp { + struct crypto_tfm base; +}; + +/** + * struct kpp_alg - generic key-agreement protocol primitives + * + * @set_secret: Function invokes the protocol specific function to + * store the secret private key along with parameters. + * The implementation knows how to decode the buffer + * @generate_public_key: Function generates the public key to be sent to the + * counterpart. In case of error, where output is not big + * enough req->dst_len will be updated to the size + * required + * @compute_shared_secret: Function computes the shared secret as defined by + * the algorithm. The result is given back to the user. + * In case of error, where output is not big enough, + * req->dst_len will be updated to the size required + * @max_size: Function returns the size of the output buffer + * @init: Initialize the object. This is called only once at + * instantiation time.
In case the cryptographic hardware + * needs to be initialized. Software fallback should be + * put in place here. + * @exit: Undo everything @init did. + * + * @reqsize: Request context size required by algorithm + * implementation + * @base: Common crypto API algorithm data structure + */ +struct kpp_alg { + int (*set_secret)(struct crypto_kpp *tfm, void *buffer, + unsigned int len); + int (*generate_public_key)(struct kpp_request *req); + int (*compute_shared_secret)(struct kpp_request *req); + + int (*max_size)(struct crypto_kpp *tfm); + + int (*init)(struct crypto_kpp *tfm); + void (*exit)(struct crypto_kpp *tfm); + + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Generic Key-agreement Protocol Primitives API + * + * The KPP API is used with the algorithm type + * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto) + */ + +/** + * crypto_alloc_kpp() - allocate KPP tfm handle + * @alg_name: is the name of the kpp algorithm (e.g. "dh", "ecdh") + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for kpp algorithm. The returned struct crypto_kpp + * is required for any following API invocation + * + * Return: allocated handle in case of success; IS_ERR() is true in case of + * an error, PTR_ERR() returns the error code. + */ +struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm) +{ + return &tfm->base; +} + +static inline struct kpp_alg *__crypto_kpp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct kpp_alg, base); +} + +static inline struct crypto_kpp *__crypto_kpp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_kpp, base); +} + +static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm) +{ + return __crypto_kpp_alg(crypto_kpp_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm) +{ + return crypto_kpp_alg(tfm)->reqsize; +} + +static inline void kpp_request_set_tfm(struct kpp_request *req, + struct crypto_kpp *tfm) +{ + req->base.tfm = crypto_kpp_tfm(tfm); +} + +static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req) +{ + return __crypto_kpp_tfm(req->base.tfm); +} + +/** + * crypto_free_kpp() - free KPP tfm handle + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + */ +static inline void crypto_free_kpp(struct crypto_kpp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_kpp_tfm(tfm)); +} + +/** + * kpp_request_alloc() - allocates kpp request + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * @gfp: allocation flags + * + * Return: allocated handle in case of success or NULL in case of an error. + */ +static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm, + gfp_t gfp) +{ + struct kpp_request *req; + + req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp); + if (likely(req)) + kpp_request_set_tfm(req, tfm); + + return req; +} + +/** + * kpp_request_free() - zeroize and free kpp request + * + * @req: request to free + */ +static inline void kpp_request_free(struct kpp_request *req) +{ + kzfree(req); +} + +/** + * kpp_request_set_callback() - Sets an asynchronous callback. + * + * Callback will be called when an asynchronous operation on a given + * request is finished.
+ * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmpl: callback which will be called + * @data: private data used by the caller + */ +static inline void kpp_request_set_callback(struct kpp_request *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * kpp_request_set_input() - Sets input buffer + * + * Sets parameters required by generate_public_key + * + * @req: kpp request + * @input: ptr to input scatter list + * @input_len: size of the input scatter list + */ +static inline void kpp_request_set_input(struct kpp_request *req, + struct scatterlist *input, + unsigned int input_len) +{ + req->src = input; + req->src_len = input_len; +} + +/** + * kpp_request_set_output() - Sets output buffer + * + * Sets parameters required by kpp operation + * + * @req: kpp request + * @output: ptr to output scatter list + * @output_len: size of the output scatter list + */ +static inline void kpp_request_set_output(struct kpp_request *req, + struct scatterlist *output, + unsigned int output_len) +{ + req->dst = output; + req->dst_len = output_len; +} + +enum { + CRYPTO_KPP_SECRET_TYPE_UNKNOWN, +}; + +/** + * struct kpp_secret - small header for packing secret buffer + * + * @type: define type of secret. Each kpp type will define its own + * @len: specify the length of the secret, including this header, that + * follows the struct + */ +struct kpp_secret { + unsigned short type; + unsigned short len; +}; + +/** + * crypto_kpp_set_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for a given algorithm. + * + * @tfm: tfm handle + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer, + unsigned int len) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->set_secret(tfm, buffer, len); +} + +/** + * crypto_kpp_generate_public_key() - Invoke kpp operation + * + * Function invokes the specific kpp operation for generating the public part + * for a given kpp algorithm + * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->generate_public_key(req); +} + +/** + * crypto_kpp_compute_shared_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for computing the shared secret + * for a given kpp algorithm.
+ * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->compute_shared_secret(req); +} + +/** + * crypto_kpp_maxsize() - Get len for output buffer + * + * Function returns the output buffer size required + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * + * Return: minimum len for output buffer or error code if key hasn't been set + */ +static inline int crypto_kpp_maxsize(struct crypto_kpp *tfm) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->max_size(tfm); +} + +#endif diff --git a/include/linux/crypto.h b/include/linux/crypto.h index d844cbc365f7..992cfc2e5df1 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -48,6 +48,7 @@ #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 +#define CRYPTO_ALG_TYPE_KPP 0x00000008 #define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 2e67bb64c1da..79b5ded2001a 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h @@ -45,6 +45,7 @@ enum crypto_attr_type_t { CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */ CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ + CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */ __CRYPTOCFGA_MAX #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) @@ -107,5 +108,9 @@ struct crypto_report_akcipher { char type[CRYPTO_MAX_NAME]; }; +struct crypto_report_kpp { + char type[CRYPTO_MAX_NAME]; +}; + #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ sizeof(struct crypto_report_blkcipher)) -- cgit v1.2.3-70-g09d2 From 802c7f1c84e4b5a6ac78635878041023fc5831b1 Mon Sep 17 00:00:00 2001 From: Salvatore Benedetto Date: Wed, 22 Jun 2016 17:49:14 +0100 Subject: crypto: dh - Add DH software implementation * Implement MPI based Diffie-Hellman under kpp API * Test provided uses data generated by OpenSSL Signed-off-by: Salvatore Benedetto Signed-off-by: Herbert Xu --- crypto/Kconfig | 8 ++ crypto/Makefile | 4 + crypto/dh.c | 189 ++++++++++++++++++++++++++++++++++++++++++ crypto/dh_helper.c | 95 +++++++++++++++++++++ crypto/testmgr.c | 144 ++++++++++++++++++++++++++++++++ crypto/testmgr.h | 230 +++++++++++++++++++++++++++++++++++++++++++++++++++ include/crypto/dh.h | 29 +++++++ include/crypto/kpp.h | 1 + 8 files changed, 700 insertions(+) create mode 100644 crypto/dh.c create mode 100644 crypto/dh_helper.c create mode 100644 include/crypto/dh.h (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index e72c4270173d..162d2f9aa242 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -111,6 +111,14 @@ config CRYPTO_RSA help Generic implementation of the RSA public key algorithm. +config CRYPTO_DH + tristate "Diffie-Hellman algorithm" + select CRYPTO_KPP + select MPILIB + help + Generic implementation of the Diffie-Hellman algorithm.
+ + config CRYPTO_MANAGER tristate "Cryptographic algorithm manager" select CRYPTO_MANAGER2 diff --git a/crypto/Makefile b/crypto/Makefile index 07b0f51bd645..82897208e8e0 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -32,6 +32,10 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o obj-$(CONFIG_CRYPTO_KPP2) += kpp.o +dh_generic-y := dh.o +dh_generic-y += dh_helper.o +obj-$(CONFIG_CRYPTO_DH) += dh_generic.o + $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h clean-files += rsapubkey-asn1.c rsapubkey-asn1.h diff --git a/crypto/dh.c b/crypto/dh.c new file mode 100644 index 000000000000..5e960fe28681 --- /dev/null +++ b/crypto/dh.c @@ -0,0 +1,189 @@ +/* Diffie-Hellman Key Agreement Method [RFC2631] + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include + +struct dh_ctx { + MPI p; + MPI g; + MPI xa; +}; + +static inline void dh_clear_params(struct dh_ctx *ctx) +{ + mpi_free(ctx->p); + mpi_free(ctx->g); + ctx->p = NULL; + ctx->g = NULL; +} + +static void dh_free_ctx(struct dh_ctx *ctx) +{ + dh_clear_params(ctx); + mpi_free(ctx->xa); + ctx->xa = NULL; +} + +/* + * If base is g we compute the public key + * ya = g^xa mod p; [RFC2631 sec 2.1.1] + * else if base is the counterpart's public key we compute the shared secret + * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1] + */ +static int _compute_val(const struct dh_ctx *ctx, MPI base, MPI val) +{ + /* val = base^xa mod p */ + return mpi_powm(val, base, ctx->xa, ctx->p); +} + +static inline struct dh_ctx *dh_get_ctx(struct crypto_kpp *tfm) +{ + return kpp_tfm_ctx(tfm); +} + +static int dh_check_params_length(unsigned int p_len) +{ + return (p_len < 1536) ?
+	return (p_len < 1536) ? -EINVAL : 0;
+}
+
+static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
+{
+	if (unlikely(!params->p || !params->g))
+		return -EINVAL;
+
+	if (dh_check_params_length(params->p_size << 3))
+		return -EINVAL;
+
+	ctx->p = mpi_read_raw_data(params->p, params->p_size);
+	if (!ctx->p)
+		return -EINVAL;
+
+	ctx->g = mpi_read_raw_data(params->g, params->g_size);
+	if (!ctx->g) {
+		mpi_free(ctx->p);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
+{
+	struct dh_ctx *ctx = dh_get_ctx(tfm);
+	struct dh params;
+
+	if (crypto_dh_decode_key(buf, len, &params) < 0)
+		return -EINVAL;
+
+	if (dh_set_params(ctx, &params) < 0)
+		return -EINVAL;
+
+	ctx->xa = mpi_read_raw_data(params.key, params.key_size);
+	if (!ctx->xa) {
+		dh_clear_params(ctx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dh_compute_value(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct dh_ctx *ctx = dh_get_ctx(tfm);
+	MPI base, val = mpi_alloc(0);
+	int ret = 0;
+	int sign;
+
+	if (!val)
+		return -ENOMEM;
+
+	if (unlikely(!ctx->xa)) {
+		ret = -EINVAL;
+		goto err_free_val;
+	}
+
+	if (req->src) {
+		base = mpi_read_raw_from_sgl(req->src, req->src_len);
+		if (!base) {
+			ret = -EINVAL;
+			goto err_free_val;
+		}
+	} else {
+		base = ctx->g;
+	}
+
+	ret = _compute_val(ctx, base, val);
+	if (ret)
+		goto err_free_base;
+
+	ret = mpi_write_to_sgl(val, req->dst, &req->dst_len, &sign);
+	if (ret)
+		goto err_free_base;
+
+	if (sign < 0)
+		ret = -EBADMSG;
+err_free_base:
+	if (req->src)
+		mpi_free(base);
+err_free_val:
+	mpi_free(val);
+	return ret;
+}
+
+static int dh_max_size(struct crypto_kpp *tfm)
+{
+	struct dh_ctx *ctx = dh_get_ctx(tfm);
+
+	return mpi_get_size(ctx->p);
+}
+
+static void dh_exit_tfm(struct crypto_kpp *tfm)
+{
+	struct dh_ctx *ctx = dh_get_ctx(tfm);
+
+	dh_free_ctx(ctx);
+}
+
+static struct kpp_alg dh = {
+	.set_secret = dh_set_secret,
+	.generate_public_key = dh_compute_value,
+	.compute_shared_secret = dh_compute_value,
+	.max_size = dh_max_size,
+	.exit = dh_exit_tfm,
+	.base = {
+		.cra_name = "dh",
+		.cra_driver_name = "dh-generic",
+		.cra_priority = 100,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct dh_ctx),
+	},
+};
+
+static int dh_init(void)
+{
+	return crypto_register_kpp(&dh);
+}
+
+static void dh_exit(void)
+{
+	crypto_unregister_kpp(&dh);
+}
+
+module_init(dh_init);
+module_exit(dh_exit);
+MODULE_ALIAS_CRYPTO("dh");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DH generic algorithm");
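The dh_helper.c file that follows defines the packed wire format of the kpp secret consumed by dh_set_secret() above. As a reading aid, the layout below is reconstructed from the pack/unpack code and from the test vectors later in this patch (which show a 16-bit type of 0x0001 and a 16-bit total len of 0x0211 = 529 for a 256-byte p); struct kpp_secret itself lives in include/crypto/kpp.h and is not reproduced in this excerpt. All fields are in native endianness:

	struct kpp_secret;       /* type = CRYPTO_KPP_SECRET_TYPE_DH, len = total blob size */
	unsigned int key_size;   /* length of xa in bytes */
	unsigned int p_size;     /* length of the prime p in bytes */
	unsigned int g_size;     /* length of the generator g in bytes */
	u8 key[key_size];        /* private key xa */
	u8 p[p_size];            /* group prime */
	u8 g[g_size];            /* generator */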
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
new file mode 100644
index 000000000000..02db76b20d00
--- /dev/null
+++ b/crypto/dh_helper.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <crypto/dh.h>
+#include <crypto/kpp.h>
+
+#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
+
+static inline u8 *dh_pack_data(void *dst, const void *src, size_t size)
+{
+	memcpy(dst, src, size);
+	return dst + size;
+}
+
+static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
+{
+	memcpy(dst, src, size);
+	return src + size;
+}
+
+static inline int dh_data_size(const struct dh *p)
+{
+	return p->key_size + p->p_size + p->g_size;
+}
+
+int crypto_dh_key_len(const struct dh *p)
+{
+	return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
+}
+EXPORT_SYMBOL_GPL(crypto_dh_key_len);
+
+int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
+{
+	u8 *ptr = buf;
+	struct kpp_secret secret = {
+		.type = CRYPTO_KPP_SECRET_TYPE_DH,
+		.len = len
+	};
+
+	if (unlikely(!buf))
+		return -EINVAL;
+
+	if (len != crypto_dh_key_len(params))
+		return -EINVAL;
+
+	ptr = dh_pack_data(ptr, &secret, sizeof(secret));
+	ptr = dh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
+	ptr = dh_pack_data(ptr, &params->p_size, sizeof(params->p_size));
+	ptr = dh_pack_data(ptr, &params->g_size, sizeof(params->g_size));
+	ptr = dh_pack_data(ptr, params->key, params->key_size);
+	ptr = dh_pack_data(ptr, params->p, params->p_size);
+	dh_pack_data(ptr, params->g, params->g_size);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
+
+int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
+{
+	const u8 *ptr = buf;
+	struct kpp_secret secret;
+
+	if (unlikely(!buf || len < DH_KPP_SECRET_MIN_SIZE))
+		return -EINVAL;
+
+	ptr = dh_unpack_data(&secret, ptr, sizeof(secret));
+	if (secret.type != CRYPTO_KPP_SECRET_TYPE_DH)
+		return -EINVAL;
+
+	ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
+	ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
+	ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
+	if (secret.len != crypto_dh_key_len(params))
+		return -EINVAL;
+
+	/* Don't allocate memory. Set pointers to data within
+	 * the given buffer
+	 */
+	params->key = (void *)ptr;
+	params->p = (void *)(ptr + params->key_size);
+	params->g = (void *)(ptr + params->key_size + params->p_size);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index b773a563809b..ff79eb887fd0 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -32,6 +32,7 @@
 #include <crypto/rng.h>
 #include <crypto/drbg.h>
 #include <crypto/akcipher.h>
+#include <crypto/kpp.h>

 #include "internal.h"

@@ -120,6 +121,11 @@ struct akcipher_test_suite {
 	unsigned int count;
 };

+struct kpp_test_suite {
+	struct kpp_testvec *vecs;
+	unsigned int count;
+};
+
 struct alg_test_desc {
 	const char *alg;
 	int (*test)(const struct alg_test_desc *desc, const char *driver,
@@ -134,6 +140,7 @@ struct alg_test_desc {
 		struct cprng_test_suite cprng;
 		struct drbg_test_suite drbg;
 		struct akcipher_test_suite akcipher;
+		struct kpp_test_suite kpp;
 	} suite;
 };

@@ -1777,6 +1784,133 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,

 }

+static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
+		       const char *alg)
+{
+	struct kpp_request *req;
+	void *input_buf = NULL;
+	void *output_buf = NULL;
+	struct tcrypt_result result;
+	unsigned int out_len_max;
+	int err = -ENOMEM;
+	struct scatterlist src, dst;
+
+	req = kpp_request_alloc(tfm, GFP_KERNEL);
+	if (!req)
+		return err;
+
+	init_completion(&result.completion);
+
+	err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
+	if (err < 0)
+		goto free_req;
+
+	out_len_max = crypto_kpp_maxsize(tfm);
+	output_buf = kzalloc(out_len_max, GFP_KERNEL);
+	if (!output_buf) {
+		err = -ENOMEM;
+		goto free_req;
+	}
+
+	/* Use appropriate parameter as base */
+	kpp_request_set_input(req, NULL, 0);
+	sg_init_one(&dst, output_buf, out_len_max);
+	kpp_request_set_output(req, &dst, out_len_max);
+	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				 tcrypt_complete, &result);
+
+	/* Compute public key */
+	err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
+	if (err) {
+		pr_err("alg: %s: generate public key test failed. err %d\n",
+		       alg, err);
+		goto free_output;
+	}
+	/* Verify calculated public key */
+	if (memcmp(vec->expected_a_public, sg_virt(req->dst),
+		   vec->expected_a_public_size)) {
+		pr_err("alg: %s: generate public key test failed. Invalid output\n",
+		       alg);
+		err = -EINVAL;
+		goto free_output;
+	}
+
+	/* Calculate shared secret key by using counterpart (b) public key. */
+	input_buf = kzalloc(vec->b_public_size, GFP_KERNEL);
+	if (!input_buf) {
+		err = -ENOMEM;
+		goto free_output;
+	}
+
+	memcpy(input_buf, vec->b_public, vec->b_public_size);
+	sg_init_one(&src, input_buf, vec->b_public_size);
+	sg_init_one(&dst, output_buf, out_len_max);
+	kpp_request_set_input(req, &src, vec->b_public_size);
+	kpp_request_set_output(req, &dst, out_len_max);
+	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				 tcrypt_complete, &result);
+	err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
+	if (err) {
+		pr_err("alg: %s: compute shared secret test failed. err %d\n",
+		       alg, err);
+		goto free_all;
+	}
+	/*
+	 * verify shared secret from which the user will derive
+	 * secret key by executing whatever hash it has chosen
+	 */
+	if (memcmp(vec->expected_ss, sg_virt(req->dst),
+		   vec->expected_ss_size)) {
+		pr_err("alg: %s: compute shared secret test failed. 
Invalid output\n", + alg); + err = -EINVAL; + } + +free_all: + kfree(input_buf); +free_output: + kfree(output_buf); +free_req: + kpp_request_free(req); + return err; +} + +static int test_kpp(struct crypto_kpp *tfm, const char *alg, + struct kpp_testvec *vecs, unsigned int tcount) +{ + int ret, i; + + for (i = 0; i < tcount; i++) { + ret = do_test_kpp(tfm, vecs++, alg); + if (ret) { + pr_err("alg: %s: test failed on vector %d, err=%d\n", + alg, i + 1, ret); + return ret; + } + } + return 0; +} + +static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver, + u32 type, u32 mask) +{ + struct crypto_kpp *tfm; + int err = 0; + + tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask); + if (IS_ERR(tfm)) { + pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", + driver, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } + if (desc->suite.kpp.vecs) + err = test_kpp(tfm, desc->alg, desc->suite.kpp.vecs, + desc->suite.kpp.count); + + crypto_free_kpp(tfm); + return err; +} + static int do_test_rsa(struct crypto_akcipher *tfm, struct akcipher_testvec *vecs) { @@ -2728,6 +2862,16 @@ static const struct alg_test_desc alg_test_descs[] = { } } } + }, { + .alg = "dh", + .test = alg_test_kpp, + .fips_allowed = 1, + .suite = { + .kpp = { + .vecs = dh_tv_template, + .count = DH_TEST_VECTORS + } + } }, { .alg = "digest_null", .test = alg_test_null, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index b70e3c92bedd..78e874eca031 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -133,6 +133,17 @@ struct akcipher_testvec { bool public_key_vec; }; +struct kpp_testvec { + unsigned char *secret; + unsigned char *b_public; + unsigned char *expected_a_public; + unsigned char *expected_ss; + unsigned short secret_size; + unsigned short b_public_size; + unsigned short expected_a_public_size; + unsigned short expected_ss_size; +}; + static char zeroed_string[48]; /* @@ -330,6 +341,225 @@ static struct akcipher_testvec rsa_tv_template[] = { } }; +#define DH_TEST_VECTORS 2 + +struct kpp_testvec dh_tv_template[] = { + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x11\x02" /* len */ + "\x00\x01\x00\x00" /* key_size */ + "\x00\x01\x00\x00" /* p_size */ + "\x01\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x02\x11" /* len */ + "\x00\x00\x01\x00" /* key_size */ + "\x00\x00\x01\x00" /* p_size */ + "\x00\x00\x00\x01" /* g_size */ +#endif + /* xa */ + "\x44\xc1\x48\x36\xa7\x2b\x6f\x4e\x43\x03\x68\xad\x31\x00\xda\xf3" + "\x2a\x01\xa8\x32\x63\x5f\x89\x32\x1f\xdf\x4c\xa1\x6a\xbc\x10\x15" + "\x90\x35\xc9\x26\x41\xdf\x7b\xaa\x56\x56\x3d\x85\x44\xb5\xc0\x8e" + "\x37\x83\x06\x50\xb3\x5f\x0e\x28\x2c\xd5\x46\x15\xe3\xda\x7d\x74" + "\x87\x13\x91\x4f\xd4\x2d\xf6\xc7\x5e\x14\x2c\x11\xc2\x26\xb4\x3a" + "\xe3\xb2\x36\x20\x11\x3b\x22\xf2\x06\x65\x66\xe2\x57\x58\xf8\x22" + "\x1a\x94\xbd\x2b\x0e\x8c\x55\xad\x61\x23\x45\x2b\x19\x1e\x63\x3a" + "\x13\x61\xe3\xa0\x79\x70\x3e\x6d\x98\x32\xbc\x7f\x82\xc3\x11\xd8" + "\xeb\x53\xb5\xfc\xb5\xd5\x3c\x4a\xea\x92\x3e\x01\xce\x15\x65\xd4" + "\xaa\x85\xc1\x11\x90\x83\x31\x6e\xfe\xe7\x7f\x7d\xed\xab\xf9\x29" + "\xf8\xc7\xf1\x68\xc6\xb7\xe4\x1f\x2f\x28\xa0\xc9\x1a\x50\x64\x29" + "\x4b\x01\x6d\x1a\xda\x46\x63\x21\x07\x40\x8c\x8e\x4c\x6f\xb5\xe5" + "\x12\xf3\xc2\x1b\x48\x27\x5e\x27\x01\xb1\xaa\xed\x68\x9b\x83\x18" + "\x8f\xb1\xeb\x1f\x04\xd1\x3c\x79\xed\x4b\xf7\x0a\x33\xdc\xe0\xc6" + "\xd8\x02\x51\x59\x00\x74\x30\x07\x4c\x2d\xac\xe4\x13\xf1\x80\xf0" + "\xce\xfa\xff\xa9\xce\x29\x46\xdd\x9d\xad\xd1\xc3\xc6\x58\x1a\x63" + /* p */ + 
"\xb9\x36\x3a\xf1\x82\x1f\x60\xd3\x22\x47\xb8\xbc\x2d\x22\x6b\x81" + "\x7f\xe8\x20\x06\x09\x23\x73\x49\x9a\x59\x8b\x35\x25\xf8\x31\xbc" + "\x7d\xa8\x1c\x9d\x56\x0d\x1a\xf7\x4b\x4f\x96\xa4\x35\x77\x6a\x89" + "\xab\x42\x00\x49\x21\x71\xed\x28\x16\x1d\x87\x5a\x10\xa7\x9c\x64" + "\x94\xd4\x87\x3d\x28\xef\x44\xfe\x4b\xe2\xb4\x15\x8c\x82\xa6\xf3" + "\x50\x5f\xa8\xe8\xa2\x60\xe7\x00\x86\x78\x05\xd4\x78\x19\xa1\x98" + "\x62\x4e\x4a\x00\x78\x56\x96\xe6\xcf\xd7\x10\x1b\x74\x5d\xd0\x26" + "\x61\xdb\x6b\x32\x09\x51\xd8\xa5\xfd\x54\x16\x71\x01\xb3\x39\xe6" + "\x4e\x69\xb1\xd7\x06\x8f\xd6\x1e\xdc\x72\x25\x26\x74\xc8\x41\x06" + "\x5c\xd1\x26\x5c\xb0\x2f\xf9\x59\x13\xc1\x2a\x0f\x78\xea\x7b\xf7" + "\xbd\x59\xa0\x90\x1d\xfc\x33\x5b\x4c\xbf\x05\x9c\x3a\x3f\x69\xa2" + "\x45\x61\x4e\x10\x6a\xb3\x17\xc5\x68\x30\xfb\x07\x5f\x34\xc6\xfb" + "\x73\x07\x3c\x70\xf6\xae\xe7\x72\x84\xc3\x18\x81\x8f\xe8\x11\x1f" + "\x3d\x83\x83\x01\x2a\x14\x73\xbf\x32\x32\x2e\xc9\x4d\xdb\x2a\xca" + "\xee\x71\xf9\xda\xad\xe8\x82\x0b\x4d\x0c\x1f\xb6\x1d\xef\x00\x67" + "\x74\x3d\x95\xe0\xb7\xc4\x30\x8a\x24\x87\x12\x47\x27\x70\x0d\x73" + /* g */ + "\x02", + .b_public = + "\x2a\x67\x5c\xfd\x63\x5d\xc0\x97\x0a\x8b\xa2\x1f\xf8\x8a\xcb\x54" + "\xca\x2f\xd3\x49\x3f\x01\x8e\x87\xfe\xcc\x94\xa0\x3e\xd4\x26\x79" + "\x9a\x94\x3c\x11\x81\x58\x5c\x60\x3d\xf5\x98\x90\x89\x64\x62\x1f" + "\xbd\x05\x6d\x2b\xcd\x84\x40\x9b\x4a\x1f\xe0\x19\xf1\xca\x20\xb3" + "\x4e\xa0\x4f\x15\xcc\xa5\xfe\xa5\xb4\xf5\x0b\x18\x7a\x5a\x37\xaa" + "\x58\x00\x19\x7f\xe2\xa3\xd9\x1c\x44\x57\xcc\xde\x2e\xc1\x38\xea" + "\xeb\xe3\x90\x40\xc4\x6c\xf7\xcd\xe9\x22\x50\x71\xf5\x7c\xdb\x37" + "\x0e\x80\xc3\xed\x7e\xb1\x2b\x2f\xbe\x71\xa6\x11\xa5\x9d\xf5\x39" + "\xf1\xa2\xe5\x85\xbc\x25\x91\x4e\x84\x8d\x26\x9f\x4f\xe6\x0f\xa6" + "\x2b\x6b\xf9\x0d\xaf\x6f\xbb\xfa\x2d\x79\x15\x31\x57\xae\x19\x60" + "\x22\x0a\xf5\xfd\x98\x0e\xbf\x5d\x49\x75\x58\x37\xbc\x7f\xf5\x21" + "\x56\x1e\xd5\xb3\x50\x0b\xca\x96\xf3\xd1\x3f\xb3\x70\xa8\x6d\x63" + "\x48\xfb\x3d\xd7\x29\x91\x45\xb5\x48\xcd\xb6\x78\x30\xf2\x3f\x1e" + "\xd6\x22\xd6\x35\x9b\xf9\x1f\x85\xae\xab\x4b\xd7\xe0\xc7\x86\x67" + "\x3f\x05\x7f\xa6\x0d\x2f\x0d\xbf\x53\x5f\x4d\x2c\x6d\x5e\x57\x40" + "\x30\x3a\x23\x98\xf9\xb4\x32\xf5\x32\x83\xdd\x0b\xae\x33\x97\x2f", + .expected_a_public = + "\x5c\x24\xdf\xeb\x5b\x4b\xf8\xc5\xef\x39\x48\x82\xe0\x1e\x62\xee" + "\x8a\xae\xdf\x93\x6c\x2b\x16\x95\x92\x16\x3f\x16\x7b\x75\x03\x85" + "\xd9\xf1\x69\xc2\x14\x87\x45\xfc\xa4\x19\xf6\xf0\xa4\xf3\xec\xd4" + "\x6c\x5c\x03\x3b\x94\xc2\x2f\x92\xe4\xce\xb3\xe4\x72\xe8\x17\xe6" + "\x23\x7e\x00\x01\x09\x59\x13\xbf\xc1\x2f\x99\xa9\x07\xaa\x02\x23" + "\x4a\xca\x39\x4f\xbc\xec\x0f\x27\x4f\x19\x93\x6c\xb9\x30\x52\xfd" + "\x2b\x9d\x86\xf1\x06\x1e\xb6\x56\x27\x4a\xc9\x8a\xa7\x8a\x48\x5e" + "\xb5\x60\xcb\xdf\xff\x03\x26\x10\xbf\x90\x8f\x46\x60\xeb\x9b\x9a" + "\xd6\x6f\x44\x91\x03\x92\x18\x2c\x96\x5e\x40\x19\xfb\xf4\x4f\x3a" + "\x02\x7b\xaf\xcc\x22\x20\x79\xb9\xf8\x9f\x8f\x85\x6b\xec\x44\xbb" + "\xe6\xa8\x8e\xb1\xe8\x2c\xee\x64\xee\xf8\xbd\x00\xf3\xe2\x2b\x93" + "\xcd\xe7\xc4\xdf\xc9\x19\x46\xfe\xb6\x07\x73\xc1\x8a\x64\x79\x26" + "\xe7\x30\xad\x2a\xdf\xe6\x8f\x59\xf5\x81\xbf\x4a\x29\x91\xe7\xb7" + "\xcf\x48\x13\x27\x75\x79\x40\xd9\xd6\x32\x52\x4e\x6a\x86\xae\x6f" + "\xc2\xbf\xec\x1f\xc2\x69\xb2\xb6\x59\xe5\xa5\x17\xa4\x77\xb7\x62" + "\x46\xde\xe8\xd2\x89\x78\x9a\xef\xa3\xb5\x8f\x26\xec\x80\xda\x39", + .expected_ss = + "\x8f\xf3\xac\xa2\xea\x22\x11\x5c\x45\x65\x1a\x77\x75\x2e\xcf\x46" + "\x23\x14\x1e\x67\x53\x4d\x35\xb0\x38\x1d\x4e\xb9\x41\x9a\x21\x24" + 
"\x6e\x9f\x40\xfe\x90\x51\xb1\x06\xa4\x7b\x87\x17\x2f\xe7\x5e\x22" + "\xf0\x7b\x54\x84\x0a\xac\x0a\x90\xd2\xd7\xe8\x7f\xe7\xe3\x30\x75" + "\x01\x1f\x24\x75\x56\xbe\xcc\x8d\x1e\x68\x0c\x41\x72\xd3\xfa\xbb" + "\xe5\x9c\x60\xc7\x28\x77\x0c\xbe\x89\xab\x08\xd6\x21\xe7\x2e\x1a" + "\x58\x7a\xca\x4f\x22\xf3\x2b\x30\xfd\xf4\x98\xc1\xa3\xf8\xf6\xcc" + "\xa9\xe4\xdb\x5b\xee\xd5\x5c\x6f\x62\x4c\xd1\x1a\x02\x2a\x23\xe4" + "\xb5\x57\xf3\xf9\xec\x04\x83\x54\xfe\x08\x5e\x35\xac\xfb\xa8\x09" + "\x82\x32\x60\x11\xb2\x16\x62\x6b\xdf\xda\xde\x9c\xcb\x63\x44\x6c" + "\x59\x26\x6a\x8f\xb0\x24\xcb\xa6\x72\x48\x1e\xeb\xe0\xe1\x09\x44" + "\xdd\xee\x66\x6d\x84\xcf\xa5\xc1\xb8\x36\x74\xd3\x15\x96\xc3\xe4" + "\xc6\x5a\x4d\x23\x97\x0c\x5c\xcb\xa9\xf5\x29\xc2\x0e\xff\x93\x82" + "\xd3\x34\x49\xad\x64\xa6\xb1\xc0\x59\x28\x75\x60\xa7\x8a\xb0\x11" + "\x56\x89\x42\x74\x11\xf5\xf6\x5e\x6f\x16\x54\x6a\xb1\x76\x4d\x50" + "\x8a\x68\xc1\x5b\x82\xb9\x0d\x00\x32\x50\xed\x88\x87\x48\x92\x17", + .secret_size = 529, + .b_public_size = 256, + .expected_a_public_size = 256, + .expected_ss_size = 256, + }, + { + .secret = +#ifdef __LITTLE_ENDIAN + "\x01\x00" /* type */ + "\x11\x02" /* len */ + "\x00\x01\x00\x00" /* key_size */ + "\x00\x01\x00\x00" /* p_size */ + "\x01\x00\x00\x00" /* g_size */ +#else + "\x00\x01" /* type */ + "\x02\x11" /* len */ + "\x00\x00\x01\x00" /* key_size */ + "\x00\x00\x01\x00" /* p_size */ + "\x00\x00\x00\x01" /* g_size */ +#endif + /* xa */ + "\x4d\x75\xa8\x6e\xba\x23\x3a\x0c\x63\x56\xc8\xc9\x5a\xa7\xd6\x0e" + "\xed\xae\x40\x78\x87\x47\x5f\xe0\xa7\x7b\xba\x84\x88\x67\x4e\xe5" + "\x3c\xcc\x5c\x6a\xe7\x4a\x20\xec\xbe\xcb\xf5\x52\x62\x9f\x37\x80" + "\x0c\x72\x7b\x83\x66\xa4\xf6\x7f\x95\x97\x1c\x6a\x5c\x7e\xf1\x67" + "\x37\xb3\x93\x39\x3d\x0b\x55\x35\xd9\xe5\x22\x04\x9f\xf8\xc1\x04" + "\xce\x13\xa5\xac\xe1\x75\x05\xd1\x2b\x53\xa2\x84\xef\xb1\x18\xf4" + "\x66\xdd\xea\xe6\x24\x69\x5a\x49\xe0\x7a\xd8\xdf\x1b\xb7\xf1\x6d" + "\x9b\x50\x2c\xc8\x1c\x1c\xa3\xb4\x37\xfb\x66\x3f\x67\x71\x73\xa9" + "\xff\x5f\xd9\xa2\x25\x6e\x25\x1b\x26\x54\xbf\x0c\xc6\xdb\xea\x0a" + "\x52\x6c\x16\x7c\x27\x68\x15\x71\x58\x73\x9d\xe6\xc2\x80\xaa\x97" + "\x31\x66\xfb\xa6\xfb\xfd\xd0\x9c\x1d\xbe\x81\x48\xf5\x9a\x32\xf1" + "\x69\x62\x18\x78\xae\x72\x36\xe6\x94\x27\xd1\xff\x18\x4f\x28\x6a" + "\x16\xbd\x6a\x60\xee\xe5\xf9\x6d\x16\xe4\xb8\xa6\x41\x9b\x23\x7e" + "\xf7\x9d\xd1\x1d\x03\x15\x66\x3a\xcf\xb6\x2c\x13\x96\x2c\x52\x21" + "\xe4\x2d\x48\x7a\x8a\x5d\xb2\x88\xed\x98\x61\x79\x8b\x6a\x1e\x5f" + "\xd0\x8a\x2d\x99\x5a\x2b\x0f\xbc\xef\x53\x8f\x32\xc1\xa2\x99\x26" + /* p */ + "\xb9\x36\x3a\xf1\x82\x1f\x60\xd3\x22\x47\xb8\xbc\x2d\x22\x6b\x81" + "\x7f\xe8\x20\x06\x09\x23\x73\x49\x9a\x59\x8b\x35\x25\xf8\x31\xbc" + "\x7d\xa8\x1c\x9d\x56\x0d\x1a\xf7\x4b\x4f\x96\xa4\x35\x77\x6a\x89" + "\xab\x42\x00\x49\x21\x71\xed\x28\x16\x1d\x87\x5a\x10\xa7\x9c\x64" + "\x94\xd4\x87\x3d\x28\xef\x44\xfe\x4b\xe2\xb4\x15\x8c\x82\xa6\xf3" + "\x50\x5f\xa8\xe8\xa2\x60\xe7\x00\x86\x78\x05\xd4\x78\x19\xa1\x98" + "\x62\x4e\x4a\x00\x78\x56\x96\xe6\xcf\xd7\x10\x1b\x74\x5d\xd0\x26" + "\x61\xdb\x6b\x32\x09\x51\xd8\xa5\xfd\x54\x16\x71\x01\xb3\x39\xe6" + "\x4e\x69\xb1\xd7\x06\x8f\xd6\x1e\xdc\x72\x25\x26\x74\xc8\x41\x06" + "\x5c\xd1\x26\x5c\xb0\x2f\xf9\x59\x13\xc1\x2a\x0f\x78\xea\x7b\xf7" + "\xbd\x59\xa0\x90\x1d\xfc\x33\x5b\x4c\xbf\x05\x9c\x3a\x3f\x69\xa2" + "\x45\x61\x4e\x10\x6a\xb3\x17\xc5\x68\x30\xfb\x07\x5f\x34\xc6\xfb" + "\x73\x07\x3c\x70\xf6\xae\xe7\x72\x84\xc3\x18\x81\x8f\xe8\x11\x1f" + "\x3d\x83\x83\x01\x2a\x14\x73\xbf\x32\x32\x2e\xc9\x4d\xdb\x2a\xca" + 
"\xee\x71\xf9\xda\xad\xe8\x82\x0b\x4d\x0c\x1f\xb6\x1d\xef\x00\x67" + "\x74\x3d\x95\xe0\xb7\xc4\x30\x8a\x24\x87\x12\x47\x27\x70\x0d\x73" + /* g */ + "\x02", + .b_public = + "\x99\x4d\xd9\x01\x84\x8e\x4a\x5b\xb8\xa5\x64\x8c\x6c\x00\x5c\x0e" + "\x1e\x1b\xee\x5d\x9f\x53\xe3\x16\x70\x01\xed\xbf\x4f\x14\x36\x6e" + "\xe4\x43\x45\x43\x49\xcc\xb1\xb0\x2a\xc0\x6f\x22\x55\x42\x17\x94" + "\x18\x83\xd7\x2a\x5c\x51\x54\xf8\x4e\x7c\x10\xda\x76\x68\x57\x77" + "\x1e\x62\x03\x30\x04\x7b\x4c\x39\x9c\x54\x01\x54\xec\xef\xb3\x55" + "\xa4\xc0\x24\x6d\x3d\xbd\xcc\x46\x5b\x00\x96\xc7\xea\x93\xd1\x3f" + "\xf2\x6a\x72\xe3\xf2\xc1\x92\x24\x5b\xda\x48\x70\x2c\xa9\x59\x97" + "\x19\xb1\xd6\x54\xb3\x9c\x2e\xb0\x63\x07\x9b\x5e\xac\xb5\xf2\xb1" + "\x5b\xf8\xf3\xd7\x2d\x37\x9b\x68\x6c\xf8\x90\x07\xbc\x37\x9a\xa5" + "\xe2\x91\x12\x25\x47\x77\xe3\x3d\xb2\x95\x69\x44\x0b\x91\x1e\xaf" + "\x7c\x8c\x7c\x34\x41\x6a\xab\x60\x6e\xc6\x52\xec\x7e\x94\x0a\x37" + "\xec\x98\x90\xdf\x3f\x02\xbd\x23\x52\xdd\xd9\xe5\x31\x80\x74\x25" + "\xb6\xd2\xd3\xcc\xd5\xcc\x6d\xf9\x7e\x4d\x78\xab\x77\x51\xfa\x77" + "\x19\x94\x49\x8c\x05\xd4\x75\xed\xd2\xb3\x64\x57\xe0\x52\x99\xc0" + "\x83\xe3\xbb\x5e\x2b\xf1\xd2\xc0\xb1\x37\x36\x0b\x7c\xb5\x63\x96" + "\x8e\xde\x04\x23\x11\x95\x62\x11\x9a\xce\x6f\x63\xc8\xd5\xd1\x8f", + .expected_a_public = + "\x90\x89\xe4\x82\xd6\x0a\xcf\x1a\xae\xce\x1b\x66\xa7\x19\x71\x18" + "\x8f\x95\x4b\x5b\x80\x45\x4a\x5a\x43\x99\x4d\x37\xcf\xa3\xa7\x28" + "\x9c\xc7\x73\xf1\xb2\x17\xf6\x99\xe3\x6b\x56\xcb\x3e\x35\x60\x7d" + "\x65\xc7\x84\x6b\x3e\x60\xee\xcd\xd2\x70\xe7\xc9\x32\x1c\xf0\xb4" + "\xf9\x52\xd9\x88\x75\xfd\x40\x2c\xa7\xbe\x19\x1c\x0a\xae\x93\xe1" + "\x71\xc7\xcd\x4f\x33\x5c\x10\x7d\x39\x56\xfc\x73\x84\xb2\x67\xc3" + "\x77\x26\x20\x97\x2b\xf8\x13\x43\x93\x9c\x9a\xa4\x08\xc7\x34\x83" + "\xe6\x98\x61\xe7\x16\x30\x2c\xb1\xdb\x2a\xb2\xcc\xc3\x02\xa5\x3c" + "\x71\x50\x14\x83\xc7\xbb\xa4\xbe\x98\x1b\xfe\xcb\x43\xe9\x97\x62" + "\xd6\xf0\x8c\xcb\x1c\xba\x1e\xa8\xa6\xa6\x50\xfc\x85\x7d\x47\xbf" + "\xf4\x3e\x23\xd3\x5f\xb2\x71\x3e\x40\x94\xaa\x87\x83\x2c\x6c\x8e" + "\x60\xfd\xdd\xf7\xf4\x76\x03\xd3\x1d\xec\x18\x51\xa3\xf2\x44\x1a" + "\x3f\xb4\x7c\x18\x0d\x68\x65\x92\x54\x0d\x2d\x81\x16\xf1\x84\x66" + "\x89\x92\xd0\x1a\x5e\x1f\x42\x46\x5b\xe5\x83\x86\x80\xd9\xcd\x3a" + "\x5a\x2f\xb9\x59\x9b\xe4\x43\x84\x64\xf3\x09\x1a\x0a\xa2\x64\x0f" + "\x77\x4e\x8d\x8b\xe6\x88\xd1\xfc\xaf\x8f\xdf\x1d\xbc\x31\xb3\xbd", + .expected_ss = + "\x34\xc3\x35\x14\x88\x46\x26\x23\x97\xbb\xdd\x28\x5c\x94\xf6\x47" + "\xca\xb3\x19\xaf\xca\x44\x9b\xc2\x7d\x89\xfd\x96\x14\xfd\x6d\x58" + "\xd8\xc4\x6b\x61\x2a\x0d\xf2\x36\x45\xc8\xe4\xa4\xed\x81\x53\x81" + "\x66\x1e\xe0\x5a\xb1\x78\x2d\x0b\x5c\xb4\xd1\xfc\x90\xc6\x9c\xdb" + "\x5a\x30\x0b\x14\x7d\xbe\xb3\x7d\xb1\xb2\x76\x3c\x6c\xef\x74\x6b" + "\xe7\x1f\x64\x0c\xab\x65\xe1\x76\x5c\x3d\x83\xb5\x8a\xfb\xaf\x0f" + "\xf2\x06\x14\x8f\xa0\xf6\xc1\x89\x78\xf2\xba\x72\x73\x3c\xf7\x76" + "\x21\x67\xbc\x24\x31\xb8\x09\x65\x0f\x0c\x02\x32\x4a\x98\x14\xfc" + "\x72\x2c\x25\x60\x68\x5f\x2f\x30\x1e\x5b\xf0\x3b\xd1\xa2\x87\xa0" + "\x54\xdf\xdb\xc0\xee\x0a\x0f\x47\xc9\x90\x20\x2c\xf9\xe3\x52\xad" + "\x27\x65\x8d\x54\x8d\xa8\xa1\xf3\xed\x15\xd4\x94\x28\x90\x31\x93" + "\x1b\xc0\x51\xbb\x43\x5d\x76\x3b\x1d\x2a\x71\x50\xea\x5d\x48\x94" + "\x7f\x6f\xf1\x48\xdb\x30\xe5\xae\x64\x79\xd9\x7a\xdb\xc6\xff\xd8" + "\x5e\x5a\x64\xbd\xf6\x85\x04\xe8\x28\x6a\xac\xef\xce\x19\x8e\x9a" + "\xfe\x75\xc0\x27\x69\xe3\xb3\x7b\x21\xa7\xb1\x16\xa4\x85\x23\xee" + "\xb0\x1b\x04\x6e\xbd\xab\x16\xde\xfd\x86\x6b\xa9\x95\xd7\x0b\xfd", + .secret_size = 529, + 
.b_public_size = 256,
+	.expected_a_public_size = 256,
+	.expected_ss_size = 256,
+	}
+};
+
 /*
  * MD4 test vectors from RFC1320
  */
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
new file mode 100644
index 000000000000..5102a8f282e6
--- /dev/null
+++ b/include/crypto/dh.h
@@ -0,0 +1,29 @@
+/*
+ * Diffie-Hellman secret to be used with kpp API along with helper functions
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_DH_
+#define _CRYPTO_DH_
+
+struct dh {
+	void *key;
+	void *p;
+	void *g;
+	unsigned int key_size;
+	unsigned int p_size;
+	unsigned int g_size;
+};
+
+int crypto_dh_key_len(const struct dh *params);
+int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
+int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
+
+#endif
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 4fa897f3366b..937ac122354a 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -242,6 +242,7 @@ static inline void kpp_request_set_output(struct kpp_request *req,

 enum {
 	CRYPTO_KPP_SECRET_TYPE_UNKNOWN,
+	CRYPTO_KPP_SECRET_TYPE_DH,
 };

 /**
-- cgit v1.2.3-70-g09d2

From 3c4b23901a0c766879dff680cd6bdab47bcdbbd2 Mon Sep 17 00:00:00 2001
From: Salvatore Benedetto
Date: Wed, 22 Jun 2016 17:49:15 +0100
Subject: crypto: ecdh - Add ECDH software support

* Implement ECDH under kpp API
* Provide ECC software support for curves P-192 and P-256.
* Add kpp test for ECDH with data generated by OpenSSL

Signed-off-by: Salvatore Benedetto
Signed-off-by: Herbert Xu
---
 crypto/Kconfig          |    5 +
 crypto/Makefile         |    4 +
 crypto/ecc.c            | 1018 +++++++++++++++++++++++++++++++++++++++++++++++
 crypto/ecc.h            |   83 ++++
 crypto/ecc_curve_defs.h |   57 +++
 crypto/ecdh.c           |  151 +++++++
 crypto/ecdh_helper.c    |   86 ++++
 crypto/testmgr.c        |   10 +
 crypto/testmgr.h        |   93 +++++
 include/crypto/ecdh.h   |   30 ++
 include/crypto/kpp.h    |    1 +
 11 files changed, 1538 insertions(+)
 create mode 100644 crypto/ecc.c
 create mode 100644 crypto/ecc.h
 create mode 100644 crypto/ecc_curve_defs.h
 create mode 100644 crypto/ecdh.c
 create mode 100644 crypto/ecdh_helper.c
 create mode 100644 include/crypto/ecdh.h
(limited to 'crypto')
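As with DH above, a caller-side sketch (not part of the patch) may help navigate the diff. The shared secret is requested by pointing req->src at the counterpart's public key; struct ecdh, crypto_ecdh_key_len() and crypto_ecdh_encode_key() come from the helper added below, and ECC_CURVE_NIST_P192 comes from the new include/crypto/ecdh.h, which is not reproduced in this excerpt. Synchronous completion is assumed and the helper name is invented for illustration.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/ecdh.h>
#include <crypto/kpp.h>

/* Hypothetical helper: shared secret = priv * peer_pub on NIST P-192. */
static int ecdh_shared_secret_sketch(const u8 *priv, unsigned short priv_len,
				     struct scatterlist *peer_pub,
				     unsigned int peer_pub_len,
				     struct scatterlist *out,
				     unsigned int out_len)
{
	struct ecdh params = {
		.curve_id = ECC_CURVE_NIST_P192,
		.key = (void *)priv,
		.key_size = priv_len,
	};
	struct crypto_kpp *tfm;
	struct kpp_request *req = NULL;
	unsigned int blob_len = crypto_ecdh_key_len(&params);
	u8 *blob;
	int err;

	tfm = crypto_alloc_kpp("ecdh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	blob = kmalloc(blob_len, GFP_KERNEL);
	err = blob ? crypto_ecdh_encode_key(blob, blob_len, &params) : -ENOMEM;
	if (!err)
		err = crypto_kpp_set_secret(tfm, blob, blob_len);
	if (!err) {
		req = kpp_request_alloc(tfm, GFP_KERNEL);
		if (!req)
			err = -ENOMEM;
	}
	if (!err) {
		/* src set: compute shared secret instead of public key */
		kpp_request_set_input(req, peer_pub, peer_pub_len);
		kpp_request_set_output(req, out, out_len);
		err = crypto_kpp_compute_shared_secret(req);
	}

	kpp_request_free(req);
	kfree(blob);
	crypto_free_kpp(tfm);
	return err;
}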
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 162d2f9aa242..5baaa9d87574 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -118,6 +118,11 @@ config CRYPTO_DH
 	help
 	  Generic implementation of the Diffie-Hellman algorithm.
 
+config CRYPTO_ECDH
+	tristate "ECDH algorithm"
+	select CRYPTO_KPP
+	help
+	  Generic implementation of the ECDH algorithm.
 
 config CRYPTO_MANAGER
 	tristate "Cryptographic algorithm manager"
diff --git a/crypto/Makefile b/crypto/Makefile
index 82897208e8e0..df1bcfb090d2 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -35,6 +35,10 @@ obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
 dh_generic-y := dh.o
 dh_generic-y += dh_helper.o
 obj-$(CONFIG_CRYPTO_DH) += dh_generic.o
+ecdh_generic-y := ecc.o
+ecdh_generic-y += ecdh.o
+ecdh_generic-y += ecdh_helper.o
+obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
 
 $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
 $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
diff --git a/crypto/ecc.c b/crypto/ecc.c
new file mode 100644
index 000000000000..9aedec6bbe72
--- /dev/null
+++ b/crypto/ecc.c
@@ -0,0 +1,1018 @@
+/*
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+#include <linux/fips.h>
+#include <crypto/ecdh.h>
+
+#include "ecc.h"
+#include "ecc_curve_defs.h"
+
+typedef struct {
+	u64 m_low;
+	u64 m_high;
+} uint128_t;
+
+static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
+{
+	switch (curve_id) {
+	/* In FIPS mode only allow P256 and higher */
+	case ECC_CURVE_NIST_P192:
+		return fips_enabled ?
NULL : &nist_p192; + case ECC_CURVE_NIST_P256: + return &nist_p256; + default: + return NULL; + } +} + +static u64 *ecc_alloc_digits_space(unsigned int ndigits) +{ + size_t len = ndigits * sizeof(u64); + + if (!len) + return NULL; + + return kmalloc(len, GFP_KERNEL); +} + +static void ecc_free_digits_space(u64 *space) +{ + kzfree(space); +} + +static struct ecc_point *ecc_alloc_point(unsigned int ndigits) +{ + struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL); + + if (!p) + return NULL; + + p->x = ecc_alloc_digits_space(ndigits); + if (!p->x) + goto err_alloc_x; + + p->y = ecc_alloc_digits_space(ndigits); + if (!p->y) + goto err_alloc_y; + + p->ndigits = ndigits; + + return p; + +err_alloc_y: + ecc_free_digits_space(p->x); +err_alloc_x: + kfree(p); + return NULL; +} + +static void ecc_free_point(struct ecc_point *p) +{ + if (!p) + return; + + kzfree(p->x); + kzfree(p->y); + kzfree(p); +} + +static void vli_clear(u64 *vli, unsigned int ndigits) +{ + int i; + + for (i = 0; i < ndigits; i++) + vli[i] = 0; +} + +/* Returns true if vli == 0, false otherwise. */ +static bool vli_is_zero(const u64 *vli, unsigned int ndigits) +{ + int i; + + for (i = 0; i < ndigits; i++) { + if (vli[i]) + return false; + } + + return true; +} + +/* Returns nonzero if bit bit of vli is set. */ +static u64 vli_test_bit(const u64 *vli, unsigned int bit) +{ + return (vli[bit / 64] & ((u64)1 << (bit % 64))); +} + +/* Counts the number of 64-bit "digits" in vli. */ +static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits) +{ + int i; + + /* Search from the end until we find a non-zero digit. + * We do it in reverse because we expect that most digits will + * be nonzero. + */ + for (i = ndigits - 1; i >= 0 && vli[i] == 0; i--); + + return (i + 1); +} + +/* Counts the number of bits required for vli. */ +static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits) +{ + unsigned int i, num_digits; + u64 digit; + + num_digits = vli_num_digits(vli, ndigits); + if (num_digits == 0) + return 0; + + digit = vli[num_digits - 1]; + for (i = 0; digit; i++) + digit >>= 1; + + return ((num_digits - 1) * 64 + i); +} + +/* Sets dest = src. */ +static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits) +{ + int i; + + for (i = 0; i < ndigits; i++) + dest[i] = src[i]; +} + +/* Returns sign of left - right. */ +static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits) +{ + int i; + + for (i = ndigits - 1; i >= 0; i--) { + if (left[i] > right[i]) + return 1; + else if (left[i] < right[i]) + return -1; + } + + return 0; +} + +/* Computes result = in << c, returning carry. Can modify in place + * (if result == in). 0 < shift < 64. + */ +static u64 vli_lshift(u64 *result, const u64 *in, unsigned int shift, + unsigned int ndigits) +{ + u64 carry = 0; + int i; + + for (i = 0; i < ndigits; i++) { + u64 temp = in[i]; + + result[i] = (temp << shift) | carry; + carry = temp >> (64 - shift); + } + + return carry; +} + +/* Computes vli = vli >> 1. */ +static void vli_rshift1(u64 *vli, unsigned int ndigits) +{ + u64 *end = vli; + u64 carry = 0; + + vli += ndigits; + + while (vli-- > end) { + u64 temp = *vli; + *vli = (temp >> 1) | carry; + carry = temp << 63; + } +} + +/* Computes result = left + right, returning carry. Can modify in place. 
*/ +static u64 vli_add(u64 *result, const u64 *left, const u64 *right, + unsigned int ndigits) +{ + u64 carry = 0; + int i; + + for (i = 0; i < ndigits; i++) { + u64 sum; + + sum = left[i] + right[i] + carry; + if (sum != left[i]) + carry = (sum < left[i]); + + result[i] = sum; + } + + return carry; +} + +/* Computes result = left - right, returning borrow. Can modify in place. */ +static u64 vli_sub(u64 *result, const u64 *left, const u64 *right, + unsigned int ndigits) +{ + u64 borrow = 0; + int i; + + for (i = 0; i < ndigits; i++) { + u64 diff; + + diff = left[i] - right[i] - borrow; + if (diff != left[i]) + borrow = (diff > left[i]); + + result[i] = diff; + } + + return borrow; +} + +static uint128_t mul_64_64(u64 left, u64 right) +{ + u64 a0 = left & 0xffffffffull; + u64 a1 = left >> 32; + u64 b0 = right & 0xffffffffull; + u64 b1 = right >> 32; + u64 m0 = a0 * b0; + u64 m1 = a0 * b1; + u64 m2 = a1 * b0; + u64 m3 = a1 * b1; + uint128_t result; + + m2 += (m0 >> 32); + m2 += m1; + + /* Overflow */ + if (m2 < m1) + m3 += 0x100000000ull; + + result.m_low = (m0 & 0xffffffffull) | (m2 << 32); + result.m_high = m3 + (m2 >> 32); + + return result; +} + +static uint128_t add_128_128(uint128_t a, uint128_t b) +{ + uint128_t result; + + result.m_low = a.m_low + b.m_low; + result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low); + + return result; +} + +static void vli_mult(u64 *result, const u64 *left, const u64 *right, + unsigned int ndigits) +{ + uint128_t r01 = { 0, 0 }; + u64 r2 = 0; + unsigned int i, k; + + /* Compute each digit of result in sequence, maintaining the + * carries. + */ + for (k = 0; k < ndigits * 2 - 1; k++) { + unsigned int min; + + if (k < ndigits) + min = 0; + else + min = (k + 1) - ndigits; + + for (i = min; i <= k && i < ndigits; i++) { + uint128_t product; + + product = mul_64_64(left[i], right[k - i]); + + r01 = add_128_128(r01, product); + r2 += (r01.m_high < product.m_high); + } + + result[k] = r01.m_low; + r01.m_low = r01.m_high; + r01.m_high = r2; + r2 = 0; + } + + result[ndigits * 2 - 1] = r01.m_low; +} + +static void vli_square(u64 *result, const u64 *left, unsigned int ndigits) +{ + uint128_t r01 = { 0, 0 }; + u64 r2 = 0; + int i, k; + + for (k = 0; k < ndigits * 2 - 1; k++) { + unsigned int min; + + if (k < ndigits) + min = 0; + else + min = (k + 1) - ndigits; + + for (i = min; i <= k && i <= k - i; i++) { + uint128_t product; + + product = mul_64_64(left[i], left[k - i]); + + if (i < k - i) { + r2 += product.m_high >> 63; + product.m_high = (product.m_high << 1) | + (product.m_low >> 63); + product.m_low <<= 1; + } + + r01 = add_128_128(r01, product); + r2 += (r01.m_high < product.m_high); + } + + result[k] = r01.m_low; + r01.m_low = r01.m_high; + r01.m_high = r2; + r2 = 0; + } + + result[ndigits * 2 - 1] = r01.m_low; +} + +/* Computes result = (left + right) % mod. + * Assumes that left < mod and right < mod, result != mod. + */ +static void vli_mod_add(u64 *result, const u64 *left, const u64 *right, + const u64 *mod, unsigned int ndigits) +{ + u64 carry; + + carry = vli_add(result, left, right, ndigits); + + /* result > mod (result = mod + remainder), so subtract mod to + * get remainder. + */ + if (carry || vli_cmp(result, mod, ndigits) >= 0) + vli_sub(result, result, mod, ndigits); +} + +/* Computes result = (left - right) % mod. + * Assumes that left < mod and right < mod, result != mod. 
+ */ +static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right, + const u64 *mod, unsigned int ndigits) +{ + u64 borrow = vli_sub(result, left, right, ndigits); + + /* In this case, p_result == -diff == (max int) - diff. + * Since -x % d == d - x, we can get the correct result from + * result + mod (with overflow). + */ + if (borrow) + vli_add(result, result, mod, ndigits); +} + +/* Computes p_result = p_product % curve_p. + * See algorithm 5 and 6 from + * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf + */ +static void vli_mmod_fast_192(u64 *result, const u64 *product, + const u64 *curve_prime, u64 *tmp) +{ + const unsigned int ndigits = 3; + int carry; + + vli_set(result, product, ndigits); + + vli_set(tmp, &product[3], ndigits); + carry = vli_add(result, result, tmp, ndigits); + + tmp[0] = 0; + tmp[1] = product[3]; + tmp[2] = product[4]; + carry += vli_add(result, result, tmp, ndigits); + + tmp[0] = tmp[1] = product[5]; + tmp[2] = 0; + carry += vli_add(result, result, tmp, ndigits); + + while (carry || vli_cmp(curve_prime, result, ndigits) != 1) + carry -= vli_sub(result, result, curve_prime, ndigits); +} + +/* Computes result = product % curve_prime + * from http://www.nsa.gov/ia/_files/nist-routines.pdf + */ +static void vli_mmod_fast_256(u64 *result, const u64 *product, + const u64 *curve_prime, u64 *tmp) +{ + int carry; + const unsigned int ndigits = 4; + + /* t */ + vli_set(result, product, ndigits); + + /* s1 */ + tmp[0] = 0; + tmp[1] = product[5] & 0xffffffff00000000ull; + tmp[2] = product[6]; + tmp[3] = product[7]; + carry = vli_lshift(tmp, tmp, 1, ndigits); + carry += vli_add(result, result, tmp, ndigits); + + /* s2 */ + tmp[1] = product[6] << 32; + tmp[2] = (product[6] >> 32) | (product[7] << 32); + tmp[3] = product[7] >> 32; + carry += vli_lshift(tmp, tmp, 1, ndigits); + carry += vli_add(result, result, tmp, ndigits); + + /* s3 */ + tmp[0] = product[4]; + tmp[1] = product[5] & 0xffffffff; + tmp[2] = 0; + tmp[3] = product[7]; + carry += vli_add(result, result, tmp, ndigits); + + /* s4 */ + tmp[0] = (product[4] >> 32) | (product[5] << 32); + tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull); + tmp[2] = product[7]; + tmp[3] = (product[6] >> 32) | (product[4] << 32); + carry += vli_add(result, result, tmp, ndigits); + + /* d1 */ + tmp[0] = (product[5] >> 32) | (product[6] << 32); + tmp[1] = (product[6] >> 32); + tmp[2] = 0; + tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32); + carry -= vli_sub(result, result, tmp, ndigits); + + /* d2 */ + tmp[0] = product[6]; + tmp[1] = product[7]; + tmp[2] = 0; + tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull); + carry -= vli_sub(result, result, tmp, ndigits); + + /* d3 */ + tmp[0] = (product[6] >> 32) | (product[7] << 32); + tmp[1] = (product[7] >> 32) | (product[4] << 32); + tmp[2] = (product[4] >> 32) | (product[5] << 32); + tmp[3] = (product[6] << 32); + carry -= vli_sub(result, result, tmp, ndigits); + + /* d4 */ + tmp[0] = product[7]; + tmp[1] = product[4] & 0xffffffff00000000ull; + tmp[2] = product[5]; + tmp[3] = product[6] & 0xffffffff00000000ull; + carry -= vli_sub(result, result, tmp, ndigits); + + if (carry < 0) { + do { + carry += vli_add(result, result, curve_prime, ndigits); + } while (carry < 0); + } else { + while (carry || vli_cmp(curve_prime, result, ndigits) != 1) + carry -= vli_sub(result, result, curve_prime, ndigits); + } +} + +/* Computes result = product % curve_prime + * from http://www.nsa.gov/ia/_files/nist-routines.pdf +*/ +static bool vli_mmod_fast(u64 
*result, u64 *product,
+			  const u64 *curve_prime, unsigned int ndigits)
+{
+	u64 tmp[2 * ndigits];
+
+	switch (ndigits) {
+	case 3:
+		vli_mmod_fast_192(result, product, curve_prime, tmp);
+		break;
+	case 4:
+		vli_mmod_fast_256(result, product, curve_prime, tmp);
+		break;
+	default:
+		pr_err("unsupported digits size!\n");
+		return false;
+	}
+
+	return true;
+}
+
+/* Computes result = (left * right) % curve_prime. */
+static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
+			      const u64 *curve_prime, unsigned int ndigits)
+{
+	u64 product[2 * ndigits];
+
+	vli_mult(product, left, right, ndigits);
+	vli_mmod_fast(result, product, curve_prime, ndigits);
+}
+
+/* Computes result = left^2 % curve_prime. */
+static void vli_mod_square_fast(u64 *result, const u64 *left,
+				const u64 *curve_prime, unsigned int ndigits)
+{
+	u64 product[2 * ndigits];
+
+	vli_square(product, left, ndigits);
+	vli_mmod_fast(result, product, curve_prime, ndigits);
+}
+
+#define EVEN(vli) (!(vli[0] & 1))
+/* Computes result = (1 / p_input) % mod. All VLIs are the same size.
+ * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
+ * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
+ */
+static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+			unsigned int ndigits)
+{
+	u64 a[ndigits], b[ndigits];
+	u64 u[ndigits], v[ndigits];
+	u64 carry;
+	int cmp_result;
+
+	if (vli_is_zero(input, ndigits)) {
+		vli_clear(result, ndigits);
+		return;
+	}
+
+	vli_set(a, input, ndigits);
+	vli_set(b, mod, ndigits);
+	vli_clear(u, ndigits);
+	u[0] = 1;
+	vli_clear(v, ndigits);
+
+	while ((cmp_result = vli_cmp(a, b, ndigits)) != 0) {
+		carry = 0;
+
+		if (EVEN(a)) {
+			vli_rshift1(a, ndigits);
+
+			if (!EVEN(u))
+				carry = vli_add(u, u, mod, ndigits);
+
+			vli_rshift1(u, ndigits);
+			if (carry)
+				u[ndigits - 1] |= 0x8000000000000000ull;
+		} else if (EVEN(b)) {
+			vli_rshift1(b, ndigits);
+
+			if (!EVEN(v))
+				carry = vli_add(v, v, mod, ndigits);
+
+			vli_rshift1(v, ndigits);
+			if (carry)
+				v[ndigits - 1] |= 0x8000000000000000ull;
+		} else if (cmp_result > 0) {
+			vli_sub(a, a, b, ndigits);
+			vli_rshift1(a, ndigits);
+
+			if (vli_cmp(u, v, ndigits) < 0)
+				vli_add(u, u, mod, ndigits);
+
+			vli_sub(u, u, v, ndigits);
+			if (!EVEN(u))
+				carry = vli_add(u, u, mod, ndigits);
+
+			vli_rshift1(u, ndigits);
+			if (carry)
+				u[ndigits - 1] |= 0x8000000000000000ull;
+		} else {
+			vli_sub(b, b, a, ndigits);
+			vli_rshift1(b, ndigits);
+
+			if (vli_cmp(v, u, ndigits) < 0)
+				vli_add(v, v, mod, ndigits);
+
+			vli_sub(v, v, u, ndigits);
+			if (!EVEN(v))
+				carry = vli_add(v, v, mod, ndigits);
+
+			vli_rshift1(v, ndigits);
+			if (carry)
+				v[ndigits - 1] |= 0x8000000000000000ull;
+		}
+	}
+
+	vli_set(result, u, ndigits);
+}
+
+/* ------ Point operations ------ */
+
+/* Returns true if p_point is the point at infinity, false otherwise. */
+static bool ecc_point_is_zero(const struct ecc_point *point)
+{
+	return (vli_is_zero(point->x, point->ndigits) &&
+		vli_is_zero(point->y, point->ndigits));
+}
+
+/* Point multiplication algorithm using Montgomery's ladder with co-Z
+ * coordinates. 
From http://eprint.iacr.org/2011/338.pdf + */ + +/* Double in place */ +static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1, + u64 *curve_prime, unsigned int ndigits) +{ + /* t1 = x, t2 = y, t3 = z */ + u64 t4[ndigits]; + u64 t5[ndigits]; + + if (vli_is_zero(z1, ndigits)) + return; + + /* t4 = y1^2 */ + vli_mod_square_fast(t4, y1, curve_prime, ndigits); + /* t5 = x1*y1^2 = A */ + vli_mod_mult_fast(t5, x1, t4, curve_prime, ndigits); + /* t4 = y1^4 */ + vli_mod_square_fast(t4, t4, curve_prime, ndigits); + /* t2 = y1*z1 = z3 */ + vli_mod_mult_fast(y1, y1, z1, curve_prime, ndigits); + /* t3 = z1^2 */ + vli_mod_square_fast(z1, z1, curve_prime, ndigits); + + /* t1 = x1 + z1^2 */ + vli_mod_add(x1, x1, z1, curve_prime, ndigits); + /* t3 = 2*z1^2 */ + vli_mod_add(z1, z1, z1, curve_prime, ndigits); + /* t3 = x1 - z1^2 */ + vli_mod_sub(z1, x1, z1, curve_prime, ndigits); + /* t1 = x1^2 - z1^4 */ + vli_mod_mult_fast(x1, x1, z1, curve_prime, ndigits); + + /* t3 = 2*(x1^2 - z1^4) */ + vli_mod_add(z1, x1, x1, curve_prime, ndigits); + /* t1 = 3*(x1^2 - z1^4) */ + vli_mod_add(x1, x1, z1, curve_prime, ndigits); + if (vli_test_bit(x1, 0)) { + u64 carry = vli_add(x1, x1, curve_prime, ndigits); + + vli_rshift1(x1, ndigits); + x1[ndigits - 1] |= carry << 63; + } else { + vli_rshift1(x1, ndigits); + } + /* t1 = 3/2*(x1^2 - z1^4) = B */ + + /* t3 = B^2 */ + vli_mod_square_fast(z1, x1, curve_prime, ndigits); + /* t3 = B^2 - A */ + vli_mod_sub(z1, z1, t5, curve_prime, ndigits); + /* t3 = B^2 - 2A = x3 */ + vli_mod_sub(z1, z1, t5, curve_prime, ndigits); + /* t5 = A - x3 */ + vli_mod_sub(t5, t5, z1, curve_prime, ndigits); + /* t1 = B * (A - x3) */ + vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits); + /* t4 = B * (A - x3) - y1^4 = y3 */ + vli_mod_sub(t4, x1, t4, curve_prime, ndigits); + + vli_set(x1, z1, ndigits); + vli_set(z1, y1, ndigits); + vli_set(y1, t4, ndigits); +} + +/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */ +static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime, + unsigned int ndigits) +{ + u64 t1[ndigits]; + + vli_mod_square_fast(t1, z, curve_prime, ndigits); /* z^2 */ + vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */ + vli_mod_mult_fast(t1, t1, z, curve_prime, ndigits); /* z^3 */ + vli_mod_mult_fast(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */ +} + +/* P = (x1, y1) => 2P, (x2, y2) => P' */ +static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2, + u64 *p_initial_z, u64 *curve_prime, + unsigned int ndigits) +{ + u64 z[ndigits]; + + vli_set(x2, x1, ndigits); + vli_set(y2, y1, ndigits); + + vli_clear(z, ndigits); + z[0] = 1; + + if (p_initial_z) + vli_set(z, p_initial_z, ndigits); + + apply_z(x1, y1, z, curve_prime, ndigits); + + ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits); + + apply_z(x2, y2, z, curve_prime, ndigits); +} + +/* Input P = (x1, y1, Z), Q = (x2, y2, Z) + * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3) + * or P => P', Q => P + Q + */ +static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, + unsigned int ndigits) +{ + /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */ + u64 t5[ndigits]; + + /* t5 = x2 - x1 */ + vli_mod_sub(t5, x2, x1, curve_prime, ndigits); + /* t5 = (x2 - x1)^2 = A */ + vli_mod_square_fast(t5, t5, curve_prime, ndigits); + /* t1 = x1*A = B */ + vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits); + /* t3 = x2*A = C */ + vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits); + /* t4 = y2 - y1 */ + vli_mod_sub(y2, y2, y1, curve_prime, ndigits); + /* t5 = (y2 - y1)^2 = D */ + 
vli_mod_square_fast(t5, y2, curve_prime, ndigits); + + /* t5 = D - B */ + vli_mod_sub(t5, t5, x1, curve_prime, ndigits); + /* t5 = D - B - C = x3 */ + vli_mod_sub(t5, t5, x2, curve_prime, ndigits); + /* t3 = C - B */ + vli_mod_sub(x2, x2, x1, curve_prime, ndigits); + /* t2 = y1*(C - B) */ + vli_mod_mult_fast(y1, y1, x2, curve_prime, ndigits); + /* t3 = B - x3 */ + vli_mod_sub(x2, x1, t5, curve_prime, ndigits); + /* t4 = (y2 - y1)*(B - x3) */ + vli_mod_mult_fast(y2, y2, x2, curve_prime, ndigits); + /* t4 = y3 */ + vli_mod_sub(y2, y2, y1, curve_prime, ndigits); + + vli_set(x2, t5, ndigits); +} + +/* Input P = (x1, y1, Z), Q = (x2, y2, Z) + * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3) + * or P => P - Q, Q => P + Q + */ +static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, + unsigned int ndigits) +{ + /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */ + u64 t5[ndigits]; + u64 t6[ndigits]; + u64 t7[ndigits]; + + /* t5 = x2 - x1 */ + vli_mod_sub(t5, x2, x1, curve_prime, ndigits); + /* t5 = (x2 - x1)^2 = A */ + vli_mod_square_fast(t5, t5, curve_prime, ndigits); + /* t1 = x1*A = B */ + vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits); + /* t3 = x2*A = C */ + vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits); + /* t4 = y2 + y1 */ + vli_mod_add(t5, y2, y1, curve_prime, ndigits); + /* t4 = y2 - y1 */ + vli_mod_sub(y2, y2, y1, curve_prime, ndigits); + + /* t6 = C - B */ + vli_mod_sub(t6, x2, x1, curve_prime, ndigits); + /* t2 = y1 * (C - B) */ + vli_mod_mult_fast(y1, y1, t6, curve_prime, ndigits); + /* t6 = B + C */ + vli_mod_add(t6, x1, x2, curve_prime, ndigits); + /* t3 = (y2 - y1)^2 */ + vli_mod_square_fast(x2, y2, curve_prime, ndigits); + /* t3 = x3 */ + vli_mod_sub(x2, x2, t6, curve_prime, ndigits); + + /* t7 = B - x3 */ + vli_mod_sub(t7, x1, x2, curve_prime, ndigits); + /* t4 = (y2 - y1)*(B - x3) */ + vli_mod_mult_fast(y2, y2, t7, curve_prime, ndigits); + /* t4 = y3 */ + vli_mod_sub(y2, y2, y1, curve_prime, ndigits); + + /* t7 = (y2 + y1)^2 = F */ + vli_mod_square_fast(t7, t5, curve_prime, ndigits); + /* t7 = x3' */ + vli_mod_sub(t7, t7, t6, curve_prime, ndigits); + /* t6 = x3' - B */ + vli_mod_sub(t6, t7, x1, curve_prime, ndigits); + /* t6 = (y2 + y1)*(x3' - B) */ + vli_mod_mult_fast(t6, t6, t5, curve_prime, ndigits); + /* t2 = y3' */ + vli_mod_sub(y1, t6, y1, curve_prime, ndigits); + + vli_set(x1, t7, ndigits); +} + +static void ecc_point_mult(struct ecc_point *result, + const struct ecc_point *point, const u64 *scalar, + u64 *initial_z, u64 *curve_prime, + unsigned int ndigits) +{ + /* R0 and R1 */ + u64 rx[2][ndigits]; + u64 ry[2][ndigits]; + u64 z[ndigits]; + int i, nb; + int num_bits = vli_num_bits(scalar, ndigits); + + vli_set(rx[1], point->x, ndigits); + vli_set(ry[1], point->y, ndigits); + + xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime, + ndigits); + + for (i = num_bits - 2; i > 0; i--) { + nb = !vli_test_bit(scalar, i); + xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime, + ndigits); + xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, + ndigits); + } + + nb = !vli_test_bit(scalar, 0); + xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime, + ndigits); + + /* Find final 1/Z value. 
*/ + /* X1 - X0 */ + vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits); + /* Yb * (X1 - X0) */ + vli_mod_mult_fast(z, z, ry[1 - nb], curve_prime, ndigits); + /* xP * Yb * (X1 - X0) */ + vli_mod_mult_fast(z, z, point->x, curve_prime, ndigits); + + /* 1 / (xP * Yb * (X1 - X0)) */ + vli_mod_inv(z, z, curve_prime, point->ndigits); + + /* yP / (xP * Yb * (X1 - X0)) */ + vli_mod_mult_fast(z, z, point->y, curve_prime, ndigits); + /* Xb * yP / (xP * Yb * (X1 - X0)) */ + vli_mod_mult_fast(z, z, rx[1 - nb], curve_prime, ndigits); + /* End 1/Z calculation */ + + xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits); + + apply_z(rx[0], ry[0], z, curve_prime, ndigits); + + vli_set(result->x, rx[0], ndigits); + vli_set(result->y, ry[0], ndigits); +} + +static inline void ecc_swap_digits(const u64 *in, u64 *out, + unsigned int ndigits) +{ + int i; + + for (i = 0; i < ndigits; i++) + out[i] = __swab64(in[ndigits - 1 - i]); +} + +int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, + const u8 *private_key, unsigned int private_key_len) +{ + int nbytes; + const struct ecc_curve *curve = ecc_get_curve(curve_id); + + if (!private_key) + return -EINVAL; + + nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + + if (private_key_len != nbytes) + return -EINVAL; + + if (vli_is_zero((const u64 *)&private_key[0], ndigits)) + return -EINVAL; + + /* Make sure the private key is in the range [1, n-1]. */ + if (vli_cmp(curve->n, (const u64 *)&private_key[0], ndigits) != 1) + return -EINVAL; + + return 0; +} + +int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits, + const u8 *private_key, unsigned int private_key_len, + u8 *public_key, unsigned int public_key_len) +{ + int ret = 0; + struct ecc_point *pk; + u64 priv[ndigits]; + unsigned int nbytes; + const struct ecc_curve *curve = ecc_get_curve(curve_id); + + if (!private_key || !curve) { + ret = -EINVAL; + goto out; + } + + ecc_swap_digits((const u64 *)private_key, priv, ndigits); + + pk = ecc_alloc_point(ndigits); + if (!pk) { + ret = -ENOMEM; + goto out; + } + + ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits); + if (ecc_point_is_zero(pk)) { + ret = -EAGAIN; + goto err_free_point; + } + + nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + ecc_swap_digits(pk->x, (u64 *)public_key, ndigits); + ecc_swap_digits(pk->y, (u64 *)&public_key[nbytes], ndigits); + +err_free_point: + ecc_free_point(pk); +out: + return ret; +} + +int ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, + const u8 *private_key, unsigned int private_key_len, + const u8 *public_key, unsigned int public_key_len, + u8 *secret, unsigned int secret_len) +{ + int ret = 0; + struct ecc_point *product, *pk; + u64 priv[ndigits]; + u64 rand_z[ndigits]; + unsigned int nbytes; + const struct ecc_curve *curve = ecc_get_curve(curve_id); + + if (!private_key || !public_key || !curve) { + ret = -EINVAL; + goto out; + } + + nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + + get_random_bytes(rand_z, nbytes); + + pk = ecc_alloc_point(ndigits); + if (!pk) { + ret = -ENOMEM; + goto out; + } + + product = ecc_alloc_point(ndigits); + if (!product) { + ret = -ENOMEM; + goto err_alloc_product; + } + + ecc_swap_digits((const u64 *)public_key, pk->x, ndigits); + ecc_swap_digits((const u64 *)&public_key[nbytes], pk->y, ndigits); + ecc_swap_digits((const u64 *)private_key, priv, ndigits); + + ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits); + + ecc_swap_digits(product->x, (u64 *)secret, ndigits); + + if (ecc_point_is_zero(product)) + ret = -EFAULT; + + 
ecc_free_point(product);
+err_alloc_product:
+	ecc_free_point(pk);
+out:
+	return ret;
+}
diff --git a/crypto/ecc.h b/crypto/ecc.h
new file mode 100644
index 000000000000..b5db4b989f3c
--- /dev/null
+++ b/crypto/ecc.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _CRYPTO_ECC_H
+#define _CRYPTO_ECC_H
+
+#define ECC_MAX_DIGITS	4 /* 256 */
+
+#define ECC_DIGITS_TO_BYTES_SHIFT 3
+
+/**
+ * ecc_is_key_valid() - Validate a given ECDH private key
+ *
+ * @curve_id:		id representing the curve to use
+ * @ndigits:		curve number of digits
+ * @private_key:	private key to be used for the given curve
+ * @private_key_len:	private key len
+ *
+ * Returns 0 if the key is acceptable, a negative value otherwise
+ */
+int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
+		     const u8 *private_key, unsigned int private_key_len);
+
+/**
+ * ecdh_make_pub_key() - Compute an ECC public key
+ *
+ * @curve_id:		id representing the curve to use
+ * @ndigits:		curve number of digits
+ * @private_key:	pregenerated private key for the given curve
+ * @private_key_len:	length of private_key
+ * @public_key:		buffer for storing the public key generated
+ * @public_key_len:	length of the public_key buffer
+ *
+ * Returns 0 if the public key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecdh_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
+		      const u8 *private_key, unsigned int private_key_len,
+		      u8 *public_key, unsigned int public_key_len);

+/**
+ * ecdh_shared_secret() - Compute a shared secret
+ *
+ * @curve_id:		id representing the curve to use
+ * @ndigits:		curve number of digits
+ * @private_key:	private key of part A
+ * @private_key_len:	length of private_key
+ * @public_key:		public key of counterpart B
+ * @public_key_len:	length of public_key
+ * @secret:		buffer for storing the calculated shared secret
+ * @secret_len:		length of the secret buffer
+ *
+ * Note: It is recommended that you hash the result of ecdh_shared_secret
+ * before using it for symmetric encryption or HMAC.
+ *
+ * Returns 0 if the shared secret was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
+		       const u8 *private_key, unsigned int private_key_len,
+		       const u8 *public_key, unsigned int public_key_len,
+		       u8 *secret, unsigned int secret_len);
+#endif
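Before the curve definitions, a brief illustration of the raw helpers declared above (a sketch, not part of the patch): with ndigits = 3 for P-192, scalars are 3 << ECC_DIGITS_TO_BYTES_SHIFT = 24 bytes, and public keys carry both coordinates, i.e. 48 bytes. Per the kernel-doc note, the caller should hash the resulting shared secret before using it as key material. The function name is invented for illustration.

/* Hypothetical caller; priv and peer_pub are byte strings as in the API. */
static int p192_agreement_sketch(const u8 *priv, const u8 *peer_pub,
				 u8 *secret /* 24 bytes */)
{
	u8 pub[48];
	int err;

	err = ecc_is_key_valid(ECC_CURVE_NIST_P192, 3, priv, 24);
	if (err)
		return err;

	/* our public key ya, e.g. to send to the counterpart */
	err = ecdh_make_pub_key(ECC_CURVE_NIST_P192, 3, priv, 24,
				pub, sizeof(pub));
	if (err)
		return err;

	/* shared secret from the counterpart's public key */
	return ecdh_shared_secret(ECC_CURVE_NIST_P192, 3, priv, 24,
				  peer_pub, 48, secret, 24);
}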
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
new file mode 100644
index 000000000000..03ae5f714028
--- /dev/null
+++ b/crypto/ecc_curve_defs.h
@@ -0,0 +1,57 @@
+#ifndef _CRYPTO_ECC_CURVE_DEFS_H
+#define _CRYPTO_ECC_CURVE_DEFS_H
+
+struct ecc_point {
+	u64 *x;
+	u64 *y;
+	u8 ndigits;
+};
+
+struct ecc_curve {
+	char *name;
+	struct ecc_point g;
+	u64 *p;
+	u64 *n;
+};
+
+/* NIST P-192 */
+static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
+				0x188DA80EB03090F6ull };
+static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
+				0x07192B95FFC8DA78ull };
+static u64 nist_p192_p[] = { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull,
+				0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
+				0xFFFFFFFFFFFFFFFFull };
+static struct ecc_curve nist_p192 = {
+	.name = "nist_192",
+	.g = {
+		.x = nist_p192_g_x,
+		.y = nist_p192_g_y,
+		.ndigits = 3,
+	},
+	.p = nist_p192_p,
+	.n = nist_p192_n
+};
+
+/* NIST P-256 */
+static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
+				0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
+static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
+				0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull };
+static u64 nist_p256_p[] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
+				0x0000000000000000ull, 0xFFFFFFFF00000001ull };
+static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
+				0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
+static struct ecc_curve nist_p256 = {
+	.name = "nist_256",
+	.g = {
+		.x = nist_p256_g_x,
+		.y = nist_p256_g_y,
+		.ndigits = 4,
+	},
+	.p = nist_p256_p,
+	.n = nist_p256_n
+};
+
+#endif
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
new file mode 100644
index 000000000000..d3a9eeca4b32
--- /dev/null
+++ b/crypto/ecdh.c
@@ -0,0 +1,151 @@
+/* ECDH key-agreement protocol
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */ + +#include +#include +#include +#include +#include +#include "ecc.h" + +struct ecdh_ctx { + unsigned int curve_id; + unsigned int ndigits; + u64 private_key[ECC_MAX_DIGITS]; + u64 public_key[2 * ECC_MAX_DIGITS]; + u64 shared_secret[ECC_MAX_DIGITS]; +}; + +static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm) +{ + return kpp_tfm_ctx(tfm); +} + +static unsigned int ecdh_supported_curve(unsigned int curve_id) +{ + switch (curve_id) { + case ECC_CURVE_NIST_P192: return 3; + case ECC_CURVE_NIST_P256: return 4; + default: return 0; + } +} + +static int ecdh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len) +{ + struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); + struct ecdh params; + unsigned int ndigits; + + if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0) + return -EINVAL; + + ndigits = ecdh_supported_curve(params.curve_id); + if (!ndigits) + return -EINVAL; + + ctx->curve_id = params.curve_id; + ctx->ndigits = ndigits; + + if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, + (const u8 *)params.key, params.key_size) < 0) + return -EINVAL; + + memcpy(ctx->private_key, params.key, params.key_size); + + return 0; +} + +static int ecdh_compute_value(struct kpp_request *req) +{ + int ret = 0; + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); + size_t copied, nbytes; + void *buf; + + nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + + if (req->src) { + copied = sg_copy_to_buffer(req->src, 1, ctx->public_key, + 2 * nbytes); + if (copied != 2 * nbytes) + return -EINVAL; + + ret = ecdh_shared_secret(ctx->curve_id, ctx->ndigits, + (const u8 *)ctx->private_key, nbytes, + (const u8 *)ctx->public_key, 2 * nbytes, + (u8 *)ctx->shared_secret, nbytes); + + buf = ctx->shared_secret; + } else { + ret = ecdh_make_pub_key(ctx->curve_id, ctx->ndigits, + (const u8 *)ctx->private_key, nbytes, + (u8 *)ctx->public_key, + sizeof(ctx->public_key)); + buf = ctx->public_key; + /* Public part is a point thus it has both coordinates */ + nbytes *= 2; + } + + if (ret < 0) + return ret; + + copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes); + if (copied != nbytes) + return -EINVAL; + + return ret; +} + +static int ecdh_max_size(struct crypto_kpp *tfm) +{ + struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); + int nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + + /* Public key is made of two coordinates */ + return 2 * nbytes; +} + +static void no_exit_tfm(struct crypto_kpp *tfm) +{ + return; +} + +static struct kpp_alg ecdh = { + .set_secret = ecdh_set_secret, + .generate_public_key = ecdh_compute_value, + .compute_shared_secret = ecdh_compute_value, + .max_size = ecdh_max_size, + .exit = no_exit_tfm, + .base = { + .cra_name = "ecdh", + .cra_driver_name = "ecdh-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecdh_ctx), + }, +}; + +static int ecdh_init(void) +{ + return crypto_register_kpp(&ecdh); +} + +static void ecdh_exit(void) +{ + crypto_unregister_kpp(&ecdh); +} + +module_init(ecdh_init); +module_exit(ecdh_exit); +MODULE_ALIAS_CRYPTO("ecdh"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ECDH generic algorithm"); diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c new file mode 100644 index 000000000000..3cd8a2414e60 --- /dev/null +++ b/crypto/ecdh_helper.c @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published 
by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#include +#include +#include +#include +#include +#include + +#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short)) + +static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz) +{ + memcpy(dst, src, sz); + return dst + sz; +} + +static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz) +{ + memcpy(dst, src, sz); + return src + sz; +} + +int crypto_ecdh_key_len(const struct ecdh *params) +{ + return ECDH_KPP_SECRET_MIN_SIZE + params->key_size; +} +EXPORT_SYMBOL_GPL(crypto_ecdh_key_len); + +int crypto_ecdh_encode_key(char *buf, unsigned int len, + const struct ecdh *params) +{ + u8 *ptr = buf; + struct kpp_secret secret = { + .type = CRYPTO_KPP_SECRET_TYPE_ECDH, + .len = len + }; + + if (unlikely(!buf)) + return -EINVAL; + + if (len != crypto_ecdh_key_len(params)) + return -EINVAL; + + ptr = ecdh_pack_data(ptr, &secret, sizeof(secret)); + ptr = ecdh_pack_data(ptr, ¶ms->curve_id, sizeof(params->curve_id)); + ptr = ecdh_pack_data(ptr, ¶ms->key_size, sizeof(params->key_size)); + ecdh_pack_data(ptr, params->key, params->key_size); + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_ecdh_encode_key); + +int crypto_ecdh_decode_key(const char *buf, unsigned int len, + struct ecdh *params) +{ + const u8 *ptr = buf; + struct kpp_secret secret; + + if (unlikely(!buf || len < ECDH_KPP_SECRET_MIN_SIZE)) + return -EINVAL; + + ptr = ecdh_unpack_data(&secret, ptr, sizeof(secret)); + if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH) + return -EINVAL; + + ptr = ecdh_unpack_data(¶ms->curve_id, ptr, sizeof(params->curve_id)); + ptr = ecdh_unpack_data(¶ms->key_size, ptr, sizeof(params->key_size)); + if (secret.len != crypto_ecdh_key_len(params)) + return -EINVAL; + + /* Don't allocate memory. 
Set pointer to data + * within the given buffer + */ + params->key = (void *)ptr; + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_ecdh_decode_key); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index ff79eb887fd0..537fdc380a7b 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3300,6 +3300,16 @@ static const struct alg_test_desc alg_test_descs[] = { } } } + }, { + .alg = "ecdh", + .test = alg_test_kpp, + .fips_allowed = 1, + .suite = { + .kpp = { + .vecs = ecdh_tv_template, + .count = ECDH_TEST_VECTORS + } + } }, { .alg = "gcm(aes)", .test = alg_test_aead, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 78e874eca031..7358931b3082 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -560,6 +560,99 @@ struct kpp_testvec dh_tv_template[] = { } }; +#ifdef CONFIG_CRYPTO_FIPS +#define ECDH_TEST_VECTORS 1 +#else +#define ECDH_TEST_VECTORS 2 +#endif +struct kpp_testvec ecdh_tv_template[] = { + { +#ifndef CONFIG_CRYPTO_FIPS + .secret = +#ifdef __LITTLE_ENDIAN + "\x02\x00" /* type */ + "\x20\x00" /* len */ + "\x01\x00" /* curve_id */ + "\x18\x00" /* key_size */ +#else + "\x00\x02" /* type */ + "\x00\x20" /* len */ + "\x00\x01" /* curve_id */ + "\x00\x18" /* key_size */ +#endif + "\xb5\x05\xb1\x71\x1e\xbf\x8c\xda" + "\x4e\x19\x1e\x62\x1f\x23\x23\x31" + "\x36\x1e\xd3\x84\x2f\xcc\x21\x72", + .b_public = + "\xc3\xba\x67\x4b\x71\xec\xd0\x76" + "\x7a\x99\x75\x64\x36\x13\x9a\x94" + "\x5d\x8b\xdc\x60\x90\x91\xfd\x3f" + "\xb0\x1f\x8a\x0a\x68\xc6\x88\x6e" + "\x83\x87\xdd\x67\x09\xf8\x8d\x96" + "\x07\xd6\xbd\x1c\xe6\x8d\x9d\x67", + .expected_a_public = + "\x1a\x04\xdb\xa5\xe1\xdd\x4e\x79" + "\xa3\xe6\xef\x0e\x5c\x80\x49\x85" + "\xfa\x78\xb4\xef\x49\xbd\x4c\x7c" + "\x22\x90\x21\x02\xf9\x1b\x81\x5d" + "\x0c\x8a\xa8\x98\xd6\x27\x69\x88" + "\x5e\xbc\x94\xd8\x15\x9e\x21\xce", + .expected_ss = + "\xf4\x57\xcc\x4f\x1f\x4e\x31\xcc" + "\xe3\x40\x60\xc8\x06\x93\xc6\x2e" + "\x99\x80\x81\x28\xaf\xc5\x51\x74", + .secret_size = 32, + .b_public_size = 48, + .expected_a_public_size = 48, + .expected_ss_size = 24 + }, { +#endif + .secret = +#ifdef __LITTLE_ENDIAN + "\x02\x00" /* type */ + "\x28\x00" /* len */ + "\x02\x00" /* curve_id */ + "\x20\x00" /* key_size */ +#else + "\x00\x02" /* type */ + "\x00\x28" /* len */ + "\x00\x02" /* curve_id */ + "\x00\x20" /* key_size */ +#endif + "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83" + "\xf6\x62\x1b\x6e\x43\x84\x3a\xa3" + "\x8b\xe0\x86\xc3\x20\x19\xda\x92" + "\x50\x53\x03\xe1\xc0\xea\xb8\x82", + .expected_a_public = + "\x1a\x7f\xeb\x52\x00\xbd\x3c\x31" + "\x7d\xb6\x70\xc1\x86\xa6\xc7\xc4" + "\x3b\xc5\x5f\x6c\x6f\x58\x3c\xf5" + "\xb6\x63\x82\x77\x33\x24\xa1\x5f" + "\x6a\xca\x43\x6f\xf7\x7e\xff\x02" + "\x37\x08\xcc\x40\x5e\x7a\xfd\x6a" + "\x6a\x02\x6e\x41\x87\x68\x38\x77" + "\xfa\xa9\x44\x43\x2d\xef\x09\xdf", + .expected_ss = + "\xea\x17\x6f\x7e\x6e\x57\x26\x38" + "\x8b\xfb\x41\xeb\xba\xc8\x6d\xa5" + "\xa8\x72\xd1\xff\xc9\x47\x3d\xaa" + "\x58\x43\x9f\x34\x0f\x8c\xf3\xc9", + .b_public = + "\xcc\xb4\xda\x74\xb1\x47\x3f\xea" + "\x6c\x70\x9e\x38\x2d\xc7\xaa\xb7" + "\x29\xb2\x47\x03\x19\xab\xdd\x34" + "\xbd\xa8\x2c\x93\xe1\xa4\x74\xd9" + "\x64\x63\xf7\x70\x20\x2f\xa4\xe6" + "\x9f\x4a\x38\xcc\xc0\x2c\x49\x2f" + "\xb1\x32\xbb\xaf\x22\x61\xda\xcb" + "\x6f\xdb\xa9\xaa\xfc\x77\x81\xf3", + .secret_size = 40, + .b_public_size = 64, + .expected_a_public_size = 64, + .expected_ss_size = 32 + } +}; + /* * MD4 test vectors from RFC1320 */ diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h new file mode 100644 index 000000000000..84bad548d194 --- /dev/null +++ b/include/crypto/ecdh.h @@ -0,0 
+1,30 @@ +/* + * ECDH params to be used with kpp API + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_ECDH_ +#define _CRYPTO_ECDH_ + +/* Curves IDs */ +#define ECC_CURVE_NIST_P192 0x0001 +#define ECC_CURVE_NIST_P256 0x0002 + +struct ecdh { + unsigned short curve_id; + char *key; + unsigned short key_size; +}; + +int crypto_ecdh_key_len(const struct ecdh *params); +int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p); +int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p); + +#endif diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index 937ac122354a..30791f75c180 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -243,6 +243,7 @@ static inline void kpp_request_set_output(struct kpp_request *req, enum { CRYPTO_KPP_SECRET_TYPE_UNKNOWN, CRYPTO_KPP_SECRET_TYPE_DH, + CRYPTO_KPP_SECRET_TYPE_ECDH, }; /** -- cgit v1.2.3-70-g09d2 From b578456c342ecd4266dac96c87ca803602ea9c48 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Wed, 22 Jun 2016 19:26:06 +0200 Subject: crypto: jitterentropy - use ktime_get_ns as fallback As part of the Y2038 development, __getnstimeofday is not supposed to be used any more. It is now replaced with ktime_get_ns. The Jitter RNG uses the time stamp to measure the execution time of a given code path and tries to detect variations in the execution time. Therefore, the only requirement the Jitter RNG has, is a sufficient high resolution to detect these variations. The change was tested on x86 to show an identical behavior as RDTSC. The used test code simply measures the execution time of the heart of the RNG: jent_get_nstime(&time); jent_memaccess(ec, min); jent_fold_time(NULL, time, &folded, min); jent_get_nstime(&time2); return ((time2 - time)); Signed-off-by: Stephan Mueller Acked-by: Arnd Bergmann Signed-off-by: Herbert Xu --- crypto/jitterentropy-kcapi.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) (limited to 'crypto') diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 597cedd3531c..c4938497eedb 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -87,24 +87,28 @@ void jent_memcpy(void *dest, const void *src, unsigned int n) memcpy(dest, src, n); } +/* + * Obtain a high-resolution time stamp value. The time stamp is used to measure + * the execution time of a given code path and its variations. Hence, the time + * stamp must have a sufficiently high resolution. + * + * Note, if the function returns zero because a given architecture does not + * implement a high-resolution time stamp, the RNG code's runtime test + * will detect it and will not produce output. + */ void jent_get_nstime(__u64 *out) { - struct timespec ts; __u64 tmp = 0; tmp = random_get_entropy(); /* - * If random_get_entropy does not return a value (which is possible on, - * for example, MIPS), invoke __getnstimeofday + * If random_get_entropy does not return a value, i.e. it is not + * implemented for a given architecture, use a clock source. * hoping that there are timers we can work with. 
*/ - if ((0 == tmp) && - (0 == __getnstimeofday(&ts))) { - tmp = ts.tv_sec; - tmp = tmp << 32; - tmp = tmp | ts.tv_nsec; - } + if (tmp == 0) + tmp = ktime_get_ns(); *out = tmp; } -- cgit v1.2.3-70-g09d2 From 8f44df154de79a61b0e86734f51737b8cccf8dfe Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Fri, 24 Jun 2016 16:20:22 +1000 Subject: crypto: ecdh - make ecdh_shared_secret unique There is another ecdh_shared_secret in net/bluetooth/ecc.c Fixes: 3c4b23901a0c ("crypto: ecdh - Add ECDH software support") Signed-off-by: Stephen Rothwell Signed-off-by: Herbert Xu --- crypto/ecc.c | 2 +- crypto/ecc.h | 6 +++--- crypto/ecdh.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/ecc.c b/crypto/ecc.c index 9aedec6bbe72..414c78a9c214 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -966,7 +966,7 @@ out: return ret; } -int ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, +int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, const u8 *private_key, unsigned int private_key_len, const u8 *public_key, unsigned int public_key_len, u8 *secret, unsigned int secret_len) diff --git a/crypto/ecc.h b/crypto/ecc.h index b5db4b989f3c..663d598c7406 100644 --- a/crypto/ecc.h +++ b/crypto/ecc.h @@ -60,7 +60,7 @@ int ecdh_make_pub_key(const unsigned int curve_id, unsigned int ndigits, u8 *public_key, unsigned int public_key_len); /** - * ecdh_shared_secret() - Compute a shared secret + * crypto_ecdh_shared_secret() - Compute a shared secret * * @curve_id: id representing the curve to use * @private_key: private key of part A @@ -70,13 +70,13 @@ int ecdh_make_pub_key(const unsigned int curve_id, unsigned int ndigits, * @secret: buffer for storing the calculated shared secret * @secret_len: length of the secret buffer * - * Note: It is recommended that you hash the result of ecdh_shared_secret + * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret * before using it for symmetric encryption or HMAC. * * Returns 0 if the shared secret was generated successfully, a negative value * if an error occurred. */ -int ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, +int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, const u8 *private_key, unsigned int private_key_len, const u8 *public_key, unsigned int public_key_len, u8 *secret, unsigned int secret_len); diff --git a/crypto/ecdh.c b/crypto/ecdh.c index d3a9eeca4b32..3de289806d67 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -79,7 +79,7 @@ static int ecdh_compute_value(struct kpp_request *req) if (copied != 2 * nbytes) return -EINVAL; - ret = ecdh_shared_secret(ctx->curve_id, ctx->ndigits, + ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits, (const u8 *)ctx->private_key, nbytes, (const u8 *)ctx->public_key, 2 * nbytes, (u8 *)ctx->shared_secret, nbytes); -- cgit v1.2.3-70-g09d2 From 9be7e24483998fa6a34c2b191c4798b8189f8f9e Mon Sep 17 00:00:00 2001 From: Megha Dey Date: Thu, 23 Jun 2016 18:40:43 -0700 Subject: crypto: sha256-mb - Enable multibuffer support Add the config CRYPTO_SHA256_MB which will enable the computation using the SHA256 multi-buffer algorithm. 
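For context, kernel users do not request the multi-buffer implementation by name: they allocate the generic "sha256" ahash and the crypto API selects the highest-priority provider, which is the multi-buffer driver once this option is enabled. A minimal sketch of checking which provider was picked (the driver name in the comment is an assumption by analogy with the existing SHA1 multi-buffer code, not taken from this patch):

        static int sha256_provider_check(void)
        {
                struct crypto_ahash *tfm;

                /* Selects the highest-priority registered "sha256"
                 * implementation; with CRYPTO_SHA256_MB enabled this is
                 * expected to be the multi-buffer driver (assumed driver
                 * name: "sha256_mb"). */
                tfm = crypto_alloc_ahash("sha256", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                pr_info("sha256 provider: %s\n",
                        crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)));

                crypto_free_ahash(tfm);
                return 0;
        }
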
Signed-off-by: Megha Dey Reviewed-by: Fenghua Yu Reviewed-by: Tim Chen Signed-off-by: Herbert Xu --- crypto/Kconfig | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 5baaa9d87574..d8cc0f085278 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -704,6 +704,22 @@ config CRYPTO_SHA1_MB lanes remain unfilled, a flush operation will be initiated to process the crypto jobs, adding a slight latency. +config CRYPTO_SHA256_MB + tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)" + depends on X86 && 64BIT + select CRYPTO_SHA256 + select CRYPTO_HASH + select CRYPTO_MCRYPTD + help + SHA-256 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented + using multi-buffer technique. This algorithm computes on + multiple data lanes concurrently with SIMD instructions for + better throughput. It should not be enabled by default but + used when there is a significant amount of work to keep + the data lanes filled to get performance benefit. If the data + lanes remain unfilled, a flush operation will be initiated to + process the crypto jobs, adding a slight latency. + config CRYPTO_SHA256 tristate "SHA224 and SHA256 digest algorithm" select CRYPTO_HASH -- cgit v1.2.3-70-g09d2 From 087bcd225c5656a0beac02739471085d000c9680 Mon Sep 17 00:00:00 2001 From: Megha Dey Date: Thu, 23 Jun 2016 18:40:47 -0700 Subject: crypto: tcrypt - Add speed tests for SHA multibuffer algorithms The existing test suite to calculate the speed of the SHA algorithms assumes serial (single buffer) computation of data. With the SHA multibuffer algorithms, we work on 8 lanes of data in parallel. Hence, the need to introduce a new test suite to calculate the speed for these algorithms. Signed-off-by: Megha Dey Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 118 insertions(+) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 4675459e82da..6ef78157a0ab 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -578,6 +578,117 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret) return ret; } +char ptext[4096]; +struct scatterlist sg[8][8]; +char result[8][64]; +struct ahash_request *req[8]; +struct tcrypt_result tresult[8]; +char *xbuf[8][XBUFSIZE]; +cycles_t start[8], end[8], mid; + +static void test_mb_ahash_speed(const char *algo, unsigned int sec, + struct hash_speed *speed) +{ + unsigned int i, j, k; + void *hash_buff; + int ret = -ENOMEM; + struct crypto_ahash *tfm; + + tfm = crypto_alloc_ahash(algo, 0, 0); + if (IS_ERR(tfm)) { + pr_err("failed to load transform for %s: %ld\n", + algo, PTR_ERR(tfm)); + return; + } + for (i = 0; i < 8; ++i) { + if (testmgr_alloc_buf(xbuf[i])) + goto out_nobuf; + + init_completion(&tresult[i].completion); + + req[i] = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req[i]) { + printk(KERN_ERR "alg: hash: Failed to allocate " + "request for %s\n", algo); + goto out_noreq; + } + ahash_request_set_callback(req[i], CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &tresult[i]); + + hash_buff = xbuf[i][0]; + memcpy(hash_buff, ptext, 4096); + } + + j = 0; + + printk(KERN_INFO "\ntesting speed of %s (%s)\n", algo, + get_driver_name(crypto_ahash, tfm)); + + for (i = 0; speed[i].blen != 0; i++) { + if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { + printk(KERN_ERR + "template (%u) too big for tvmem (%lu)\n", + speed[i].blen, TVMEMSIZE * PAGE_SIZE); + goto out; + } + + if (speed[i].klen) + 
crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen); + + for (k = 0; k < 8; ++k) { + sg_init_one(&sg[k][0], (void *) xbuf[k][0], + speed[i].blen); + ahash_request_set_crypt(req[k], sg[k], + result[k], speed[i].blen); + } + + printk(KERN_INFO "test%3u " + "(%5u byte blocks,%5u bytes per update,%4u updates): ", + i, speed[i].blen, speed[i].plen, + speed[i].blen / speed[i].plen); + + for (k = 0; k < 8; ++k) { + start[k] = get_cycles(); + ret = crypto_ahash_digest(req[k]); + if (ret == -EBUSY || ret == -EINPROGRESS) + continue; + if (ret) { + printk(KERN_ERR + "alg (%s) something wrong, ret = %d ...\n", + algo, ret); + goto out; + } + } + mid = get_cycles(); + + for (k = 0; k < 8; ++k) { + struct tcrypt_result *tr = &tresult[k]; + + ret = wait_for_completion_interruptible + (&tr->completion); + if (ret) + printk(KERN_ERR + "alg(%s): hash: digest failed\n", algo); + end[k] = get_cycles(); + } + + printk("\nBlock: %lld cycles (%lld cycles/byte), %d bytes\n", + (s64) (end[7]-start[0])/1, + (s64) (end[7]-start[0])/(8*speed[i].blen), + 8*speed[i].blen); + } + ret = 0; + +out: + for (k = 0; k < 8; ++k) + ahash_request_free(req[k]); +out_noreq: + for (k = 0; k < 8; ++k) + testmgr_free_buf(xbuf[k]); +out_nobuf: + return; +} + static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, char *out, int secs) { @@ -1820,6 +1931,13 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) test_ahash_speed("sha3-512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; + case 422: + test_mb_ahash_speed("sha1", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 423: + test_mb_ahash_speed("sha256", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; case 499: break; -- cgit v1.2.3-70-g09d2 From 026bb8aaf5162b881fdd56f12fa8a6f5a052e097 Mon Sep 17 00:00:00 2001 From: Megha Dey Date: Mon, 27 Jun 2016 10:20:05 -0700 Subject: crypto: sha512-mb - Enable SHA512 multibuffer support Add the config CRYPTO_SHA512_MB which will enable the computation using the SHA512 multi-buffer algorithm. Signed-off-by: Megha Dey Reviewed-by: Fenghua Yu Reviewed-by: Tim Chen Signed-off-by: Herbert Xu --- crypto/Kconfig | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index d8cc0f085278..62fcbb923753 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -720,6 +720,22 @@ config CRYPTO_SHA256_MB lanes remain unfilled, a flush operation will be initiated to process the crypto jobs, adding a slight latency. +config CRYPTO_SHA512_MB + tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)" + depends on X86 && 64BIT + select CRYPTO_SHA512 + select CRYPTO_HASH + select CRYPTO_MCRYPTD + help + SHA-512 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented + using multi-buffer technique. This algorithm computes on + multiple data lanes concurrently with SIMD instructions for + better throughput. It should not be enabled by default but + used when there is a significant amount of work to keep + the data lanes filled to get performance benefit. If the data + lanes remain unfilled, a flush operation will be initiated to + process the crypto jobs, adding a slight latency. 
+ config CRYPTO_SHA256 tristate "SHA224 and SHA256 digest algorithm" select CRYPTO_HASH -- cgit v1.2.3-70-g09d2 From 14009c4bde13d191206d58d0513a9179568a0125 Mon Sep 17 00:00:00 2001 From: Megha Dey Date: Mon, 27 Jun 2016 10:20:09 -0700 Subject: crypto: tcrypt - Add new mode for sha512_mb Add a new mode to calculate the speed of the sha512_mb algorithm Signed-off-by: Megha Dey Reviewed-by: Fenghua Yu Reviewed-by: Tim Chen Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 6ef78157a0ab..158c164cbddf 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1939,6 +1939,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) test_mb_ahash_speed("sha256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; + case 424: + test_mb_ahash_speed("sha512", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + case 499: break; -- cgit v1.2.3-70-g09d2 From f83f5b12ee674c187f329ef8eb9352dc45dcb25f Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 28 Jun 2016 09:23:06 +0200 Subject: crypto: tcrypt - Fix mixing printk/pr_err and obvious indentation issues The recently added test_mb_ahash_speed() has clearly serious coding style issues. Try to fix some of them: 1. Don't mix pr_err() and printk(); 2. Don't wrap strings; 3. Properly align goto statement in if() block; 4. Align wrapped arguments on new line; 5. Don't wrap functions on first argument; Signed-off-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 158c164cbddf..8893ba5321b5 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -608,12 +608,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, req[i] = ahash_request_alloc(tfm, GFP_KERNEL); if (!req[i]) { - printk(KERN_ERR "alg: hash: Failed to allocate " - "request for %s\n", algo); + pr_err("alg: hash: Failed to allocate request for %s\n", + algo); goto out_noreq; } ahash_request_set_callback(req[i], CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &tresult[i]); + tcrypt_complete, &tresult[i]); hash_buff = xbuf[i][0]; memcpy(hash_buff, ptext, 4096); @@ -621,15 +621,14 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, j = 0; - printk(KERN_INFO "\ntesting speed of %s (%s)\n", algo, - get_driver_name(crypto_ahash, tfm)); + pr_err("\ntesting speed of %s (%s)\n", algo, + get_driver_name(crypto_ahash, tfm)); for (i = 0; speed[i].blen != 0; i++) { if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { - printk(KERN_ERR - "template (%u) too big for tvmem (%lu)\n", - speed[i].blen, TVMEMSIZE * PAGE_SIZE); - goto out; + pr_err("template (%u) too big for tvmem (%lu)\n", + speed[i].blen, TVMEMSIZE * PAGE_SIZE); + goto out; } if (speed[i].klen) @@ -637,13 +636,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, for (k = 0; k < 8; ++k) { sg_init_one(&sg[k][0], (void *) xbuf[k][0], - speed[i].blen); + speed[i].blen); ahash_request_set_crypt(req[k], sg[k], result[k], speed[i].blen); } - printk(KERN_INFO "test%3u " - "(%5u byte blocks,%5u bytes per update,%4u updates): ", + pr_err("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ", i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); @@ -653,9 +651,8 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, if (ret == 
-EBUSY || ret == -EINPROGRESS) continue; if (ret) { - printk(KERN_ERR - "alg (%s) something wrong, ret = %d ...\n", - algo, ret); + pr_err("alg (%s) something wrong, ret = %d ...\n", + algo, ret); goto out; } } @@ -664,11 +661,9 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, for (k = 0; k < 8; ++k) { struct tcrypt_result *tr = &tresult[k]; - ret = wait_for_completion_interruptible - (&tr->completion); + ret = wait_for_completion_interruptible(&tr->completion); if (ret) - printk(KERN_ERR - "alg(%s): hash: digest failed\n", algo); + pr_err("alg(%s): hash: digest failed\n", algo); end[k] = get_cycles(); } -- cgit v1.2.3-70-g09d2 From f8de55b6252b30f9bc1dcb62964f7b49e00983fa Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 28 Jun 2016 16:41:38 +0800 Subject: crypto: tcrypt - Use unsigned long for mb ahash cycle counter For the timescales we are working against there is no need to go beyond unsigned long. Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 8893ba5321b5..59b732722678 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -584,7 +584,7 @@ char result[8][64]; struct ahash_request *req[8]; struct tcrypt_result tresult[8]; char *xbuf[8][XBUFSIZE]; -cycles_t start[8], end[8], mid; +unsigned long start[8], end[8], mid; static void test_mb_ahash_speed(const char *algo, unsigned int sec, struct hash_speed *speed) @@ -593,6 +593,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, void *hash_buff; int ret = -ENOMEM; struct crypto_ahash *tfm; + unsigned long cycles; tfm = crypto_alloc_ahash(algo, 0, 0); if (IS_ERR(tfm)) { @@ -667,10 +668,9 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, end[k] = get_cycles(); } - printk("\nBlock: %lld cycles (%lld cycles/byte), %d bytes\n", - (s64) (end[7]-start[0])/1, - (s64) (end[7]-start[0])/(8*speed[i].blen), - 8*speed[i].blen); + cycles = end[7] - start[0]; + printk("\nBlock: %6lu cycles (%4lu cycles/byte)\n", + cycles, cycles / (8 * speed[i].blen)); } ret = 0; -- cgit v1.2.3-70-g09d2 From 72259deb3a9f2c07d18d71d7c9356754e7d88369 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 28 Jun 2016 20:33:52 +0800 Subject: crypto: tcrypt - Fix memory leaks/crashes in multibuffer hash speed test This patch resolves a number of issues with the mb speed test function: * The tfm is never freed. * Memory is allocated even when we're not using mb. * When an error occurs we don't wait for completion for other requests. * When an error occurs during allocation we may leak memory. * The test function ignores plen but still runs for plen != blen. * The backlog flag is incorrectly used (may crash). This patch tries to resolve all these issues as well as making the code consistent with the existing hash speed testing function. 
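One consequence worth spelling out, since the list above mentions the backlog flag: once CRYPTO_TFM_REQ_MAY_BACKLOG is no longer set on the requests, only -EINPROGRESS means "queued, wait for the callback", and -EBUSY becomes a hard failure. A rough sketch of the corrected wait pattern (illustrative only; the real rework is in the diff that follows):

        ret = crypto_ahash_digest(req);
        if (ret == -EINPROGRESS) {
                /* tcrypt_complete() signals the completion and records
                 * the final status of the request in tr->err */
                wait_for_completion(&tr->completion);
                ret = tr->err;
        }
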
Signed-off-by: Herbert Xu Tested-by: Krzysztof Kozlowski --- crypto/tcrypt.c | 129 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 71 insertions(+), 58 deletions(-) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 59b732722678..8a91dc34610e 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -578,54 +578,61 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret) return ret; } -char ptext[4096]; -struct scatterlist sg[8][8]; -char result[8][64]; -struct ahash_request *req[8]; -struct tcrypt_result tresult[8]; -char *xbuf[8][XBUFSIZE]; -unsigned long start[8], end[8], mid; +struct test_mb_ahash_data { + struct scatterlist sg[TVMEMSIZE]; + char result[64]; + struct ahash_request *req; + struct tcrypt_result tresult; + char *xbuf[XBUFSIZE]; +}; static void test_mb_ahash_speed(const char *algo, unsigned int sec, - struct hash_speed *speed) + struct hash_speed *speed) { - unsigned int i, j, k; - void *hash_buff; - int ret = -ENOMEM; + struct test_mb_ahash_data *data; struct crypto_ahash *tfm; + unsigned long start, end; unsigned long cycles; + unsigned int i, j, k; + int ret; + + data = kzalloc(sizeof(*data) * 8, GFP_KERNEL); + if (!data) + return; tfm = crypto_alloc_ahash(algo, 0, 0); if (IS_ERR(tfm)) { pr_err("failed to load transform for %s: %ld\n", algo, PTR_ERR(tfm)); - return; + goto free_data; } + for (i = 0; i < 8; ++i) { - if (testmgr_alloc_buf(xbuf[i])) - goto out_nobuf; + if (testmgr_alloc_buf(data[i].xbuf)) + goto out; - init_completion(&tresult[i].completion); + init_completion(&data[i].tresult.completion); - req[i] = ahash_request_alloc(tfm, GFP_KERNEL); - if (!req[i]) { + data[i].req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!data[i].req) { pr_err("alg: hash: Failed to allocate request for %s\n", algo); - goto out_noreq; + goto out; } - ahash_request_set_callback(req[i], CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &tresult[i]); - hash_buff = xbuf[i][0]; - memcpy(hash_buff, ptext, 4096); + ahash_request_set_callback(data[i].req, 0, + tcrypt_complete, &data[i].tresult); + test_hash_sg_init(data[i].sg); } - j = 0; - - pr_err("\ntesting speed of %s (%s)\n", algo, - get_driver_name(crypto_ahash, tfm)); + pr_info("\ntesting speed of multibuffer %s (%s)\n", algo, + get_driver_name(crypto_ahash, tfm)); for (i = 0; speed[i].blen != 0; i++) { + /* For some reason this only tests digests. 
*/ + if (speed[i].blen != speed[i].plen) + continue; + if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { pr_err("template (%u) too big for tvmem (%lu)\n", speed[i].blen, TVMEMSIZE * PAGE_SIZE); @@ -635,53 +642,59 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, if (speed[i].klen) crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen); - for (k = 0; k < 8; ++k) { - sg_init_one(&sg[k][0], (void *) xbuf[k][0], - speed[i].blen); - ahash_request_set_crypt(req[k], sg[k], - result[k], speed[i].blen); - } + for (k = 0; k < 8; k++) + ahash_request_set_crypt(data[k].req, data[k].sg, + data[k].result, speed[i].blen); - pr_err("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ", + pr_info("test%3u " + "(%5u byte blocks,%5u bytes per update,%4u updates): ", i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); - for (k = 0; k < 8; ++k) { - start[k] = get_cycles(); - ret = crypto_ahash_digest(req[k]); - if (ret == -EBUSY || ret == -EINPROGRESS) + start = get_cycles(); + + for (k = 0; k < 8; k++) { + ret = crypto_ahash_digest(data[k].req); + if (ret == -EINPROGRESS) continue; - if (ret) { - pr_err("alg (%s) something wrong, ret = %d ...\n", - algo, ret); - goto out; - } + + if (ret) + break; + + complete(&data[k].tresult.completion); + data[k].tresult.err = 0; } - mid = get_cycles(); - for (k = 0; k < 8; ++k) { - struct tcrypt_result *tr = &tresult[k]; + for (j = 0; j < k; j++) { + struct tcrypt_result *tr = &data[j].tresult; - ret = wait_for_completion_interruptible(&tr->completion); - if (ret) - pr_err("alg(%s): hash: digest failed\n", algo); - end[k] = get_cycles(); + wait_for_completion(&tr->completion); + if (tr->err) + ret = tr->err; } - cycles = end[7] - start[0]; - printk("\nBlock: %6lu cycles (%4lu cycles/byte)\n", - cycles, cycles / (8 * speed[i].blen)); + end = get_cycles(); + cycles = end - start; + pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", + cycles, cycles / (8 * speed[i].blen)); + + if (ret) { + pr_err("At least one hashing failed ret=%d\n", ret); + break; + } } - ret = 0; out: for (k = 0; k < 8; ++k) - ahash_request_free(req[k]); -out_noreq: + ahash_request_free(data[k].req); + for (k = 0; k < 8; ++k) - testmgr_free_buf(xbuf[k]); -out_nobuf: - return; + testmgr_free_buf(data[k].xbuf); + + crypto_free_ahash(tfm); + +free_data: + kfree(data); } static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, -- cgit v1.2.3-70-g09d2 From c34252fd712c10da3a0b018b3b5dbe552ac1e7b9 Mon Sep 17 00:00:00 2001 From: Harsh Jain Date: Wed, 29 Jun 2016 00:24:43 +0530 Subject: crypto: authenc - Remove redundant sg_init_table call. Remove redundant sg_init_table call. scatterwalk_ffwd doing the same. 
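For reference, the reason the calls are redundant: scatterwalk_ffwd() already initialises its own two-entry destination table before chaining it to the remainder of the source list. A simplified rendering of the helper's logic from crypto/scatterwalk.c (a sketch, not a verbatim copy):

        struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
                                             struct scatterlist *src,
                                             unsigned int len)
        {
                /* skip over 'len' bytes of fully consumed entries */
                for (;;) {
                        if (!len)
                                return src;
                        if (src->length > len)
                                break;
                        len -= src->length;
                        src = sg_next(src);
                }

                /* this is what makes the callers' sg_init_table() redundant */
                sg_init_table(dst, 2);
                sg_set_page(dst, sg_page(src), src->length - len,
                            src->offset + len);
                scatterwalk_crypto_chain(dst, sg_next(src), 0, 2);

                return dst;
        }
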
Signed-off-by: Harsh Jain Signed-off-by: Herbert Xu --- crypto/authenc.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/authenc.c b/crypto/authenc.c index 55a354d57251..c7cc11d36480 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -206,7 +206,6 @@ static int crypto_authenc_encrypt(struct aead_request *req) struct scatterlist *src, *dst; int err; - sg_init_table(areq_ctx->src, 2); src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); dst = src; @@ -215,7 +214,6 @@ static int crypto_authenc_encrypt(struct aead_request *req) if (err) return err; - sg_init_table(areq_ctx->dst, 2); dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); } @@ -251,14 +249,11 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req, if (crypto_memneq(ihash, ahreq->result, authsize)) return -EBADMSG; - sg_init_table(areq_ctx->src, 2); src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); dst = src; - if (req->src != req->dst) { - sg_init_table(areq_ctx->dst, 2); + if (req->src != req->dst) dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); - } ablkcipher_request_set_tfm(abreq, ctx->enc); ablkcipher_request_set_callback(abreq, aead_request_flags(req), -- cgit v1.2.3-70-g09d2 From 927ef32dccfe6e048a163138b8e714b2d944194d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 18:03:46 +0800 Subject: crypto: authenc - Consider ahash ASYNC bit As it is, if you get an async ahash with a sync skcipher you'll end up with a sync authenc, which is wrong. This patch fixes it by considering the ASYNC bit from ahash as well. It also fixes a little bug where if a sync version of authenc is requested we may still end up using an async ahash. Neither of them should have any effect as none of the authenc users can request for a sync authenc. 
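The helper that gets OR-ed into the lookup mask below is small; as defined in include/crypto/algapi.h at the time of this series, it yields CRYPTO_ALG_ASYNC as a mask bit exactly when the user asked for a synchronous instance:

        static inline u32 crypto_requires_sync(u32 type, u32 mask)
        {
                return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
        }

Adding this to the ahash lookup mask therefore filters out asynchronous hash implementations whenever a synchronous authenc instance was requested.
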
Signed-off-by: Herbert Xu --- crypto/authenc.c | 6 ++++-- crypto/authencesn.c | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/authenc.c b/crypto/authenc.c index c7cc11d36480..309fbc17222d 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -392,7 +392,8 @@ static int crypto_authenc_create(struct crypto_template *tmpl, return -EINVAL; auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK); + CRYPTO_ALG_TYPE_AHASH_MASK | + crypto_requires_sync(algt->type, algt->mask)); if (IS_ERR(auth)) return PTR_ERR(auth); @@ -438,7 +439,8 @@ static int crypto_authenc_create(struct crypto_template *tmpl, enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_enc; - inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_flags = (auth_base->cra_flags | enc->cra_flags) & + CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->cra_blocksize; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 0c0468869e25..0662b1848c5d 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -413,7 +413,8 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, return -EINVAL; auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK); + CRYPTO_ALG_TYPE_AHASH_MASK | + crypto_requires_sync(algt->type, algt->mask)); if (IS_ERR(auth)) return PTR_ERR(auth); @@ -456,7 +457,8 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_enc; - inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_flags = (auth_base->cra_flags | enc->cra_flags) & + CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->cra_blocksize; -- cgit v1.2.3-70-g09d2 From 2495cf25f60e085b35beb9b215235dfe1ca4f200 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 18:03:47 +0800 Subject: crypto: ahash - Add padding in crypto_ahash_extsize The function crypto_ahash_extsize did not include padding when computing the tfm context size. This patch fixes this by using the generic crypto_alg_extsize helper. Signed-off-by: Herbert Xu --- crypto/ahash.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 3887a98abcc3..2ce8bcb9049c 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -461,10 +461,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) { - if (alg->cra_type == &crypto_ahash_type) - return alg->cra_ctxsize; + if (alg->cra_type != &crypto_ahash_type) + return sizeof(struct crypto_shash *); - return sizeof(struct crypto_shash *); + return crypto_alg_extsize(alg); } #ifdef CONFIG_NET -- cgit v1.2.3-70-g09d2 From 7166e589da5b681e1f8ff2d1c491d7d71e9f7671 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 18:03:50 +0800 Subject: crypto: tcrypt - Use skcipher This patch converts tcrypt to use the new skcipher interface as opposed to ablkcipher/blkcipher. 
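For readers unfamiliar with the newer interface, this is the shape of the skcipher API that the conversion below targets; a minimal illustrative fragment reusing tcrypt's tcrypt_complete()/tcrypt_result pattern (not code from the patch, error handling elided):

        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        int ret;

        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        req = skcipher_request_alloc(tfm, GFP_KERNEL);

        crypto_skcipher_setkey(tfm, key, keylen);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      tcrypt_complete, &tresult);
        skcipher_request_set_crypt(req, sg, sg, len, iv);

        ret = crypto_skcipher_encrypt(req);  /* or crypto_skcipher_decrypt() */
        /* on -EINPROGRESS/-EBUSY, wait on tresult.completion */

        skcipher_request_free(req);
        crypto_free_skcipher(tfm);
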
Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 241 +++++++++++--------------------------------------------- 1 file changed, 44 insertions(+), 197 deletions(-) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 8a91dc34610e..68064fc9c8ee 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -92,76 +93,6 @@ static void tcrypt_complete(struct crypto_async_request *req, int err) complete(&res->completion); } -static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, - struct scatterlist *sg, int blen, int secs) -{ - unsigned long start, end; - int bcount; - int ret; - - for (start = jiffies, end = start + secs * HZ, bcount = 0; - time_before(jiffies, end); bcount++) { - if (enc) - ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); - else - ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); - - if (ret) - return ret; - } - - printk("%d operations in %d seconds (%ld bytes)\n", - bcount, secs, (long)bcount * blen); - return 0; -} - -static int test_cipher_cycles(struct blkcipher_desc *desc, int enc, - struct scatterlist *sg, int blen) -{ - unsigned long cycles = 0; - int ret = 0; - int i; - - local_irq_disable(); - - /* Warm-up run. */ - for (i = 0; i < 4; i++) { - if (enc) - ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); - else - ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); - - if (ret) - goto out; - } - - /* The real thing. */ - for (i = 0; i < 8; i++) { - cycles_t start, end; - - start = get_cycles(); - if (enc) - ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); - else - ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); - end = get_cycles(); - - if (ret) - goto out; - - cycles += end - start; - } - -out: - local_irq_enable(); - - if (ret == 0) - printk("1 operation in %lu cycles (%d bytes)\n", - (cycles + 4) / 8, blen); - - return ret; -} - static inline int do_one_aead_op(struct aead_request *req, int ret) { if (ret == -EINPROGRESS || ret == -EBUSY) { @@ -455,106 +386,6 @@ out_noxbuf: return; } -static void test_cipher_speed(const char *algo, int enc, unsigned int secs, - struct cipher_speed_template *template, - unsigned int tcount, u8 *keysize) -{ - unsigned int ret, i, j, iv_len; - const char *key; - char iv[128]; - struct crypto_blkcipher *tfm; - struct blkcipher_desc desc; - const char *e; - u32 *b_size; - - if (enc == ENCRYPT) - e = "encryption"; - else - e = "decryption"; - - tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); - - if (IS_ERR(tfm)) { - printk("failed to load transform for %s: %ld\n", algo, - PTR_ERR(tfm)); - return; - } - desc.tfm = tfm; - desc.flags = 0; - - printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, - get_driver_name(crypto_blkcipher, tfm), e); - - i = 0; - do { - - b_size = block_sizes; - do { - struct scatterlist sg[TVMEMSIZE]; - - if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) { - printk("template (%u) too big for " - "tvmem (%lu)\n", *keysize + *b_size, - TVMEMSIZE * PAGE_SIZE); - goto out; - } - - printk("test %u (%d bit key, %d byte blocks): ", i, - *keysize * 8, *b_size); - - memset(tvmem[0], 0xff, PAGE_SIZE); - - /* set key, plain text and IV */ - key = tvmem[0]; - for (j = 0; j < tcount; j++) { - if (template[j].klen == *keysize) { - key = template[j].key; - break; - } - } - - ret = crypto_blkcipher_setkey(tfm, key, *keysize); - if (ret) { - printk("setkey() failed flags=%x\n", - crypto_blkcipher_get_flags(tfm)); - goto out; - } - - sg_init_table(sg, TVMEMSIZE); - sg_set_buf(sg, tvmem[0] + *keysize, - PAGE_SIZE 
- *keysize); - for (j = 1; j < TVMEMSIZE; j++) { - sg_set_buf(sg + j, tvmem[j], PAGE_SIZE); - memset (tvmem[j], 0xff, PAGE_SIZE); - } - - iv_len = crypto_blkcipher_ivsize(tfm); - if (iv_len) { - memset(&iv, 0xff, iv_len); - crypto_blkcipher_set_iv(tfm, iv, iv_len); - } - - if (secs) - ret = test_cipher_jiffies(&desc, enc, sg, - *b_size, secs); - else - ret = test_cipher_cycles(&desc, enc, sg, - *b_size); - - if (ret) { - printk("%s() failed flags=%x\n", e, desc.flags); - break; - } - b_size++; - i++; - } while (*b_size); - keysize++; - } while (*keysize); - -out: - crypto_free_blkcipher(tfm); -} - static void test_hash_sg_init(struct scatterlist *sg) { int i; @@ -932,7 +763,7 @@ static void test_hash_speed(const char *algo, unsigned int secs, return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC); } -static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret) +static inline int do_one_acipher_op(struct skcipher_request *req, int ret) { if (ret == -EINPROGRESS || ret == -EBUSY) { struct tcrypt_result *tr = req->base.data; @@ -945,7 +776,7 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret) return ret; } -static int test_acipher_jiffies(struct ablkcipher_request *req, int enc, +static int test_acipher_jiffies(struct skcipher_request *req, int enc, int blen, int secs) { unsigned long start, end; @@ -956,10 +787,10 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc, time_before(jiffies, end); bcount++) { if (enc) ret = do_one_acipher_op(req, - crypto_ablkcipher_encrypt(req)); + crypto_skcipher_encrypt(req)); else ret = do_one_acipher_op(req, - crypto_ablkcipher_decrypt(req)); + crypto_skcipher_decrypt(req)); if (ret) return ret; @@ -970,7 +801,7 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc, return 0; } -static int test_acipher_cycles(struct ablkcipher_request *req, int enc, +static int test_acipher_cycles(struct skcipher_request *req, int enc, int blen) { unsigned long cycles = 0; @@ -981,10 +812,10 @@ static int test_acipher_cycles(struct ablkcipher_request *req, int enc, for (i = 0; i < 4; i++) { if (enc) ret = do_one_acipher_op(req, - crypto_ablkcipher_encrypt(req)); + crypto_skcipher_encrypt(req)); else ret = do_one_acipher_op(req, - crypto_ablkcipher_decrypt(req)); + crypto_skcipher_decrypt(req)); if (ret) goto out; @@ -997,10 +828,10 @@ static int test_acipher_cycles(struct ablkcipher_request *req, int enc, start = get_cycles(); if (enc) ret = do_one_acipher_op(req, - crypto_ablkcipher_encrypt(req)); + crypto_skcipher_encrypt(req)); else ret = do_one_acipher_op(req, - crypto_ablkcipher_decrypt(req)); + crypto_skcipher_decrypt(req)); end = get_cycles(); if (ret) @@ -1017,16 +848,16 @@ out: return ret; } -static void test_acipher_speed(const char *algo, int enc, unsigned int secs, - struct cipher_speed_template *template, - unsigned int tcount, u8 *keysize) +static void test_skcipher_speed(const char *algo, int enc, unsigned int secs, + struct cipher_speed_template *template, + unsigned int tcount, u8 *keysize, bool async) { unsigned int ret, i, j, k, iv_len; struct tcrypt_result tresult; const char *key; char iv[128]; - struct ablkcipher_request *req; - struct crypto_ablkcipher *tfm; + struct skcipher_request *req; + struct crypto_skcipher *tfm; const char *e; u32 *b_size; @@ -1037,7 +868,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs, init_completion(&tresult.completion); - tfm = crypto_alloc_ablkcipher(algo, 0, 0); + tfm = 
crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { pr_err("failed to load transform for %s: %ld\n", algo, @@ -1046,17 +877,17 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs, } pr_info("\ntesting speed of async %s (%s) %s\n", algo, - get_driver_name(crypto_ablkcipher, tfm), e); + get_driver_name(crypto_skcipher, tfm), e); - req = ablkcipher_request_alloc(tfm, GFP_KERNEL); + req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { pr_err("tcrypt: skcipher: Failed to allocate request for %s\n", algo); goto out; } - ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tcrypt_complete, &tresult); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &tresult); i = 0; do { @@ -1086,12 +917,12 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs, } } - crypto_ablkcipher_clear_flags(tfm, ~0); + crypto_skcipher_clear_flags(tfm, ~0); - ret = crypto_ablkcipher_setkey(tfm, key, *keysize); + ret = crypto_skcipher_setkey(tfm, key, *keysize); if (ret) { pr_err("setkey() failed flags=%x\n", - crypto_ablkcipher_get_flags(tfm)); + crypto_skcipher_get_flags(tfm)); goto out_free_req; } @@ -1115,11 +946,11 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs, sg_set_buf(sg, tvmem[0] + *keysize, *b_size); } - iv_len = crypto_ablkcipher_ivsize(tfm); + iv_len = crypto_skcipher_ivsize(tfm); if (iv_len) memset(&iv, 0xff, iv_len); - ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv); + skcipher_request_set_crypt(req, sg, sg, *b_size, iv); if (secs) ret = test_acipher_jiffies(req, enc, @@ -1130,7 +961,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs, if (ret) { pr_err("%s() failed flags=%x\n", e, - crypto_ablkcipher_get_flags(tfm)); + crypto_skcipher_get_flags(tfm)); break; } b_size++; @@ -1140,9 +971,25 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs, } while (*keysize); out_free_req: - ablkcipher_request_free(req); + skcipher_request_free(req); out: - crypto_free_ablkcipher(tfm); + crypto_free_skcipher(tfm); +} + +static void test_acipher_speed(const char *algo, int enc, unsigned int secs, + struct cipher_speed_template *template, + unsigned int tcount, u8 *keysize) +{ + return test_skcipher_speed(algo, enc, secs, template, tcount, keysize, + true); +} + +static void test_cipher_speed(const char *algo, int enc, unsigned int secs, + struct cipher_speed_template *template, + unsigned int tcount, u8 *keysize) +{ + return test_skcipher_speed(algo, enc, secs, template, tcount, keysize, + false); } static void test_available(void) -- cgit v1.2.3-70-g09d2 From 32f27c745c26ff4b6351bce265cba049a2c74de5 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 18:04:13 +0800 Subject: crypto: api - Add crypto_inst_setname This patch adds the helper crypto_inst_setname because the current helper crypto_alloc_instance2 is no longer useful given that we now look up the algorithm after we allocate the instance object. 
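A template's create function is then expected to call the helper right after allocating its instance; a hypothetical caller ("cts" is chosen only as an example name):

        inst = kzalloc(sizeof(*inst) + sizeof(struct crypto_spawn), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        /* produces "cts(...)" cra_name/cra_driver_name from alg's names */
        err = crypto_inst_setname(inst, "cts", alg);
        if (err)
                goto err_free_inst;
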
Signed-off-by: Herbert Xu --- crypto/algapi.c | 24 +++++++++++++++++------- include/crypto/algapi.h | 2 ++ 2 files changed, 19 insertions(+), 7 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index 731255a6104f..df939b54b09f 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -811,6 +811,21 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num) } EXPORT_SYMBOL_GPL(crypto_attr_u32); +int crypto_inst_setname(struct crypto_instance *inst, const char *name, + struct crypto_alg *alg) +{ + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, + alg->cra_name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", + name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_inst_setname); + void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, unsigned int head) { @@ -825,13 +840,8 @@ void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, inst = (void *)(p + head); - err = -ENAMETOOLONG; - if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, - alg->cra_name) >= CRYPTO_MAX_ALG_NAME) - goto err_free_inst; - - if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", - name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + err = crypto_inst_setname(inst, name, alg); + if (err) goto err_free_inst; return p; diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index eeafd21afb44..0483f652ac27 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -244,6 +244,8 @@ static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta, } int crypto_attr_u32(struct rtattr *rta, u32 *num); +int crypto_inst_setname(struct crypto_instance *inst, const char *name, + struct crypto_alg *alg); void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, unsigned int head); struct crypto_instance *crypto_alloc_instance(const char *name, -- cgit v1.2.3-70-g09d2 From 1503a24f53f153f8c88be9810bc73efaf6853e1c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 18:04:14 +0800 Subject: crypto: tcrypt - Add speed test for cts This patch adds speed tests for cts(cbc(aes)). 
Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 68064fc9c8ee..11aedae02382 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1389,6 +1389,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) speed_template_32_48_64); test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, speed_template_32_48_64); + test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0, speed_template_16_24_32); test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, @@ -1818,6 +1822,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) speed_template_32_48_64); test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, speed_template_32_48_64); + test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0, speed_template_16_24_32); test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, -- cgit v1.2.3-70-g09d2 From 50d2b643ea6675927435743633a57c2a9cfd8d83 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 19:32:20 +0800 Subject: crypto: testmgr - Allow leading zeros in RSA This patch allows RSA implementations to produce output with leading zeroes. testmgr will skip leading zeroes when comparing the output. This patch also tries to make the RSA test function generic enough to potentially handle other akcipher algorithms. Signed-off-by: Herbert Xu --- crypto/testmgr.c | 51 ++++++++++++++++++++++++--------------------------- 1 file changed, 24 insertions(+), 27 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 537fdc380a7b..38e23be315a0 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1911,8 +1911,8 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver, return err; } -static int do_test_rsa(struct crypto_akcipher *tfm, - struct akcipher_testvec *vecs) +static int test_akcipher_one(struct crypto_akcipher *tfm, + struct akcipher_testvec *vecs) { char *xbuf[XBUFSIZE]; struct akcipher_request *req; @@ -1963,17 +1963,18 @@ static int do_test_rsa(struct crypto_akcipher *tfm, /* Run RSA encrypt - c = m^e mod n;*/ err = wait_async_op(&result, crypto_akcipher_encrypt(req)); if (err) { - pr_err("alg: rsa: encrypt test failed. err %d\n", err); + pr_err("alg: akcipher: encrypt test failed. err %d\n", err); goto free_all; } if (req->dst_len != vecs->c_size) { - pr_err("alg: rsa: encrypt test failed. Invalid output len\n"); + pr_err("alg: akcipher: encrypt test failed. Invalid output len\n"); err = -EINVAL; goto free_all; } /* verify that encrypted message is equal to expected */ if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) { - pr_err("alg: rsa: encrypt test failed. Invalid output\n"); + pr_err("alg: akcipher: encrypt test failed. Invalid output\n"); + hexdump(outbuf_enc, vecs->c_size); err = -EINVAL; goto free_all; } @@ -2001,18 +2002,22 @@ static int do_test_rsa(struct crypto_akcipher *tfm, /* Run RSA decrypt - m = c^d mod n;*/ err = wait_async_op(&result, crypto_akcipher_decrypt(req)); if (err) { - pr_err("alg: rsa: decrypt test failed. err %d\n", err); + pr_err("alg: akcipher: decrypt test failed. 
err %d\n", err); goto free_all; } out_len = req->dst_len; - if (out_len != vecs->m_size) { - pr_err("alg: rsa: decrypt test failed. Invalid output len\n"); + if (out_len < vecs->m_size) { + pr_err("alg: akcipher: decrypt test failed. " + "Invalid output len %u\n", out_len); err = -EINVAL; goto free_all; } /* verify that decrypted message is equal to the original msg */ - if (memcmp(vecs->m, outbuf_dec, vecs->m_size)) { - pr_err("alg: rsa: decrypt test failed. Invalid output\n"); + if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) || + memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size, + vecs->m_size)) { + pr_err("alg: akcipher: decrypt test failed. Invalid output\n"); + hexdump(outbuf_dec, out_len); err = -EINVAL; } free_all: @@ -2025,28 +2030,20 @@ free_xbuf: return err; } -static int test_rsa(struct crypto_akcipher *tfm, struct akcipher_testvec *vecs, - unsigned int tcount) +static int test_akcipher(struct crypto_akcipher *tfm, const char *alg, + struct akcipher_testvec *vecs, unsigned int tcount) { int ret, i; for (i = 0; i < tcount; i++) { - ret = do_test_rsa(tfm, vecs++); - if (ret) { - pr_err("alg: rsa: test failed on vector %d, err=%d\n", - i + 1, ret); - return ret; - } - } - return 0; -} - -static int test_akcipher(struct crypto_akcipher *tfm, const char *alg, - struct akcipher_testvec *vecs, unsigned int tcount) -{ - if (strncmp(alg, "rsa", 3) == 0) - return test_rsa(tfm, vecs, tcount); + ret = test_akcipher_one(tfm, vecs++); + if (!ret) + continue; + pr_err("alg: akcipher: test failed on vector %d, err=%d\n", + i + 1, ret); + return ret; + } return 0; } -- cgit v1.2.3-70-g09d2 From 9b45b7bba3d22de52e09df63c50f390a193a3f53 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 19:32:21 +0800 Subject: crypto: rsa - Generate fixed-length output Every implementation of RSA that we have naturally generates output with leading zeroes. The one and only user of RSA, pkcs1pad wants to have those leading zeroes in place, in fact because they are currently absent it has to write those zeroes itself. So we shouldn't be stripping leading zeroes in the first place. In fact this patch makes rsa-generic produce output with fixed length so that pkcs1pad does not need to do any extra work. This patch also changes DH to use the new interface. 
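The fixed-length convention is plain big-endian with zero fill on the left. A sketch of the semantics on a flat buffer (illustrative helper, assuming val_len <= dst_len; the mpi_write_to_sgl() change below does the same thing across a scatterlist and returns -EOVERFLOW when the value does not fit):

        static void write_fixed_be(u8 *dst, size_t dst_len,
                                   const u8 *val, size_t val_len)
        {
                /* leading zero padding up to the requested length */
                memset(dst, 0, dst_len - val_len);
                /* big-endian magnitude in the low-order bytes */
                memcpy(dst + dst_len - val_len, val, val_len);
        }
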
Signed-off-by: Herbert Xu --- crypto/dh.c | 2 +- crypto/rsa.c | 8 ++++---- include/linux/mpi.h | 2 +- lib/mpi/mpicoder.c | 55 +++++++++++++++++++++++++---------------------------- 4 files changed, 32 insertions(+), 35 deletions(-) (limited to 'crypto') diff --git a/crypto/dh.c b/crypto/dh.c index 5e960fe28681..9d19360e7189 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -129,7 +129,7 @@ static int dh_compute_value(struct kpp_request *req) if (ret) goto err_free_base; - ret = mpi_write_to_sgl(val, req->dst, &req->dst_len, &sign); + ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign); if (ret) goto err_free_base; diff --git a/crypto/rsa.c b/crypto/rsa.c index dc692d43b666..4c280b6a3ea9 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -108,7 +108,7 @@ static int rsa_enc(struct akcipher_request *req) if (ret) goto err_free_m; - ret = mpi_write_to_sgl(c, req->dst, &req->dst_len, &sign); + ret = mpi_write_to_sgl(c, req->dst, req->dst_len, &sign); if (ret) goto err_free_m; @@ -147,7 +147,7 @@ static int rsa_dec(struct akcipher_request *req) if (ret) goto err_free_c; - ret = mpi_write_to_sgl(m, req->dst, &req->dst_len, &sign); + ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign); if (ret) goto err_free_c; @@ -185,7 +185,7 @@ static int rsa_sign(struct akcipher_request *req) if (ret) goto err_free_m; - ret = mpi_write_to_sgl(s, req->dst, &req->dst_len, &sign); + ret = mpi_write_to_sgl(s, req->dst, req->dst_len, &sign); if (ret) goto err_free_m; @@ -226,7 +226,7 @@ static int rsa_verify(struct akcipher_request *req) if (ret) goto err_free_s; - ret = mpi_write_to_sgl(m, req->dst, &req->dst_len, &sign); + ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign); if (ret) goto err_free_s; diff --git a/include/linux/mpi.h b/include/linux/mpi.h index f219559e5e80..1cc5ffb769af 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -80,7 +80,7 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, int *sign); void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); -int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned *nbytes, +int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes, int *sign); #define log_mpidump g10_log_mpidump diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index 823cf5f5196b..7150e5c23604 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c @@ -237,16 +237,13 @@ EXPORT_SYMBOL_GPL(mpi_get_buffer); * @a: a multi precision integer * @sgl: scatterlist to write to. Needs to be at least * mpi_get_size(a) long. - * @nbytes: in/out param - it has the be set to the maximum number of - * bytes that can be written to sgl. This has to be at least - * the size of the integer a. On return it receives the actual - * length of the data written on success or the data that would - * be written if buffer was too small. + * @nbytes: the number of bytes to write. Leading bytes will be + * filled with zero. * @sign: if not NULL, it will be set to the sign of a. * * Return: 0 on success or error code in case of error */ -int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes, +int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned nbytes, int *sign) { u8 *p, *p2; @@ -258,43 +255,44 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes, #error please implement for this limb size. 
#endif unsigned int n = mpi_get_size(a); - int i, x, y = 0, lzeros, buf_len; - - if (!nbytes) - return -EINVAL; + int i, x, buf_len; if (sign) *sign = a->sign; - lzeros = count_lzeros(a); - - if (*nbytes < n - lzeros) { - *nbytes = n - lzeros; + if (nbytes < n) return -EOVERFLOW; - } - *nbytes = n - lzeros; buf_len = sgl->length; p2 = sg_virt(sgl); - for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB, - lzeros %= BYTES_PER_MPI_LIMB; - i >= 0; i--) { + while (nbytes > n) { + if (!buf_len) { + sgl = sg_next(sgl); + if (!sgl) + return -EINVAL; + buf_len = sgl->length; + p2 = sg_virt(sgl); + } + + i = min_t(unsigned, nbytes - n, buf_len); + memset(p2, 0, i); + p2 += i; + buf_len -= i; + nbytes -= i; + } + + for (i = a->nlimbs - 1; i >= 0; i--) { #if BYTES_PER_MPI_LIMB == 4 - alimb = cpu_to_be32(a->d[i]); + alimb = a->d[i] ? cpu_to_be32(a->d[i]) : 0; #elif BYTES_PER_MPI_LIMB == 8 - alimb = cpu_to_be64(a->d[i]); + alimb = a->d[i] ? cpu_to_be64(a->d[i]) : 0; #else #error please implement for this limb size. #endif - if (lzeros) { - y = lzeros; - lzeros = 0; - } - - p = (u8 *)&alimb + y; + p = (u8 *)&alimb; - for (x = 0; x < sizeof(alimb) - y; x++) { + for (x = 0; x < sizeof(alimb); x++) { if (!buf_len) { sgl = sg_next(sgl); if (!sgl) @@ -305,7 +303,6 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes, *p2++ = *p++; buf_len--; } - y = 0; } return 0; } -- cgit v1.2.3-70-g09d2 From c0d20d22e0ad20718702ae98cf8b5c200271d6df Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 19:32:23 +0800 Subject: crypto: rsa-pkcs1pad - Require hash to be present The only user of rsa-pkcs1pad always uses the hash so there is no reason to support the case of not having a hash. This patch also changes the digest info lookup so that it is only done once during template instantiation rather than on each operation. 
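In outline, the flow this patch introduces looks like this (sketch only; the names match the diff below):

	/* pkcs1pad_create(), run once per template instantiation: */
	digest_info = rsa_lookup_asn1(hash_name);
	if (!digest_info)
		return -EINVAL;
	ctx->digest_info = digest_info;

	/* sign/verify paths, run per operation: a plain pointer fetch */
	const struct rsa_asn1_template *digest_info = ictx->digest_info;

A side effect is that instantiating pkcs1pad with an unknown hash now fails up front with -EINVAL instead of failing later at operation time.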
Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 83 +++++++++++++++++++-------------------------------- 1 file changed, 30 insertions(+), 53 deletions(-) (limited to 'crypto') diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index ead8dc0d084e..5c1c78e21f84 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -92,13 +92,12 @@ static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name) struct pkcs1pad_ctx { struct crypto_akcipher *child; - const char *hash_name; unsigned int key_size; }; struct pkcs1pad_inst_ctx { struct crypto_akcipher_spawn spawn; - const char *hash_name; + const struct rsa_asn1_template *digest_info; }; struct pkcs1pad_request { @@ -416,20 +415,16 @@ static int pkcs1pad_sign(struct akcipher_request *req) struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - const struct rsa_asn1_template *digest_info = NULL; + struct akcipher_instance *inst = akcipher_alg_instance(tfm); + struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst); + const struct rsa_asn1_template *digest_info = ictx->digest_info; int err; unsigned int ps_end, digest_size = 0; if (!ctx->key_size) return -EINVAL; - if (ctx->hash_name) { - digest_info = rsa_lookup_asn1(ctx->hash_name); - if (!digest_info) - return -EINVAL; - - digest_size = digest_info->size; - } + digest_size = digest_info->size; if (req->src_len + digest_size > ctx->key_size - 11) return -EOVERFLOW; @@ -462,10 +457,8 @@ static int pkcs1pad_sign(struct akcipher_request *req) memset(req_ctx->in_buf + 1, 0xff, ps_end - 1); req_ctx->in_buf[ps_end] = 0x00; - if (digest_info) { - memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data, - digest_info->size); - } + memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data, + digest_info->size); pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, ctx->key_size - 1 - req->src_len, req->src); @@ -499,7 +492,9 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - const struct rsa_asn1_template *digest_info; + struct akcipher_instance *inst = akcipher_alg_instance(tfm); + struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst); + const struct rsa_asn1_template *digest_info = ictx->digest_info; unsigned int pos; if (err == -EOVERFLOW) @@ -527,17 +522,11 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) goto done; pos++; - if (ctx->hash_name) { - digest_info = rsa_lookup_asn1(ctx->hash_name); - if (!digest_info) - goto done; + if (memcmp(req_ctx->out_buf + pos, digest_info->data, + digest_info->size)) + goto done; - if (memcmp(req_ctx->out_buf + pos, digest_info->data, - digest_info->size)) - goto done; - - pos += digest_info->size; - } + pos += digest_info->size; err = 0; @@ -626,12 +615,11 @@ static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm) struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); struct crypto_akcipher *child_tfm; - child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst)); + child_tfm = crypto_spawn_akcipher(&ictx->spawn); if (IS_ERR(child_tfm)) return PTR_ERR(child_tfm); ctx->child = child_tfm; - ctx->hash_name = ictx->hash_name; return 0; } @@ -648,12 +636,12 @@ static void pkcs1pad_free(struct akcipher_instance *inst) struct crypto_akcipher_spawn *spawn = &ctx->spawn; 
crypto_drop_akcipher(spawn); - kfree(ctx->hash_name); kfree(inst); } static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) { + const struct rsa_asn1_template *digest_info; struct crypto_attr_type *algt; struct akcipher_instance *inst; struct pkcs1pad_inst_ctx *ctx; @@ -676,7 +664,11 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) hash_name = crypto_attr_alg_name(tb[2]); if (IS_ERR(hash_name)) - hash_name = NULL; + return PTR_ERR(hash_name); + + digest_info = rsa_lookup_asn1(hash_name); + if (!digest_info) + return -EINVAL; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -684,7 +676,7 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) ctx = akcipher_instance_ctx(inst); spawn = &ctx->spawn; - ctx->hash_name = hash_name ? kstrdup(hash_name, GFP_KERNEL) : NULL; + ctx->digest_info = digest_info; crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst)); err = crypto_grab_akcipher(spawn, rsa_alg_name, 0, @@ -696,27 +688,14 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) err = -ENAMETOOLONG; - if (!hash_name) { - if (snprintf(inst->alg.base.cra_name, - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", - rsa_alg->base.cra_name) >= - CRYPTO_MAX_ALG_NAME || - snprintf(inst->alg.base.cra_driver_name, - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", - rsa_alg->base.cra_driver_name) >= - CRYPTO_MAX_ALG_NAME) + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, hash_name) >= + CRYPTO_MAX_ALG_NAME || + snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "pkcs1pad(%s,%s)", + rsa_alg->base.cra_driver_name, hash_name) >= + CRYPTO_MAX_ALG_NAME) goto out_drop_alg; - } else { - if (snprintf(inst->alg.base.cra_name, - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)", - rsa_alg->base.cra_name, hash_name) >= - CRYPTO_MAX_ALG_NAME || - snprintf(inst->alg.base.cra_driver_name, - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)", - rsa_alg->base.cra_driver_name, hash_name) >= - CRYPTO_MAX_ALG_NAME) - goto out_free_hash; - } inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = rsa_alg->base.cra_priority; @@ -738,12 +717,10 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) err = akcipher_register_instance(tmpl, inst); if (err) - goto out_free_hash; + goto out_drop_alg; return 0; -out_free_hash: - kfree(ctx->hash_name); out_drop_alg: crypto_drop_akcipher(spawn); out_free_inst: -- cgit v1.2.3-70-g09d2 From 0f2c83190bc6756021e35a91c1f282ecae3f0e87 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 19:32:24 +0800 Subject: crypto: rsa-pkcs1pad - Remove bogus page splitting The helper pkcs1pad_sg_set_buf tries to split a buffer that crosses a page boundary into two SG entries. This is unnecessary. This patch removes that. 
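With the splitting gone, the helper reduces to a single sg_set_buf() plus optional chaining, roughly (matching the diff below):

	static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf,
					size_t len, struct scatterlist *next)
	{
		int nsegs = next ? 2 : 1;

		sg_init_table(sg, nsegs);
		sg_set_buf(sg, buf, len);

		if (next)
			sg_chain(sg, nsegs, next);
	}

The split was unnecessary because a single scatterlist entry may describe a buffer that crosses a page boundary as long as the buffer is physically contiguous, which the kmalloc()ed buffers used here are.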
Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) (limited to 'crypto') diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 5c1c78e21f84..d9baefb7d5d1 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -103,7 +103,7 @@ struct pkcs1pad_inst_ctx { struct pkcs1pad_request { struct akcipher_request child_req; - struct scatterlist in_sg[3], out_sg[2]; + struct scatterlist in_sg[2], out_sg[1]; uint8_t *in_buf, *out_buf; }; @@ -163,19 +163,10 @@ static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len, struct scatterlist *next) { - int nsegs = next ? 1 : 0; - - if (offset_in_page(buf) + len <= PAGE_SIZE) { - nsegs += 1; - sg_init_table(sg, nsegs); - sg_set_buf(sg, buf, len); - } else { - nsegs += 2; - sg_init_table(sg, nsegs); - sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf)); - sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf), - offset_in_page(buf) + len - PAGE_SIZE); - } + int nsegs = next ? 2 : 1; + + sg_init_table(sg, nsegs); + sg_set_buf(sg, buf, len); if (next) sg_chain(sg, nsegs, next); -- cgit v1.2.3-70-g09d2 From 3a32ce507a942962f81cae8abec9ef0d0647d127 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 19:32:26 +0800 Subject: crypto: rsa-pkcs1pad - Always use GFP_KERNEL We don't currently support using akcipher in atomic contexts, so GFP_KERNEL should always be used. Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) (limited to 'crypto') diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index d9baefb7d5d1..db19284f2e2b 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -260,8 +260,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) req_ctx->child_req.dst_len = ctx->key_size; req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC); + GFP_KERNEL); if (!req_ctx->in_buf) return -ENOMEM; @@ -274,9 +273,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, ctx->key_size - 1 - req->src_len, req->src); - req_ctx->out_buf = kmalloc(ctx->key_size, - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC); + req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL); if (!req_ctx->out_buf) { kfree(req_ctx->in_buf); return -ENOMEM; @@ -379,9 +376,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req) req_ctx->child_req.dst = req_ctx->out_sg; req_ctx->child_req.dst_len = ctx->key_size ; - req_ctx->out_buf = kmalloc(ctx->key_size, - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC); + req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL); if (!req_ctx->out_buf) return -ENOMEM; @@ -438,8 +433,7 @@ static int pkcs1pad_sign(struct akcipher_request *req) req_ctx->child_req.dst_len = ctx->key_size; req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC); + GFP_KERNEL); if (!req_ctx->in_buf) return -ENOMEM; @@ -454,9 +448,7 @@ static int pkcs1pad_sign(struct akcipher_request *req) pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, ctx->key_size - 1 - req->src_len, req->src); - req_ctx->out_buf = kmalloc(ctx->key_size, - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
- GFP_KERNEL : GFP_ATOMIC); + req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL); if (!req_ctx->out_buf) { kfree(req_ctx->in_buf); return -ENOMEM; @@ -577,9 +569,7 @@ static int pkcs1pad_verify(struct akcipher_request *req) req_ctx->child_req.dst = req_ctx->out_sg; req_ctx->child_req.dst_len = ctx->key_size; - req_ctx->out_buf = kmalloc(ctx->key_size, - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC); + req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL); if (!req_ctx->out_buf) return -ENOMEM; -- cgit v1.2.3-70-g09d2 From 73f79189603b3cd8af1ba1ad2d71f1b3f0aa796d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 19:32:27 +0800 Subject: crypto: rsa-pkcs1pad - Move key size check to setkey Rather than repeatedly checking the key size on each operation, we should be checking it once when the key is set. Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 56 ++++++++++++++++++++++++--------------------------- 1 file changed, 26 insertions(+), 30 deletions(-) (limited to 'crypto') diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index db19284f2e2b..ebd851474a9d 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -111,40 +111,48 @@ static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); - int err, size; + int err; + + ctx->key_size = 0; err = crypto_akcipher_set_pub_key(ctx->child, key, keylen); + if (err) + return err; - if (!err) { - /* Find out new modulus size from rsa implementation */ - size = crypto_akcipher_maxsize(ctx->child); + /* Find out new modulus size from rsa implementation */ + err = crypto_akcipher_maxsize(ctx->child); + if (err < 0) + return err; - ctx->key_size = size > 0 ? size : 0; - if (size <= 0) - err = size; - } + if (err > PAGE_SIZE) + return -ENOTSUPP; - return err; + ctx->key_size = err; + return 0; } static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); - int err, size; + int err; + + ctx->key_size = 0; err = crypto_akcipher_set_priv_key(ctx->child, key, keylen); + if (err) + return err; - if (!err) { - /* Find out new modulus size from rsa implementation */ - size = crypto_akcipher_maxsize(ctx->child); + /* Find out new modulus size from rsa implementation */ + err = crypto_akcipher_maxsize(ctx->child); + if (err < 0) + return err; - ctx->key_size = size > 0 ? size : 0; - if (size <= 0) - err = size; - } + if (err > PAGE_SIZE) + return -ENOTSUPP; - return err; + ctx->key_size = err; + return 0; } static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) @@ -247,9 +255,6 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) return -EOVERFLOW; } - if (ctx->key_size > PAGE_SIZE) - return -ENOTSUPP; - /* * Replace both input and output to add the padding in the input and * the potential missing leading zeros in the output. 
@@ -367,9 +372,6 @@ static int pkcs1pad_decrypt(struct akcipher_request *req) if (!ctx->key_size || req->src_len != ctx->key_size) return -EINVAL; - if (ctx->key_size > PAGE_SIZE) - return -ENOTSUPP; - /* Reuse input buffer, output to a new buffer */ req_ctx->child_req.src = req->src; req_ctx->child_req.src_len = req->src_len; @@ -420,9 +422,6 @@ static int pkcs1pad_sign(struct akcipher_request *req) return -EOVERFLOW; } - if (ctx->key_size > PAGE_SIZE) - return -ENOTSUPP; - /* * Replace both input and output to add the padding in the input and * the potential missing leading zeros in the output. @@ -560,9 +559,6 @@ static int pkcs1pad_verify(struct akcipher_request *req) if (!ctx->key_size || req->src_len < ctx->key_size) return -EINVAL; - if (ctx->key_size > PAGE_SIZE) - return -ENOTSUPP; - /* Reuse input buffer, output to a new buffer */ req_ctx->child_req.src = req->src; req_ctx->child_req.src_len = req->src_len; -- cgit v1.2.3-70-g09d2 From d858b0713849be51406fe84722d0877fb57d201c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jun 2016 19:32:28 +0800 Subject: crypto: rsa-pkcs1pad - Avoid copying output when possible In the vast majority of cases (2^-32 on 32-bit and 2^-64 on 64-bit), the result from encryption/signing will require no padding. This patch makes these two operations write their output directly to the final destination. Only in the exceedingly rare cases where fixup is needed do we copy it out and back to add the leading zeroes. This patch also makes use of the akcipher_request_set_crypt API instead of writing the akcipher request directly. Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 112 ++++++++++++++++++++------------------------------ 1 file changed, 45 insertions(+), 67 deletions(-) (limited to 'crypto') diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index ebd851474a9d..8ccfdd7c926e 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -185,37 +185,36 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len; - size_t chunk_len, pad_left; - struct sg_mapping_iter miter; - - if (!err) { - if (pad_len) { - sg_miter_start(&miter, req->dst, - sg_nents_for_len(req->dst, pad_len), - SG_MITER_ATOMIC | SG_MITER_TO_SG); - - pad_left = pad_len; - while (pad_left) { - sg_miter_next(&miter); - - chunk_len = min(miter.length, pad_left); - memset(miter.addr, 0, chunk_len); - pad_left -= chunk_len; - } - - sg_miter_stop(&miter); - } - - sg_pcopy_from_buffer(req->dst, - sg_nents_for_len(req->dst, ctx->key_size), - req_ctx->out_buf, req_ctx->child_req.dst_len, - pad_len); - } + unsigned int pad_len; + unsigned int len; + u8 *out_buf; + + if (err) + goto out; + + len = req_ctx->child_req.dst_len; + pad_len = ctx->key_size - len; + + /* Four billion to one */ + if (likely(!pad_len)) + goto out; + + out_buf = kzalloc(ctx->key_size, GFP_ATOMIC); + err = -ENOMEM; + if (!out_buf) + goto out; + + sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len), + out_buf + pad_len, len); + sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, ctx->key_size), + out_buf, ctx->key_size); + kzfree(out_buf); + +out: req->dst_len = ctx->key_size; kfree(req_ctx->in_buf); - kzfree(req_ctx->out_buf); return err; } @@ -255,15 +254,6 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) return
-EOVERFLOW; } - /* - * Replace both input and output to add the padding in the input and - * the potential missing leading zeros in the output. - */ - req_ctx->child_req.src = req_ctx->in_sg; - req_ctx->child_req.src_len = ctx->key_size - 1; - req_ctx->child_req.dst = req_ctx->out_sg; - req_ctx->child_req.dst_len = ctx->key_size; - req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, GFP_KERNEL); if (!req_ctx->in_buf) @@ -291,6 +281,10 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, pkcs1pad_encrypt_sign_complete_cb, req); + /* Reuse output buffer */ + akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg, + req->dst, ctx->key_size - 1, req->dst_len); + err = crypto_akcipher_encrypt(&req_ctx->child_req); if (err != -EINPROGRESS && (err != -EBUSY || @@ -372,12 +366,6 @@ static int pkcs1pad_decrypt(struct akcipher_request *req) if (!ctx->key_size || req->src_len != ctx->key_size) return -EINVAL; - /* Reuse input buffer, output to a new buffer */ - req_ctx->child_req.src = req->src; - req_ctx->child_req.src_len = req->src_len; - req_ctx->child_req.dst = req_ctx->out_sg; - req_ctx->child_req.dst_len = ctx->key_size ; - req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL); if (!req_ctx->out_buf) return -ENOMEM; @@ -389,6 +377,11 @@ static int pkcs1pad_decrypt(struct akcipher_request *req) akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, pkcs1pad_decrypt_complete_cb, req); + /* Reuse input buffer, output to a new buffer */ + akcipher_request_set_crypt(&req_ctx->child_req, req->src, + req_ctx->out_sg, req->src_len, + ctx->key_size); + err = crypto_akcipher_decrypt(&req_ctx->child_req); if (err != -EINPROGRESS && (err != -EBUSY || @@ -422,15 +415,6 @@ static int pkcs1pad_sign(struct akcipher_request *req) return -EOVERFLOW; } - /* - * Replace both input and output to add the padding in the input and - * the potential missing leading zeros in the output. 
- */ - req_ctx->child_req.src = req_ctx->in_sg; - req_ctx->child_req.src_len = ctx->key_size - 1; - req_ctx->child_req.dst = req_ctx->out_sg; - req_ctx->child_req.dst_len = ctx->key_size; - req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, GFP_KERNEL); if (!req_ctx->in_buf) @@ -447,19 +431,14 @@ static int pkcs1pad_sign(struct akcipher_request *req) pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, ctx->key_size - 1 - req->src_len, req->src); - req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL); - if (!req_ctx->out_buf) { - kfree(req_ctx->in_buf); - return -ENOMEM; - } - - pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, - ctx->key_size, NULL); - akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, pkcs1pad_encrypt_sign_complete_cb, req); + /* Reuse output buffer */ + akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg, + req->dst, ctx->key_size - 1, req->dst_len); + err = crypto_akcipher_sign(&req_ctx->child_req); if (err != -EINPROGRESS && (err != -EBUSY || @@ -559,12 +538,6 @@ static int pkcs1pad_verify(struct akcipher_request *req) if (!ctx->key_size || req->src_len < ctx->key_size) return -EINVAL; - /* Reuse input buffer, output to a new buffer */ - req_ctx->child_req.src = req->src; - req_ctx->child_req.src_len = req->src_len; - req_ctx->child_req.dst = req_ctx->out_sg; - req_ctx->child_req.dst_len = ctx->key_size; - req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL); if (!req_ctx->out_buf) return -ENOMEM; @@ -576,6 +549,11 @@ static int pkcs1pad_verify(struct akcipher_request *req) akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, pkcs1pad_verify_complete_cb, req); + /* Reuse input buffer, output to a new buffer */ + akcipher_request_set_crypt(&req_ctx->child_req, req->src, + req_ctx->out_sg, req->src_len, + ctx->key_size); + err = crypto_akcipher_verify(&req_ctx->child_req); if (err != -EINPROGRESS && (err != -EBUSY || -- cgit v1.2.3-70-g09d2 From d13cd11fbccb5239c8fb4c1e70e0ca2e811ba2c4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 30 Jun 2016 11:00:13 +0800 Subject: crypto: tcrypt - Do not bail on EINPROGRESS in multibuffer hash test The multibuffer hash speed test is incorrectly bailing because of an EINPROGRESS return value. This patch fixes it by setting ret to zero if it is equal to -EINPROGRESS. Reported-by: Megha Dey Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 11aedae02382..202cfa10076c 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -486,8 +486,10 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, for (k = 0; k < 8; k++) { ret = crypto_ahash_digest(data[k].req); - if (ret == -EINPROGRESS) + if (ret == -EINPROGRESS) { + ret = 0; continue; + } if (ret) break; -- cgit v1.2.3-70-g09d2 From 98eca72fa04a9bbf28dba95efaec5aa95588fe23 Mon Sep 17 00:00:00 2001 From: raveendra padasalagi Date: Fri, 1 Jul 2016 11:16:54 +0530 Subject: crypto: sha3 - Add HMAC-SHA3 test modes and test vectors This patch adds HMAC-SHA3 test modes in tcrypt module and related test vectors. 
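The new modes plug into the existing tcrypt dispatch switch, for example:

	case 111:
		ret += tcrypt_test("hmac(sha3-224)");
		break;

As with the other modes, the tests can then be run by loading the module with the matching mode number, e.g. modprobe tcrypt mode=111 for hmac(sha3-224) through mode=114 for hmac(sha3-512).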
Signed-off-by: Raveendra Padasalagi Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 16 +++ crypto/testmgr.c | 40 ++++++ crypto/testmgr.h | 388 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 444 insertions(+) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 202cfa10076c..ae22f05d5936 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1313,6 +1313,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) ret += tcrypt_test("hmac(crc32)"); break; + case 111: + ret += tcrypt_test("hmac(sha3-224)"); + break; + + case 112: + ret += tcrypt_test("hmac(sha3-256)"); + break; + + case 113: + ret += tcrypt_test("hmac(sha3-384)"); + break; + + case 114: + ret += tcrypt_test("hmac(sha3-512)"); + break; + case 150: ret += tcrypt_test("ansi_cprng"); break; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 38e23be315a0..8ea0d3fcb580 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3399,6 +3399,46 @@ static const struct alg_test_desc alg_test_descs[] = { .count = HMAC_SHA256_TEST_VECTORS } } + }, { + .alg = "hmac(sha3-224)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = { + .vecs = hmac_sha3_224_tv_template, + .count = HMAC_SHA3_224_TEST_VECTORS + } + } + }, { + .alg = "hmac(sha3-256)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = { + .vecs = hmac_sha3_256_tv_template, + .count = HMAC_SHA3_256_TEST_VECTORS + } + } + }, { + .alg = "hmac(sha3-384)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = { + .vecs = hmac_sha3_384_tv_template, + .count = HMAC_SHA3_384_TEST_VECTORS + } + } + }, { + .alg = "hmac(sha3-512)", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = { + .vecs = hmac_sha3_512_tv_template, + .count = HMAC_SHA3_512_TEST_VECTORS + } + } }, { .alg = "hmac(sha384)", .test = alg_test_hash, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 7358931b3082..4ce2d866919d 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -3694,6 +3694,394 @@ static struct hash_testvec hmac_sha512_tv_template[] = { }, }; +#define HMAC_SHA3_224_TEST_VECTORS 4 + +static struct hash_testvec hmac_sha3_224_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b", + .ksize = 20, + .plaintext = "Hi There", + .psize = 8, + .digest = "\x3b\x16\x54\x6b\xbc\x7b\xe2\x70" + "\x6a\x03\x1d\xca\xfd\x56\x37\x3d" + "\x98\x84\x36\x76\x41\xd8\xc5\x9a" + "\xf3\xc8\x60\xf7", + }, { + .key = "Jefe", + .ksize = 4, + .plaintext = "what do ya want for nothing?", + .psize = 28, + .digest = "\x7f\xdb\x8d\xd8\x8b\xd2\xf6\x0d" + "\x1b\x79\x86\x34\xad\x38\x68\x11" + "\xc2\xcf\xc8\x5b\xfa\xf5\xd5\x2b" + "\xba\xce\x5e\x66", + .np = 4, + .tap = { 7, 7, 7, 7 } + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa", + .ksize = 131, + .plaintext = "Test Using Large" + "r Than Block-Siz" + "e Key - Hash Key" + " First", + .psize = 54, + .digest = 
"\xb4\xa1\xf0\x4c\x00\x28\x7a\x9b" + "\x7f\x60\x75\xb3\x13\xd2\x79\xb8" + "\x33\xbc\x8f\x75\x12\x43\x52\xd0" + "\x5f\xb9\x99\x5f", + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa", + .ksize = 131, + .plaintext = + "This is a test u" + "sing a larger th" + "an block-size ke" + "y and a larger t" + "han block-size d" + "ata. The key nee" + "ds to be hashed " + "before being use" + "d by the HMAC al" + "gorithm.", + .psize = 152, + .digest = "\x05\xd8\xcd\x6d\x00\xfa\xea\x8d" + "\x1e\xb6\x8a\xde\x28\x73\x0b\xbd" + "\x3c\xba\xb6\x92\x9f\x0a\x08\x6b" + "\x29\xcd\x62\xa0", + }, +}; + +#define HMAC_SHA3_256_TEST_VECTORS 4 + +static struct hash_testvec hmac_sha3_256_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b", + .ksize = 20, + .plaintext = "Hi There", + .psize = 8, + .digest = "\xba\x85\x19\x23\x10\xdf\xfa\x96" + "\xe2\xa3\xa4\x0e\x69\x77\x43\x51" + "\x14\x0b\xb7\x18\x5e\x12\x02\xcd" + "\xcc\x91\x75\x89\xf9\x5e\x16\xbb", + }, { + .key = "Jefe", + .ksize = 4, + .plaintext = "what do ya want for nothing?", + .psize = 28, + .digest = "\xc7\xd4\x07\x2e\x78\x88\x77\xae" + "\x35\x96\xbb\xb0\xda\x73\xb8\x87" + "\xc9\x17\x1f\x93\x09\x5b\x29\x4a" + "\xe8\x57\xfb\xe2\x64\x5e\x1b\xa5", + .np = 4, + .tap = { 7, 7, 7, 7 } + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa", + .ksize = 131, + .plaintext = "Test Using Large" + "r Than Block-Siz" + "e Key - Hash Key" + " First", + .psize = 54, + .digest = "\xed\x73\xa3\x74\xb9\x6c\x00\x52" + "\x35\xf9\x48\x03\x2f\x09\x67\x4a" + "\x58\xc0\xce\x55\x5c\xfc\x1f\x22" + "\x3b\x02\x35\x65\x60\x31\x2c\x3b", + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa", + .ksize = 131, + .plaintext = + "This is a test u" + "sing a larger th" + "an block-size ke" + "y and a larger t" + "han block-size d" + "ata. 
The key nee" + "ds to be hashed " + "before being use" + "d by the HMAC al" + "gorithm.", + .psize = 152, + .digest = "\x65\xc5\xb0\x6d\x4c\x3d\xe3\x2a" + "\x7a\xef\x87\x63\x26\x1e\x49\xad" + "\xb6\xe2\x29\x3e\xc8\xe7\xc6\x1e" + "\x8d\xe6\x17\x01\xfc\x63\xe1\x23", + }, +}; + +#define HMAC_SHA3_384_TEST_VECTORS 4 + +static struct hash_testvec hmac_sha3_384_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b", + .ksize = 20, + .plaintext = "Hi There", + .psize = 8, + .digest = "\x68\xd2\xdc\xf7\xfd\x4d\xdd\x0a" + "\x22\x40\xc8\xa4\x37\x30\x5f\x61" + "\xfb\x73\x34\xcf\xb5\xd0\x22\x6e" + "\x1b\xc2\x7d\xc1\x0a\x2e\x72\x3a" + "\x20\xd3\x70\xb4\x77\x43\x13\x0e" + "\x26\xac\x7e\x3d\x53\x28\x86\xbd", + }, { + .key = "Jefe", + .ksize = 4, + .plaintext = "what do ya want for nothing?", + .psize = 28, + .digest = "\xf1\x10\x1f\x8c\xbf\x97\x66\xfd" + "\x67\x64\xd2\xed\x61\x90\x3f\x21" + "\xca\x9b\x18\xf5\x7c\xf3\xe1\xa2" + "\x3c\xa1\x35\x08\xa9\x32\x43\xce" + "\x48\xc0\x45\xdc\x00\x7f\x26\xa2" + "\x1b\x3f\x5e\x0e\x9d\xf4\xc2\x0a", + .np = 4, + .tap = { 7, 7, 7, 7 } + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa", + .ksize = 131, + .plaintext = "Test Using Large" + "r Than Block-Siz" + "e Key - Hash Key" + " First", + .psize = 54, + .digest = "\x0f\xc1\x95\x13\xbf\x6b\xd8\x78" + "\x03\x70\x16\x70\x6a\x0e\x57\xbc" + "\x52\x81\x39\x83\x6b\x9a\x42\xc3" + "\xd4\x19\xe4\x98\xe0\xe1\xfb\x96" + "\x16\xfd\x66\x91\x38\xd3\x3a\x11" + "\x05\xe0\x7c\x72\xb6\x95\x3b\xcc", + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa", + .ksize = 131, + .plaintext = + "This is a test u" + "sing a larger th" + "an block-size ke" + "y and a larger t" + "han block-size d" + "ata. 
The key nee" + "ds to be hashed " + "before being use" + "d by the HMAC al" + "gorithm.", + .psize = 152, + .digest = "\x02\x6f\xdf\x6b\x50\x74\x1e\x37" + "\x38\x99\xc9\xf7\xd5\x40\x6d\x4e" + "\xb0\x9f\xc6\x66\x56\x36\xfc\x1a" + "\x53\x00\x29\xdd\xf5\xcf\x3c\xa5" + "\xa9\x00\xed\xce\x01\xf5\xf6\x1e" + "\x2f\x40\x8c\xdf\x2f\xd3\xe7\xe8", + }, +}; + +#define HMAC_SHA3_512_TEST_VECTORS 4 + +static struct hash_testvec hmac_sha3_512_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" + "\x0b\x0b\x0b\x0b", + .ksize = 20, + .plaintext = "Hi There", + .psize = 8, + .digest = "\xeb\x3f\xbd\x4b\x2e\xaa\xb8\xf5" + "\xc5\x04\xbd\x3a\x41\x46\x5a\xac" + "\xec\x15\x77\x0a\x7c\xab\xac\x53" + "\x1e\x48\x2f\x86\x0b\x5e\xc7\xba" + "\x47\xcc\xb2\xc6\xf2\xaf\xce\x8f" + "\x88\xd2\x2b\x6d\xc6\x13\x80\xf2" + "\x3a\x66\x8f\xd3\x88\x8b\xb8\x05" + "\x37\xc0\xa0\xb8\x64\x07\x68\x9e", + }, { + .key = "Jefe", + .ksize = 4, + .plaintext = "what do ya want for nothing?", + .psize = 28, + .digest = "\x5a\x4b\xfe\xab\x61\x66\x42\x7c" + "\x7a\x36\x47\xb7\x47\x29\x2b\x83" + "\x84\x53\x7c\xdb\x89\xaf\xb3\xbf" + "\x56\x65\xe4\xc5\xe7\x09\x35\x0b" + "\x28\x7b\xae\xc9\x21\xfd\x7c\xa0" + "\xee\x7a\x0c\x31\xd0\x22\xa9\x5e" + "\x1f\xc9\x2b\xa9\xd7\x7d\xf8\x83" + "\x96\x02\x75\xbe\xb4\xe6\x20\x24", + .np = 4, + .tap = { 7, 7, 7, 7 } + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa", + .ksize = 131, + .plaintext = "Test Using Large" + "r Than Block-Siz" + "e Key - Hash Key" + " First", + .psize = 54, + .digest = "\x00\xf7\x51\xa9\xe5\x06\x95\xb0" + "\x90\xed\x69\x11\xa4\xb6\x55\x24" + "\x95\x1c\xdc\x15\xa7\x3a\x5d\x58" + "\xbb\x55\x21\x5e\xa2\xcd\x83\x9a" + "\xc7\x9d\x2b\x44\xa3\x9b\xaf\xab" + "\x27\xe8\x3f\xde\x9e\x11\xf6\x34" + "\x0b\x11\xd9\x91\xb1\xb9\x1b\xf2" + "\xee\xe7\xfc\x87\x24\x26\xc3\xa4", + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa", + .ksize = 131, + .plaintext = + "This is a test u" + "sing a larger th" + "an block-size ke" + "y and a larger t" + "han block-size d" + "ata. 
The key nee" + "ds to be hashed " + "before being use" + "d by the HMAC al" + "gorithm.", + .psize = 152, + .digest = "\x38\xa4\x56\xa0\x04\xbd\x10\xd3" + "\x2c\x9a\xb8\x33\x66\x84\x11\x28" + "\x62\xc3\xdb\x61\xad\xcc\xa3\x18" + "\x29\x35\x5e\xaf\x46\xfd\x5c\x73" + "\xd0\x6a\x1f\x0d\x13\xfe\xc9\xa6" + "\x52\xfb\x38\x11\xb5\x77\xb1\xb1" + "\xd1\xb9\x78\x9f\x97\xae\x5b\x83" + "\xc6\xf4\x4d\xfc\xf1\xd6\x7e\xba", + }, +}; + /* * Poly1305 test vectors from RFC7539 A.3. */ -- cgit v1.2.3-70-g09d2 From 27710b8ea3defcbd7d340dbd0423d911b4eb7c4f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 3 Jul 2016 10:46:11 +0800 Subject: crypto: rsa-pkcs1pad - Fix regression from leading zeros As the software RSA implementation now produces fixed-length output, we need to eliminate leading zeros in the calling code instead. This patch does just that for pkcs1pad signature verification. Fixes: 9b45b7bba3d2 ("crypto: rsa - Generate fixed-length output") Reported-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) (limited to 'crypto') diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 8ccfdd7c926e..880d3db57a25 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -456,49 +456,55 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) struct akcipher_instance *inst = akcipher_alg_instance(tfm); struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst); const struct rsa_asn1_template *digest_info = ictx->digest_info; + unsigned int dst_len; unsigned int pos; - - if (err == -EOVERFLOW) - /* Decrypted value had no leading 0 byte */ - err = -EINVAL; + u8 *out_buf; if (err) goto done; - if (req_ctx->child_req.dst_len != ctx->key_size - 1) { - err = -EINVAL; + err = -EINVAL; + dst_len = req_ctx->child_req.dst_len; + if (dst_len < ctx->key_size - 1) goto done; + + out_buf = req_ctx->out_buf; + if (dst_len == ctx->key_size) { + if (out_buf[0] != 0x00) + /* Decrypted value had no leading 0 byte */ + goto done; + + dst_len--; + out_buf++; } err = -EBADMSG; - if (req_ctx->out_buf[0] != 0x01) + if (out_buf[0] != 0x01) goto done; - for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) - if (req_ctx->out_buf[pos] != 0xff) + for (pos = 1; pos < dst_len; pos++) + if (out_buf[pos] != 0xff) break; - if (pos < 9 || pos == req_ctx->child_req.dst_len || - req_ctx->out_buf[pos] != 0x00) + if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00) goto done; pos++; - if (memcmp(req_ctx->out_buf + pos, digest_info->data, - digest_info->size)) + if (memcmp(out_buf + pos, digest_info->data, digest_info->size)) goto done; pos += digest_info->size; err = 0; - if (req->dst_len < req_ctx->child_req.dst_len - pos) + if (req->dst_len < dst_len - pos) err = -EOVERFLOW; - req->dst_len = req_ctx->child_req.dst_len - pos; + req->dst_len = dst_len - pos; if (!err) sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, req->dst_len), - req_ctx->out_buf + pos, req->dst_len); + out_buf + pos, req->dst_len); done: kzfree(req_ctx->out_buf); -- cgit v1.2.3-70-g09d2 From 6dd7a82cc54ebd2936763befd3dcd4beb727a704 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Fri, 1 Jul 2016 08:19:45 +1000 Subject: crypto: powerpc - Add POWER8 optimised crc32c Use the vector polynomial multiply-sum instructions in POWER8 to speed up crc32c. This is just over 41x faster than the slice-by-8 method that it replaces. Measurements on a 4.1 GHz POWER8 show it sustaining 52 GiB/sec. 
A simple btrfs write performance test: dd if=/dev/zero of=/mnt/tmpfile bs=1M count=4096 sync is over 3.7x faster. Signed-off-by: Anton Blanchard Signed-off-by: Herbert Xu --- arch/powerpc/crypto/Makefile | 2 + arch/powerpc/crypto/crc32c-vpmsum_asm.S | 1553 ++++++++++++++++++++++++++++++ arch/powerpc/crypto/crc32c-vpmsum_glue.c | 167 ++++ arch/powerpc/include/asm/ppc-opcode.h | 12 + crypto/Kconfig | 11 + 5 files changed, 1745 insertions(+) create mode 100644 arch/powerpc/crypto/crc32c-vpmsum_asm.S create mode 100644 arch/powerpc/crypto/crc32c-vpmsum_glue.c (limited to 'crypto') diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile index 9c221b69c181..7998c177f0a2 100644 --- a/arch/powerpc/crypto/Makefile +++ b/arch/powerpc/crypto/Makefile @@ -9,9 +9,11 @@ obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o +obj-$(CONFIG_CRYPT_CRC32C_VPMSUM) += crc32c-vpmsum.o aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o md5-ppc-y := md5-asm.o md5-glue.o sha1-powerpc-y := sha1-powerpc-asm.o sha1.o sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o +crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o diff --git a/arch/powerpc/crypto/crc32c-vpmsum_asm.S b/arch/powerpc/crypto/crc32c-vpmsum_asm.S new file mode 100644 index 000000000000..dc640b212299 --- /dev/null +++ b/arch/powerpc/crypto/crc32c-vpmsum_asm.S @@ -0,0 +1,1553 @@ +/* + * Calculate the checksum of data that is 16 byte aligned and a multiple of + * 16 bytes. + * + * The first step is to reduce it to 1024 bits. We do this in 8 parallel + * chunks in order to mask the latency of the vpmsum instructions. If we + * have more than 32 kB of data to checksum we repeat this step multiple + * times, passing in the previous 1024 bits. + * + * The next step is to reduce the 1024 bits to 64 bits. This step adds + * 32 bits of 0s to the end - this matches what a CRC does. We just + * calculate constants that land the data in this 32 bits. + * + * We then use fixed point Barrett reduction to compute a mod n over GF(2) + * for n = CRC using POWER8 instructions. We use x = 32. + * + * http://en.wikipedia.org/wiki/Barrett_reduction + * + * Copyright (C) 2015 Anton Blanchard , IBM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include +#include + + .section .rodata +.balign 16 + +.byteswap_constant: + /* byte reverse permute constant */ + .octa 0x0F0E0D0C0B0A09080706050403020100 + +#define MAX_SIZE 32768 +.constants: + + /* Reduce 262144 kbits to 1024 bits */ + /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */ + .octa 0x00000000b6ca9e20000000009c37c408 + + /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */ + .octa 0x00000000350249a800000001b51df26c + + /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */ + .octa 0x00000001862dac54000000000724b9d0 + + /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */ + .octa 0x00000001d87fb48c00000001c00532fe + + /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */ + .octa 0x00000001f39b699e00000000f05a9362 + + /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */ + .octa 0x0000000101da11b400000001e1007970 + + /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */ + .octa 0x00000001cab571e000000000a57366ee + + /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */ + .octa 0x00000000c7020cfe0000000192011284 + + /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */ + .octa 0x00000000cdaed1ae0000000162716d9a + + /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */ + .octa 0x00000001e804effc00000000cd97ecde + + /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */ + .octa 0x0000000077c3ea3a0000000058812bc0 + + /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */ + .octa 0x0000000068df31b40000000088b8c12e + + /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */ + .octa 0x00000000b059b6c200000001230b234c + + /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */ + .octa 0x0000000145fb8ed800000001120b416e + + /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */ + .octa 0x00000000cbc0916800000001974aecb0 + + /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */ + .octa 0x000000005ceeedc2000000008ee3f226 + + /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */ + .octa 0x0000000047d74e8600000001089aba9a + + /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */ + .octa 0x00000001407e9e220000000065113872 + + /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */ + .octa 0x00000001da967bda000000005c07ec10 + + /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */ + .octa 0x000000006c8983680000000187590924 + + /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */ + .octa 0x00000000f2d14c9800000000e35da7c6 + + /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */ + .octa 0x00000001993c6ad4000000000415855a + + /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */ + .octa 0x000000014683d1ac0000000073617758 + + /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */ + .octa 0x00000001a7c93e6c0000000176021d28 + + /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */ + .octa 0x000000010211e90a00000001c358fd0a + + /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */ + .octa 0x000000001119403e00000001ff7a2c18 + + /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */ + .octa 0x000000001c3261aa00000000f2d9f7e4 + + /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */ + .octa 0x000000014e37a634000000016cf1f9c8 + + /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */ + .octa 0x0000000073786c0c000000010af9279a + + /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */ + .octa 0x000000011dc037f80000000004f101e8 + + /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */ + .octa 0x0000000031433dfc0000000070bcf184 + + /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */ + .octa 0x000000009cde8348000000000a8de642 + + /* x^228352 mod p(x)` << 1, x^228416 mod 
p(x)` << 1 */ + .octa 0x0000000038d3c2a60000000062ea130c + + /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */ + .octa 0x000000011b25f26000000001eb31cbb2 + + /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */ + .octa 0x000000001629e6f00000000170783448 + + /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */ + .octa 0x0000000160838b4c00000001a684b4c6 + + /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */ + .octa 0x000000007a44011c00000000253ca5b4 + + /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */ + .octa 0x00000000226f417a0000000057b4b1e2 + + /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */ + .octa 0x0000000045eb2eb400000000b6bd084c + + /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */ + .octa 0x000000014459d70c0000000123c2d592 + + /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */ + .octa 0x00000001d406ed8200000000159dafce + + /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */ + .octa 0x0000000160c8e1a80000000127e1a64e + + /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */ + .octa 0x0000000027ba80980000000056860754 + + /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */ + .octa 0x000000006d92d01800000001e661aae8 + + /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */ + .octa 0x000000012ed7e3f200000000f82c6166 + + /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */ + .octa 0x000000002dc8778800000000c4f9c7ae + + /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */ + .octa 0x0000000018240bb80000000074203d20 + + /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */ + .octa 0x000000001ad381580000000198173052 + + /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */ + .octa 0x00000001396b78f200000001ce8aba54 + + /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */ + .octa 0x000000011a68133400000001850d5d94 + + /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */ + .octa 0x000000012104732e00000001d609239c + + /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */ + .octa 0x00000000a140d90c000000001595f048 + + /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */ + .octa 0x00000001b7215eda0000000042ccee08 + + /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */ + .octa 0x00000001aaf1df3c000000010a389d74 + + /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */ + .octa 0x0000000029d15b8a000000012a840da6 + + /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */ + .octa 0x00000000f1a96922000000001d181c0c + + /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */ + .octa 0x00000001ac80d03c0000000068b7d1f6 + + /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */ + .octa 0x000000000f11d56a000000005b0f14fc + + /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */ + .octa 0x00000001f1c022a20000000179e9e730 + + /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */ + .octa 0x0000000173d00ae200000001ce1368d6 + + /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */ + .octa 0x00000001d4ffe4ac0000000112c3a84c + + /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */ + .octa 0x000000016edc5ae400000000de940fee + + /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */ + .octa 0x00000001f1a0214000000000fe896b7e + + /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */ + .octa 0x00000000ca0b28a000000001f797431c + + /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */ + .octa 0x00000001928e30a20000000053e989ba + + /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */ + .octa 0x0000000097b1b002000000003920cd16 + + /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */ + .octa 0x00000000b15bf90600000001e6f579b8 + + /* x^192512 mod p(x)` << 
1, x^192576 mod p(x)` << 1 */ + .octa 0x00000000411c5d52000000007493cb0a + + /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */ + .octa 0x00000001c36f330000000001bdd376d8 + + /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */ + .octa 0x00000001119227e0000000016badfee6 + + /* x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1 */ + .octa 0x00000000114d47020000000071de5c58 + + /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */ + .octa 0x00000000458b5b9800000000453f317c + + /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */ + .octa 0x000000012e31fb8e0000000121675cce + + /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */ + .octa 0x000000005cf619d800000001f409ee92 + + /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */ + .octa 0x0000000063f4d8b200000000f36b9c88 + + /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */ + .octa 0x000000004138dc8a0000000036b398f4 + + /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */ + .octa 0x00000001d29ee8e000000001748f9adc + + /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */ + .octa 0x000000006a08ace800000001be94ec00 + + /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */ + .octa 0x0000000127d4201000000000b74370d6 + + /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */ + .octa 0x0000000019d76b6200000001174d0b98 + + /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */ + .octa 0x00000001b1471f6e00000000befc06a4 + + /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */ + .octa 0x00000001f64c19cc00000001ae125288 + + /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */ + .octa 0x00000000003c0ea00000000095c19b34 + + /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */ + .octa 0x000000014d73abf600000001a78496f2 + + /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */ + .octa 0x00000001620eb84400000001ac5390a0 + + /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */ + .octa 0x0000000147655048000000002a80ed6e + + /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */ + .octa 0x0000000067b5077e00000001fa9b0128 + + /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */ + .octa 0x0000000010ffe20600000001ea94929e + + /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */ + .octa 0x000000000fee8f1e0000000125f4305c + + /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */ + .octa 0x00000001da26fbae00000001471e2002 + + /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */ + .octa 0x00000001b3a8bd880000000132d2253a + + /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */ + .octa 0x00000000e8f3898e00000000f26b3592 + + /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */ + .octa 0x00000000b0d0d28c00000000bc8b67b0 + + /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */ + .octa 0x0000000030f2a798000000013a826ef2 + + /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */ + .octa 0x000000000fba10020000000081482c84 + + /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */ + .octa 0x00000000bdb9bd7200000000e77307c2 + + /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */ + .octa 0x0000000075d3bf5a00000000d4a07ec8 + + /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */ + .octa 0x00000000ef1f98a00000000017102100 + + /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */ + .octa 0x00000000689c760200000000db406486 + + /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */ + .octa 0x000000016d5fa5fe0000000192db7f88 + + /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */ + .octa 0x00000001d0d2b9ca000000018bf67b1e + + /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */ + .octa 0x0000000041e7b470000000007c09163e + + /* 
x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */ + .octa 0x00000001cbb6495e000000000adac060 + + /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */ + .octa 0x000000010052a0b000000000bd8316ae + + /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */ + .octa 0x00000001d8effb5c000000019f09ab54 + + /* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */ + .octa 0x00000001d969853c0000000125155542 + + /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */ + .octa 0x00000000523ccce2000000018fdb5882 + + /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */ + .octa 0x000000001e2436bc00000000e794b3f4 + + /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */ + .octa 0x00000000ddd1c3a2000000016f9bb022 + + /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */ + .octa 0x0000000019fcfe3800000000290c9978 + + /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */ + .octa 0x00000001ce95db640000000083c0f350 + + /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */ + .octa 0x00000000af5828060000000173ea6628 + + /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */ + .octa 0x00000001006388f600000001c8b4e00a + + /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */ + .octa 0x0000000179eca00a00000000de95d6aa + + /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */ + .octa 0x0000000122410a6a000000010b7f7248 + + /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */ + .octa 0x000000004288e87c00000001326e3a06 + + /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */ + .octa 0x000000016c5490da00000000bb62c2e6 + + /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */ + .octa 0x00000000d1c71f6e0000000156a4b2c2 + + /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */ + .octa 0x00000001b4ce08a6000000011dfe763a + + /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */ + .octa 0x00000001466ba60c000000007bcca8e2 + + /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */ + .octa 0x00000001f6c488a40000000186118faa + + /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */ + .octa 0x000000013bfb06820000000111a65a88 + + /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */ + .octa 0x00000000690e9e54000000003565e1c4 + + /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */ + .octa 0x00000000281346b6000000012ed02a82 + + /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */ + .octa 0x000000015646402400000000c486ecfc + + /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */ + .octa 0x000000016063a8dc0000000001b951b2 + + /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */ + .octa 0x0000000116a663620000000048143916 + + /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */ + .octa 0x000000017e8aa4d200000001dc2ae124 + + /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */ + .octa 0x00000001728eb10c00000001416c58d6 + + /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */ + .octa 0x00000001b08fd7fa00000000a479744a + + /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */ + .octa 0x00000001092a16e80000000096ca3a26 + + /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */ + .octa 0x00000000a505637c00000000ff223d4e + + /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */ + .octa 0x00000000d94869b2000000010e84da42 + + /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */ + .octa 0x00000001c8b203ae00000001b61ba3d0 + + /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */ + .octa 0x000000005704aea000000000680f2de8 + + /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */ + .octa 0x000000012e295fa2000000008772a9a8 + + /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */ + .octa 
0x000000011d0908bc0000000155f295bc + + /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */ + .octa 0x0000000193ed97ea00000000595f9282 + + /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */ + .octa 0x000000013a0f1c520000000164b1c25a + + /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */ + .octa 0x000000010c2c40c000000000fbd67c50 + + /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */ + .octa 0x00000000ff6fac3e0000000096076268 + + /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */ + .octa 0x000000017b3609c000000001d288e4cc + + /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */ + .octa 0x0000000088c8c92200000001eaac1bdc + + /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */ + .octa 0x00000001751baae600000001f1ea39e2 + + /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */ + .octa 0x000000010795297200000001eb6506fc + + /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */ + .octa 0x0000000162b00abe000000010f806ffe + + /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */ + .octa 0x000000000d7b404c000000010408481e + + /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */ + .octa 0x00000000763b13d40000000188260534 + + /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */ + .octa 0x00000000f6dc22d80000000058fc73e0 + + /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */ + .octa 0x000000007daae06000000000391c59b8 + + /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */ + .octa 0x000000013359ab7c000000018b638400 + + /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */ + .octa 0x000000008add438a000000011738f5c4 + + /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */ + .octa 0x00000001edbefdea000000008cf7c6da + + /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */ + .octa 0x000000004104e0f800000001ef97fb16 + + /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */ + .octa 0x00000000b48a82220000000102130e20 + + /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */ + .octa 0x00000001bcb4684400000000db968898 + + /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */ + .octa 0x000000013293ce0a00000000b5047b5e + + /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */ + .octa 0x00000001710d0844000000010b90fdb2 + + /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */ + .octa 0x0000000117907f6e000000004834a32e + + /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */ + .octa 0x0000000087ddf93e0000000059c8f2b0 + + /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */ + .octa 0x000000005970e9b00000000122cec508 + + /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */ + .octa 0x0000000185b2b7d0000000000a330cda + + /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */ + .octa 0x00000001dcee0efc000000014a47148c + + /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */ + .octa 0x0000000030da27220000000042c61cb8 + + /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */ + .octa 0x000000012f925a180000000012fe6960 + + /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */ + .octa 0x00000000dd2e357c00000000dbda2c20 + + /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */ + .octa 0x00000000071c80de000000011122410c + + /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */ + .octa 0x000000011513140a00000000977b2070 + + /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */ + .octa 0x00000001df876e8e000000014050438e + + /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */ + .octa 0x000000015f81d6ce0000000147c840e8 + + /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */ + .octa 0x000000019dd94dbe00000001cc7c88ce + + /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */ + .octa 
0x00000001373d206e00000001476b35a4 + + /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */ + .octa 0x00000000668ccade000000013d52d508 + + /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */ + .octa 0x00000001b192d268000000008e4be32e + + /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */ + .octa 0x00000000e30f3a7800000000024120fe + + /* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */ + .octa 0x000000010ef1f7bc00000000ddecddb4 + + /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */ + .octa 0x00000001f5ac738000000000d4d403bc + + /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */ + .octa 0x000000011822ea7000000001734b89aa + + /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */ + .octa 0x00000000c3a33848000000010e7a58d6 + + /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */ + .octa 0x00000001bd151c2400000001f9f04e9c + + /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */ + .octa 0x0000000056002d7600000000b692225e + + /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */ + .octa 0x000000014657c4f4000000019b8d3f3e + + /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */ + .octa 0x0000000113742d7c00000001a874f11e + + /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */ + .octa 0x000000019c5920ba000000010d5a4254 + + /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */ + .octa 0x000000005216d2d600000000bbb2f5d6 + + /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */ + .octa 0x0000000136f5ad8a0000000179cc0e36 + + /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */ + .octa 0x000000018b07beb600000001dca1da4a + + /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */ + .octa 0x00000000db1e93b000000000feb1a192 + + /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */ + .octa 0x000000000b96fa3a00000000d1eeedd6 + + /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */ + .octa 0x00000001d9968af0000000008fad9bb4 + + /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */ + .octa 0x000000000e4a77a200000001884938e4 + + /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */ + .octa 0x00000000508c2ac800000001bc2e9bc0 + + /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */ + .octa 0x0000000021572a8000000001f9658a68 + + /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */ + .octa 0x00000001b859daf2000000001b9224fc + + /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */ + .octa 0x000000016f7884740000000055b2fb84 + + /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */ + .octa 0x00000001b438810e000000018b090348 + + /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */ + .octa 0x0000000095ddc6f2000000011ccbd5ea + + /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */ + .octa 0x00000001d977c20c0000000007ae47f8 + + /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */ + .octa 0x00000000ebedb99a0000000172acbec0 + + /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */ + .octa 0x00000001df9e9e9200000001c6e3ff20 + + /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */ + .octa 0x00000001a4a3f95200000000e1b38744 + + /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */ + .octa 0x00000000e2f5122000000000791585b2 + + /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */ + .octa 0x000000004aa01f3e00000000ac53b894 + + /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */ + .octa 0x00000000b3e90a5800000001ed5f2cf4 + + /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */ + .octa 0x000000000c9ca2aa00000001df48b2e0 + + /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */ + .octa 0x000000015168231600000000049c1c62 + + /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */ + .octa 0x0000000036fce78c000000017c460c12 + + /* x^49152 mod 
p(x)` << 1, x^49216 mod p(x)` << 1 */ + .octa 0x000000009037dc10000000015be4da7e + + /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */ + .octa 0x00000000d3298582000000010f38f668 + + /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */ + .octa 0x00000001b42e8ad60000000039f40a00 + + /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */ + .octa 0x00000000142a983800000000bd4c10c4 + + /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */ + .octa 0x0000000109c7f1900000000042db1d98 + + /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */ + .octa 0x0000000056ff931000000001c905bae6 + + /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */ + .octa 0x00000001594513aa00000000069d40ea + + /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */ + .octa 0x00000001e3b5b1e8000000008e4fbad0 + + /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */ + .octa 0x000000011dd5fc080000000047bedd46 + + /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */ + .octa 0x00000001675f0cc20000000026396bf8 + + /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */ + .octa 0x00000000d1c8dd4400000000379beb92 + + /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */ + .octa 0x0000000115ebd3d8000000000abae54a + + /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */ + .octa 0x00000001ecbd0dac0000000007e6a128 + + /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */ + .octa 0x00000000cdf67af2000000000ade29d2 + + /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */ + .octa 0x000000004c01ff4c00000000f974c45c + + /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */ + .octa 0x00000000f2d8657e00000000e77ac60a + + /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */ + .octa 0x000000006bae74c40000000145895816 + + /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */ + .octa 0x0000000152af8aa00000000038e362be + + /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */ + .octa 0x0000000004663802000000007f991a64 + + /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */ + .octa 0x00000001ab2f5afc00000000fa366d3a + + /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */ + .octa 0x0000000074a4ebd400000001a2bb34f0 + + /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */ + .octa 0x00000001d7ab3a4c0000000028a9981e + + /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */ + .octa 0x00000001a8da60c600000001dbc672be + + /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */ + .octa 0x000000013cf6382000000000b04d77f6 + + /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */ + .octa 0x00000000bec12e1e0000000124400d96 + + /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */ + .octa 0x00000001c6368010000000014ca4b414 + + /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */ + .octa 0x00000001e6e78758000000012fe2c938 + + /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */ + .octa 0x000000008d7f2b3c00000001faed01e6 + + /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */ + .octa 0x000000016b4a156e000000007e80ecfe + + /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */ + .octa 0x00000001c63cfeb60000000098daee94 + + /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */ + .octa 0x000000015f902670000000010a04edea + + /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */ + .octa 0x00000001cd5de11e00000001c00b4524 + + /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */ + .octa 0x000000001acaec540000000170296550 + + /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */ + .octa 0x000000002bd0ca780000000181afaa48 + + /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */ + .octa 0x0000000032d63d5c0000000185a31ffa + + /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */ + .octa 
0x000000001c6d4e4c000000002469f608 + + /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */ + .octa 0x0000000106a60b92000000006980102a + + /* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */ + .octa 0x00000000d3855e120000000111ea9ca8 + + /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */ + .octa 0x00000000e312563600000001bd1d29ce + + /* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */ + .octa 0x000000009e8f7ea400000001b34b9580 + + /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */ + .octa 0x00000001c82e562c000000003076054e + + /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */ + .octa 0x00000000ca9f09ce000000012a608ea4 + + /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */ + .octa 0x00000000c63764e600000000784d05fe + + /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */ + .octa 0x0000000168d2e49e000000016ef0d82a + + /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */ + .octa 0x00000000e986c1480000000075bda454 + + /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */ + .octa 0x00000000cfb65894000000003dc0a1c4 + + /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */ + .octa 0x0000000111cadee400000000e9a5d8be + + /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */ + .octa 0x0000000171fb63ce00000001609bc4b4 + +.short_constants: + + /* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros */ + /* x^1952 mod p(x)`, x^1984 mod p(x)`, x^2016 mod p(x)`, x^2048 mod p(x)` */ + .octa 0x7fec2963e5bf80485cf015c388e56f72 + + /* x^1824 mod p(x)`, x^1856 mod p(x)`, x^1888 mod p(x)`, x^1920 mod p(x)` */ + .octa 0x38e888d4844752a9963a18920246e2e6 + + /* x^1696 mod p(x)`, x^1728 mod p(x)`, x^1760 mod p(x)`, x^1792 mod p(x)` */ + .octa 0x42316c00730206ad419a441956993a31 + + /* x^1568 mod p(x)`, x^1600 mod p(x)`, x^1632 mod p(x)`, x^1664 mod p(x)` */ + .octa 0x543d5c543e65ddf9924752ba2b830011 + + /* x^1440 mod p(x)`, x^1472 mod p(x)`, x^1504 mod p(x)`, x^1536 mod p(x)` */ + .octa 0x78e87aaf56767c9255bd7f9518e4a304 + + /* x^1312 mod p(x)`, x^1344 mod p(x)`, x^1376 mod p(x)`, x^1408 mod p(x)` */ + .octa 0x8f68fcec1903da7f6d76739fe0553f1e + + /* x^1184 mod p(x)`, x^1216 mod p(x)`, x^1248 mod p(x)`, x^1280 mod p(x)` */ + .octa 0x3f4840246791d588c133722b1fe0b5c3 + + /* x^1056 mod p(x)`, x^1088 mod p(x)`, x^1120 mod p(x)`, x^1152 mod p(x)` */ + .octa 0x34c96751b04de25a64b67ee0e55ef1f3 + + /* x^928 mod p(x)`, x^960 mod p(x)`, x^992 mod p(x)`, x^1024 mod p(x)` */ + .octa 0x156c8e180b4a395b069db049b8fdb1e7 + + /* x^800 mod p(x)`, x^832 mod p(x)`, x^864 mod p(x)`, x^896 mod p(x)` */ + .octa 0xe0b99ccbe661f7bea11bfaf3c9e90b9e + + /* x^672 mod p(x)`, x^704 mod p(x)`, x^736 mod p(x)`, x^768 mod p(x)` */ + .octa 0x041d37768cd75659817cdc5119b29a35 + + /* x^544 mod p(x)`, x^576 mod p(x)`, x^608 mod p(x)`, x^640 mod p(x)` */ + .octa 0x3a0777818cfaa9651ce9d94b36c41f1c + + /* x^416 mod p(x)`, x^448 mod p(x)`, x^480 mod p(x)`, x^512 mod p(x)` */ + .octa 0x0e148e8252377a554f256efcb82be955 + + /* x^288 mod p(x)`, x^320 mod p(x)`, x^352 mod p(x)`, x^384 mod p(x)` */ + .octa 0x9c25531d19e65ddeec1631edb2dea967 + + /* x^160 mod p(x)`, x^192 mod p(x)`, x^224 mod p(x)`, x^256 mod p(x)` */ + .octa 0x790606ff9957c0a65d27e147510ac59a + + /* x^32 mod p(x)`, x^64 mod p(x)`, x^96 mod p(x)`, x^128 mod p(x)` */ + .octa 0x82f63b786ea2d55ca66805eb18b8ea18 + + +.barrett_constants: + /* 33 bit reflected Barrett constant m - (4^32)/n */ + .octa 0x000000000000000000000000dea713f1 /* x^64 div p(x)` */ + /* 33 bit reflected Barrett constant n */ + .octa 0x00000000000000000000000105ec76f1 + + .text + +#if 
defined(__BIG_ENDIAN__) +#define BYTESWAP_DATA +#else +#undef BYTESWAP_DATA +#endif + +#define off16 r25 +#define off32 r26 +#define off48 r27 +#define off64 r28 +#define off80 r29 +#define off96 r30 +#define off112 r31 + +#define const1 v24 +#define const2 v25 + +#define byteswap v26 +#define mask_32bit v27 +#define mask_64bit v28 +#define zeroes v29 + +#ifdef BYTESWAP_DATA +#define VPERM(A, B, C, D) vperm A, B, C, D +#else +#define VPERM(A, B, C, D) +#endif + +/* unsigned int __crc32c_vpmsum(unsigned int crc, void *p, unsigned long len) */ +FUNC_START(__crc32c_vpmsum) + std r31,-8(r1) + std r30,-16(r1) + std r29,-24(r1) + std r28,-32(r1) + std r27,-40(r1) + std r26,-48(r1) + std r25,-56(r1) + + li off16,16 + li off32,32 + li off48,48 + li off64,64 + li off80,80 + li off96,96 + li off112,112 + li r0,0 + + /* Enough room for saving 10 non volatile VMX registers */ + subi r6,r1,56+10*16 + subi r7,r1,56+2*16 + + stvx v20,0,r6 + stvx v21,off16,r6 + stvx v22,off32,r6 + stvx v23,off48,r6 + stvx v24,off64,r6 + stvx v25,off80,r6 + stvx v26,off96,r6 + stvx v27,off112,r6 + stvx v28,0,r7 + stvx v29,off16,r7 + + mr r10,r3 + + vxor zeroes,zeroes,zeroes + vspltisw v0,-1 + + vsldoi mask_32bit,zeroes,v0,4 + vsldoi mask_64bit,zeroes,v0,8 + + /* Get the initial value into v8 */ + vxor v8,v8,v8 + MTVRD(v8, R3) + vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */ + +#ifdef BYTESWAP_DATA + addis r3,r2,.byteswap_constant@toc@ha + addi r3,r3,.byteswap_constant@toc@l + + lvx byteswap,0,r3 + addi r3,r3,16 +#endif + + cmpdi r5,256 + blt .Lshort + + rldicr r6,r5,0,56 + + /* Checksum in blocks of MAX_SIZE */ +1: lis r7,MAX_SIZE@h + ori r7,r7,MAX_SIZE@l + mr r9,r7 + cmpd r6,r7 + bgt 2f + mr r7,r6 +2: subf r6,r7,r6 + + /* our main loop does 128 bytes at a time */ + srdi r7,r7,7 + + /* + * Work out the offset into the constants table to start at. Each + * constant is 16 bytes, and it is used against 128 bytes of input + * data - 128 / 16 = 8 + */ + sldi r8,r7,4 + srdi r9,r9,3 + subf r8,r8,r9 + + /* We reduce our final 128 bytes in a separate step */ + addi r7,r7,-1 + mtctr r7 + + addis r3,r2,.constants@toc@ha + addi r3,r3,.constants@toc@l + + /* Find the start of our constants */ + add r3,r3,r8 + + /* zero v0-v7 which will contain our checksums */ + vxor v0,v0,v0 + vxor v1,v1,v1 + vxor v2,v2,v2 + vxor v3,v3,v3 + vxor v4,v4,v4 + vxor v5,v5,v5 + vxor v6,v6,v6 + vxor v7,v7,v7 + + lvx const1,0,r3 + + /* + * If we are looping back to consume more data we use the values + * already in v16-v23. 
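+ * (Editorial note: r0 is zeroed near the top of the function, so the + * first MAX_SIZE chunk falls through to the warm-up loads below; the + * bottom of the outer loop sets r0 to 1 before branching back, so + * later chunks skip straight to label 2 and keep using the data + * already loaded.)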
+ */ + cmpdi r0,1 + beq 2f + + /* First warm up pass */ + lvx v16,0,r4 + lvx v17,off16,r4 + VPERM(v16,v16,v16,byteswap) + VPERM(v17,v17,v17,byteswap) + lvx v18,off32,r4 + lvx v19,off48,r4 + VPERM(v18,v18,v18,byteswap) + VPERM(v19,v19,v19,byteswap) + lvx v20,off64,r4 + lvx v21,off80,r4 + VPERM(v20,v20,v20,byteswap) + VPERM(v21,v21,v21,byteswap) + lvx v22,off96,r4 + lvx v23,off112,r4 + VPERM(v22,v22,v22,byteswap) + VPERM(v23,v23,v23,byteswap) + addi r4,r4,8*16 + + /* xor in initial value */ + vxor v16,v16,v8 + +2: bdz .Lfirst_warm_up_done + + addi r3,r3,16 + lvx const2,0,r3 + + /* Second warm up pass */ + VPMSUMD(v8,v16,const1) + lvx v16,0,r4 + VPERM(v16,v16,v16,byteswap) + ori r2,r2,0 + + VPMSUMD(v9,v17,const1) + lvx v17,off16,r4 + VPERM(v17,v17,v17,byteswap) + ori r2,r2,0 + + VPMSUMD(v10,v18,const1) + lvx v18,off32,r4 + VPERM(v18,v18,v18,byteswap) + ori r2,r2,0 + + VPMSUMD(v11,v19,const1) + lvx v19,off48,r4 + VPERM(v19,v19,v19,byteswap) + ori r2,r2,0 + + VPMSUMD(v12,v20,const1) + lvx v20,off64,r4 + VPERM(v20,v20,v20,byteswap) + ori r2,r2,0 + + VPMSUMD(v13,v21,const1) + lvx v21,off80,r4 + VPERM(v21,v21,v21,byteswap) + ori r2,r2,0 + + VPMSUMD(v14,v22,const1) + lvx v22,off96,r4 + VPERM(v22,v22,v22,byteswap) + ori r2,r2,0 + + VPMSUMD(v15,v23,const1) + lvx v23,off112,r4 + VPERM(v23,v23,v23,byteswap) + + addi r4,r4,8*16 + + bdz .Lfirst_cool_down + + /* + * main loop. We modulo schedule it such that it takes three iterations + * to complete - first iteration load, second iteration vpmsum, third + * iteration xor. + */ + .balign 16 +4: lvx const1,0,r3 + addi r3,r3,16 + ori r2,r2,0 + + vxor v0,v0,v8 + VPMSUMD(v8,v16,const2) + lvx v16,0,r4 + VPERM(v16,v16,v16,byteswap) + ori r2,r2,0 + + vxor v1,v1,v9 + VPMSUMD(v9,v17,const2) + lvx v17,off16,r4 + VPERM(v17,v17,v17,byteswap) + ori r2,r2,0 + + vxor v2,v2,v10 + VPMSUMD(v10,v18,const2) + lvx v18,off32,r4 + VPERM(v18,v18,v18,byteswap) + ori r2,r2,0 + + vxor v3,v3,v11 + VPMSUMD(v11,v19,const2) + lvx v19,off48,r4 + VPERM(v19,v19,v19,byteswap) + lvx const2,0,r3 + ori r2,r2,0 + + vxor v4,v4,v12 + VPMSUMD(v12,v20,const1) + lvx v20,off64,r4 + VPERM(v20,v20,v20,byteswap) + ori r2,r2,0 + + vxor v5,v5,v13 + VPMSUMD(v13,v21,const1) + lvx v21,off80,r4 + VPERM(v21,v21,v21,byteswap) + ori r2,r2,0 + + vxor v6,v6,v14 + VPMSUMD(v14,v22,const1) + lvx v22,off96,r4 + VPERM(v22,v22,v22,byteswap) + ori r2,r2,0 + + vxor v7,v7,v15 + VPMSUMD(v15,v23,const1) + lvx v23,off112,r4 + VPERM(v23,v23,v23,byteswap) + + addi r4,r4,8*16 + + bdnz 4b + +.Lfirst_cool_down: + /* First cool down pass */ + lvx const1,0,r3 + addi r3,r3,16 + + vxor v0,v0,v8 + VPMSUMD(v8,v16,const1) + ori r2,r2,0 + + vxor v1,v1,v9 + VPMSUMD(v9,v17,const1) + ori r2,r2,0 + + vxor v2,v2,v10 + VPMSUMD(v10,v18,const1) + ori r2,r2,0 + + vxor v3,v3,v11 + VPMSUMD(v11,v19,const1) + ori r2,r2,0 + + vxor v4,v4,v12 + VPMSUMD(v12,v20,const1) + ori r2,r2,0 + + vxor v5,v5,v13 + VPMSUMD(v13,v21,const1) + ori r2,r2,0 + + vxor v6,v6,v14 + VPMSUMD(v14,v22,const1) + ori r2,r2,0 + + vxor v7,v7,v15 + VPMSUMD(v15,v23,const1) + ori r2,r2,0 + +.Lsecond_cool_down: + /* Second cool down pass */ + vxor v0,v0,v8 + vxor v1,v1,v9 + vxor v2,v2,v10 + vxor v3,v3,v11 + vxor v4,v4,v12 + vxor v5,v5,v13 + vxor v6,v6,v14 + vxor v7,v7,v15 + + /* + * vpmsumd produces a 96 bit result in the least significant bits + * of the register. Since we are bit reflected we have to shift it + * left 32 bits so it occupies the least significant bits in the + * bit reflected domain. 
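+ * (Editorial note: each vsldoi below concatenates the register with + * zeroes and takes the 16 bytes starting 4 bytes in, i.e. a 32-bit + * left shift of the whole 128-bit value.)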
+ */ + vsldoi v0,v0,zeroes,4 + vsldoi v1,v1,zeroes,4 + vsldoi v2,v2,zeroes,4 + vsldoi v3,v3,zeroes,4 + vsldoi v4,v4,zeroes,4 + vsldoi v5,v5,zeroes,4 + vsldoi v6,v6,zeroes,4 + vsldoi v7,v7,zeroes,4 + + /* xor with last 1024 bits */ + lvx v8,0,r4 + lvx v9,off16,r4 + VPERM(v8,v8,v8,byteswap) + VPERM(v9,v9,v9,byteswap) + lvx v10,off32,r4 + lvx v11,off48,r4 + VPERM(v10,v10,v10,byteswap) + VPERM(v11,v11,v11,byteswap) + lvx v12,off64,r4 + lvx v13,off80,r4 + VPERM(v12,v12,v12,byteswap) + VPERM(v13,v13,v13,byteswap) + lvx v14,off96,r4 + lvx v15,off112,r4 + VPERM(v14,v14,v14,byteswap) + VPERM(v15,v15,v15,byteswap) + + addi r4,r4,8*16 + + vxor v16,v0,v8 + vxor v17,v1,v9 + vxor v18,v2,v10 + vxor v19,v3,v11 + vxor v20,v4,v12 + vxor v21,v5,v13 + vxor v22,v6,v14 + vxor v23,v7,v15 + + li r0,1 + cmpdi r6,0 + addi r6,r6,128 + bne 1b + + /* Work out how many bytes we have left */ + andi. r5,r5,127 + + /* Calculate where in the constant table we need to start */ + subfic r6,r5,128 + add r3,r3,r6 + + /* How many 16 byte chunks are in the tail */ + srdi r7,r5,4 + mtctr r7 + + /* + * Reduce the previously calculated 1024 bits to 64 bits, shifting + * 32 bits to include the trailing 32 bits of zeros + */ + lvx v0,0,r3 + lvx v1,off16,r3 + lvx v2,off32,r3 + lvx v3,off48,r3 + lvx v4,off64,r3 + lvx v5,off80,r3 + lvx v6,off96,r3 + lvx v7,off112,r3 + addi r3,r3,8*16 + + VPMSUMW(v0,v16,v0) + VPMSUMW(v1,v17,v1) + VPMSUMW(v2,v18,v2) + VPMSUMW(v3,v19,v3) + VPMSUMW(v4,v20,v4) + VPMSUMW(v5,v21,v5) + VPMSUMW(v6,v22,v6) + VPMSUMW(v7,v23,v7) + + /* Now reduce the tail (0 - 112 bytes) */ + cmpdi r7,0 + beq 1f + + lvx v16,0,r4 + lvx v17,0,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off16,r4 + lvx v17,off16,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off32,r4 + lvx v17,off32,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off48,r4 + lvx v17,off48,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off64,r4 + lvx v17,off64,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off80,r4 + lvx v17,off80,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + bdz 1f + + lvx v16,off96,r4 + lvx v17,off96,r3 + VPERM(v16,v16,v16,byteswap) + VPMSUMW(v16,v16,v17) + vxor v0,v0,v16 + + /* Now xor all the parallel chunks together */ +1: vxor v0,v0,v1 + vxor v2,v2,v3 + vxor v4,v4,v5 + vxor v6,v6,v7 + + vxor v0,v0,v2 + vxor v4,v4,v6 + + vxor v0,v0,v4 + +.Lbarrett_reduction: + /* Barrett constants */ + addis r3,r2,.barrett_constants@toc@ha + addi r3,r3,.barrett_constants@toc@l + + lvx const1,0,r3 + lvx const2,off16,r3 + + vsldoi v1,v0,v0,8 + vxor v0,v0,v1 /* xor two 64 bit results together */ + + /* shift left one bit */ + vspltisb v1,1 + vsl v0,v0,v1 + + vand v0,v0,mask_64bit + + /* + * The reflected version of Barrett reduction. Instead of bit + * reflecting our data (which is expensive to do), we bit reflect our + * constants and our algorithm, which means the intermediate data in + * our vector registers goes from 0-63 instead of 63-0. We can reflect + * the algorithm because we don't carry in mod 2 arithmetic. 
+ */ + vand v1,v0,mask_32bit /* bottom 32 bits of a */ + VPMSUMD(v1,v1,const1) /* ma */ + vand v1,v1,mask_32bit /* bottom 32bits of ma */ + VPMSUMD(v1,v1,const2) /* qn */ + vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */ + + /* + * Since we are bit reflected, the result (ie the low 32 bits) is in + * the high 32 bits. We just need to shift it left 4 bytes + * V0 [ 0 1 X 3 ] + * V0 [ 0 X 2 3 ] + */ + vsldoi v0,v0,zeroes,4 /* shift result into top 64 bits of */ + + /* Get it into r3 */ + MFVRD(R3, v0) + +.Lout: + subi r6,r1,56+10*16 + subi r7,r1,56+2*16 + + lvx v20,0,r6 + lvx v21,off16,r6 + lvx v22,off32,r6 + lvx v23,off48,r6 + lvx v24,off64,r6 + lvx v25,off80,r6 + lvx v26,off96,r6 + lvx v27,off112,r6 + lvx v28,0,r7 + lvx v29,off16,r7 + + ld r31,-8(r1) + ld r30,-16(r1) + ld r29,-24(r1) + ld r28,-32(r1) + ld r27,-40(r1) + ld r26,-48(r1) + ld r25,-56(r1) + + blr + +.Lfirst_warm_up_done: + lvx const1,0,r3 + addi r3,r3,16 + + VPMSUMD(v8,v16,const1) + VPMSUMD(v9,v17,const1) + VPMSUMD(v10,v18,const1) + VPMSUMD(v11,v19,const1) + VPMSUMD(v12,v20,const1) + VPMSUMD(v13,v21,const1) + VPMSUMD(v14,v22,const1) + VPMSUMD(v15,v23,const1) + + b .Lsecond_cool_down + +.Lshort: + cmpdi r5,0 + beq .Lzero + + addis r3,r2,.short_constants@toc@ha + addi r3,r3,.short_constants@toc@l + + /* Calculate where in the constant table we need to start */ + subfic r6,r5,256 + add r3,r3,r6 + + /* How many 16 byte chunks? */ + srdi r7,r5,4 + mtctr r7 + + vxor v19,v19,v19 + vxor v20,v20,v20 + + lvx v0,0,r4 + lvx v16,0,r3 + VPERM(v0,v0,v16,byteswap) + vxor v0,v0,v8 /* xor in initial value */ + VPMSUMW(v0,v0,v16) + bdz .Lv0 + + lvx v1,off16,r4 + lvx v17,off16,r3 + VPERM(v1,v1,v17,byteswap) + VPMSUMW(v1,v1,v17) + bdz .Lv1 + + lvx v2,off32,r4 + lvx v16,off32,r3 + VPERM(v2,v2,v16,byteswap) + VPMSUMW(v2,v2,v16) + bdz .Lv2 + + lvx v3,off48,r4 + lvx v17,off48,r3 + VPERM(v3,v3,v17,byteswap) + VPMSUMW(v3,v3,v17) + bdz .Lv3 + + lvx v4,off64,r4 + lvx v16,off64,r3 + VPERM(v4,v4,v16,byteswap) + VPMSUMW(v4,v4,v16) + bdz .Lv4 + + lvx v5,off80,r4 + lvx v17,off80,r3 + VPERM(v5,v5,v17,byteswap) + VPMSUMW(v5,v5,v17) + bdz .Lv5 + + lvx v6,off96,r4 + lvx v16,off96,r3 + VPERM(v6,v6,v16,byteswap) + VPMSUMW(v6,v6,v16) + bdz .Lv6 + + lvx v7,off112,r4 + lvx v17,off112,r3 + VPERM(v7,v7,v17,byteswap) + VPMSUMW(v7,v7,v17) + bdz .Lv7 + + addi r3,r3,128 + addi r4,r4,128 + + lvx v8,0,r4 + lvx v16,0,r3 + VPERM(v8,v8,v16,byteswap) + VPMSUMW(v8,v8,v16) + bdz .Lv8 + + lvx v9,off16,r4 + lvx v17,off16,r3 + VPERM(v9,v9,v17,byteswap) + VPMSUMW(v9,v9,v17) + bdz .Lv9 + + lvx v10,off32,r4 + lvx v16,off32,r3 + VPERM(v10,v10,v16,byteswap) + VPMSUMW(v10,v10,v16) + bdz .Lv10 + + lvx v11,off48,r4 + lvx v17,off48,r3 + VPERM(v11,v11,v17,byteswap) + VPMSUMW(v11,v11,v17) + bdz .Lv11 + + lvx v12,off64,r4 + lvx v16,off64,r3 + VPERM(v12,v12,v16,byteswap) + VPMSUMW(v12,v12,v16) + bdz .Lv12 + + lvx v13,off80,r4 + lvx v17,off80,r3 + VPERM(v13,v13,v17,byteswap) + VPMSUMW(v13,v13,v17) + bdz .Lv13 + + lvx v14,off96,r4 + lvx v16,off96,r3 + VPERM(v14,v14,v16,byteswap) + VPMSUMW(v14,v14,v16) + bdz .Lv14 + + lvx v15,off112,r4 + lvx v17,off112,r3 + VPERM(v15,v15,v17,byteswap) + VPMSUMW(v15,v15,v17) + +.Lv15: vxor v19,v19,v15 +.Lv14: vxor v20,v20,v14 +.Lv13: vxor v19,v19,v13 +.Lv12: vxor v20,v20,v12 +.Lv11: vxor v19,v19,v11 +.Lv10: vxor v20,v20,v10 +.Lv9: vxor v19,v19,v9 +.Lv8: vxor v20,v20,v8 +.Lv7: vxor v19,v19,v7 +.Lv6: vxor v20,v20,v6 +.Lv5: vxor v19,v19,v5 +.Lv4: vxor v20,v20,v4 +.Lv3: vxor v19,v19,v3 +.Lv2: vxor v20,v20,v2 +.Lv1: vxor v19,v19,v1 +.Lv0: vxor v20,v20,v0 + + vxor 
v0,v19,v20 + + b .Lbarrett_reduction + +.Lzero: + mr r3,r10 + b .Lout + +FUNC_END(__crc32_vpmsum) diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c new file mode 100644 index 000000000000..bfe3d37a24ef --- /dev/null +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c @@ -0,0 +1,167 @@ +#include +#include +#include +#include +#include +#include +#include + +#define CHKSUM_BLOCK_SIZE 1 +#define CHKSUM_DIGEST_SIZE 4 + +#define VMX_ALIGN 16 +#define VMX_ALIGN_MASK (VMX_ALIGN-1) + +#define VECTOR_BREAKPOINT 512 + +u32 __crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len); + +static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len) +{ + unsigned int prealign; + unsigned int tail; + + if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt()) + return __crc32c_le(crc, p, len); + + if ((unsigned long)p & VMX_ALIGN_MASK) { + prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK); + crc = __crc32c_le(crc, p, prealign); + len -= prealign; + p += prealign; + } + + if (len & ~VMX_ALIGN_MASK) { + pagefault_disable(); + enable_kernel_altivec(); + crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); + pagefault_enable(); + } + + tail = len & VMX_ALIGN_MASK; + if (tail) { + p += len & ~VMX_ALIGN_MASK; + crc = __crc32c_le(crc, p, tail); + } + + return crc; +} + +static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm) +{ + u32 *key = crypto_tfm_ctx(tfm); + + *key = 0; + + return 0; +} + +/* + * Setting the seed allows arbitrary accumulators and flexible XOR policy + * If your algorithm starts with ~0, then XOR with ~0 before you set + * the seed. + */ +static int crc32c_vpmsum_setkey(struct crypto_shash *hash, const u8 *key, + unsigned int keylen) +{ + u32 *mctx = crypto_shash_ctx(hash); + + if (keylen != sizeof(u32)) { + crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + *mctx = le32_to_cpup((__le32 *)key); + return 0; +} + +static int crc32c_vpmsum_init(struct shash_desc *desc) +{ + u32 *mctx = crypto_shash_ctx(desc->tfm); + u32 *crcp = shash_desc_ctx(desc); + + *crcp = *mctx; + + return 0; +} + +static int crc32c_vpmsum_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + u32 *crcp = shash_desc_ctx(desc); + + *crcp = crc32c_vpmsum(*crcp, data, len); + + return 0; +} + +static int __crc32c_vpmsum_finup(u32 *crcp, const u8 *data, unsigned int len, + u8 *out) +{ + *(__le32 *)out = ~cpu_to_le32(crc32c_vpmsum(*crcp, data, len)); + + return 0; +} + +static int crc32c_vpmsum_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return __crc32c_vpmsum_finup(shash_desc_ctx(desc), data, len, out); +} + +static int crc32c_vpmsum_final(struct shash_desc *desc, u8 *out) +{ + u32 *crcp = shash_desc_ctx(desc); + + *(__le32 *)out = ~cpu_to_le32p(crcp); + + return 0; +} + +static int crc32c_vpmsum_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return __crc32c_vpmsum_finup(crypto_shash_ctx(desc->tfm), data, len, + out); +} + +static struct shash_alg alg = { + .setkey = crc32c_vpmsum_setkey, + .init = crc32c_vpmsum_init, + .update = crc32c_vpmsum_update, + .final = crc32c_vpmsum_final, + .finup = crc32c_vpmsum_finup, + .digest = crc32c_vpmsum_digest, + .descsize = sizeof(u32), + .digestsize = CHKSUM_DIGEST_SIZE, + .base = { + .cra_name = "crc32c", + .cra_driver_name = "crc32c-vpmsum", + .cra_priority = 200, + .cra_blocksize = CHKSUM_BLOCK_SIZE, + .cra_ctxsize = sizeof(u32), + .cra_module = THIS_MODULE, + .cra_init = crc32c_vpmsum_cra_init, + } 
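+ /* Editorial note: descsize is the per-request running CRC that + * crc32c_vpmsum_init() seeds from the tfm context, while cra_ctxsize + * holds that seed, zeroed in cra_init and replaced via setkey. */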
+}; + +static int __init crc32c_vpmsum_mod_init(void) +{ + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + return -ENODEV; + + return crypto_register_shash(&alg); +} + +static void __exit crc32c_vpmsum_mod_fini(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(crc32c_vpmsum_mod_init); +module_exit(crc32c_vpmsum_mod_fini); + +MODULE_AUTHOR("Anton Blanchard "); +MODULE_DESCRIPTION("CRC32C using vector polynomial multiply-sum instructions"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CRYPTO("crc32c"); +MODULE_ALIAS_CRYPTO("crc32c-vpmsum"); diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 1d035c1cc889..49cd8760aa7c 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -174,6 +174,8 @@ #define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff #define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6 #define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff +#define PPC_INST_MFVSRD 0x7c000066 +#define PPC_INST_MTVSRD 0x7c000166 #define PPC_INST_SLBFEE 0x7c0007a7 #define PPC_INST_STRING 0x7c00042a @@ -188,6 +190,8 @@ #define PPC_INST_WAIT 0x7c00007c #define PPC_INST_TLBIVAX 0x7c000624 #define PPC_INST_TLBSRX_DOT 0x7c0006a5 +#define PPC_INST_VPMSUMW 0x10000488 +#define PPC_INST_VPMSUMD 0x100004c8 #define PPC_INST_XXLOR 0xf0000510 #define PPC_INST_XXSWAPD 0xf0000250 #define PPC_INST_XVCPSGNDP 0xf0000780 @@ -359,6 +363,14 @@ VSX_XX1((s), a, b)) #define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \ VSX_XX1((s), a, b)) +#define MFVRD(a, t) stringify_in_c(.long PPC_INST_MFVSRD | \ + VSX_XX1((t)+32, a, R0)) +#define MTVRD(t, a) stringify_in_c(.long PPC_INST_MTVSRD | \ + VSX_XX1((t)+32, a, R0)) +#define VPMSUMW(t, a, b) stringify_in_c(.long PPC_INST_VPMSUMW | \ + VSX_XX3((t), a, b)) +#define VPMSUMD(t, a, b) stringify_in_c(.long PPC_INST_VPMSUMD | \ + VSX_XX3((t), a, b)) #define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ VSX_XX3((t), a, b)) #define XXSWAPD(t, a) stringify_in_c(.long PPC_INST_XXSWAPD | \ diff --git a/crypto/Kconfig b/crypto/Kconfig index 62fcbb923753..a9377bef25e3 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -437,6 +437,17 @@ config CRYPTO_CRC32C_INTEL gain performance compared with software implementation. Module will be crc32c-intel. +config CRYPT_CRC32C_VPMSUM + tristate "CRC32c CRC algorithm (powerpc64)" + depends on PPC64 + select CRYPTO_HASH + select CRC32 + help + CRC32c algorithm implemented using vector polynomial multiply-sum + (vpmsum) instructions, introduced in POWER8. Enable on POWER8 + and newer processors for improved performance. 
+ + config CRYPTO_CRC32C_SPARC64 tristate "CRC32c CRC algorithm (SPARC64)" depends on SPARC64 -- cgit v1.2.3-70-g09d2 From 57763f5ec7488d5864e4d6ec9d4197b8f52214bd Mon Sep 17 00:00:00 2001 From: Salvatore Benedetto Date: Mon, 4 Jul 2016 10:52:34 +0100 Subject: crypto: testmgr - Set err before proceeding Report correct error in case of failure Signed-off-by: Salvatore Benedetto Signed-off-by: Herbert Xu --- crypto/testmgr.c | 1 + 1 file changed, 1 insertion(+) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 8ea0d3fcb580..769cc2a88040 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1941,6 +1941,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, if (err) goto free_req; + err = -ENOMEM; out_len_max = crypto_akcipher_maxsize(tfm); outbuf_enc = kzalloc(out_len_max, GFP_KERNEL); if (!outbuf_enc) -- cgit v1.2.3-70-g09d2 From 8be0b84e58a9b07c314f920792926c5c5a53d3da Mon Sep 17 00:00:00 2001 From: Salvatore Benedetto Date: Mon, 4 Jul 2016 17:21:38 +0100 Subject: crypto: rsa - Store rest of the private key components When parsing a private key, store all non-optional fields. These are required for enabling CRT mode for decrypt and verify Signed-off-by: Salvatore Benedetto Signed-off-by: Herbert Xu --- crypto/rsa_helper.c | 75 +++++++++++++++++++++++++++++++++++++++++++ crypto/rsaprivkey.asn1 | 10 +++--- include/crypto/internal/rsa.h | 20 ++++++++++++ 3 files changed, 100 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c index 583656af4fe2..4df6451e7543 100644 --- a/crypto/rsa_helper.c +++ b/crypto/rsa_helper.c @@ -78,6 +78,81 @@ int rsa_get_d(void *context, size_t hdrlen, unsigned char tag, return 0; } +int rsa_get_p(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct rsa_key *key = context; + + /* invalid key provided */ + if (!value || !vlen || vlen > key->n_sz) + return -EINVAL; + + key->p = value; + key->p_sz = vlen; + + return 0; +} + +int rsa_get_q(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct rsa_key *key = context; + + /* invalid key provided */ + if (!value || !vlen || vlen > key->n_sz) + return -EINVAL; + + key->q = value; + key->q_sz = vlen; + + return 0; +} + +int rsa_get_dp(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct rsa_key *key = context; + + /* invalid key provided */ + if (!value || !vlen || vlen > key->n_sz) + return -EINVAL; + + key->dp = value; + key->dp_sz = vlen; + + return 0; +} + +int rsa_get_dq(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct rsa_key *key = context; + + /* invalid key provided */ + if (!value || !vlen || vlen > key->n_sz) + return -EINVAL; + + key->dq = value; + key->dq_sz = vlen; + + return 0; +} + +int rsa_get_qinv(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct rsa_key *key = context; + + /* invalid key provided */ + if (!value || !vlen || vlen > key->n_sz) + return -EINVAL; + + key->qinv = value; + key->qinv_sz = vlen; + + return 0; +} + /** * rsa_parse_pub_key() - decodes the BER encoded buffer and stores in the * provided struct rsa_key, pointers to the raw key as is, diff --git a/crypto/rsaprivkey.asn1 b/crypto/rsaprivkey.asn1 index 731aea5edb0c..4ce06758e8af 100644 --- a/crypto/rsaprivkey.asn1 +++ b/crypto/rsaprivkey.asn1 @@ -3,9 +3,9 @@ RsaPrivKey ::= SEQUENCE { n INTEGER ({ rsa_get_n }), e INTEGER ({ rsa_get_e 
}), d INTEGER ({ rsa_get_d }), - prime1 INTEGER, - prime2 INTEGER, - exponent1 INTEGER, - exponent2 INTEGER, - coefficient INTEGER + prime1 INTEGER ({ rsa_get_p }), + prime2 INTEGER ({ rsa_get_q }), + exponent1 INTEGER ({ rsa_get_dp }), + exponent2 INTEGER ({ rsa_get_dq }), + coefficient INTEGER ({ rsa_get_qinv }) } diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h index d6c042a2ee52..9e8f1590de98 100644 --- a/include/crypto/internal/rsa.h +++ b/include/crypto/internal/rsa.h @@ -19,17 +19,37 @@ * @n : RSA modulus raw byte stream * @e : RSA public exponent raw byte stream * @d : RSA private exponent raw byte stream + * @p : RSA prime factor p of n raw byte stream + * @q : RSA prime factor q of n raw byte stream + * @dp : RSA exponent d mod (p - 1) raw byte stream + * @dq : RSA exponent d mod (q - 1) raw byte stream + * @qinv : RSA CRT coefficient q^(-1) mod p raw byte stream * @n_sz : length in bytes of RSA modulus n * @e_sz : length in bytes of RSA public exponent * @d_sz : length in bytes of RSA private exponent + * @p_sz : length in bytes of p field + * @q_sz : length in bytes of q field + * @dp_sz : length in bytes of dp field + * @dq_sz : length in bytes of dq field + * @qinv_sz : length in bytes of qinv field */ struct rsa_key { const u8 *n; const u8 *e; const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; size_t n_sz; size_t e_sz; size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; }; int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, -- cgit v1.2.3-70-g09d2 From c8afbc84042980ca6978ea4b140f848e2820b96a Mon Sep 17 00:00:00 2001 From: Salvatore Benedetto Date: Mon, 4 Jul 2016 17:21:39 +0100 Subject: crypto: testmgr - Add 4K private key to RSA testvector Key generated with openssl. 
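(Editorial aside, not part of the patch: a minimal sketch of how a DER blob like the vector below is consumed once the parser callbacks from the previous patch are in place. demo_inspect() and its pr_info output are hypothetical; rsa_parse_priv_key() and struct rsa_key are the real helpers being extended.)

#include <crypto/internal/rsa.h>
#include <linux/printk.h>

static int demo_inspect(const void *der, unsigned int len)
{
	struct rsa_key k = {};
	int err;

	/* Walks the BER SEQUENCE; the rsa_get_* callbacks leave the
	 * struct's pointers aimed into 'der', no copies are made. */
	err = rsa_parse_priv_key(&k, der, len);
	if (err)
		return err;

	pr_info("n=%zu p=%zu q=%zu dp=%zu dq=%zu qinv=%zu bytes\n",
		k.n_sz, k.p_sz, k.q_sz, k.dp_sz, k.dq_sz, k.qinv_sz);
	return 0;
}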
It also contains all fields required for testing CRT mode Signed-off-by: Salvatore Benedetto Signed-off-by: Herbert Xu --- crypto/testmgr.h | 200 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 199 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 4ce2d866919d..acb6bbff781a 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -152,7 +152,7 @@ static char zeroed_string[48]; #ifdef CONFIG_CRYPTO_FIPS #define RSA_TEST_VECTORS 2 #else -#define RSA_TEST_VECTORS 4 +#define RSA_TEST_VECTORS 5 #endif static struct akcipher_testvec rsa_tv_template[] = { { @@ -338,6 +338,204 @@ static struct akcipher_testvec rsa_tv_template[] = { .m_size = 8, .c_size = 256, .public_key_vec = true, + }, { + .key = + "\x30\x82\x09\x29" /* sequence of 2345 bytes */ + "\x02\x01\x00" /* version integer of 1 byte */ + "\x02\x82\x02\x01" /* modulus - integer of 513 bytes */ + "\x00\xC3\x8B\x55\x7B\x73\x4D\xFF\xE9\x9B\xC6\xDC\x67\x3C\xB4\x8E" + "\xA0\x86\xED\xF2\xB9\x50\x5C\x54\x5C\xBA\xE4\xA1\xB2\xA7\xAE\x2F" + "\x1B\x7D\xF1\xFB\xAC\x79\xC5\xDF\x1A\x00\xC9\xB2\xC1\x61\x25\x33" + "\xE6\x9C\xE9\xCF\xD6\x27\xC4\x4E\x44\x30\x44\x5E\x08\xA1\x87\x52" + "\xCC\x6B\x97\x70\x8C\xBC\xA5\x06\x31\x0C\xD4\x2F\xD5\x7D\x26\x24" + "\xA2\xE2\xAC\x78\xF4\x53\x14\xCE\xF7\x19\x2E\xD7\xF7\xE6\x0C\xB9" + "\x56\x7F\x0B\xF1\xB1\xE2\x43\x70\xBD\x86\x1D\xA1\xCC\x2B\x19\x08" + "\x76\xEF\x91\xAC\xBF\x20\x24\x0D\x38\xC0\x89\xB8\x9A\x70\xB3\x64" + "\xD9\x8F\x80\x41\x10\x5B\x9F\xB1\xCB\x76\x43\x00\x21\x25\x36\xD4" + "\x19\xFC\x55\x95\x10\xE4\x26\x74\x98\x2C\xD9\xBD\x0B\x2B\x04\xC2" + "\xAC\x82\x38\xB4\xDD\x4C\x04\x7E\x51\x36\x40\x1E\x0B\xC4\x7C\x25" + "\xDD\x4B\xB2\xE7\x20\x0A\x57\xF9\xB4\x94\xC3\x08\x33\x22\x6F\x8B" + "\x48\xDB\x03\x68\x5A\x5B\xBA\xAE\xF3\xAD\xCF\xC3\x6D\xBA\xF1\x28" + "\x67\x7E\x6C\x79\x07\xDE\xFC\xED\xE7\x96\xE3\x6C\xE0\x2C\x87\xF8" + "\x02\x01\x28\x38\x43\x21\x53\x84\x69\x75\x78\x15\x7E\xEE\xD2\x1B" + "\xB9\x23\x40\xA8\x86\x1E\x38\x83\xB2\x73\x1D\x53\xFB\x9E\x2A\x8A" + "\xB2\x75\x35\x01\xC3\xC3\xC4\x94\xE8\x84\x86\x64\x81\xF4\x42\xAA" + "\x3C\x0E\xD6\x4F\xBC\x0A\x09\x2D\xE7\x1B\xD4\x10\xA8\x54\xEA\x89" + "\x84\x8A\xCB\xF7\x5A\x3C\xCA\x76\x08\x29\x62\xB4\x6A\x22\xDF\x14" + "\x95\x71\xFD\xB6\x86\x39\xB8\x8B\xF8\x91\x7F\x38\xAA\x14\xCD\xE5" + "\xF5\x1D\xC2\x6D\x53\x69\x52\x84\x7F\xA3\x1A\x5E\x26\x04\x83\x06" + "\x73\x52\x56\xCF\x76\x26\xC9\xDD\x75\xD7\xFC\xF4\x69\xD8\x7B\x55" + "\xB7\x68\x13\x53\xB9\xE7\x89\xC3\xE8\xD6\x6E\xA7\x6D\xEA\x81\xFD" + "\xC4\xB7\x05\x5A\xB7\x41\x0A\x23\x8E\x03\x8A\x1C\xAE\xD3\x1E\xCE" + "\xE3\x5E\xFC\x19\x4A\xEE\x61\x9B\x8E\xE5\xE5\xDD\x85\xF9\x41\xEC" + "\x14\x53\x92\xF7\xDD\x06\x85\x02\x91\xE3\xEB\x6C\x43\x03\xB1\x36" + "\x7B\x89\x5A\xA8\xEB\xFC\xD5\xA8\x35\xDC\x81\xD9\x5C\xBD\xCA\xDC" + "\x9B\x98\x0B\x06\x5D\x0C\x5B\xEE\xF3\xD5\xCC\x57\xC9\x71\x2F\x90" + "\x3B\x3C\xF0\x8E\x4E\x35\x48\xAE\x63\x74\xA9\xFC\x72\x75\x8E\x34" + "\xA8\xF2\x1F\xEA\xDF\x3A\x37\x2D\xE5\x39\x39\xF8\x57\x58\x3C\x04" + "\xFE\x87\x06\x98\xBC\x7B\xD3\x21\x36\x60\x25\x54\xA7\x3D\xFA\x91" + "\xCC\xA8\x0B\x92\x8E\xB4\xF7\x06\xFF\x1E\x95\xCB\x07\x76\x97\x3B" + "\x9D" + "\x02\x03\x01\x00\x01" /* public key integer of 3 bytes */ + "\x02\x82\x02\x00" /* private key integer of 512 bytes */ + "\x74\xA9\xE0\x6A\x32\xB4\xCA\x85\xD9\x86\x9F\x60\x88\x7B\x40\xCC" + "\xCD\x33\x91\xA8\xB6\x25\x1F\xBF\xE3\x51\x1C\x97\xB6\x2A\xD9\xB8" + "\x11\x40\x19\xE3\x21\x13\xC8\xB3\x7E\xDC\xD7\x65\x40\x4C\x2D\xD6" + "\xDC\xAF\x32\x6C\x96\x75\x2C\x2C\xCA\x8F\x3F\x7A\xEE\xC4\x09\xC6" + 
"\x24\x3A\xC9\xCF\x6D\x8D\x17\x50\x94\x52\xD3\xE7\x0F\x2F\x7E\x94" + "\x1F\xA0\xBE\xD9\x25\xE8\x38\x42\x7C\x27\xD2\x79\xF8\x2A\x87\x38" + "\xEF\xBB\x74\x8B\xA8\x6E\x8C\x08\xC6\xC7\x4F\x0C\xBC\x79\xC6\xEF" + "\x0E\xA7\x5E\xE4\xF8\x8C\x09\xC7\x5E\x37\xCC\x87\x77\xCD\xCF\xD1" + "\x6D\x28\x1B\xA9\x62\xC0\xB8\x16\xA7\x8B\xF9\xBB\xCC\xB4\x15\x7F" + "\x1B\x69\x03\xF2\x7B\xEB\xE5\x8C\x14\xD6\x23\x4F\x52\x6F\x18\xA6" + "\x4B\x5B\x01\xAD\x35\xF9\x48\x53\xB3\x86\x35\x66\xD7\xE7\x29\xC0" + "\x09\xB5\xC6\xE6\xFA\xC4\xDA\x19\xBE\xD7\x4D\x41\x14\xBE\x6F\xDF" + "\x1B\xAB\xC0\xCA\x88\x07\xAC\xF1\x7D\x35\x83\x67\x28\x2D\x50\xE9" + "\xCE\x27\x71\x5E\x1C\xCF\xD2\x30\x65\x79\x72\x2F\x9C\xE1\xD2\x39" + "\x7F\xEF\x3B\x01\xF2\x14\x1D\xDF\xBD\x51\xD3\xA1\x53\x62\xCF\x5F" + "\x79\x84\xCE\x06\x96\x69\x29\x49\x82\x1C\x71\x4A\xA1\x66\xC8\x2F" + "\xFD\x7B\x96\x7B\xFC\xC4\x26\x58\xC4\xFC\x7C\xAF\xB5\xE8\x95\x83" + "\x87\xCB\x46\xDE\x97\xA7\xB3\xA2\x54\x5B\xD7\xAF\xAB\xEB\xC8\xF3" + "\x55\x9D\x48\x2B\x30\x9C\xDC\x26\x4B\xC2\x89\x45\x13\xB2\x01\x9A" + "\xA4\x65\xC3\xEC\x24\x2D\x26\x97\xEB\x80\x8A\x9D\x03\xBC\x59\x66" + "\x9E\xE2\xBB\xBB\x63\x19\x64\x93\x11\x7B\x25\x65\x30\xCD\x5B\x4B" + "\x2C\xFF\xDC\x2D\x30\x87\x1F\x3C\x88\x07\xD0\xFC\x48\xCC\x05\x8A" + "\xA2\xC8\x39\x3E\xD5\x51\xBC\x0A\xBE\x6D\xA8\xA0\xF6\x88\x06\x79" + "\x13\xFF\x1B\x45\xDA\x54\xC9\x24\x25\x8A\x75\x0A\x26\xD1\x69\x81" + "\x14\x14\xD1\x79\x7D\x8E\x76\xF2\xE0\xEB\xDD\x0F\xDE\xC2\xEC\x80" + "\xD7\xDC\x16\x99\x92\xBE\xCB\x40\x0C\xCE\x7C\x3B\x46\xA2\x5B\x5D" + "\x0C\x45\xEB\xE1\x00\xDE\x72\x50\xB1\xA6\x0B\x76\xC5\x8D\xFC\x82" + "\x38\x6D\x99\x14\x1D\x1A\x4A\xD3\x7C\x53\xB8\x12\x46\xA2\x30\x38" + "\x82\xF4\x96\x6E\x8C\xCE\x47\x0D\xAF\x0A\x3B\x45\xB7\x43\x95\x43" + "\x9E\x02\x2C\x44\x07\x6D\x1F\x3C\x66\x89\x09\xB6\x1F\x06\x30\xCC" + "\xAD\xCE\x7D\x9A\xDE\x3E\xFB\x6C\xE4\x58\x43\xD2\x4F\xA5\x9E\x5E" + "\xA7\x7B\xAE\x3A\xF6\x7E\xD9\xDB\xD3\xF5\xC5\x41\xAF\xE6\x9C\x91" + "\x02\x82\x01\x01" /* prime1 - integer of 257 bytes */ + "\x00\xE0\xA6\x6C\xF0\xA2\xF8\x81\x85\x36\x43\xD0\x13\x0B\x33\x8B" + "\x8F\x78\x3D\xAC\xC7\x5E\x46\x6A\x7F\x05\xAE\x3E\x26\x0A\xA6\xD0" + "\x51\xF3\xC8\x61\xF5\x77\x22\x48\x10\x87\x4C\xD5\xA4\xD5\xAE\x2D" + "\x4E\x7A\xFE\x1C\x31\xE7\x6B\xFF\xA4\x69\x20\xF9\x2A\x0B\x99\xBE" + "\x7C\x32\x68\xAD\xB0\xC6\x94\x81\x41\x75\xDC\x06\x78\x0A\xB4\xCF" + "\xCD\x1B\x2D\x31\xE4\x7B\xEA\xA8\x35\x99\x75\x57\xC6\x0E\xF6\x78" + "\x4F\xA0\x92\x4A\x00\x1B\xE7\x96\xF2\x5B\xFD\x2C\x0A\x0A\x13\x81" + "\xAF\xCB\x59\x87\x31\xD9\x83\x65\xF2\x22\x48\xD0\x03\x67\x39\xF6" + "\xFF\xA8\x36\x07\x3A\x68\xE3\x7B\xA9\x64\xFD\x9C\xF7\xB1\x3D\xBF" + "\x26\x5C\xCC\x7A\xFC\xA2\x8F\x51\xD1\xE1\xE2\x3C\xEC\x06\x75\x7C" + "\x34\xF9\xA9\x33\x70\x11\xAD\x5A\xDC\x5F\xCF\x50\xF6\x23\x2F\x39" + "\xAC\x92\x48\x53\x4D\x01\x96\x3C\xD8\xDC\x1F\x23\x23\x78\x80\x34" + "\x54\x14\x76\x8B\xB6\xBB\xFB\x88\x78\x31\x59\x28\xD2\xB1\x75\x17" + "\x88\x04\x4A\x78\x62\x18\x2E\xF5\xFB\x9B\xEF\x15\xD8\x16\x47\xC6" + "\x42\xB1\x02\xDA\x9E\xE3\x84\x90\xB4\x2D\xC3\xCE\x13\xC9\x12\x7D" + "\x3E\xCD\x39\x39\xC9\xAD\xA1\x1A\xE6\xD5\xAD\x5A\x09\x4D\x1B\x0C" + "\xAB" + "\x02\x82\x01\x01" /* prime 2 - integer of 257 bytes */ + "\x00\xDE\xD5\x1B\xF6\xCD\x83\xB1\xC6\x47\x7E\xB9\xC0\x6B\xA9\xB8" + "\x02\xF3\xAE\x40\x5D\xFC\xD3\xE5\x4E\xF1\xE3\x39\x04\x52\x84\x89" + "\x40\x37\xBB\xC2\xCD\x7F\x71\x77\x17\xDF\x6A\x4C\x31\x24\x7F\xB9" + "\x7E\x7F\xC8\x43\x4A\x3C\xEB\x8D\x1B\x7F\x21\x51\x67\x45\x8F\xA0" + "\x36\x29\x3A\x18\x45\xA5\x32\xEC\x74\x88\x3C\x98\x5D\x67\x3B\xD7" + 
"\x51\x1F\xE9\xAE\x09\x01\xDE\xDE\x7C\xFB\x60\xD1\xA5\x6C\xE9\x6A" + "\x93\x04\x02\x3A\xBB\x67\x02\xB9\xFD\x23\xF0\x02\x2B\x49\x85\xC9" + "\x5B\xE7\x4B\xDF\xA3\xF4\xEE\x59\x4C\x45\xEF\x8B\xC1\x6B\xDE\xDE" + "\xBC\x1A\xFC\xD2\x76\x3F\x33\x74\xA9\x8E\xA3\x7E\x0C\xC6\xCE\x70" + "\xA1\x5B\xA6\x77\xEA\x76\xEB\x18\xCE\xB9\xD7\x78\x8D\xAE\x06\xBB" + "\xD3\x1F\x16\x0D\x05\xAB\x4F\xC6\x52\xC8\x6B\x36\x51\x7D\x1D\x27" + "\xAF\x88\x9A\x6F\xCC\x25\x2E\x74\x06\x72\xCE\x9E\xDB\xE0\x9D\x30" + "\xEF\x55\xA5\x58\x21\xA7\x42\x12\x2C\x2C\x23\x87\xC1\x0F\xE8\x51" + "\xDA\x53\xDA\xFC\x05\x36\xDF\x08\x0E\x08\x36\xBE\x5C\x86\x9E\xCA" + "\x68\x90\x33\x12\x0B\x14\x82\xAB\x90\x1A\xD4\x49\x32\x9C\xBD\xAA" + "\xAB\x4E\x38\xF1\xEE\xED\x3D\x3F\xE8\xBD\x48\x56\xA6\x64\xEE\xC8" + "\xD7" + "\x02\x82\x01\x01" /* exponent 1 - integer of 257 bytes */ + "\x00\x96\x5E\x6F\x8F\x06\xD6\xE6\x03\x1F\x96\x76\x81\x38\xBF\x30" + "\xCC\x40\x84\xAF\xD0\xE7\x06\xA5\x24\x0E\xCE\x59\xA5\x26\xFE\x0F" + "\x74\xBB\x83\xC6\x26\x02\xAF\x3C\xA3\x6B\x9C\xFF\x68\x0C\xEB\x40" + "\x42\x46\xCB\x2E\x5E\x2C\xF4\x3A\x32\x77\x77\xED\xAF\xBA\x02\x17" + "\xE1\x93\xF0\x43\x4A\x8F\x31\x39\xEF\x72\x0F\x6B\x79\x10\x59\x84" + "\xBA\x5A\x55\x7F\x0E\xDB\xEE\xEE\xD6\xA9\xB8\x44\x9F\x3A\xC6\xB9" + "\x33\x3B\x5C\x90\x11\xD0\x9B\xCC\x8A\xBF\x0E\x10\x5B\x4B\xF1\x50" + "\x9E\x35\xB3\xE0\x6D\x7A\x95\x9C\x38\x5D\xC0\x75\x13\xC2\x15\xA7" + "\x81\xEA\xBA\xF7\x4D\x9E\x85\x9D\xF1\x7D\xBA\xD0\x45\x6F\x2A\xD0" + "\x76\xC2\x28\xD0\xAD\xA7\xB5\xDC\xE3\x6A\x99\xFF\x83\x50\xB3\x75" + "\x07\x14\x91\xAF\xEF\x74\xB5\x9F\x9A\xE0\xBA\xA9\x0B\x87\xF3\x85" + "\x5C\x40\xB2\x0E\xA7\xFD\xC6\xED\x45\x8E\xD9\x7C\xB0\xB2\x68\xC6" + "\x1D\xFD\x70\x78\x06\x41\x7F\x95\x12\x36\x9D\xE2\x58\x5D\x15\xEE" + "\x41\x49\xF5\xFA\xEC\x56\x19\xA0\xE6\xE0\xB2\x40\xE1\xD9\xD0\x03" + "\x22\x02\xCF\xD1\x3C\x07\x38\x65\x8F\x65\x0E\xAA\x32\xCE\x25\x05" + "\x16\x73\x51\xB9\x9F\x88\x0B\xCD\x30\xF3\x97\xCC\x2B\x6B\xA4\x0E" + "\x6F" + "\x02\x82\x01\x00" /* exponent 2 - integer of 256 bytes */ + "\x2A\x5F\x3F\xB8\x08\x90\x58\x47\xA9\xE4\xB1\x11\xA3\xE7\x5B\xF4" + "\x43\xBE\x08\xC3\x56\x86\x3C\x7E\x6C\x84\x96\x9C\xF9\xCB\xF6\x05" + "\x5E\x13\xB8\x11\x37\x80\xAD\xF2\xBE\x2B\x0A\x5D\xF5\xE0\xCB\xB7" + "\x00\x39\x66\x82\x41\x5F\x51\x2F\xBF\x56\xE8\x91\xC8\xAA\x6C\xFE" + "\x9F\x8C\x4A\x7D\x43\xD2\x91\x1F\xFF\x9F\xF6\x21\x1C\xB6\x46\x55" + "\x48\xCA\x38\xAB\xC1\xCD\x4D\x65\x5A\xAF\xA8\x6D\xDA\x6D\xF0\x34" + "\x10\x79\x14\x0D\xFA\xA2\x8C\x17\x54\xB4\x18\xD5\x7E\x5F\x90\x50" + "\x87\x84\xE7\xFB\xD7\x61\x53\x5D\xAB\x96\xC7\x6E\x7A\x42\xA0\xFC" + "\x07\xED\xB7\x5F\x80\xD9\x19\xFF\xFB\xFD\x9E\xC4\x73\x31\x62\x3D" + "\x6C\x9E\x15\x03\x62\xA5\x85\xCC\x19\x8E\x9D\x7F\xE3\x6D\xA8\x5D" + "\x96\xF5\xAC\x78\x3D\x81\x27\xE7\x29\xF1\x29\x1D\x09\xBB\x77\x86" + "\x6B\x65\x62\x88\xE1\x31\x1A\x22\xF7\xC5\xCE\x73\x65\x1C\xBE\xE7" + "\x63\xD3\xD3\x14\x63\x27\xAF\x28\xF3\x23\xB6\x76\xC1\xBD\x9D\x82" + "\xF4\x9B\x19\x7D\x2C\x57\xF0\xC2\x2A\x51\xAE\x95\x0D\x8C\x38\x54" + "\xF5\xC6\xA0\x51\xB7\x0E\xB9\xEC\xE7\x0D\x22\xF6\x1A\xD3\xFE\x16" + "\x21\x03\xB7\x0D\x85\xD3\x35\xC9\xDD\xE4\x59\x85\xBE\x7F\xA1\x75" + "\x02\x82\x01\x01" /* coefficient - integer of 257 bytes */ + "\x00\xB9\x48\xD2\x54\x2F\x19\x54\x64\xAE\x62\x80\x61\x89\x80\xB4" + "\x48\x0B\x8D\x7E\x1B\x0F\x50\x08\x82\x3F\xED\x75\x84\xB7\x13\xE4" + "\xF8\x8D\xA8\xBB\x54\x21\x4C\x5A\x54\x07\x16\x4B\xB4\xA4\x9E\x30" + "\xBF\x7A\x30\x1B\x39\x60\xA3\x21\x53\xFB\xB0\xDC\x0F\x7C\x2C\xFB" + "\xAA\x95\x7D\x51\x39\x28\x33\x1F\x25\x31\x53\xF5\xD2\x64\x2B\xF2" + 
"\x1E\xB3\xC0\x6A\x0B\xC9\xA4\x42\x64\x5C\xFB\x15\xA3\xE8\x4C\x3A" + "\x9C\x3C\xBE\xA3\x39\x83\x23\xE3\x6D\x18\xCC\xC2\xDC\x63\x8D\xBA" + "\x98\xE0\xE0\x31\x4A\x2B\x37\x9C\x4D\x6B\xF3\x9F\x51\xE4\x43\x5C" + "\x83\x5F\xBF\x5C\xFE\x92\x45\x01\xAF\xF5\xC2\xF4\xB7\x56\x93\xA5" + "\xF4\xAA\x67\x3C\x48\x37\xBD\x9A\x3C\xFE\xA5\x9A\xB0\xD1\x6B\x85" + "\xDD\x81\xD4\xFA\xAD\x31\x83\xA8\x22\x9B\xFD\xB4\x61\xDC\x7A\x51" + "\x59\x62\x10\x1B\x7E\x44\xA3\xFE\x90\x51\x5A\x3E\x02\x87\xAD\xFA" + "\xDD\x0B\x1F\x3D\x35\xAF\xEE\x13\x85\x51\xA7\x42\xC0\xEE\x9E\x20" + "\xE9\xD0\x29\xB2\xE4\x21\xE4\x6D\x62\xB9\xF4\x48\x4A\xD8\x46\x8E" + "\x61\xA6\x2C\x5D\xDF\x8F\x97\x2B\x3A\x75\x1D\x83\x17\x6F\xC6\xB0" + "\xDE\xFC\x14\x25\x06\x5A\x60\xBB\xB8\x21\x89\xD1\xEF\x57\xF1\x71" + "\x3D", + .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", + .c = + "\x5c\xce\x9c\xd7\x9a\x9e\xa1\xfe\x7a\x82\x3c\x68\x27\x98\xe3\x5d" + "\xd5\xd7\x07\x29\xf5\xfb\xc3\x1a\x7f\x63\x1e\x62\x31\x3b\x19\x87" + "\x79\x4f\xec\x7b\xf3\xcb\xea\x9b\x95\x52\x3a\x40\xe5\x87\x7b\x72" + "\xd1\x72\xc9\xfb\x54\x63\xd8\xc9\xd7\x2c\xfc\x7b\xc3\x14\x1e\xbc" + "\x18\xb4\x34\xa1\xbf\x14\xb1\x37\x31\x6e\xf0\x1b\x35\x19\x54\x07" + "\xf7\x99\xec\x3e\x63\xe2\xcd\x61\x28\x65\xc3\xcd\xb1\x38\x36\xa5" + "\xb2\xd7\xb0\xdc\x1f\xf5\xef\x19\xc7\x53\x32\x2d\x1c\x26\xda\xe4" + "\x0d\xd6\x90\x7e\x28\xd8\xdc\xe4\x61\x05\xd2\x25\x90\x01\xd3\x96" + "\x6d\xa6\xcf\x58\x20\xbb\x03\xf4\x01\xbc\x79\xb9\x18\xd8\xb8\xba" + "\xbd\x93\xfc\xf2\x62\x5d\x8c\x66\x1e\x0e\x84\x59\x93\xdd\xe2\x93" + "\xa2\x62\x7d\x08\x82\x7a\xdd\xfc\xb8\xbc\xc5\x4f\x9c\x4e\xbf\xb4" + "\xfc\xf4\xc5\x01\xe8\x00\x70\x4d\x28\x26\xcc\x2e\xfe\x0e\x58\x41" + "\x8b\xec\xaf\x7c\x4b\x54\xd0\xa0\x64\xf9\x32\xf4\x2e\x47\x65\x0a" + "\x67\x88\x39\x3a\xdb\xb2\xdb\x7b\xb5\xf6\x17\xa8\xd9\xc6\x5e\x28" + "\x13\x82\x8a\x99\xdb\x60\x08\xa5\x23\x37\xfa\x88\x90\x31\xc8\x9d" + "\x8f\xec\xfb\x85\x9f\xb1\xce\xa6\x24\x50\x46\x44\x47\xcb\x65\xd1" + "\xdf\xc0\xb1\x6c\x90\x1f\x99\x8e\x4d\xd5\x9e\x31\x07\x66\x87\xdf" + "\x01\xaa\x56\x3c\x71\xe0\x2b\x6f\x67\x3b\x23\xed\xc2\xbd\x03\x30" + "\x79\x76\x02\x10\x10\x98\x85\x8a\xff\xfd\x0b\xda\xa5\xd9\x32\x48" + "\x02\xa0\x0b\xb9\x2a\x8a\x18\xca\xc6\x8f\x3f\xbb\x16\xb2\xaa\x98" + "\x27\xe3\x60\x43\xed\x15\x70\xd4\x57\x15\xfe\x19\xd4\x9b\x13\x78" + "\x8a\xf7\x21\xf1\xa2\xa2\x2d\xb3\x09\xcf\x44\x91\x6e\x08\x3a\x30" + "\x81\x3e\x90\x93\x8a\x67\x33\x00\x59\x54\x9a\x25\xd3\x49\x8e\x9f" + "\xc1\x4b\xe5\x86\xf3\x50\x4c\xbc\xc5\xd3\xf5\x3a\x54\xe1\x36\x3f" + "\xe2\x5a\xb4\x37\xc0\xeb\x70\x35\xec\xf6\xb7\xe8\x44\x3b\x7b\xf3" + "\xf1\xf2\x1e\xdb\x60\x7d\xd5\xbe\xf0\x71\x34\x90\x4c\xcb\xd4\x35" + "\x51\xc7\xdd\xd8\xc9\x81\xf5\x5d\x57\x46\x2c\xb1\x7b\x9b\xaa\xcb" + "\xd1\x22\x25\x49\x44\xa3\xd4\x6b\x29\x7b\xd8\xb2\x07\x93\xbf\x3d" + "\x52\x49\x84\x79\xef\xb8\xe5\xc4\xad\xca\xa8\xc6\xf6\xa6\x76\x70" + "\x5b\x0b\xe5\x83\xc6\x0e\xef\x55\xf2\xe7\xff\x04\xea\xe6\x13\xbe" + "\x40\xe1\x40\x45\x48\x66\x75\x31\xae\x35\x64\x91\x11\x6f\xda\xee" + "\x26\x86\x45\x6f\x0b\xd5\x9f\x03\xb1\x65\x5b\xdb\xa4\xe4\xf9\x45", + .key_len = 2349, + .m_size = 8, + .c_size = 512, } }; -- cgit v1.2.3-70-g09d2 From 4e6c3df4d729f85997cbf276bfa8ffd8579b8e77 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:31 +0800 Subject: crypto: skcipher - Add low-level skcipher interface This patch allows skcipher algorithms and instances to be created and registered with the crypto API. They are accessible through the top-level skcipher interface, along with ablkcipher/blkcipher algorithms and instances. 
This patch also introduces a new parameter called chunk size which is meant for ciphers such as CTR and CTS which ostensibly can handle arbitrary lengths, but still behave like block ciphers in that you can only process a partial block at the very end. For these ciphers the block size will continue to be set to 1 as it is now while the chunk size will be set to the underlying block size. Signed-off-by: Herbert Xu --- crypto/skcipher.c | 196 +++++++++++++++++++++++++++++++++++-- include/crypto/internal/skcipher.h | 87 ++++++++++++++++ include/crypto/skcipher.h | 130 ++++++++++++++++++++++++ include/linux/crypto.h | 1 + 4 files changed, 407 insertions(+), 7 deletions(-) (limited to 'crypto') diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 69230e9d4ac9..d248008e7f7b 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -16,7 +16,11 @@ #include #include +#include #include +#include +#include +#include #include "internal.h" @@ -25,10 +29,11 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) if (alg->cra_type == &crypto_blkcipher_type) return sizeof(struct crypto_blkcipher *); - BUG_ON(alg->cra_type != &crypto_ablkcipher_type && - alg->cra_type != &crypto_givcipher_type); + if (alg->cra_type == &crypto_ablkcipher_type || + alg->cra_type == &crypto_givcipher_type) + return sizeof(struct crypto_ablkcipher *); - return sizeof(struct crypto_ablkcipher *); + return crypto_alg_extsize(alg); } static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, @@ -216,26 +221,118 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) return 0; } +static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); + + alg->exit(skcipher); +} + static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) { + struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); + if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type) return crypto_init_skcipher_ops_blkcipher(tfm); - BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type && - tfm->__crt_alg->cra_type != &crypto_givcipher_type); + if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type || + tfm->__crt_alg->cra_type == &crypto_givcipher_type) + return crypto_init_skcipher_ops_ablkcipher(tfm); + + skcipher->setkey = alg->setkey; + skcipher->encrypt = alg->encrypt; + skcipher->decrypt = alg->decrypt; + skcipher->ivsize = alg->ivsize; + skcipher->keysize = alg->max_keysize; + + if (alg->exit) + skcipher->base.exit = crypto_skcipher_exit_tfm; - return crypto_init_skcipher_ops_ablkcipher(tfm); + if (alg->init) + return alg->init(skcipher); + + return 0; +} + +static void crypto_skcipher_free_instance(struct crypto_instance *inst) +{ + struct skcipher_instance *skcipher = + container_of(inst, struct skcipher_instance, s.base); + + skcipher->free(skcipher); +} + +static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, + base); + + seq_printf(m, "type : skcipher\n"); + seq_printf(m, "async : %s\n", + alg->cra_flags & CRYPTO_ALG_ASYNC ? 
"yes" : "no"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", skcipher->min_keysize); + seq_printf(m, "max keysize : %u\n", skcipher->max_keysize); + seq_printf(m, "ivsize : %u\n", skcipher->ivsize); + seq_printf(m, "chunksize : %u\n", skcipher->chunksize); } +#ifdef CONFIG_NET +static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_blkcipher rblkcipher; + struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, + base); + + strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type)); + strncpy(rblkcipher.geniv, "", sizeof(rblkcipher.geniv)); + + rblkcipher.blocksize = alg->cra_blocksize; + rblkcipher.min_keysize = skcipher->min_keysize; + rblkcipher.max_keysize = skcipher->max_keysize; + rblkcipher.ivsize = skcipher->ivsize; + + if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, + sizeof(struct crypto_report_blkcipher), &rblkcipher)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} +#else +static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + return -ENOSYS; +} +#endif + static const struct crypto_type crypto_skcipher_type2 = { .extsize = crypto_skcipher_extsize, .init_tfm = crypto_skcipher_init_tfm, + .free = crypto_skcipher_free_instance, +#ifdef CONFIG_PROC_FS + .show = crypto_skcipher_show, +#endif + .report = crypto_skcipher_report, .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK, - .type = CRYPTO_ALG_TYPE_BLKCIPHER, + .type = CRYPTO_ALG_TYPE_SKCIPHER, .tfmsize = offsetof(struct crypto_skcipher, base), }; +int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_skcipher_type2; + return crypto_grab_spawn(&spawn->base, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_skcipher2); + struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, u32 type, u32 mask) { @@ -243,5 +340,90 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, } EXPORT_SYMBOL_GPL(crypto_alloc_skcipher); +int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask) +{ + return crypto_type_has_alg(alg_name, &crypto_skcipher_type2, + type, mask); +} +EXPORT_SYMBOL_GPL(crypto_has_skcipher2); + +static int skcipher_prepare_alg(struct skcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8) + return -EINVAL; + + if (!alg->chunksize) + alg->chunksize = base->cra_blocksize; + + base->cra_type = &crypto_skcipher_type2; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER; + + return 0; +} + +int crypto_register_skcipher(struct skcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + int err; + + err = skcipher_prepare_alg(alg); + if (err) + return err; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_skcipher); + +void crypto_unregister_skcipher(struct skcipher_alg *alg) +{ + crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_skcipher); + +int crypto_register_skciphers(struct skcipher_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_skcipher(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + for (--i; i >= 0; --i) + crypto_unregister_skcipher(&algs[i]); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_skciphers); + +void crypto_unregister_skciphers(struct skcipher_alg *algs, int 
count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_skcipher(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_skciphers); + +int skcipher_register_instance(struct crypto_template *tmpl, + struct skcipher_instance *inst) +{ + int err; + + err = skcipher_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, skcipher_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(skcipher_register_instance); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Symmetric key cipher type"); diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 2cf7a61ece59..ce6619c339fe 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -19,12 +19,46 @@ struct rtattr; +struct skcipher_instance { + void (*free)(struct skcipher_instance *inst); + union { + struct { + char head[offsetof(struct skcipher_alg, base)]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + struct crypto_skcipher_spawn { struct crypto_spawn base; }; extern const struct crypto_type crypto_givcipher_type; +static inline struct crypto_instance *skcipher_crypto_instance( + struct skcipher_instance *inst) +{ + return &inst->s.base; +} + +static inline struct skcipher_instance *skcipher_alg_instance( + struct crypto_skcipher *skcipher) +{ + return container_of(crypto_skcipher_alg(skcipher), + struct skcipher_instance, alg); +} + +static inline void *skcipher_instance_ctx(struct skcipher_instance *inst) +{ + return crypto_instance_ctx(skcipher_crypto_instance(inst)); +} + +static inline void skcipher_request_complete(struct skcipher_request *req, int err) +{ + req->base.complete(&req->base, err); +} + static inline void crypto_set_skcipher_spawn( struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) { @@ -33,6 +67,8 @@ static inline void crypto_set_skcipher_spawn( int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask); +int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, + const char *name, u32 type, u32 mask); struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask); @@ -47,6 +83,12 @@ static inline struct crypto_alg *crypto_skcipher_spawn_alg( return spawn->base.alg; } +static inline struct skcipher_alg *crypto_spawn_skcipher_alg( + struct crypto_skcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct skcipher_alg, base); +} + static inline struct crypto_ablkcipher *crypto_spawn_skcipher( struct crypto_skcipher_spawn *spawn) { @@ -55,6 +97,25 @@ static inline struct crypto_ablkcipher *crypto_spawn_skcipher( crypto_skcipher_mask(0))); } +static inline struct crypto_skcipher *crypto_spawn_skcipher2( + struct crypto_skcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_skcipher_set_reqsize( + struct crypto_skcipher *skcipher, unsigned int reqsize) +{ + skcipher->reqsize = reqsize; +} + +int crypto_register_skcipher(struct skcipher_alg *alg); +void crypto_unregister_skcipher(struct skcipher_alg *alg); +int crypto_register_skciphers(struct skcipher_alg *algs, int count); +void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); +int skcipher_register_instance(struct crypto_template *tmpl, + struct skcipher_instance *inst); + int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req); int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req); const char *crypto_default_geniv(const struct crypto_alg *alg); @@ -122,5 +183,31 @@ 
static inline u32 skcipher_request_flags(struct skcipher_request *req) return req->base.flags; } +static inline unsigned int crypto_skcipher_alg_min_keysize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.min_keysize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.min_keysize; + + return alg->min_keysize; +} + +static inline unsigned int crypto_skcipher_alg_max_keysize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.max_keysize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.max_keysize; + + return alg->max_keysize; +} + #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 0f987f50bb52..a381f57ea695 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -65,6 +65,75 @@ struct crypto_skcipher { struct crypto_tfm base; }; +/** + * struct skcipher_alg - symmetric key cipher definition + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt + * the supplied scatterlist containing the blocks of data. The crypto + * API consumer is responsible for aligning the entries of the + * scatterlist properly and making sure the chunks are correctly + * sized. In case a software fallback was put in place in the + * @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. In case the + * key was stored in transformation context, the key might need to be + * re-programmed into the hardware in this function. This function + * shall not modify the transformation context, as this function may + * be called in parallel with the same transformation object. + * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt + * and the conditions are exactly the same. + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. 
+ * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * @chunksize: Equal to the block size except for stream ciphers such as + * CTR where it is set to the underlying block size. + * + * All fields except @ivsize are mandatory and must be filled. + */ +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct skcipher_request *req); + int (*decrypt)(struct skcipher_request *req); + int (*init)(struct crypto_skcipher *tfm); + void (*exit)(struct crypto_skcipher *tfm); + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + + struct crypto_alg base; +}; + #define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ char __##name##_desc[sizeof(struct skcipher_request) + \ crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ @@ -231,12 +300,43 @@ static inline int crypto_has_skcipher(const char *alg_name, u32 type, crypto_skcipher_mask(mask)); } +/** + * crypto_has_skcipher2() - Search for the availability of an skcipher. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * skcipher + * @type: specifies the type of the skcipher + * @mask: specifies the mask for the skcipher + * + * Return: true when the skcipher is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask); + static inline const char *crypto_skcipher_driver_name( struct crypto_skcipher *tfm) { return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); } +static inline struct skcipher_alg *crypto_skcipher_alg( + struct crypto_skcipher *tfm) +{ + return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, + struct skcipher_alg, base); +} + +static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blkcipher.ivsize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_ablkcipher.ivsize; + + return alg->ivsize; +} + /** * crypto_skcipher_ivsize() - obtain IV size * @tfm: cipher handle @@ -251,6 +351,36 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) return tfm->ivsize; } +static inline unsigned int crypto_skcipher_alg_chunksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->chunksize; +} + +/** + * crypto_skcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. 
+ * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_skcipher_chunksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); +} + /** * crypto_skcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 992cfc2e5df1..37a652d1639d 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -47,6 +47,7 @@ #define CRYPTO_ALG_TYPE_AEAD 0x00000003 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_KPP 0x00000008 #define CRYPTO_ALG_TYPE_RNG 0x0000000c -- cgit v1.2.3-70-g09d2 From a0129733a3f5eaa973c1ea76848d905b851548f1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:32 +0800 Subject: crypto: null - Add new default null skcipher Currently, the default null skcipher is actually a crypto_blkcipher. This patch creates a synchronous crypto_skcipher version of the null cipher which unfortunately has to settle for the name skcipher2. Signed-off-by: Herbert Xu --- crypto/crypto_null.c | 38 ++++++++++++++++++++++++++++++++++++++ include/crypto/null.h | 2 ++ 2 files changed, 40 insertions(+) (limited to 'crypto') diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index 941c9a434d50..c3f683910d07 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -28,6 +28,8 @@ static DEFINE_MUTEX(crypto_default_null_skcipher_lock); static struct crypto_blkcipher *crypto_default_null_skcipher; static int crypto_default_null_skcipher_refcnt; +static struct crypto_skcipher *crypto_default_null_skcipher2; +static int crypto_default_null_skcipher2_refcnt; static int null_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) @@ -188,6 +190,42 @@ void crypto_put_default_null_skcipher(void) } EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher); +struct crypto_skcipher *crypto_get_default_null_skcipher2(void) +{ + struct crypto_skcipher *tfm; + + mutex_lock(&crypto_default_null_skcipher_lock); + tfm = crypto_default_null_skcipher2; + + if (!tfm) { + tfm = crypto_alloc_skcipher("ecb(cipher_null)", + 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + goto unlock; + + crypto_default_null_skcipher2 = tfm; + } + + crypto_default_null_skcipher2_refcnt++; + +unlock: + mutex_unlock(&crypto_default_null_skcipher_lock); + + return tfm; +} +EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher2); + +void crypto_put_default_null_skcipher2(void) +{ + mutex_lock(&crypto_default_null_skcipher_lock); + if (!--crypto_default_null_skcipher2_refcnt) { + crypto_free_skcipher(crypto_default_null_skcipher2); + crypto_default_null_skcipher2 = NULL; + } + mutex_unlock(&crypto_default_null_skcipher_lock); +} +EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher2); + static int __init crypto_null_mod_init(void) { int ret = 0; diff --git a/include/crypto/null.h b/include/crypto/null.h index 06dc30d9f56e..dda87cbb62b2 100644 --- a/include/crypto/null.h +++ b/include/crypto/null.h @@ -10,5 +10,7 @@ struct crypto_blkcipher *crypto_get_default_null_skcipher(void); void crypto_put_default_null_skcipher(void); +struct crypto_skcipher *crypto_get_default_null_skcipher2(void); +void crypto_put_default_null_skcipher2(void); #endif -- cgit v1.2.3-70-g09d2 From 7a530aa9cf3f282b6b123b9296dfce80ed8f068e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:33 +0800 Subject:
crypto: aead - Add chunk size This patch adds a chunk size parameter to aead algorithms, just like the chunk size for skcipher algorithms. However, unlike skcipher we do not currently export this to AEAD users. It is only meant to be used by AEAD implementors for now. Signed-off-by: Herbert Xu --- crypto/aead.c | 6 +++++- include/crypto/aead.h | 12 +++++++----- include/crypto/internal/aead.h | 21 +++++++++++++++++++++ 3 files changed, 33 insertions(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/aead.c b/crypto/aead.c index 9b18a1e40d6a..b155cbc3a0dd 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -346,9 +346,13 @@ static int aead_prepare_alg(struct aead_alg *alg) { struct crypto_alg *base = &alg->base; - if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) + if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) > + PAGE_SIZE / 8) return -EINVAL; + if (!alg->chunksize) + alg->chunksize = base->cra_blocksize; + base->cra_type = &crypto_aead_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_AEAD; diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 75174f80a106..12f84327ca36 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -112,11 +112,12 @@ struct aead_request { * supplied during the decryption operation. This function is also * responsible for checking the authentication tag size for * validity. - * @setkey: see struct ablkcipher_alg - * @encrypt: see struct ablkcipher_alg - * @decrypt: see struct ablkcipher_alg - * @geniv: see struct ablkcipher_alg - * @ivsize: see struct ablkcipher_alg + * @setkey: see struct skcipher_alg + * @encrypt: see struct skcipher_alg + * @decrypt: see struct skcipher_alg + * @geniv: see struct skcipher_alg + * @ivsize: see struct skcipher_alg + * @chunksize: see struct skcipher_alg * @init: Initialize the cryptographic transformation object. This function * is used to initialize the cryptographic transformation object. * This function is called only once at the instantiation time, right @@ -145,6 +146,7 @@ struct aead_alg { unsigned int ivsize; unsigned int maxauthsize; + unsigned int chunksize; struct crypto_alg base; }; diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h index da3864991d4c..6ad8e31d3868 100644 --- a/include/crypto/internal/aead.h +++ b/include/crypto/internal/aead.h @@ -159,6 +159,27 @@ static inline struct aead_request *aead_get_backlog(struct aead_queue *queue) return req ? container_of(req, struct aead_request, base) : NULL; } +static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg) +{ + return alg->chunksize; +} + +/** + * crypto_aead_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CCM. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. 
+ * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm) +{ + return crypto_aead_alg_chunksize(crypto_aead_alg(tfm)); +} + int crypto_register_aead(struct aead_alg *alg); void crypto_unregister_aead(struct aead_alg *alg); int crypto_register_aeads(struct aead_alg *algs, int count); -- cgit v1.2.3-70-g09d2 From 7217d49f1684b8cd8f6e0e9010efb49f81787cd1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:34 +0800 Subject: crypto: authenc - Use skcipher This patch converts authenc to use the new skcipher interface as opposed to ablkcipher. It also fixes a little bug where if a sync version of authenc is requested we may still end up using an async ahash. This should have no effect as none of the authenc users can request for a sync authenc. Signed-off-by: Herbert Xu --- crypto/authenc.c | 107 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 56 insertions(+), 51 deletions(-) (limited to 'crypto') diff --git a/crypto/authenc.c b/crypto/authenc.c index 309fbc17222d..a7e1ac786c5d 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -32,8 +32,8 @@ struct authenc_instance_ctx { struct crypto_authenc_ctx { struct crypto_ahash *auth; - struct crypto_ablkcipher *enc; - struct crypto_blkcipher *null; + struct crypto_skcipher *enc; + struct crypto_skcipher *null; }; struct authenc_request_ctx { @@ -83,7 +83,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, { struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct crypto_ahash *auth = ctx->auth; - struct crypto_ablkcipher *enc = ctx->enc; + struct crypto_skcipher *enc = ctx->enc; struct crypto_authenc_keys keys; int err = -EINVAL; @@ -100,11 +100,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, if (err) goto out; - crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); - crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) & - CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); - crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) & + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen); + crypto_aead_set_flags(authenc, crypto_skcipher_get_flags(enc) & CRYPTO_TFM_RES_MASK); out: @@ -184,12 +184,15 @@ static int crypto_authenc_copy_assoc(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); - struct blkcipher_desc desc = { - .tfm = ctx->null, - }; + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); - return crypto_blkcipher_encrypt(&desc, req->dst, req->src, - req->assoclen); + skcipher_request_set_tfm(skreq, ctx->null); + skcipher_request_set_callback(skreq, aead_request_flags(req), + NULL, NULL); + skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen, + NULL); + + return crypto_skcipher_encrypt(skreq); } static int crypto_authenc_encrypt(struct aead_request *req) @@ -199,10 +202,10 @@ static int crypto_authenc_encrypt(struct aead_request *req) struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); - struct crypto_ablkcipher *enc = ctx->enc; + struct crypto_skcipher *enc = ctx->enc; unsigned int cryptlen = req->cryptlen; - struct 
ablkcipher_request *abreq = (void *)(areq_ctx->tail + - ictx->reqoff); + struct skcipher_request *skreq = (void *)(areq_ctx->tail + + ictx->reqoff); struct scatterlist *src, *dst; int err; @@ -217,12 +220,12 @@ static int crypto_authenc_encrypt(struct aead_request *req) dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); } - ablkcipher_request_set_tfm(abreq, enc); - ablkcipher_request_set_callback(abreq, aead_request_flags(req), - crypto_authenc_encrypt_done, req); - ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv); + skcipher_request_set_tfm(skreq, enc); + skcipher_request_set_callback(skreq, aead_request_flags(req), + crypto_authenc_encrypt_done, req); + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv); - err = crypto_ablkcipher_encrypt(abreq); + err = crypto_skcipher_encrypt(skreq); if (err) return err; @@ -238,8 +241,8 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req, struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); - struct ablkcipher_request *abreq = (void *)(areq_ctx->tail + - ictx->reqoff); + struct skcipher_request *skreq = (void *)(areq_ctx->tail + + ictx->reqoff); unsigned int authsize = crypto_aead_authsize(authenc); u8 *ihash = ahreq->result + authsize; struct scatterlist *src, *dst; @@ -255,13 +258,13 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req, if (req->src != req->dst) dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); - ablkcipher_request_set_tfm(abreq, ctx->enc); - ablkcipher_request_set_callback(abreq, aead_request_flags(req), - req->base.complete, req->base.data); - ablkcipher_request_set_crypt(abreq, src, dst, - req->cryptlen - authsize, req->iv); + skcipher_request_set_tfm(skreq, ctx->enc); + skcipher_request_set_callback(skreq, aead_request_flags(req), + req->base.complete, req->base.data); + skcipher_request_set_crypt(skreq, src, dst, + req->cryptlen - authsize, req->iv); - return crypto_ablkcipher_decrypt(abreq); + return crypto_skcipher_decrypt(skreq); } static void authenc_verify_ahash_done(struct crypto_async_request *areq, @@ -313,20 +316,20 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_ahash *auth; - struct crypto_ablkcipher *enc; - struct crypto_blkcipher *null; + struct crypto_skcipher *enc; + struct crypto_skcipher *null; int err; auth = crypto_spawn_ahash(&ictx->auth); if (IS_ERR(auth)) return PTR_ERR(auth); - enc = crypto_spawn_skcipher(&ictx->enc); + enc = crypto_spawn_skcipher2(&ictx->enc); err = PTR_ERR(enc); if (IS_ERR(enc)) goto err_free_ahash; - null = crypto_get_default_null_skcipher(); + null = crypto_get_default_null_skcipher2(); err = PTR_ERR(null); if (IS_ERR(null)) goto err_free_skcipher; @@ -342,13 +345,13 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) max_t(unsigned int, crypto_ahash_reqsize(auth) + sizeof(struct ahash_request), - sizeof(struct ablkcipher_request) + - crypto_ablkcipher_reqsize(enc))); + sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(enc))); return 0; err_free_skcipher: - crypto_free_ablkcipher(enc); + crypto_free_skcipher(enc); err_free_ahash: crypto_free_ahash(auth); return err; @@ -359,8 +362,8 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm) struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); 
crypto_free_ahash(ctx->auth); - crypto_free_ablkcipher(ctx->enc); - crypto_put_default_null_skcipher(); + crypto_free_skcipher(ctx->enc); + crypto_put_default_null_skcipher2(); } static void crypto_authenc_free(struct aead_instance *inst) @@ -379,7 +382,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl, struct aead_instance *inst; struct hash_alg_common *auth; struct crypto_alg *auth_base; - struct crypto_alg *enc; + struct skcipher_alg *enc; struct authenc_instance_ctx *ctx; const char *enc_name; int err; @@ -417,38 +420,40 @@ static int crypto_authenc_create(struct crypto_template *tmpl, goto err_free_inst; crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); - err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_drop_auth; - enc = crypto_skcipher_spawn_alg(&ctx->enc); + enc = crypto_spawn_skcipher_alg(&ctx->enc); ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask, auth_base->cra_alignmask + 1); err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >= + "authenc(%s,%s)", auth_base->cra_name, + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_enc; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)", auth_base->cra_driver_name, - enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_enc; - inst->alg.base.cra_flags = (auth_base->cra_flags | enc->cra_flags) & - CRYPTO_ALG_ASYNC; - inst->alg.base.cra_priority = enc->cra_priority * 10 + + inst->alg.base.cra_flags = (auth_base->cra_flags | + enc->base.cra_flags) & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; - inst->alg.base.cra_blocksize = enc->cra_blocksize; + inst->alg.base.cra_blocksize = enc->base.cra_blocksize; inst->alg.base.cra_alignmask = auth_base->cra_alignmask | - enc->cra_alignmask; + enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx); - inst->alg.ivsize = enc->cra_ablkcipher.ivsize; + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc); + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc); inst->alg.maxauthsize = auth->digestsize; inst->alg.init = crypto_authenc_init_tfm; -- cgit v1.2.3-70-g09d2 From e75445a8445215ab4623f180cbc930ecf271181f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:35 +0800 Subject: crypto: authencesn - Use skcipher This patch converts authencesn to use the new skcipher interface as opposed to ablkcipher. It also fixes a little bug where if a sync version of authencesn is requested we may still end up using an async ahash. This should have no effect as none of the authencesn users can request for a sync authencesn. 
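For readers following the conversion, the copy-via-null-cipher path that both authenc patches switch to can be read in isolation as the sketch below. The helper name and its parameters are invented; SKCIPHER_REQUEST_ON_STACK and the skcipher_request_set_*() calls are exactly the interfaces used in the diff.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Copy src to dst by "encrypting" with the synchronous null skcipher. */
static int null_skcipher_copy(struct crypto_skcipher *null_tfm,
                              struct scatterlist *src,
                              struct scatterlist *dst,
                              unsigned int len, u32 flags)
{
        SKCIPHER_REQUEST_ON_STACK(req, null_tfm);

        skcipher_request_set_tfm(req, null_tfm);
        skcipher_request_set_callback(req, flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, len, NULL);

        /* Synchronous tfm, so this returns only once the copy is done. */
        return crypto_skcipher_encrypt(req);
}

Because the default null skcipher2 is allocated with CRYPTO_ALG_ASYNC masked out, allocating the request on the stack is safe here.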
Signed-off-by: Herbert Xu --- crypto/authencesn.c | 104 +++++++++++++++++++++++++++------------------------- 1 file changed, 54 insertions(+), 50 deletions(-) (limited to 'crypto') diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 0662b1848c5d..121010ac9962 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -35,8 +35,8 @@ struct authenc_esn_instance_ctx { struct crypto_authenc_esn_ctx { unsigned int reqoff; struct crypto_ahash *auth; - struct crypto_ablkcipher *enc; - struct crypto_blkcipher *null; + struct crypto_skcipher *enc; + struct crypto_skcipher *null; }; struct authenc_esn_request_ctx { @@ -65,7 +65,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * { struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct crypto_ahash *auth = ctx->auth; - struct crypto_ablkcipher *enc = ctx->enc; + struct crypto_skcipher *enc = ctx->enc; struct crypto_authenc_keys keys; int err = -EINVAL; @@ -82,11 +82,11 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * if (err) goto out; - crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); - crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); - crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen); + crypto_aead_set_flags(authenc_esn, crypto_skcipher_get_flags(enc) & CRYPTO_TFM_RES_MASK); out: @@ -182,11 +182,14 @@ static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); - struct blkcipher_desc desc = { - .tfm = ctx->null, - }; + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); - return crypto_blkcipher_encrypt(&desc, req->dst, req->src, len); + skcipher_request_set_tfm(skreq, ctx->null); + skcipher_request_set_callback(skreq, aead_request_flags(req), + NULL, NULL); + skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL); + + return crypto_skcipher_encrypt(skreq); } static int crypto_authenc_esn_encrypt(struct aead_request *req) @@ -194,9 +197,9 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req) struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); - struct ablkcipher_request *abreq = (void *)(areq_ctx->tail - + ctx->reqoff); - struct crypto_ablkcipher *enc = ctx->enc; + struct skcipher_request *skreq = (void *)(areq_ctx->tail + + ctx->reqoff); + struct crypto_skcipher *enc = ctx->enc; unsigned int assoclen = req->assoclen; unsigned int cryptlen = req->cryptlen; struct scatterlist *src, *dst; @@ -215,12 +218,12 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req) dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen); } - ablkcipher_request_set_tfm(abreq, enc); - ablkcipher_request_set_callback(abreq, aead_request_flags(req), - crypto_authenc_esn_encrypt_done, req); - ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv); + skcipher_request_set_tfm(skreq, enc); + skcipher_request_set_callback(skreq, aead_request_flags(req), + crypto_authenc_esn_encrypt_done, req); + skcipher_request_set_crypt(skreq, src, 
dst, cryptlen, req->iv); - err = crypto_ablkcipher_encrypt(abreq); + err = crypto_skcipher_encrypt(skreq); if (err) return err; @@ -234,8 +237,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, unsigned int authsize = crypto_aead_authsize(authenc_esn); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); - struct ablkcipher_request *abreq = (void *)(areq_ctx->tail - + ctx->reqoff); + struct skcipher_request *skreq = (void *)(areq_ctx->tail + + ctx->reqoff); struct crypto_ahash *auth = ctx->auth; u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail, crypto_ahash_alignmask(auth) + 1); @@ -256,12 +259,12 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, sg_init_table(areq_ctx->dst, 2); dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen); - ablkcipher_request_set_tfm(abreq, ctx->enc); - ablkcipher_request_set_callback(abreq, flags, - req->base.complete, req->base.data); - ablkcipher_request_set_crypt(abreq, dst, dst, cryptlen, req->iv); + skcipher_request_set_tfm(skreq, ctx->enc); + skcipher_request_set_callback(skreq, flags, + req->base.complete, req->base.data); + skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv); - return crypto_ablkcipher_decrypt(abreq); + return crypto_skcipher_decrypt(skreq); } static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, @@ -331,20 +334,20 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_ahash *auth; - struct crypto_ablkcipher *enc; - struct crypto_blkcipher *null; + struct crypto_skcipher *enc; + struct crypto_skcipher *null; int err; auth = crypto_spawn_ahash(&ictx->auth); if (IS_ERR(auth)) return PTR_ERR(auth); - enc = crypto_spawn_skcipher(&ictx->enc); + enc = crypto_spawn_skcipher2(&ictx->enc); err = PTR_ERR(enc); if (IS_ERR(enc)) goto err_free_ahash; - null = crypto_get_default_null_skcipher(); + null = crypto_get_default_null_skcipher2(); err = PTR_ERR(null); if (IS_ERR(null)) goto err_free_skcipher; @@ -361,15 +364,15 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) sizeof(struct authenc_esn_request_ctx) + ctx->reqoff + max_t(unsigned int, - crypto_ahash_reqsize(auth) + - sizeof(struct ahash_request), - sizeof(struct skcipher_givcrypt_request) + - crypto_ablkcipher_reqsize(enc))); + crypto_ahash_reqsize(auth) + + sizeof(struct ahash_request), + sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(enc))); return 0; err_free_skcipher: - crypto_free_ablkcipher(enc); + crypto_free_skcipher(enc); err_free_ahash: crypto_free_ahash(auth); return err; @@ -380,8 +383,8 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm) struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_ahash(ctx->auth); - crypto_free_ablkcipher(ctx->enc); - crypto_put_default_null_skcipher(); + crypto_free_skcipher(ctx->enc); + crypto_put_default_null_skcipher2(); } static void crypto_authenc_esn_free(struct aead_instance *inst) @@ -400,7 +403,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct aead_instance *inst; struct hash_alg_common *auth; struct crypto_alg *auth_base; - struct crypto_alg *enc; + struct skcipher_alg *enc; struct authenc_esn_instance_ctx *ctx; const char *enc_name; int err; @@ -438,35 +441,36 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, goto err_free_inst; 
crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); - err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_drop_auth; - enc = crypto_skcipher_spawn_alg(&ctx->enc); + enc = crypto_spawn_skcipher_alg(&ctx->enc); err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "authencesn(%s,%s)", auth_base->cra_name, - enc->cra_name) >= CRYPTO_MAX_ALG_NAME) + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_enc; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "authencesn(%s,%s)", auth_base->cra_driver_name, - enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_enc; - inst->alg.base.cra_flags = (auth_base->cra_flags | enc->cra_flags) & - CRYPTO_ALG_ASYNC; - inst->alg.base.cra_priority = enc->cra_priority * 10 + + inst->alg.base.cra_flags = (auth_base->cra_flags | + enc->base.cra_flags) & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; - inst->alg.base.cra_blocksize = enc->cra_blocksize; + inst->alg.base.cra_blocksize = enc->base.cra_blocksize; inst->alg.base.cra_alignmask = auth_base->cra_alignmask | - enc->cra_alignmask; + enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); - inst->alg.ivsize = enc->cra_ablkcipher.ivsize; + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc); + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc); inst->alg.maxauthsize = auth->digestsize; inst->alg.init = crypto_authenc_esn_init_tfm; -- cgit v1.2.3-70-g09d2 From b2b39c2f976bc7b69e26dbe4530593d2867544eb Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:37 +0800 Subject: crypto: ctr - Use skcipher in rfc3686 This patch converts rfc3686 to use the new skcipher interface as opposed to ablkcipher. 
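From the caller's perspective the converted rfc3686 template keeps its existing key convention: the 32-bit nonce rides in the last four bytes of the key. A hypothetical allocation sketch follows (the helper is invented; the allocation and setkey calls are the interfaces this series provides):

#include <crypto/ctr.h>
#include <crypto/skcipher.h>
#include <linux/err.h>

static struct crypto_skcipher *rfc3686_alloc(const u8 *key_with_nonce,
                                             unsigned int keylen)
{
        struct crypto_skcipher *tfm;
        int err;

        /* e.g. keylen = 16 (AES-128) + CTR_RFC3686_NONCE_SIZE (4) */
        tfm = crypto_alloc_skcipher("rfc3686(ctr(aes))", 0, 0);
        if (IS_ERR(tfm))
                return tfm;

        err = crypto_skcipher_setkey(tfm, key_with_nonce, keylen);
        if (err) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(err);
        }

        /*
         * Each request then passes only the 8-byte IV; the template
         * builds the 16-byte counter block as nonce || IV || 1.
         */
        return tfm;
}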
Signed-off-by: Herbert Xu --- crypto/ctr.c | 183 ++++++++++++++++++++++++++++++----------------------------- 1 file changed, 94 insertions(+), 89 deletions(-) (limited to 'crypto') diff --git a/crypto/ctr.c b/crypto/ctr.c index 2386f7313952..ff4d21eddb83 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -26,13 +26,13 @@ struct crypto_ctr_ctx { }; struct crypto_rfc3686_ctx { - struct crypto_ablkcipher *child; + struct crypto_skcipher *child; u8 nonce[CTR_RFC3686_NONCE_SIZE]; }; struct crypto_rfc3686_req_ctx { u8 iv[CTR_RFC3686_BLOCK_SIZE]; - struct ablkcipher_request subreq CRYPTO_MINALIGN_ATTR; + struct skcipher_request subreq CRYPTO_MINALIGN_ATTR; }; static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, @@ -249,11 +249,11 @@ static struct crypto_template crypto_ctr_tmpl = { .module = THIS_MODULE, }; -static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent, +static int crypto_rfc3686_setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { - struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(parent); - struct crypto_ablkcipher *child = ctx->child; + struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent); + struct crypto_skcipher *child = ctx->child; int err; /* the nonce is stored in bytes at end of key */ @@ -265,173 +265,178 @@ static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent, keylen -= CTR_RFC3686_NONCE_SIZE; - crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(child, key, keylen); - crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); + crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(child, key, keylen); + crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); return err; } -static int crypto_rfc3686_crypt(struct ablkcipher_request *req) +static int crypto_rfc3686_crypt(struct skcipher_request *req) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); - struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm); - struct crypto_ablkcipher *child = ctx->child; - unsigned long align = crypto_ablkcipher_alignmask(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *child = ctx->child; + unsigned long align = crypto_skcipher_alignmask(tfm); struct crypto_rfc3686_req_ctx *rctx = - (void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1); - struct ablkcipher_request *subreq = &rctx->subreq; + (void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1); + struct skcipher_request *subreq = &rctx->subreq; u8 *iv = rctx->iv; /* set up counter block */ memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); - memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE); + memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE); /* initialize counter portion of counter block */ *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); - ablkcipher_request_set_tfm(subreq, child); - ablkcipher_request_set_callback(subreq, req->base.flags, - req->base.complete, req->base.data); - ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes, - iv); + skcipher_request_set_tfm(subreq, child); + 
skcipher_request_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, iv); - return crypto_ablkcipher_encrypt(subreq); + return crypto_skcipher_encrypt(subreq); } -static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm) +static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm) { - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst); - struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_ablkcipher *cipher; + struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); + struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *cipher; unsigned long align; + unsigned int reqsize; - cipher = crypto_spawn_skcipher(spawn); + cipher = crypto_spawn_skcipher2(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; - align = crypto_tfm_alg_alignmask(tfm); + align = crypto_skcipher_alignmask(tfm); align &= ~(crypto_tfm_ctx_alignment() - 1); - tfm->crt_ablkcipher.reqsize = align + - sizeof(struct crypto_rfc3686_req_ctx) + - crypto_ablkcipher_reqsize(cipher); + reqsize = align + sizeof(struct crypto_rfc3686_req_ctx) + + crypto_skcipher_reqsize(cipher); + crypto_skcipher_set_reqsize(tfm, reqsize); return 0; } -static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm) +static void crypto_rfc3686_exit_tfm(struct crypto_skcipher *tfm) { - struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm); + + crypto_free_skcipher(ctx->child); +} - crypto_free_ablkcipher(ctx->child); +static void crypto_rfc3686_free(struct skcipher_instance *inst) +{ + struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); + + crypto_drop_skcipher(spawn); + kfree(inst); } -static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb) +static int crypto_rfc3686_create(struct crypto_template *tmpl, + struct rtattr **tb) { struct crypto_attr_type *algt; - struct crypto_instance *inst; - struct crypto_alg *alg; + struct skcipher_instance *inst; + struct skcipher_alg *alg; struct crypto_skcipher_spawn *spawn; const char *cipher_name; int err; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) - return ERR_CAST(algt); + return PTR_ERR(algt); - if ((algt->type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & algt->mask) - return ERR_PTR(-EINVAL); + if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) + return -EINVAL; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) - return ERR_CAST(cipher_name); + return PTR_ERR(cipher_name); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) - return ERR_PTR(-ENOMEM); + return -ENOMEM; - spawn = crypto_instance_ctx(inst); + spawn = skcipher_instance_ctx(inst); - crypto_set_skcipher_spawn(spawn, inst); - err = crypto_grab_skcipher(spawn, cipher_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); + err = crypto_grab_skcipher2(spawn, cipher_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_free_inst; - alg = crypto_skcipher_spawn_alg(spawn); + alg = crypto_spawn_skcipher_alg(spawn); /* We only support 16-byte blocks. 
*/ err = -EINVAL; - if (alg->cra_ablkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE) + if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE) goto err_drop_spawn; /* Not a stream cipher? */ - if (alg->cra_blocksize != 1) + if (alg->base.cra_blocksize != 1) goto err_drop_spawn; err = -ENAMETOOLONG; - if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)", - alg->cra_name) >= CRYPTO_MAX_ALG_NAME) + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_spawn; - if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "rfc3686(%s)", alg->cra_driver_name) >= - CRYPTO_MAX_ALG_NAME) + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "rfc3686(%s)", alg->base.cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) goto err_drop_spawn; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = 1; - inst->alg.cra_alignmask = alg->cra_alignmask; + inst->alg.base.cra_priority = alg->base.cra_priority; + inst->alg.base.cra_blocksize = 1; + inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | - (alg->cra_flags & CRYPTO_ALG_ASYNC); - inst->alg.cra_type = &crypto_ablkcipher_type; + inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.cra_ablkcipher.ivsize = CTR_RFC3686_IV_SIZE; - inst->alg.cra_ablkcipher.min_keysize = - alg->cra_ablkcipher.min_keysize + CTR_RFC3686_NONCE_SIZE; - inst->alg.cra_ablkcipher.max_keysize = - alg->cra_ablkcipher.max_keysize + CTR_RFC3686_NONCE_SIZE; + inst->alg.ivsize = CTR_RFC3686_IV_SIZE; + inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); + inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + + CTR_RFC3686_NONCE_SIZE; + inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + + CTR_RFC3686_NONCE_SIZE; - inst->alg.cra_ablkcipher.geniv = "seqiv"; + inst->alg.setkey = crypto_rfc3686_setkey; + inst->alg.encrypt = crypto_rfc3686_crypt; + inst->alg.decrypt = crypto_rfc3686_crypt; - inst->alg.cra_ablkcipher.setkey = crypto_rfc3686_setkey; - inst->alg.cra_ablkcipher.encrypt = crypto_rfc3686_crypt; - inst->alg.cra_ablkcipher.decrypt = crypto_rfc3686_crypt; + inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx); - inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx); + inst->alg.init = crypto_rfc3686_init_tfm; + inst->alg.exit = crypto_rfc3686_exit_tfm; - inst->alg.cra_init = crypto_rfc3686_init_tfm; - inst->alg.cra_exit = crypto_rfc3686_exit_tfm; + inst->free = crypto_rfc3686_free; - return inst; + err = skcipher_register_instance(tmpl, inst); + if (err) + goto err_drop_spawn; + +out: + return err; err_drop_spawn: crypto_drop_skcipher(spawn); err_free_inst: kfree(inst); - return ERR_PTR(err); -} - -static void crypto_rfc3686_free(struct crypto_instance *inst) -{ - struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst); - - crypto_drop_skcipher(spawn); - kfree(inst); + goto out; } static struct crypto_template crypto_rfc3686_tmpl = { .name = "rfc3686", - .alloc = crypto_rfc3686_alloc, - .free = crypto_rfc3686_free, + .create = crypto_rfc3686_create, .module = THIS_MODULE, }; -- cgit v1.2.3-70-g09d2 From 464b93a3c7d16831bca9e02e6c9624e793d7e5cc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:38 +0800 Subject: crypto: ccm - Use skcipher This patch converts ccm to use the new skcipher interface as opposed to ablkcipher. 
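The spawn-handling idiom shared by this and the neighbouring template conversions condenses to the hypothetical helper below. The helper name is invented, but every call in it appears in these patches, as do the two sanity checks (stream cipher, 16-byte counter block) that ccm applies to its ctr spawn:

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>

static int ccm_grab_ctr(struct aead_instance *inst,
                        struct crypto_skcipher_spawn *spawn,
                        const char *ctr_name, u32 type, u32 mask)
{
        struct skcipher_alg *ctr;
        int err;

        crypto_set_skcipher_spawn(spawn, aead_crypto_instance(inst));
        err = crypto_grab_skcipher2(spawn, ctr_name, 0,
                                    crypto_requires_sync(type, mask));
        if (err)
                return err;

        ctr = crypto_spawn_skcipher_alg(spawn);

        /* Not a stream cipher, or counter block not 16 bytes? Bail out. */
        if (ctr->base.cra_blocksize != 1 ||
            crypto_skcipher_alg_ivsize(ctr) != 16) {
                crypto_drop_skcipher(spawn);
                return -EINVAL;
        }

        return 0;
}

The instance then inherits the spawn's chunk size via crypto_skcipher_alg_chunksize(ctr), as the diff below does.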
Signed-off-by: Herbert Xu --- crypto/ccm.c | 72 +++++++++++++++++++++++++++++++----------------------------- 1 file changed, 37 insertions(+), 35 deletions(-) (limited to 'crypto') diff --git a/crypto/ccm.c b/crypto/ccm.c index cc31ea4335bf..006d8575ef5c 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -28,7 +28,7 @@ struct ccm_instance_ctx { struct crypto_ccm_ctx { struct crypto_cipher *cipher; - struct crypto_ablkcipher *ctr; + struct crypto_skcipher *ctr; }; struct crypto_rfc4309_ctx { @@ -50,7 +50,7 @@ struct crypto_ccm_req_priv_ctx { u32 flags; struct scatterlist src[3]; struct scatterlist dst[3]; - struct ablkcipher_request abreq; + struct skcipher_request skreq; }; static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( @@ -83,15 +83,15 @@ static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); - struct crypto_ablkcipher *ctr = ctx->ctr; + struct crypto_skcipher *ctr = ctx->ctr; struct crypto_cipher *tfm = ctx->cipher; int err = 0; - crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); - crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & - CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(ctr, key, keylen); - crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & + crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(ctr, key, keylen); + crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) & CRYPTO_TFM_RES_MASK); if (err) goto out; @@ -347,7 +347,7 @@ static int crypto_ccm_encrypt(struct aead_request *req) struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); - struct ablkcipher_request *abreq = &pctx->abreq; + struct skcipher_request *skreq = &pctx->skreq; struct scatterlist *dst; unsigned int cryptlen = req->cryptlen; u8 *odata = pctx->odata; @@ -366,11 +366,11 @@ static int crypto_ccm_encrypt(struct aead_request *req) if (req->src != req->dst) dst = pctx->dst; - ablkcipher_request_set_tfm(abreq, ctx->ctr); - ablkcipher_request_set_callback(abreq, pctx->flags, - crypto_ccm_encrypt_done, req); - ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); - err = crypto_ablkcipher_encrypt(abreq); + skcipher_request_set_tfm(skreq, ctx->ctr); + skcipher_request_set_callback(skreq, pctx->flags, + crypto_ccm_encrypt_done, req); + skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv); + err = crypto_skcipher_encrypt(skreq); if (err) return err; @@ -407,7 +407,7 @@ static int crypto_ccm_decrypt(struct aead_request *req) struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); - struct ablkcipher_request *abreq = &pctx->abreq; + struct skcipher_request *skreq = &pctx->skreq; struct scatterlist *dst; unsigned int authsize = crypto_aead_authsize(aead); unsigned int cryptlen = req->cryptlen; @@ -429,11 +429,11 @@ static int crypto_ccm_decrypt(struct aead_request *req) if (req->src != req->dst) dst = pctx->dst; - ablkcipher_request_set_tfm(abreq, ctx->ctr); - ablkcipher_request_set_callback(abreq, pctx->flags, - crypto_ccm_decrypt_done, req); - ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); - err = crypto_ablkcipher_decrypt(abreq); + skcipher_request_set_tfm(skreq, ctx->ctr); 
+ skcipher_request_set_callback(skreq, pctx->flags, + crypto_ccm_decrypt_done, req); + skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv); + err = crypto_skcipher_decrypt(skreq); if (err) return err; @@ -454,7 +454,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm) struct ccm_instance_ctx *ictx = aead_instance_ctx(inst); struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_cipher *cipher; - struct crypto_ablkcipher *ctr; + struct crypto_skcipher *ctr; unsigned long align; int err; @@ -462,7 +462,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm) if (IS_ERR(cipher)) return PTR_ERR(cipher); - ctr = crypto_spawn_skcipher(&ictx->ctr); + ctr = crypto_spawn_skcipher2(&ictx->ctr); err = PTR_ERR(ctr); if (IS_ERR(ctr)) goto err_free_cipher; @@ -475,7 +475,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm) crypto_aead_set_reqsize( tfm, align + sizeof(struct crypto_ccm_req_priv_ctx) + - crypto_ablkcipher_reqsize(ctr)); + crypto_skcipher_reqsize(ctr)); return 0; @@ -489,7 +489,7 @@ static void crypto_ccm_exit_tfm(struct crypto_aead *tfm) struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_cipher(ctx->cipher); - crypto_free_ablkcipher(ctx->ctr); + crypto_free_skcipher(ctx->ctr); } static void crypto_ccm_free(struct aead_instance *inst) @@ -509,7 +509,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, { struct crypto_attr_type *algt; struct aead_instance *inst; - struct crypto_alg *ctr; + struct skcipher_alg *ctr; struct crypto_alg *cipher; struct ccm_instance_ctx *ictx; int err; @@ -544,39 +544,40 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, goto err_free_inst; crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst)); - err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher2(&ictx->ctr, ctr_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_drop_cipher; - ctr = crypto_skcipher_spawn_alg(&ictx->ctr); + ctr = crypto_spawn_skcipher_alg(&ictx->ctr); /* Not a stream cipher? */ err = -EINVAL; - if (ctr->cra_blocksize != 1) + if (ctr->base.cra_blocksize != 1) goto err_drop_ctr; /* We want the real thing! 
*/ - if (ctr->cra_ablkcipher.ivsize != 16) + if (crypto_skcipher_alg_ivsize(ctr) != 16) goto err_drop_ctr; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "ccm_base(%s,%s)", ctr->cra_driver_name, + "ccm_base(%s,%s)", ctr->base.cra_driver_name, cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_ctr; memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); - inst->alg.base.cra_flags = ctr->cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (cipher->cra_priority + - ctr->cra_priority) / 2; + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = cipher->cra_alignmask | - ctr->cra_alignmask | + ctr->base.cra_alignmask | (__alignof__(u32) - 1); inst->alg.ivsize = 16; + inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr); inst->alg.maxauthsize = 16; inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx); inst->alg.init = crypto_ccm_init_tfm; @@ -863,6 +864,7 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.ivsize = 8; + inst->alg.chunksize = crypto_aead_alg_chunksize(alg); inst->alg.maxauthsize = 16; inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx); -- cgit v1.2.3-70-g09d2 From 16f37ecdd068884fc4f4177f83ac77910f4fc113 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:39 +0800 Subject: crypto: gcm - Use skcipher This patch converts gcm to use the new skcipher interface as opposed to ablkcipher. Signed-off-by: Herbert Xu --- crypto/gcm.c | 111 +++++++++++++++++++++++++++++++---------------------------- 1 file changed, 58 insertions(+), 53 deletions(-) (limited to 'crypto') diff --git a/crypto/gcm.c b/crypto/gcm.c index d9ea5f9c0574..70a892e87ccb 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -29,7 +29,7 @@ struct gcm_instance_ctx { }; struct crypto_gcm_ctx { - struct crypto_ablkcipher *ctr; + struct crypto_skcipher *ctr; struct crypto_ahash *ghash; }; @@ -50,7 +50,7 @@ struct crypto_rfc4543_instance_ctx { struct crypto_rfc4543_ctx { struct crypto_aead *child; - struct crypto_blkcipher *null; + struct crypto_skcipher *null; u8 nonce[4]; }; @@ -74,7 +74,7 @@ struct crypto_gcm_req_priv_ctx { struct crypto_gcm_ghash_ctx ghash_ctx; union { struct ahash_request ahreq; - struct ablkcipher_request abreq; + struct skcipher_request skreq; } u; }; @@ -114,7 +114,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, { struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_ahash *ghash = ctx->ghash; - struct crypto_ablkcipher *ctr = ctx->ctr; + struct crypto_skcipher *ctr = ctx->ctr; struct { be128 hash; u8 iv[8]; @@ -122,35 +122,35 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, struct crypto_gcm_setkey_result result; struct scatterlist sg[1]; - struct ablkcipher_request req; + struct skcipher_request req; } *data; int err; - crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); - crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & - CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(ctr, key, keylen); - crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & + crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(ctr, key, keylen); + crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) & 
CRYPTO_TFM_RES_MASK); if (err) return err; - data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr), + data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr), GFP_KERNEL); if (!data) return -ENOMEM; init_completion(&data->result.completion); sg_init_one(data->sg, &data->hash, sizeof(data->hash)); - ablkcipher_request_set_tfm(&data->req, ctr); - ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | - CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_gcm_setkey_done, - &data->result); - ablkcipher_request_set_crypt(&data->req, data->sg, data->sg, - sizeof(data->hash), data->iv); - - err = crypto_ablkcipher_encrypt(&data->req); + skcipher_request_set_tfm(&data->req, ctr); + skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | + CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_gcm_setkey_done, + &data->result); + skcipher_request_set_crypt(&data->req, data->sg, data->sg, + sizeof(data->hash), data->iv); + + err = crypto_skcipher_encrypt(&data->req); if (err == -EINPROGRESS || err == -EBUSY) { err = wait_for_completion_interruptible( &data->result.completion); @@ -223,13 +223,13 @@ static void crypto_gcm_init_crypt(struct aead_request *req, struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); - struct ablkcipher_request *ablk_req = &pctx->u.abreq; + struct skcipher_request *skreq = &pctx->u.skreq; struct scatterlist *dst; dst = req->src == req->dst ? pctx->src : pctx->dst; - ablkcipher_request_set_tfm(ablk_req, ctx->ctr); - ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, + skcipher_request_set_tfm(skreq, ctx->ctr); + skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + sizeof(pctx->auth_tag), pctx->iv); } @@ -494,14 +494,14 @@ out: static int crypto_gcm_encrypt(struct aead_request *req) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); - struct ablkcipher_request *abreq = &pctx->u.abreq; + struct skcipher_request *skreq = &pctx->u.skreq; u32 flags = aead_request_flags(req); crypto_gcm_init_common(req); crypto_gcm_init_crypt(req, req->cryptlen); - ablkcipher_request_set_callback(abreq, flags, gcm_encrypt_done, req); + skcipher_request_set_callback(skreq, flags, gcm_encrypt_done, req); - return crypto_ablkcipher_encrypt(abreq) ?: + return crypto_skcipher_encrypt(skreq) ?: gcm_encrypt_continue(req, flags); } @@ -533,12 +533,12 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err) static int gcm_dec_hash_continue(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); - struct ablkcipher_request *abreq = &pctx->u.abreq; + struct skcipher_request *skreq = &pctx->u.skreq; struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; crypto_gcm_init_crypt(req, gctx->cryptlen); - ablkcipher_request_set_callback(abreq, flags, gcm_decrypt_done, req); - return crypto_ablkcipher_decrypt(abreq) ?: crypto_gcm_verify(req); + skcipher_request_set_callback(skreq, flags, gcm_decrypt_done, req); + return crypto_skcipher_decrypt(skreq) ?: crypto_gcm_verify(req); } static int crypto_gcm_decrypt(struct aead_request *req) @@ -566,7 +566,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm) struct aead_instance *inst = aead_alg_instance(tfm); struct gcm_instance_ctx *ictx = aead_instance_ctx(inst); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); - struct crypto_ablkcipher *ctr; + struct crypto_skcipher *ctr; struct crypto_ahash *ghash; unsigned long align; int err; @@ -575,7 +575,7 @@ 
static int crypto_gcm_init_tfm(struct crypto_aead *tfm) if (IS_ERR(ghash)) return PTR_ERR(ghash); - ctr = crypto_spawn_skcipher(&ictx->ctr); + ctr = crypto_spawn_skcipher2(&ictx->ctr); err = PTR_ERR(ctr); if (IS_ERR(ctr)) goto err_free_hash; @@ -587,8 +587,8 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm) align &= ~(crypto_tfm_ctx_alignment() - 1); crypto_aead_set_reqsize(tfm, align + offsetof(struct crypto_gcm_req_priv_ctx, u) + - max(sizeof(struct ablkcipher_request) + - crypto_ablkcipher_reqsize(ctr), + max(sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(ctr), sizeof(struct ahash_request) + crypto_ahash_reqsize(ghash))); @@ -604,7 +604,7 @@ static void crypto_gcm_exit_tfm(struct crypto_aead *tfm) struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_ahash(ctx->ghash); - crypto_free_ablkcipher(ctx->ctr); + crypto_free_skcipher(ctx->ctr); } static void crypto_gcm_free(struct aead_instance *inst) @@ -624,7 +624,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, { struct crypto_attr_type *algt; struct aead_instance *inst; - struct crypto_alg *ctr; + struct skcipher_alg *ctr; struct crypto_alg *ghash_alg; struct hash_alg_common *ghash; struct gcm_instance_ctx *ctx; @@ -663,41 +663,42 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, goto err_drop_ghash; crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); - err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher2(&ctx->ctr, ctr_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_drop_ghash; - ctr = crypto_skcipher_spawn_alg(&ctx->ctr); + ctr = crypto_spawn_skcipher_alg(&ctx->ctr); /* We only support 16-byte blocks. */ - if (ctr->cra_ablkcipher.ivsize != 16) + if (crypto_skcipher_alg_ivsize(ctr) != 16) goto out_put_ctr; /* Not a stream cipher? */ err = -EINVAL; - if (ctr->cra_blocksize != 1) + if (ctr->base.cra_blocksize != 1) goto out_put_ctr; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "gcm_base(%s,%s)", ctr->cra_driver_name, + "gcm_base(%s,%s)", ctr->base.cra_driver_name, ghash_alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_put_ctr; memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); - inst->alg.base.cra_flags = (ghash->base.cra_flags | ctr->cra_flags) & - CRYPTO_ALG_ASYNC; + inst->alg.base.cra_flags = (ghash->base.cra_flags | + ctr->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (ghash->base.cra_priority + - ctr->cra_priority) / 2; + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = ghash->base.cra_alignmask | - ctr->cra_alignmask; + ctr->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); inst->alg.ivsize = 12; + inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr); inst->alg.maxauthsize = 16; inst->alg.init = crypto_gcm_init_tfm; inst->alg.exit = crypto_gcm_exit_tfm; @@ -982,6 +983,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx); inst->alg.ivsize = 8; + inst->alg.chunksize = crypto_aead_alg_chunksize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); inst->alg.init = crypto_rfc4106_init_tfm; @@ -1086,11 +1088,13 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) unsigned int authsize = crypto_aead_authsize(aead); unsigned int nbytes = req->assoclen + req->cryptlen - (enc ? 
0 : authsize); - struct blkcipher_desc desc = { - .tfm = ctx->null, - }; + SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null); + + skcipher_request_set_tfm(nreq, ctx->null); + skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL); + skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL); - return crypto_blkcipher_encrypt(&desc, req->dst, req->src, nbytes); + return crypto_skcipher_encrypt(nreq); } static int crypto_rfc4543_encrypt(struct aead_request *req) @@ -1110,7 +1114,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) struct crypto_aead_spawn *spawn = &ictx->aead; struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *aead; - struct crypto_blkcipher *null; + struct crypto_skcipher *null; unsigned long align; int err = 0; @@ -1118,7 +1122,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) if (IS_ERR(aead)) return PTR_ERR(aead); - null = crypto_get_default_null_skcipher(); + null = crypto_get_default_null_skcipher2(); err = PTR_ERR(null); if (IS_ERR(null)) goto err_free_aead; @@ -1146,7 +1150,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); - crypto_put_default_null_skcipher(); + crypto_put_default_null_skcipher2(); } static void crypto_rfc4543_free(struct aead_instance *inst) @@ -1221,6 +1225,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx); inst->alg.ivsize = 8; + inst->alg.chunksize = crypto_aead_alg_chunksize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); inst->alg.init = crypto_rfc4543_init_tfm; -- cgit v1.2.3-70-g09d2 From 1e1f006112b50640d7fc11851346f906d223df09 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:40 +0800 Subject: crypto: chacha20poly1305 - Use skcipher This patch converts chacha20poly1305 to use the new skcipher interface as opposed to ablkcipher. It also fixes a buglet where we may end up with an async poly1305 when the user asks for a sync algorithm. This shouldn't be a problem yet as there aren't any async implementations of poly1305 out there.
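For context on the buglet: crypto_requires_sync(type, mask) evaluates to CRYPTO_ALG_ASYNC exactly when the caller's mask has the ASYNC bit set while the type does not, i.e. when a synchronous implementation was demanded, and to 0 otherwise. The hunk below folds that bit into the mask passed to crypto_find_alg() for poly1305, so a sync-only request can no longer be satisfied by an async hash. The same convention is visible from the allocation side; a minimal sketch, assuming a kernel context (the algorithm name "chacha20" is just an example, not taken from this series):

#include <crypto/skcipher.h>

static struct crypto_skcipher *alloc_sync_skcipher_example(void)
{
	/* type = 0, mask = CRYPTO_ALG_ASYNC: only implementations with
	 * the CRYPTO_ALG_ASYNC flag clear can match, so the returned
	 * tfm is guaranteed to complete synchronously. */
	return crypto_alloc_skcipher("chacha20", 0, CRYPTO_ALG_ASYNC);
}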
Signed-off-by: Herbert Xu --- crypto/chacha20poly1305.c | 89 ++++++++++++++++++++++++----------------------- 1 file changed, 46 insertions(+), 43 deletions(-) (limited to 'crypto') diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 7b6b935cef23..e899ef51dc8e 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -31,7 +31,7 @@ struct chachapoly_instance_ctx { }; struct chachapoly_ctx { - struct crypto_ablkcipher *chacha; + struct crypto_skcipher *chacha; struct crypto_ahash *poly; /* key bytes we use for the ChaCha20 IV */ unsigned int saltlen; @@ -53,7 +53,7 @@ struct poly_req { struct chacha_req { u8 iv[CHACHA20_IV_SIZE]; struct scatterlist src[1]; - struct ablkcipher_request req; /* must be last member */ + struct skcipher_request req; /* must be last member */ }; struct chachapoly_req_ctx { @@ -144,12 +144,12 @@ static int chacha_decrypt(struct aead_request *req) dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); } - ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), - chacha_decrypt_done, req); - ablkcipher_request_set_tfm(&creq->req, ctx->chacha); - ablkcipher_request_set_crypt(&creq->req, src, dst, - rctx->cryptlen, creq->iv); - err = crypto_ablkcipher_decrypt(&creq->req); + skcipher_request_set_callback(&creq->req, aead_request_flags(req), + chacha_decrypt_done, req); + skcipher_request_set_tfm(&creq->req, ctx->chacha); + skcipher_request_set_crypt(&creq->req, src, dst, + rctx->cryptlen, creq->iv); + err = crypto_skcipher_decrypt(&creq->req); if (err) return err; @@ -393,13 +393,13 @@ static int poly_genkey(struct aead_request *req) chacha_iv(creq->iv, req, 0); - ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), - poly_genkey_done, req); - ablkcipher_request_set_tfm(&creq->req, ctx->chacha); - ablkcipher_request_set_crypt(&creq->req, creq->src, creq->src, - POLY1305_KEY_SIZE, creq->iv); + skcipher_request_set_callback(&creq->req, aead_request_flags(req), + poly_genkey_done, req); + skcipher_request_set_tfm(&creq->req, ctx->chacha); + skcipher_request_set_crypt(&creq->req, creq->src, creq->src, + POLY1305_KEY_SIZE, creq->iv); - err = crypto_ablkcipher_decrypt(&creq->req); + err = crypto_skcipher_decrypt(&creq->req); if (err) return err; @@ -433,12 +433,12 @@ static int chacha_encrypt(struct aead_request *req) dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); } - ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), - chacha_encrypt_done, req); - ablkcipher_request_set_tfm(&creq->req, ctx->chacha); - ablkcipher_request_set_crypt(&creq->req, src, dst, - req->cryptlen, creq->iv); - err = crypto_ablkcipher_encrypt(&creq->req); + skcipher_request_set_callback(&creq->req, aead_request_flags(req), + chacha_encrypt_done, req); + skcipher_request_set_tfm(&creq->req, ctx->chacha); + skcipher_request_set_crypt(&creq->req, src, dst, + req->cryptlen, creq->iv); + err = crypto_skcipher_encrypt(&creq->req); if (err) return err; @@ -500,13 +500,13 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, keylen -= ctx->saltlen; memcpy(ctx->salt, key + keylen, ctx->saltlen); - crypto_ablkcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK); - crypto_ablkcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) & - CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) & + CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(ctx->chacha, key, keylen); - 
crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctx->chacha) & - CRYPTO_TFM_RES_MASK); + err = crypto_skcipher_setkey(ctx->chacha, key, keylen); + crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctx->chacha) & + CRYPTO_TFM_RES_MASK); return err; } @@ -524,7 +524,7 @@ static int chachapoly_init(struct crypto_aead *tfm) struct aead_instance *inst = aead_alg_instance(tfm); struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst); struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); - struct crypto_ablkcipher *chacha; + struct crypto_skcipher *chacha; struct crypto_ahash *poly; unsigned long align; @@ -532,7 +532,7 @@ static int chachapoly_init(struct crypto_aead *tfm) if (IS_ERR(poly)) return PTR_ERR(poly); - chacha = crypto_spawn_skcipher(&ictx->chacha); + chacha = crypto_spawn_skcipher2(&ictx->chacha); if (IS_ERR(chacha)) { crypto_free_ahash(poly); return PTR_ERR(chacha); @@ -548,8 +548,8 @@ static int chachapoly_init(struct crypto_aead *tfm) tfm, align + offsetof(struct chachapoly_req_ctx, u) + max(offsetof(struct chacha_req, req) + - sizeof(struct ablkcipher_request) + - crypto_ablkcipher_reqsize(chacha), + sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(chacha), offsetof(struct poly_req, req) + sizeof(struct ahash_request) + crypto_ahash_reqsize(poly))); @@ -562,7 +562,7 @@ static void chachapoly_exit(struct crypto_aead *tfm) struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_ahash(ctx->poly); - crypto_free_ablkcipher(ctx->chacha); + crypto_free_skcipher(ctx->chacha); } static void chachapoly_free(struct aead_instance *inst) @@ -579,7 +579,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, { struct crypto_attr_type *algt; struct aead_instance *inst; - struct crypto_alg *chacha; + struct skcipher_alg *chacha; struct crypto_alg *poly; struct hash_alg_common *poly_hash; struct chachapoly_instance_ctx *ctx; @@ -605,7 +605,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, poly = crypto_find_alg(poly_name, &crypto_ahash_type, CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK); + CRYPTO_ALG_TYPE_AHASH_MASK | + crypto_requires_sync(algt->type, + algt->mask)); if (IS_ERR(poly)) return PTR_ERR(poly); @@ -623,20 +625,20 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, goto err_free_inst; crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst)); - err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); + err = crypto_grab_skcipher2(&ctx->chacha, chacha_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_drop_poly; - chacha = crypto_skcipher_spawn_alg(&ctx->chacha); + chacha = crypto_spawn_skcipher_alg(&ctx->chacha); err = -EINVAL; /* Need 16-byte IV size, including Initial Block Counter value */ - if (chacha->cra_ablkcipher.ivsize != CHACHA20_IV_SIZE) + if (crypto_skcipher_alg_ivsize(chacha) != CHACHA20_IV_SIZE) goto out_drop_chacha; /* Not a stream cipher? 
*/ - if (chacha->cra_blocksize != 1) + if (chacha->base.cra_blocksize != 1) goto out_drop_chacha; err = -ENAMETOOLONG; @@ -645,20 +647,21 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, poly_name) >= CRYPTO_MAX_ALG_NAME) goto out_drop_chacha; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "%s(%s,%s)", name, chacha->cra_driver_name, + "%s(%s,%s)", name, chacha->base.cra_driver_name, poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_drop_chacha; - inst->alg.base.cra_flags = (chacha->cra_flags | poly->cra_flags) & + inst->alg.base.cra_flags = (chacha->base.cra_flags | poly->cra_flags) & CRYPTO_ALG_ASYNC; - inst->alg.base.cra_priority = (chacha->cra_priority + + inst->alg.base.cra_priority = (chacha->base.cra_priority + poly->cra_priority) / 2; inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = chacha->cra_alignmask | + inst->alg.base.cra_alignmask = chacha->base.cra_alignmask | poly->cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen; inst->alg.ivsize = ivsize; + inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha); inst->alg.maxauthsize = POLY1305_DIGEST_SIZE; inst->alg.init = chachapoly_init; inst->alg.exit = chachapoly_exit; -- cgit v1.2.3-70-g09d2 From ca0494c093371b1ca3cd4c8e14e1e7a19e6e21b6 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:42 +0800 Subject: crypto: aead - Add skcipher null for IV generators This patch adds an skcipher null object alongside the existing null blkcipher so that IV generators using it can switch over to skcipher. Signed-off-by: Herbert Xu --- crypto/aead.c | 10 +++++++++- include/crypto/internal/geniv.h | 1 + 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/aead.c b/crypto/aead.c index b155cbc3a0dd..a5d9a83f90e4 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -294,10 +294,15 @@ int aead_init_geniv(struct crypto_aead *aead) if (err) goto out; + ctx->sknull = crypto_get_default_null_skcipher2(); + err = PTR_ERR(ctx->sknull); + if (IS_ERR(ctx->sknull)) + goto out; + ctx->null = crypto_get_default_null_skcipher(); err = PTR_ERR(ctx->null); if (IS_ERR(ctx->null)) - goto out; + goto drop_sknull; child = crypto_spawn_aead(aead_instance_ctx(inst)); err = PTR_ERR(child); @@ -315,6 +320,8 @@ out: drop_null: crypto_put_default_null_skcipher(); +drop_sknull: + crypto_put_default_null_skcipher2(); goto out; } EXPORT_SYMBOL_GPL(aead_init_geniv); @@ -325,6 +332,7 @@ void aead_exit_geniv(struct crypto_aead *tfm) crypto_free_aead(ctx->child); crypto_put_default_null_skcipher(); + crypto_put_default_null_skcipher2(); } EXPORT_SYMBOL_GPL(aead_exit_geniv); diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 59333635e712..e8447c9c14e6 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -21,6 +21,7 @@ struct aead_geniv_ctx { spinlock_t lock; struct crypto_aead *child; struct crypto_blkcipher *null; + struct crypto_skcipher *sknull; u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); }; -- cgit v1.2.3-70-g09d2 From 0e8bff47f6d3e863bf1829e020000c249c59ecd2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:43 +0800 Subject: crypto: echainiv - Use skcipher This patch replaces use of the obsolete blkcipher with skcipher. 
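The conversion pattern used below (and in seqiv next) replaces the on-stack blkcipher_desc with SKCIPHER_REQUEST_ON_STACK. Condensed into a helper for clarity, this is a sketch under the assumption that the tfm is the synchronous default null skcipher, which is what makes an on-stack request safe; the helper name is hypothetical:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Copy src to dst through the null cipher; a condensed form of the
 * hunk below. */
static int null_skcipher_copy(struct crypto_skcipher *sknull,
			      struct scatterlist *src,
			      struct scatterlist *dst,
			      unsigned int len, u32 flags)
{
	/* On-stack requests must only be used with synchronous tfms;
	 * the default null skcipher is allocated with CRYPTO_ALG_ASYNC
	 * masked out, so no completion callback is ever invoked. */
	SKCIPHER_REQUEST_ON_STACK(nreq, sknull);

	skcipher_request_set_tfm(nreq, sknull);
	skcipher_request_set_callback(nreq, flags, NULL, NULL);
	/* ecb(cipher_null) "encryption" is a plain copy */
	skcipher_request_set_crypt(nreq, src, dst, len, NULL);

	return crypto_skcipher_encrypt(nreq);
}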
Signed-off-by: Herbert Xu --- crypto/echainiv.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/echainiv.c b/crypto/echainiv.c index b96a84560b67..1b01fe98e91f 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -112,13 +113,16 @@ static int echainiv_encrypt(struct aead_request *req) info = req->iv; if (req->src != req->dst) { - struct blkcipher_desc desc = { - .tfm = ctx->null, - }; + SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); - err = crypto_blkcipher_encrypt( - &desc, req->dst, req->src, - req->assoclen + req->cryptlen); + skcipher_request_set_tfm(nreq, ctx->sknull); + skcipher_request_set_callback(nreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(nreq, req->src, req->dst, + req->assoclen + req->cryptlen, + NULL); + + err = crypto_skcipher_encrypt(nreq); if (err) return err; } -- cgit v1.2.3-70-g09d2 From ef22871f20e6da987e32719a4ba5e3a9b7d81ae8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:45 +0800 Subject: crypto: seqiv - Use skcipher This patch replaces use of the obsolete blkcipher with skcipher. Signed-off-by: Herbert Xu --- crypto/seqiv.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 15a749a5cab7..a859b3ae239d 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -165,12 +165,16 @@ static int seqiv_aead_encrypt(struct aead_request *req) info = req->iv; if (req->src != req->dst) { - struct blkcipher_desc desc = { - .tfm = ctx->null, - }; + SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); - err = crypto_blkcipher_encrypt(&desc, req->dst, req->src, - req->assoclen + req->cryptlen); + skcipher_request_set_tfm(nreq, ctx->sknull); + skcipher_request_set_callback(nreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(nreq, req->src, req->dst, + req->assoclen + req->cryptlen, + NULL); + + err = crypto_skcipher_encrypt(nreq); if (err) return err; } -- cgit v1.2.3-70-g09d2 From da721302a78807e5da2902677fbe116fe052068f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:46 +0800 Subject: crypto: aead - Remove blkcipher null for IV generators The blkcipher null object is no longer used and can now be removed. 
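After this cleanup, aead_init_geniv() holds exactly one reference on the shared null skcipher. The get/put pair is refcounted under a mutex (see crypto_null.c below), so the rule for any user is simply that every successful get must eventually be balanced by a put. A minimal sketch of the paired lifetime, with the ctx layout as in geniv.h and hypothetical function names:

#include <crypto/internal/geniv.h>
#include <crypto/null.h>

static int example_geniv_init(struct aead_geniv_ctx *ctx)
{
	ctx->sknull = crypto_get_default_null_skcipher2();
	if (IS_ERR(ctx->sknull))
		return PTR_ERR(ctx->sknull);

	return 0;
}

static void example_geniv_exit(struct aead_geniv_ctx *ctx)
{
	/* Drops one reference; the shared tfm is freed only when the
	 * global refcount reaches zero. */
	crypto_put_default_null_skcipher2();
}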
Signed-off-by: Herbert Xu --- crypto/aead.c | 8 -------- include/crypto/internal/geniv.h | 1 - 2 files changed, 9 deletions(-) (limited to 'crypto') diff --git a/crypto/aead.c b/crypto/aead.c index a5d9a83f90e4..3f5c5ff004ab 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -299,11 +299,6 @@ int aead_init_geniv(struct crypto_aead *aead) if (IS_ERR(ctx->sknull)) goto out; - ctx->null = crypto_get_default_null_skcipher(); - err = PTR_ERR(ctx->null); - if (IS_ERR(ctx->null)) - goto drop_sknull; - child = crypto_spawn_aead(aead_instance_ctx(inst)); err = PTR_ERR(child); if (IS_ERR(child)) @@ -319,8 +314,6 @@ out: return err; drop_null: - crypto_put_default_null_skcipher(); -drop_sknull: crypto_put_default_null_skcipher2(); goto out; } @@ -331,7 +324,6 @@ void aead_exit_geniv(struct crypto_aead *tfm) struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); - crypto_put_default_null_skcipher(); crypto_put_default_null_skcipher2(); } EXPORT_SYMBOL_GPL(aead_exit_geniv); diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index e8447c9c14e6..2bcfb931bc5b 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -20,7 +20,6 @@ struct aead_geniv_ctx { spinlock_t lock; struct crypto_aead *child; - struct crypto_blkcipher *null; struct crypto_skcipher *sknull; u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); }; -- cgit v1.2.3-70-g09d2 From 499a66e6b689b13d1a4108bec5d7dcdc829a27a8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:47 +0800 Subject: crypto: null - Remove default null blkcipher The default null blkcipher is no longer used and can now be removed. Signed-off-by: Herbert Xu --- crypto/crypto_null.c | 49 ++++++------------------------------------------- include/crypto/null.h | 14 +++++++++++--- 2 files changed, 17 insertions(+), 46 deletions(-) (limited to 'crypto') diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index c3f683910d07..20ff2c746e0b 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -26,10 +26,8 @@ #include static DEFINE_MUTEX(crypto_default_null_skcipher_lock); -static struct crypto_blkcipher *crypto_default_null_skcipher; +static struct crypto_skcipher *crypto_default_null_skcipher; static int crypto_default_null_skcipher_refcnt; -static struct crypto_skcipher *crypto_default_null_skcipher2; -static int crypto_default_null_skcipher2_refcnt; static int null_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) @@ -155,15 +153,16 @@ MODULE_ALIAS_CRYPTO("compress_null"); MODULE_ALIAS_CRYPTO("digest_null"); MODULE_ALIAS_CRYPTO("cipher_null"); -struct crypto_blkcipher *crypto_get_default_null_skcipher(void) +struct crypto_skcipher *crypto_get_default_null_skcipher(void) { - struct crypto_blkcipher *tfm; + struct crypto_skcipher *tfm; mutex_lock(&crypto_default_null_skcipher_lock); tfm = crypto_default_null_skcipher; if (!tfm) { - tfm = crypto_alloc_blkcipher("ecb(cipher_null)", 0, 0); + tfm = crypto_alloc_skcipher("ecb(cipher_null)", + 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) goto unlock; @@ -183,49 +182,13 @@ void crypto_put_default_null_skcipher(void) { mutex_lock(&crypto_default_null_skcipher_lock); if (!--crypto_default_null_skcipher_refcnt) { - crypto_free_blkcipher(crypto_default_null_skcipher); + crypto_free_skcipher(crypto_default_null_skcipher); crypto_default_null_skcipher = NULL; } mutex_unlock(&crypto_default_null_skcipher_lock); } EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher); -struct crypto_skcipher 
*crypto_get_default_null_skcipher2(void) -{ - struct crypto_skcipher *tfm; - - mutex_lock(&crypto_default_null_skcipher_lock); - tfm = crypto_default_null_skcipher2; - - if (!tfm) { - tfm = crypto_alloc_skcipher("ecb(cipher_null)", - 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - goto unlock; - - crypto_default_null_skcipher2 = tfm; - } - - crypto_default_null_skcipher2_refcnt++; - -unlock: - mutex_unlock(&crypto_default_null_skcipher_lock); - - return tfm; -} -EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher2); - -void crypto_put_default_null_skcipher2(void) -{ - mutex_lock(&crypto_default_null_skcipher_lock); - if (!--crypto_default_null_skcipher2_refcnt) { - crypto_free_skcipher(crypto_default_null_skcipher2); - crypto_default_null_skcipher2 = NULL; - } - mutex_unlock(&crypto_default_null_skcipher_lock); -} -EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher2); - static int __init crypto_null_mod_init(void) { int ret = 0; diff --git a/include/crypto/null.h b/include/crypto/null.h index dda87cbb62b2..3f0c59fb0a61 100644 --- a/include/crypto/null.h +++ b/include/crypto/null.h @@ -8,9 +8,17 @@ #define NULL_DIGEST_SIZE 0 #define NULL_IV_SIZE 0 -struct crypto_blkcipher *crypto_get_default_null_skcipher(void); +struct crypto_skcipher *crypto_get_default_null_skcipher(void); void crypto_put_default_null_skcipher(void); -struct crypto_skcipher *crypto_get_default_null_skcipher2(void); -void crypto_put_default_null_skcipher2(void); + +static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void) +{ + return crypto_get_default_null_skcipher(); +} + +static inline void crypto_put_default_null_skcipher2(void) +{ + crypto_put_default_null_skcipher(); +} #endif -- cgit v1.2.3-70-g09d2 From 0605c41cc53ca13775d202de0de33864a46162ba Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:48 +0800 Subject: crypto: cts - Convert to skcipher This patch converts cts over to the skcipher interface. It also optimises the implementation to use one CBC operation for all but the last block, which is then processed separately. Signed-off-by: Herbert Xu --- crypto/cts.c | 495 ++++++++++++++++++++++++++++++++++------------------------- 1 file changed, 285 insertions(+), 210 deletions(-) (limited to 'crypto') diff --git a/crypto/cts.c b/crypto/cts.c index e467ec0acf9f..51976187b2bf 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -40,7 +40,7 @@ * rfc3962 includes errata information in its Appendix A. 
*/ -#include +#include #include #include #include @@ -51,289 +51,364 @@ #include struct crypto_cts_ctx { - struct crypto_blkcipher *child; + struct crypto_skcipher *child; }; -static int crypto_cts_setkey(struct crypto_tfm *parent, const u8 *key, - unsigned int keylen) +struct crypto_cts_reqctx { + struct scatterlist sg[2]; + unsigned offset; + struct skcipher_request subreq; +}; + +static inline u8 *crypto_cts_reqctx_space(struct skcipher_request *req) { - struct crypto_cts_ctx *ctx = crypto_tfm_ctx(parent); - struct crypto_blkcipher *child = ctx->child; - int err; + struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *child = ctx->child; - crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_blkcipher_setkey(child, key, keylen); - crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; + return PTR_ALIGN((u8 *)(rctx + 1) + crypto_skcipher_reqsize(child), + crypto_skcipher_alignmask(tfm) + 1); } -static int cts_cbc_encrypt(struct crypto_cts_ctx *ctx, - struct blkcipher_desc *desc, - struct scatterlist *dst, - struct scatterlist *src, - unsigned int offset, - unsigned int nbytes) +static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key, + unsigned int keylen) { - int bsize = crypto_blkcipher_blocksize(desc->tfm); - u8 tmp[bsize], tmp2[bsize]; - struct blkcipher_desc lcldesc; - struct scatterlist sgsrc[1], sgdst[1]; - int lastn = nbytes - bsize; - u8 iv[bsize]; - u8 s[bsize * 2], d[bsize * 2]; + struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(parent); + struct crypto_skcipher *child = ctx->child; int err; - if (lastn < 0) - return -EINVAL; + crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(child, key, keylen); + crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); + return err; +} - sg_init_table(sgsrc, 1); - sg_init_table(sgdst, 1); +static void cts_cbc_crypt_done(struct crypto_async_request *areq, int err) +{ + struct skcipher_request *req = areq->data; - memset(s, 0, sizeof(s)); - scatterwalk_map_and_copy(s, src, offset, nbytes, 0); + if (err == -EINPROGRESS) + return; - memcpy(iv, desc->info, bsize); + skcipher_request_complete(req, err); +} - lcldesc.tfm = ctx->child; - lcldesc.info = iv; - lcldesc.flags = desc->flags; +static int cts_cbc_encrypt(struct skcipher_request *req) +{ + struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_request *subreq = &rctx->subreq; + int bsize = crypto_skcipher_blocksize(tfm); + u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32)))); + struct scatterlist *sg; + unsigned int offset; + int lastn; + + offset = rctx->offset; + lastn = req->cryptlen - offset; + + sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize); + scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0); + + memset(d, 0, bsize); + scatterwalk_map_and_copy(d, req->src, offset, lastn, 0); + + scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1); + memzero_explicit(d, sizeof(d)); + + skcipher_request_set_callback(subreq, req->base.flags & + CRYPTO_TFM_REQ_MAY_BACKLOG, + cts_cbc_crypt_done, req); + 
skcipher_request_set_crypt(subreq, sg, sg, bsize, req->iv); + return crypto_skcipher_encrypt(subreq); +} - sg_set_buf(&sgsrc[0], s, bsize); - sg_set_buf(&sgdst[0], tmp, bsize); - err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); +static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err) +{ + struct skcipher_request *req = areq->data; - memcpy(d + bsize, tmp, lastn); + if (err) + goto out; - lcldesc.info = tmp; + err = cts_cbc_encrypt(req); + if (err == -EINPROGRESS || + (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return; - sg_set_buf(&sgsrc[0], s + bsize, bsize); - sg_set_buf(&sgdst[0], tmp2, bsize); - err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); +out: + skcipher_request_complete(req, err); +} - memcpy(d, tmp2, bsize); +static int crypto_cts_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req); + struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_request *subreq = &rctx->subreq; + int bsize = crypto_skcipher_blocksize(tfm); + unsigned int nbytes = req->cryptlen; + int cbc_blocks = (nbytes + bsize - 1) / bsize - 1; + unsigned int offset; + + skcipher_request_set_tfm(subreq, ctx->child); + + if (cbc_blocks <= 0) { + skcipher_request_set_callback(subreq, req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes, + req->iv); + return crypto_skcipher_encrypt(subreq); + } - scatterwalk_map_and_copy(d, dst, offset, nbytes, 1); + offset = cbc_blocks * bsize; + rctx->offset = offset; - memcpy(desc->info, tmp2, bsize); + skcipher_request_set_callback(subreq, req->base.flags, + crypto_cts_encrypt_done, req); + skcipher_request_set_crypt(subreq, req->src, req->dst, + offset, req->iv); - return err; + return crypto_skcipher_encrypt(subreq) ?: + cts_cbc_encrypt(req); } -static int crypto_cts_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cts_cbc_decrypt(struct skcipher_request *req) { - struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - int bsize = crypto_blkcipher_blocksize(desc->tfm); - int tot_blocks = (nbytes + bsize - 1) / bsize; - int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0; - struct blkcipher_desc lcldesc; - int err; + struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_request *subreq = &rctx->subreq; + int bsize = crypto_skcipher_blocksize(tfm); + u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32)))); + struct scatterlist *sg; + unsigned int offset; + u8 *space; + int lastn; + + offset = rctx->offset; + lastn = req->cryptlen - offset; + + sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize); + + /* 1. Decrypt Cn-1 (s) to create Dn */ + scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0); + space = crypto_cts_reqctx_space(req); + crypto_xor(d + bsize, space, bsize); + /* 2. Pad Cn with zeros at the end to create C of length BB */ + memset(d, 0, bsize); + scatterwalk_map_and_copy(d, req->src, offset, lastn, 0); + /* 3. Exclusive-or Dn with C to create Xn */ + /* 4. Select the first Ln bytes of Xn to create Pn */ + crypto_xor(d + bsize, d, lastn); + + /* 5. Append the tail (BB - Ln) bytes of Xn to Cn to create En */ + memcpy(d + lastn, d + bsize + lastn, bsize - lastn); + /* 6. 
Decrypt En to create Pn-1 */ - lcldesc.tfm = ctx->child; - lcldesc.info = desc->info; - lcldesc.flags = desc->flags; - - if (tot_blocks == 1) { - err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, bsize); - } else if (nbytes <= bsize * 2) { - err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes); - } else { - /* do normal function for tot_blocks - 2 */ - err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, - cbc_blocks * bsize); - if (err == 0) { - /* do cts for final two blocks */ - err = cts_cbc_encrypt(ctx, desc, dst, src, - cbc_blocks * bsize, - nbytes - (cbc_blocks * bsize)); - } - } + scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1); + memzero_explicit(d, sizeof(d)); - return err; + skcipher_request_set_callback(subreq, req->base.flags & + CRYPTO_TFM_REQ_MAY_BACKLOG, + cts_cbc_crypt_done, req); + + skcipher_request_set_crypt(subreq, sg, sg, bsize, space); + return crypto_skcipher_decrypt(subreq); } -static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx, - struct blkcipher_desc *desc, - struct scatterlist *dst, - struct scatterlist *src, - unsigned int offset, - unsigned int nbytes) +static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err) { - int bsize = crypto_blkcipher_blocksize(desc->tfm); - u8 tmp[bsize]; - struct blkcipher_desc lcldesc; - struct scatterlist sgsrc[1], sgdst[1]; - int lastn = nbytes - bsize; - u8 iv[bsize]; - u8 s[bsize * 2], d[bsize * 2]; - int err; - - if (lastn < 0) - return -EINVAL; + struct skcipher_request *req = areq->data; - sg_init_table(sgsrc, 1); - sg_init_table(sgdst, 1); + if (err) + goto out; - scatterwalk_map_and_copy(s, src, offset, nbytes, 0); + err = cts_cbc_decrypt(req); + if (err == -EINPROGRESS || + (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return; - lcldesc.tfm = ctx->child; - lcldesc.info = iv; - lcldesc.flags = desc->flags; +out: + skcipher_request_complete(req, err); +} - /* 1. Decrypt Cn-1 (s) to create Dn (tmp)*/ - memset(iv, 0, sizeof(iv)); - sg_set_buf(&sgsrc[0], s, bsize); - sg_set_buf(&sgdst[0], tmp, bsize); - err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize); - if (err) - return err; - /* 2. Pad Cn with zeros at the end to create C of length BB */ - memset(iv, 0, sizeof(iv)); - memcpy(iv, s + bsize, lastn); - /* 3. Exclusive-or Dn (tmp) with C (iv) to create Xn (tmp) */ - crypto_xor(tmp, iv, bsize); - /* 4. Select the first Ln bytes of Xn (tmp) to create Pn */ - memcpy(d + bsize, tmp, lastn); - - /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */ - memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn); - /* 6. 
Decrypt En to create Pn-1 */ - memzero_explicit(iv, sizeof(iv)); +static int crypto_cts_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req); + struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_request *subreq = &rctx->subreq; + int bsize = crypto_skcipher_blocksize(tfm); + unsigned int nbytes = req->cryptlen; + int cbc_blocks = (nbytes + bsize - 1) / bsize - 1; + unsigned int offset; + u8 *space; + + skcipher_request_set_tfm(subreq, ctx->child); + + if (cbc_blocks <= 0) { + skcipher_request_set_callback(subreq, req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes, + req->iv); + return crypto_skcipher_decrypt(subreq); + } - sg_set_buf(&sgsrc[0], s + bsize, bsize); - sg_set_buf(&sgdst[0], d, bsize); - err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize); + skcipher_request_set_callback(subreq, req->base.flags, + crypto_cts_decrypt_done, req); - /* XOR with previous block */ - crypto_xor(d, desc->info, bsize); + space = crypto_cts_reqctx_space(req); - scatterwalk_map_and_copy(d, dst, offset, nbytes, 1); + offset = cbc_blocks * bsize; + rctx->offset = offset; - memcpy(desc->info, s, bsize); - return err; -} + if (cbc_blocks <= 1) + memcpy(space, req->iv, bsize); + else + scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize, + bsize, 0); -static int crypto_cts_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - int bsize = crypto_blkcipher_blocksize(desc->tfm); - int tot_blocks = (nbytes + bsize - 1) / bsize; - int cbc_blocks = tot_blocks > 2 ? 
tot_blocks - 2 : 0; - struct blkcipher_desc lcldesc; - int err; + skcipher_request_set_crypt(subreq, req->src, req->dst, + offset, req->iv); - lcldesc.tfm = ctx->child; - lcldesc.info = desc->info; - lcldesc.flags = desc->flags; - - if (tot_blocks == 1) { - err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, bsize); - } else if (nbytes <= bsize * 2) { - err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes); - } else { - /* do normal function for tot_blocks - 2 */ - err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, - cbc_blocks * bsize); - if (err == 0) { - /* do cts for final two blocks */ - err = cts_cbc_decrypt(ctx, desc, dst, src, - cbc_blocks * bsize, - nbytes - (cbc_blocks * bsize)); - } - } - return err; + return crypto_skcipher_decrypt(subreq) ?: + cts_cbc_decrypt(req); } -static int crypto_cts_init_tfm(struct crypto_tfm *tfm) +static int crypto_cts_init_tfm(struct crypto_skcipher *tfm) { - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_blkcipher *cipher; - - cipher = crypto_spawn_blkcipher(spawn); + struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); + struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *cipher; + unsigned reqsize; + unsigned bsize; + unsigned align; + + cipher = crypto_spawn_skcipher2(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; + + align = crypto_skcipher_alignmask(tfm); + bsize = crypto_skcipher_blocksize(cipher); + reqsize = ALIGN(sizeof(struct crypto_cts_reqctx) + + crypto_skcipher_reqsize(cipher), + crypto_tfm_ctx_alignment()) + + (align & ~(crypto_tfm_ctx_alignment() - 1)) + bsize; + + crypto_skcipher_set_reqsize(tfm, reqsize); + return 0; } -static void crypto_cts_exit_tfm(struct crypto_tfm *tfm) +static void crypto_cts_exit_tfm(struct crypto_skcipher *tfm) { - struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); - crypto_free_blkcipher(ctx->child); + struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm); + + crypto_free_skcipher(ctx->child); } -static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb) +static void crypto_cts_free(struct skcipher_instance *inst) { - struct crypto_instance *inst; - struct crypto_alg *alg; + crypto_drop_skcipher(skcipher_instance_ctx(inst)); + kfree(inst); +} + +static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_skcipher_spawn *spawn; + struct skcipher_instance *inst; + struct crypto_attr_type *algt; + struct skcipher_alg *alg; + const char *cipher_name; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) + return -EINVAL; + + cipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(cipher_name)) + return PTR_ERR(cipher_name); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + spawn = skcipher_instance_ctx(inst); + + crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); + err = crypto_grab_skcipher2(spawn, cipher_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) - return ERR_PTR(err); + goto err_free_inst; - alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER, - CRYPTO_ALG_TYPE_MASK); - if (IS_ERR(alg)) - return ERR_CAST(alg); + alg = 
crypto_spawn_skcipher_alg(spawn); - inst = ERR_PTR(-EINVAL); - if (!is_power_of_2(alg->cra_blocksize)) - goto out_put_alg; + err = -EINVAL; + if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize) + goto err_drop_spawn; - if (strncmp(alg->cra_name, "cbc(", 4)) - goto out_put_alg; + if (strncmp(alg->base.cra_name, "cbc(", 4)) + goto err_drop_spawn; - inst = crypto_alloc_instance("cts", alg); - if (IS_ERR(inst)) - goto out_put_alg; + err = crypto_inst_setname(skcipher_crypto_instance(inst), "cts", + &alg->base); + if (err) + goto err_drop_spawn; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = alg->cra_blocksize; - inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_blkcipher_type; + inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = alg->base.cra_priority; + inst->alg.base.cra_blocksize = alg->base.cra_blocksize; + inst->alg.base.cra_alignmask = alg->base.cra_alignmask; /* We access the data as u32s when xoring. */ - inst->alg.cra_alignmask |= __alignof__(u32) - 1; + inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; - inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; - inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; - inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; + inst->alg.ivsize = alg->base.cra_blocksize; + inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); + inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); + inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); - inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx); + inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx); - inst->alg.cra_init = crypto_cts_init_tfm; - inst->alg.cra_exit = crypto_cts_exit_tfm; + inst->alg.init = crypto_cts_init_tfm; + inst->alg.exit = crypto_cts_exit_tfm; - inst->alg.cra_blkcipher.setkey = crypto_cts_setkey; - inst->alg.cra_blkcipher.encrypt = crypto_cts_encrypt; - inst->alg.cra_blkcipher.decrypt = crypto_cts_decrypt; + inst->alg.setkey = crypto_cts_setkey; + inst->alg.encrypt = crypto_cts_encrypt; + inst->alg.decrypt = crypto_cts_decrypt; -out_put_alg: - crypto_mod_put(alg); - return inst; -} + inst->free = crypto_cts_free; -static void crypto_cts_free(struct crypto_instance *inst) -{ - crypto_drop_spawn(crypto_instance_ctx(inst)); + err = skcipher_register_instance(tmpl, inst); + if (err) + goto err_drop_spawn; + +out: + return err; + +err_drop_spawn: + crypto_drop_skcipher(spawn); +err_free_inst: kfree(inst); + goto out; } static struct crypto_template crypto_cts_tmpl = { .name = "cts", - .alloc = crypto_cts_alloc, - .free = crypto_cts_free, + .create = crypto_cts_create, .module = THIS_MODULE, }; -- cgit v1.2.3-70-g09d2 From 6cf80a296575723aed6ce6c695581540202bfc6b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:49 +0800 Subject: crypto: user - Remove crypto_lookup_skcipher call As there are no more kernel users of built-in IV generators we can remove the special lookup for skciphers. 
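With the givcipher special case gone, crypto_add_alg() resolves every name through the generic path. Note that crypto_alg_mod_lookup() may trigger module auto-loading to satisfy the name and returns a counted reference that must be dropped with crypto_mod_put(). A sketch of the resulting calling convention (both functions are declared in the private crypto/internal.h header, which crypto_user.c already includes; the wrapper name is hypothetical):

static int add_alg_lookup_example(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	/* may request_module() behind the scenes; returns a ref */
	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	/* ... examine alg->cra_flags, alg->cra_driver_name, ... */

	crypto_mod_put(alg);
	return 0;
}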
Signed-off-by: Herbert Xu --- crypto/crypto_user.c | 37 +------------------------------------ 1 file changed, 1 insertion(+), 36 deletions(-) (limited to 'crypto') diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index d28513fb5a90..c24a40c8a4bf 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -378,32 +378,6 @@ drop_alg: return err; } -static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type, - u32 mask) -{ - int err; - struct crypto_alg *alg; - - type = crypto_skcipher_type(type); - mask = crypto_skcipher_mask(mask); - - for (;;) { - alg = crypto_lookup_skcipher(name, type, mask); - if (!IS_ERR(alg)) - return alg; - - err = PTR_ERR(alg); - if (err != -EAGAIN) - break; - if (fatal_signal_pending(current)) { - err = -EINTR; - break; - } - } - - return ERR_PTR(err); -} - static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { @@ -436,16 +410,7 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, else name = p->cru_name; - switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) { - case CRYPTO_ALG_TYPE_GIVCIPHER: - case CRYPTO_ALG_TYPE_BLKCIPHER: - case CRYPTO_ALG_TYPE_ABLKCIPHER: - alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask); - break; - default: - alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask); - } - + alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask); if (IS_ERR(alg)) return PTR_ERR(alg); -- cgit v1.2.3-70-g09d2 From 3a01d0ee2b991c8c267620e63a4ab47cd8c30cc4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:50 +0800 Subject: crypto: skcipher - Remove top-level givcipher interface This patch removes the old crypto_grab_skcipher helper and replaces it with crypto_grab_skcipher2. As this is the final entry point into givcipher this patch also removes all traces of the top-level givcipher interface, including all implicit IV generators such as chainiv. The bottom-level givcipher interface remains until the drivers using it are converted. 
Signed-off-by: Herbert Xu --- crypto/Makefile | 2 - crypto/ablkcipher.c | 222 -------------------------- crypto/blkcipher.c | 185 ---------------------- crypto/chainiv.c | 317 ------------------------------------- crypto/eseqiv.c | 242 ---------------------------- crypto/seqiv.c | 162 +------------------ crypto/skcipher.c | 4 +- include/crypto/internal/skcipher.h | 63 ++------ include/crypto/skcipher.h | 76 --------- include/linux/crypto.h | 19 --- 10 files changed, 18 insertions(+), 1274 deletions(-) delete mode 100644 crypto/chainiv.c delete mode 100644 crypto/eseqiv.c (limited to 'crypto') diff --git a/crypto/Makefile b/crypto/Makefile index df1bcfb090d2..99cc64ac70ef 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -20,8 +20,6 @@ crypto_blkcipher-y := ablkcipher.o crypto_blkcipher-y += blkcipher.o crypto_blkcipher-y += skcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o -obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o -obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 6b80516778c6..d676fc59521a 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -16,8 +16,6 @@ #include #include #include -#include -#include #include #include #include @@ -348,16 +346,6 @@ static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type, return alg->cra_ctxsize; } -int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req) -{ - return crypto_ablkcipher_encrypt(&req->creq); -} - -int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req) -{ - return crypto_ablkcipher_decrypt(&req->creq); -} - static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) { @@ -370,10 +358,6 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, crt->setkey = setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; - if (!alg->ivsize) { - crt->givencrypt = skcipher_null_givencrypt; - crt->givdecrypt = skcipher_null_givdecrypt; - } crt->base = __crypto_ablkcipher_cast(tfm); crt->ivsize = alg->ivsize; @@ -435,11 +419,6 @@ const struct crypto_type crypto_ablkcipher_type = { }; EXPORT_SYMBOL_GPL(crypto_ablkcipher_type); -static int no_givdecrypt(struct skcipher_givcrypt_request *req) -{ - return -ENOSYS; -} - static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) { @@ -453,8 +432,6 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type, alg->setkey : setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; - crt->givencrypt = alg->givencrypt ?: no_givdecrypt; - crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt; crt->base = __crypto_ablkcipher_cast(tfm); crt->ivsize = alg->ivsize; @@ -515,202 +492,3 @@ const struct crypto_type crypto_givcipher_type = { .report = crypto_givcipher_report, }; EXPORT_SYMBOL_GPL(crypto_givcipher_type); - -const char *crypto_default_geniv(const struct crypto_alg *alg) -{ - if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_BLKCIPHER ? 
alg->cra_blkcipher.ivsize : - alg->cra_ablkcipher.ivsize) != - alg->cra_blocksize) - return "chainiv"; - - return "eseqiv"; -} - -static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) -{ - struct rtattr *tb[3]; - struct { - struct rtattr attr; - struct crypto_attr_type data; - } ptype; - struct { - struct rtattr attr; - struct crypto_attr_alg data; - } palg; - struct crypto_template *tmpl; - struct crypto_instance *inst; - struct crypto_alg *larval; - const char *geniv; - int err; - - larval = crypto_larval_lookup(alg->cra_driver_name, - (type & ~CRYPTO_ALG_TYPE_MASK) | - CRYPTO_ALG_TYPE_GIVCIPHER, - mask | CRYPTO_ALG_TYPE_MASK); - err = PTR_ERR(larval); - if (IS_ERR(larval)) - goto out; - - err = -EAGAIN; - if (!crypto_is_larval(larval)) - goto drop_larval; - - ptype.attr.rta_len = sizeof(ptype); - ptype.attr.rta_type = CRYPTOA_TYPE; - ptype.data.type = type | CRYPTO_ALG_GENIV; - /* GENIV tells the template that we're making a default geniv. */ - ptype.data.mask = mask | CRYPTO_ALG_GENIV; - tb[0] = &ptype.attr; - - palg.attr.rta_len = sizeof(palg); - palg.attr.rta_type = CRYPTOA_ALG; - /* Must use the exact name to locate ourselves. */ - memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME); - tb[1] = &palg.attr; - - tb[2] = NULL; - - if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_BLKCIPHER) - geniv = alg->cra_blkcipher.geniv; - else - geniv = alg->cra_ablkcipher.geniv; - - if (!geniv) - geniv = crypto_default_geniv(alg); - - tmpl = crypto_lookup_template(geniv); - err = -ENOENT; - if (!tmpl) - goto kill_larval; - - if (tmpl->create) { - err = tmpl->create(tmpl, tb); - if (err) - goto put_tmpl; - goto ok; - } - - inst = tmpl->alloc(tb); - err = PTR_ERR(inst); - if (IS_ERR(inst)) - goto put_tmpl; - - err = crypto_register_instance(tmpl, inst); - if (err) { - tmpl->free(inst); - goto put_tmpl; - } - -ok: - /* Redo the lookup to use the instance we just registered. */ - err = -EAGAIN; - -put_tmpl: - crypto_tmpl_put(tmpl); -kill_larval: - crypto_larval_kill(larval); -drop_larval: - crypto_mod_put(larval); -out: - crypto_mod_put(alg); - return err; -} - -struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask) -{ - struct crypto_alg *alg; - - alg = crypto_alg_mod_lookup(name, type, mask); - if (IS_ERR(alg)) - return alg; - - if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_GIVCIPHER) - return alg; - - if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize : - alg->cra_ablkcipher.ivsize)) - return alg; - - crypto_mod_put(alg); - alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED, - mask & ~CRYPTO_ALG_TESTED); - if (IS_ERR(alg)) - return alg; - - if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_GIVCIPHER) { - if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) { - crypto_mod_put(alg); - alg = ERR_PTR(-ENOENT); - } - return alg; - } - - BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_BLKCIPHER ? 
alg->cra_blkcipher.ivsize : - alg->cra_ablkcipher.ivsize)); - - return ERR_PTR(crypto_givcipher_default(alg, type, mask)); -} -EXPORT_SYMBOL_GPL(crypto_lookup_skcipher); - -int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, - u32 type, u32 mask) -{ - struct crypto_alg *alg; - int err; - - type = crypto_skcipher_type(type); - mask = crypto_skcipher_mask(mask); - - alg = crypto_lookup_skcipher(name, type, mask); - if (IS_ERR(alg)) - return PTR_ERR(alg); - - err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); - crypto_mod_put(alg); - return err; -} -EXPORT_SYMBOL_GPL(crypto_grab_skcipher); - -struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, - u32 type, u32 mask) -{ - struct crypto_tfm *tfm; - int err; - - type = crypto_skcipher_type(type); - mask = crypto_skcipher_mask(mask); - - for (;;) { - struct crypto_alg *alg; - - alg = crypto_lookup_skcipher(alg_name, type, mask); - if (IS_ERR(alg)) { - err = PTR_ERR(alg); - goto err; - } - - tfm = __crypto_alloc_tfm(alg, type, mask); - if (!IS_ERR(tfm)) - return __crypto_ablkcipher_cast(tfm); - - crypto_mod_put(alg); - err = PTR_ERR(tfm); - -err: - if (err != -EAGAIN) - break; - if (fatal_signal_pending(current)) { - err = -EINTR; - break; - } - } - - return ERR_PTR(err); -} -EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 8cc1622b2ee0..369999530108 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -466,10 +465,6 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm) crt->setkey = async_setkey; crt->encrypt = async_encrypt; crt->decrypt = async_decrypt; - if (!alg->ivsize) { - crt->givencrypt = skcipher_null_givencrypt; - crt->givdecrypt = skcipher_null_givdecrypt; - } crt->base = __crypto_ablkcipher_cast(tfm); crt->ivsize = alg->ivsize; @@ -560,185 +555,5 @@ const struct crypto_type crypto_blkcipher_type = { }; EXPORT_SYMBOL_GPL(crypto_blkcipher_type); -static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn, - const char *name, u32 type, u32 mask) -{ - struct crypto_alg *alg; - int err; - - type = crypto_skcipher_type(type); - mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV; - - alg = crypto_alg_mod_lookup(name, type, mask); - if (IS_ERR(alg)) - return PTR_ERR(alg); - - err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); - crypto_mod_put(alg); - return err; -} - -struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, - u32 mask) -{ - struct { - int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, - unsigned int keylen); - int (*encrypt)(struct ablkcipher_request *req); - int (*decrypt)(struct ablkcipher_request *req); - - unsigned int min_keysize; - unsigned int max_keysize; - unsigned int ivsize; - - const char *geniv; - } balg; - const char *name; - struct crypto_skcipher_spawn *spawn; - struct crypto_attr_type *algt; - struct crypto_instance *inst; - struct crypto_alg *alg; - int err; - - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) & - algt->mask) - return ERR_PTR(-EINVAL); - - name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(name)) - return ERR_CAST(name); - - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) - return ERR_PTR(-ENOMEM); - - spawn = crypto_instance_ctx(inst); - - /* Ignore async algorithms 
if necessary. */ - mask |= crypto_requires_sync(algt->type, algt->mask); - - crypto_set_skcipher_spawn(spawn, inst); - err = crypto_grab_nivcipher(spawn, name, type, mask); - if (err) - goto err_free_inst; - - alg = crypto_skcipher_spawn_alg(spawn); - - if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_BLKCIPHER) { - balg.ivsize = alg->cra_blkcipher.ivsize; - balg.min_keysize = alg->cra_blkcipher.min_keysize; - balg.max_keysize = alg->cra_blkcipher.max_keysize; - - balg.setkey = async_setkey; - balg.encrypt = async_encrypt; - balg.decrypt = async_decrypt; - - balg.geniv = alg->cra_blkcipher.geniv; - } else { - balg.ivsize = alg->cra_ablkcipher.ivsize; - balg.min_keysize = alg->cra_ablkcipher.min_keysize; - balg.max_keysize = alg->cra_ablkcipher.max_keysize; - - balg.setkey = alg->cra_ablkcipher.setkey; - balg.encrypt = alg->cra_ablkcipher.encrypt; - balg.decrypt = alg->cra_ablkcipher.decrypt; - - balg.geniv = alg->cra_ablkcipher.geniv; - } - - err = -EINVAL; - if (!balg.ivsize) - goto err_drop_alg; - - /* - * This is only true if we're constructing an algorithm with its - * default IV generator. For the default generator we elide the - * template name and double-check the IV generator. - */ - if (algt->mask & CRYPTO_ALG_GENIV) { - if (!balg.geniv) - balg.geniv = crypto_default_geniv(alg); - err = -EAGAIN; - if (strcmp(tmpl->name, balg.geniv)) - goto err_drop_alg; - - memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); - memcpy(inst->alg.cra_driver_name, alg->cra_driver_name, - CRYPTO_MAX_ALG_NAME); - } else { - err = -ENAMETOOLONG; - if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, - "%s(%s)", tmpl->name, alg->cra_name) >= - CRYPTO_MAX_ALG_NAME) - goto err_drop_alg; - if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "%s(%s)", tmpl->name, alg->cra_driver_name) >= - CRYPTO_MAX_ALG_NAME) - goto err_drop_alg; - } - - inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV; - inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = alg->cra_blocksize; - inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_givcipher_type; - - inst->alg.cra_ablkcipher.ivsize = balg.ivsize; - inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize; - inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize; - inst->alg.cra_ablkcipher.geniv = balg.geniv; - - inst->alg.cra_ablkcipher.setkey = balg.setkey; - inst->alg.cra_ablkcipher.encrypt = balg.encrypt; - inst->alg.cra_ablkcipher.decrypt = balg.decrypt; - -out: - return inst; - -err_drop_alg: - crypto_drop_skcipher(spawn); -err_free_inst: - kfree(inst); - inst = ERR_PTR(err); - goto out; -} -EXPORT_SYMBOL_GPL(skcipher_geniv_alloc); - -void skcipher_geniv_free(struct crypto_instance *inst) -{ - crypto_drop_skcipher(crypto_instance_ctx(inst)); - kfree(inst); -} -EXPORT_SYMBOL_GPL(skcipher_geniv_free); - -int skcipher_geniv_init(struct crypto_tfm *tfm) -{ - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_ablkcipher *cipher; - - cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst)); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - tfm->crt_ablkcipher.base = cipher; - tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher); - - return 0; -} -EXPORT_SYMBOL_GPL(skcipher_geniv_init); - -void skcipher_geniv_exit(struct crypto_tfm *tfm) -{ - crypto_free_ablkcipher(tfm->crt_ablkcipher.base); -} -EXPORT_SYMBOL_GPL(skcipher_geniv_exit); - MODULE_LICENSE("GPL"); 
MODULE_DESCRIPTION("Generic block chaining cipher type"); diff --git a/crypto/chainiv.c b/crypto/chainiv.c deleted file mode 100644 index b4340018c8d4..000000000000 --- a/crypto/chainiv.c +++ /dev/null @@ -1,317 +0,0 @@ -/* - * chainiv: Chain IV Generator - * - * Generate IVs simply be using the last block of the previous encryption. - * This is mainly useful for CBC with a synchronous algorithm. - * - * Copyright (c) 2007 Herbert Xu - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - CHAINIV_STATE_INUSE = 0, -}; - -struct chainiv_ctx { - spinlock_t lock; - char iv[]; -}; - -struct async_chainiv_ctx { - unsigned long state; - - spinlock_t lock; - int err; - - struct crypto_queue queue; - struct work_struct postponed; - - char iv[]; -}; - -static int chainiv_givencrypt(struct skcipher_givcrypt_request *req) -{ - struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); - struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); - struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); - unsigned int ivsize; - int err; - - ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); - ablkcipher_request_set_callback(subreq, req->creq.base.flags & - ~CRYPTO_TFM_REQ_MAY_SLEEP, - req->creq.base.complete, - req->creq.base.data); - ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, - req->creq.nbytes, req->creq.info); - - spin_lock_bh(&ctx->lock); - - ivsize = crypto_ablkcipher_ivsize(geniv); - - memcpy(req->giv, ctx->iv, ivsize); - memcpy(subreq->info, ctx->iv, ivsize); - - err = crypto_ablkcipher_encrypt(subreq); - if (err) - goto unlock; - - memcpy(ctx->iv, subreq->info, ivsize); - -unlock: - spin_unlock_bh(&ctx->lock); - - return err; -} - -static int chainiv_init_common(struct crypto_tfm *tfm, char iv[]) -{ - struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); - int err = 0; - - tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); - - if (iv) { - err = crypto_rng_get_bytes(crypto_default_rng, iv, - crypto_ablkcipher_ivsize(geniv)); - crypto_put_default_rng(); - } - - return err ?: skcipher_geniv_init(tfm); -} - -static int chainiv_init(struct crypto_tfm *tfm) -{ - struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); - struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm); - char *iv; - - spin_lock_init(&ctx->lock); - - iv = NULL; - if (!crypto_get_default_rng()) { - crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt; - iv = ctx->iv; - } - - return chainiv_init_common(tfm, iv); -} - -static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx) -{ - int queued; - int err = ctx->err; - - if (!ctx->queue.qlen) { - smp_mb__before_atomic(); - clear_bit(CHAINIV_STATE_INUSE, &ctx->state); - - if (!ctx->queue.qlen || - test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) - goto out; - } - - queued = queue_work(kcrypto_wq, &ctx->postponed); - BUG_ON(!queued); - -out: - return err; -} - -static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req) -{ - struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); - struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); - int err; - - spin_lock_bh(&ctx->lock); - err = skcipher_enqueue_givcrypt(&ctx->queue, req); - 
spin_unlock_bh(&ctx->lock); - - if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) - return err; - - ctx->err = err; - return async_chainiv_schedule_work(ctx); -} - -static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req) -{ - struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); - struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); - struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); - unsigned int ivsize = crypto_ablkcipher_ivsize(geniv); - - memcpy(req->giv, ctx->iv, ivsize); - memcpy(subreq->info, ctx->iv, ivsize); - - ctx->err = crypto_ablkcipher_encrypt(subreq); - if (ctx->err) - goto out; - - memcpy(ctx->iv, subreq->info, ivsize); - -out: - return async_chainiv_schedule_work(ctx); -} - -static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req) -{ - struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); - struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); - struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); - - ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); - ablkcipher_request_set_callback(subreq, req->creq.base.flags, - req->creq.base.complete, - req->creq.base.data); - ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, - req->creq.nbytes, req->creq.info); - - if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) - goto postpone; - - if (ctx->queue.qlen) { - clear_bit(CHAINIV_STATE_INUSE, &ctx->state); - goto postpone; - } - - return async_chainiv_givencrypt_tail(req); - -postpone: - return async_chainiv_postpone_request(req); -} - -static void async_chainiv_do_postponed(struct work_struct *work) -{ - struct async_chainiv_ctx *ctx = container_of(work, - struct async_chainiv_ctx, - postponed); - struct skcipher_givcrypt_request *req; - struct ablkcipher_request *subreq; - int err; - - /* Only handle one request at a time to avoid hogging keventd. 
*/ - spin_lock_bh(&ctx->lock); - req = skcipher_dequeue_givcrypt(&ctx->queue); - spin_unlock_bh(&ctx->lock); - - if (!req) { - async_chainiv_schedule_work(ctx); - return; - } - - subreq = skcipher_givcrypt_reqctx(req); - subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP; - - err = async_chainiv_givencrypt_tail(req); - - local_bh_disable(); - skcipher_givcrypt_complete(req, err); - local_bh_enable(); -} - -static int async_chainiv_init(struct crypto_tfm *tfm) -{ - struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); - struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); - char *iv; - - spin_lock_init(&ctx->lock); - - crypto_init_queue(&ctx->queue, 100); - INIT_WORK(&ctx->postponed, async_chainiv_do_postponed); - - iv = NULL; - if (!crypto_get_default_rng()) { - crypto_ablkcipher_crt(geniv)->givencrypt = - async_chainiv_givencrypt; - iv = ctx->iv; - } - - return chainiv_init_common(tfm, iv); -} - -static void async_chainiv_exit(struct crypto_tfm *tfm) -{ - struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); - - BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen); - - skcipher_geniv_exit(tfm); -} - -static struct crypto_template chainiv_tmpl; - -static struct crypto_instance *chainiv_alloc(struct rtattr **tb) -{ - struct crypto_attr_type *algt; - struct crypto_instance *inst; - - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0); - if (IS_ERR(inst)) - goto out; - - inst->alg.cra_init = chainiv_init; - inst->alg.cra_exit = skcipher_geniv_exit; - - inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx); - - if (!crypto_requires_sync(algt->type, algt->mask)) { - inst->alg.cra_flags |= CRYPTO_ALG_ASYNC; - - inst->alg.cra_init = async_chainiv_init; - inst->alg.cra_exit = async_chainiv_exit; - - inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx); - } - - inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; - -out: - return inst; -} - -static struct crypto_template chainiv_tmpl = { - .name = "chainiv", - .alloc = chainiv_alloc, - .free = skcipher_geniv_free, - .module = THIS_MODULE, -}; - -static int __init chainiv_module_init(void) -{ - return crypto_register_template(&chainiv_tmpl); -} - -static void chainiv_module_exit(void) -{ - crypto_unregister_template(&chainiv_tmpl); -} - -module_init(chainiv_module_init); -module_exit(chainiv_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Chain IV Generator"); -MODULE_ALIAS_CRYPTO("chainiv"); diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c deleted file mode 100644 index 16dda72fc4f8..000000000000 --- a/crypto/eseqiv.c +++ /dev/null @@ -1,242 +0,0 @@ -/* - * eseqiv: Encrypted Sequence Number IV Generator - * - * This generator generates an IV based on a sequence number by xoring it - * with a salt and then encrypting it with the same key as used to encrypt - * the plain text. This algorithm requires that the block size be equal - * to the IV size. It is mainly useful for CBC. - * - * Copyright (c) 2007 Herbert Xu - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct eseqiv_request_ctx { - struct scatterlist src[2]; - struct scatterlist dst[2]; - char tail[]; -}; - -struct eseqiv_ctx { - spinlock_t lock; - unsigned int reqoff; - char salt[]; -}; - -static void eseqiv_complete2(struct skcipher_givcrypt_request *req) -{ - struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); - struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); - - memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail, - crypto_ablkcipher_alignmask(geniv) + 1), - crypto_ablkcipher_ivsize(geniv)); -} - -static void eseqiv_complete(struct crypto_async_request *base, int err) -{ - struct skcipher_givcrypt_request *req = base->data; - - if (err) - goto out; - - eseqiv_complete2(req); - -out: - skcipher_givcrypt_complete(req, err); -} - -static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) -{ - struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); - struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); - struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); - struct ablkcipher_request *subreq; - crypto_completion_t compl; - void *data; - struct scatterlist *osrc, *odst; - struct scatterlist *dst; - struct page *srcp; - struct page *dstp; - u8 *giv; - u8 *vsrc; - u8 *vdst; - __be64 seq; - unsigned int ivsize; - unsigned int len; - int err; - - subreq = (void *)(reqctx->tail + ctx->reqoff); - ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); - - giv = req->giv; - compl = req->creq.base.complete; - data = req->creq.base.data; - - osrc = req->creq.src; - odst = req->creq.dst; - srcp = sg_page(osrc); - dstp = sg_page(odst); - vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset; - vdst = PageHighMem(dstp) ? 
NULL : page_address(dstp) + odst->offset; - - ivsize = crypto_ablkcipher_ivsize(geniv); - - if (vsrc != giv + ivsize && vdst != giv + ivsize) { - giv = PTR_ALIGN((u8 *)reqctx->tail, - crypto_ablkcipher_alignmask(geniv) + 1); - compl = eseqiv_complete; - data = req; - } - - ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl, - data); - - sg_init_table(reqctx->src, 2); - sg_set_buf(reqctx->src, giv, ivsize); - scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2); - - dst = reqctx->src; - if (osrc != odst) { - sg_init_table(reqctx->dst, 2); - sg_set_buf(reqctx->dst, giv, ivsize); - scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2); - - dst = reqctx->dst; - } - - ablkcipher_request_set_crypt(subreq, reqctx->src, dst, - req->creq.nbytes + ivsize, - req->creq.info); - - memcpy(req->creq.info, ctx->salt, ivsize); - - len = ivsize; - if (ivsize > sizeof(u64)) { - memset(req->giv, 0, ivsize - sizeof(u64)); - len = sizeof(u64); - } - seq = cpu_to_be64(req->seq); - memcpy(req->giv + ivsize - len, &seq, len); - - err = crypto_ablkcipher_encrypt(subreq); - if (err) - goto out; - - if (giv != req->giv) - eseqiv_complete2(req); - -out: - return err; -} - -static int eseqiv_init(struct crypto_tfm *tfm) -{ - struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); - struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); - unsigned long alignmask; - unsigned int reqsize; - int err; - - spin_lock_init(&ctx->lock); - - alignmask = crypto_tfm_ctx_alignment() - 1; - reqsize = sizeof(struct eseqiv_request_ctx); - - if (alignmask & reqsize) { - alignmask &= reqsize; - alignmask--; - } - - alignmask = ~alignmask; - alignmask &= crypto_ablkcipher_alignmask(geniv); - - reqsize += alignmask; - reqsize += crypto_ablkcipher_ivsize(geniv); - reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment()); - - ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx); - - tfm->crt_ablkcipher.reqsize = reqsize + - sizeof(struct ablkcipher_request); - - err = 0; - if (!crypto_get_default_rng()) { - crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt; - err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, - crypto_ablkcipher_ivsize(geniv)); - crypto_put_default_rng(); - } - - return err ?: skcipher_geniv_init(tfm); -} - -static struct crypto_template eseqiv_tmpl; - -static struct crypto_instance *eseqiv_alloc(struct rtattr **tb) -{ - struct crypto_instance *inst; - int err; - - inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0); - if (IS_ERR(inst)) - goto out; - - err = -EINVAL; - if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize) - goto free_inst; - - inst->alg.cra_init = eseqiv_init; - inst->alg.cra_exit = skcipher_geniv_exit; - - inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx); - inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; - -out: - return inst; - -free_inst: - skcipher_geniv_free(inst); - inst = ERR_PTR(err); - goto out; -} - -static struct crypto_template eseqiv_tmpl = { - .name = "eseqiv", - .alloc = eseqiv_alloc, - .free = skcipher_geniv_free, - .module = THIS_MODULE, -}; - -static int __init eseqiv_module_init(void) -{ - return crypto_register_template(&eseqiv_tmpl); -} - -static void __exit eseqiv_module_exit(void) -{ - crypto_unregister_template(&eseqiv_tmpl); -} - -module_init(eseqiv_module_init); -module_exit(eseqiv_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator"); -MODULE_ALIAS_CRYPTO("eseqiv"); diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 
a859b3ae239d..c7049231861f 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -14,50 +14,17 @@ */ #include -#include -#include #include +#include #include #include #include #include #include -#include #include -struct seqiv_ctx { - spinlock_t lock; - u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); -}; - static void seqiv_free(struct crypto_instance *inst); -static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) -{ - struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); - struct crypto_ablkcipher *geniv; - - if (err == -EINPROGRESS) - return; - - if (err) - goto out; - - geniv = skcipher_givcrypt_reqtfm(req); - memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv)); - -out: - kfree(subreq->info); -} - -static void seqiv_complete(struct crypto_async_request *base, int err) -{ - struct skcipher_givcrypt_request *req = base->data; - - seqiv_complete2(req, err); - skcipher_givcrypt_complete(req, err); -} - static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) { struct aead_request *subreq = aead_request_ctx(req); @@ -85,65 +52,6 @@ static void seqiv_aead_encrypt_complete(struct crypto_async_request *base, aead_request_complete(req, err); } -static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq, - unsigned int ivsize) -{ - unsigned int len = ivsize; - - if (ivsize > sizeof(u64)) { - memset(info, 0, ivsize - sizeof(u64)); - len = sizeof(u64); - } - seq = cpu_to_be64(seq); - memcpy(info + ivsize - len, &seq, len); - crypto_xor(info, ctx->salt, ivsize); -} - -static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) -{ - struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); - struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); - struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); - crypto_completion_t compl; - void *data; - u8 *info; - unsigned int ivsize; - int err; - - ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); - - compl = req->creq.base.complete; - data = req->creq.base.data; - info = req->creq.info; - - ivsize = crypto_ablkcipher_ivsize(geniv); - - if (unlikely(!IS_ALIGNED((unsigned long)info, - crypto_ablkcipher_alignmask(geniv) + 1))) { - info = kmalloc(ivsize, req->creq.base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL: - GFP_ATOMIC); - if (!info) - return -ENOMEM; - - compl = seqiv_complete; - data = req; - } - - ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl, - data); - ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, - req->creq.nbytes, info); - - seqiv_geniv(ctx, info, req->seq, ivsize); - memcpy(req->giv, info, ivsize); - - err = crypto_ablkcipher_encrypt(subreq); - if (unlikely(info != req->creq.info)) - seqiv_complete2(req, err); - return err; -} - static int seqiv_aead_encrypt(struct aead_request *req) { struct crypto_aead *geniv = crypto_aead_reqtfm(req); @@ -233,62 +141,6 @@ static int seqiv_aead_decrypt(struct aead_request *req) return crypto_aead_decrypt(subreq); } -static int seqiv_init(struct crypto_tfm *tfm) -{ - struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); - struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); - int err; - - spin_lock_init(&ctx->lock); - - tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); - - err = 0; - if (!crypto_get_default_rng()) { - crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt; - err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, - crypto_ablkcipher_ivsize(geniv)); - crypto_put_default_rng(); - } - - return err ?: skcipher_geniv_init(tfm); -} - -static int seqiv_ablkcipher_create(struct crypto_template *tmpl, - struct rtattr **tb) -{ - struct crypto_instance *inst; - int err; - - inst = skcipher_geniv_alloc(tmpl, tb, 0, 0); - - if (IS_ERR(inst)) - return PTR_ERR(inst); - - err = -EINVAL; - if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) - goto free_inst; - - inst->alg.cra_init = seqiv_init; - inst->alg.cra_exit = skcipher_geniv_exit; - - inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; - inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); - - inst->alg.cra_alignmask |= __alignof__(u32) - 1; - - err = crypto_register_instance(tmpl, inst); - if (err) - goto free_inst; - -out: - return err; - -free_inst: - skcipher_geniv_free(inst); - goto out; -} - static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) { struct aead_instance *inst; @@ -334,26 +186,20 @@ free_inst: static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; - int err; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return PTR_ERR(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) - err = seqiv_ablkcipher_create(tmpl, tb); - else - err = seqiv_aead_create(tmpl, tb); + return -EINVAL; - return err; + return seqiv_aead_create(tmpl, tb); } static void seqiv_free(struct crypto_instance *inst) { - if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) - skcipher_geniv_free(inst); - else - aead_geniv_free(aead_instance(inst)); + aead_geniv_free(aead_instance(inst)); } static struct crypto_template seqiv_tmpl = { diff --git a/crypto/skcipher.c b/crypto/skcipher.c index d248008e7f7b..f7d0018dcaee 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -325,13 +325,13 @@ static const struct crypto_type crypto_skcipher_type2 = { .tfmsize = offsetof(struct crypto_skcipher, base), }; -int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, +int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask) { spawn->base.frontend = &crypto_skcipher_type2; return crypto_grab_spawn(&spawn->base, name, type, mask); } -EXPORT_SYMBOL_GPL(crypto_grab_skcipher2); +EXPORT_SYMBOL_GPL(crypto_grab_skcipher); struct crypto_skcipher *crypto_alloc_skcipher(const 
char *alg_name, u32 type, u32 mask) diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index ce6619c339fe..a21a95e1a375 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -67,8 +67,12 @@ static inline void crypto_set_skcipher_spawn( int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask); -int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, - const char *name, u32 type, u32 mask); + +static inline int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, + const char *name, u32 type, u32 mask) +{ + return crypto_grab_skcipher(spawn, name, type, mask); +} struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask); @@ -77,30 +81,28 @@ static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) crypto_drop_spawn(&spawn->base); } -static inline struct crypto_alg *crypto_skcipher_spawn_alg( +static inline struct skcipher_alg *crypto_skcipher_spawn_alg( struct crypto_skcipher_spawn *spawn) { - return spawn->base.alg; + return container_of(spawn->base.alg, struct skcipher_alg, base); } static inline struct skcipher_alg *crypto_spawn_skcipher_alg( struct crypto_skcipher_spawn *spawn) { - return container_of(spawn->base.alg, struct skcipher_alg, base); + return crypto_skcipher_spawn_alg(spawn); } -static inline struct crypto_ablkcipher *crypto_spawn_skcipher( +static inline struct crypto_skcipher *crypto_spawn_skcipher( struct crypto_skcipher_spawn *spawn) { - return __crypto_ablkcipher_cast( - crypto_spawn_tfm(&spawn->base, crypto_skcipher_type(0), - crypto_skcipher_mask(0))); + return crypto_spawn_tfm2(&spawn->base); } static inline struct crypto_skcipher *crypto_spawn_skcipher2( struct crypto_skcipher_spawn *spawn) { - return crypto_spawn_tfm2(&spawn->base); + return crypto_spawn_skcipher(spawn); } static inline void crypto_skcipher_set_reqsize( @@ -116,53 +118,12 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); int skcipher_register_instance(struct crypto_template *tmpl, struct skcipher_instance *inst); -int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req); -int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req); -const char *crypto_default_geniv(const struct crypto_alg *alg); - -struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, - u32 mask); -void skcipher_geniv_free(struct crypto_instance *inst); -int skcipher_geniv_init(struct crypto_tfm *tfm); -void skcipher_geniv_exit(struct crypto_tfm *tfm); - -static inline struct crypto_ablkcipher *skcipher_geniv_cipher( - struct crypto_ablkcipher *geniv) -{ - return crypto_ablkcipher_crt(geniv)->base; -} - -static inline int skcipher_enqueue_givcrypt( - struct crypto_queue *queue, struct skcipher_givcrypt_request *request) -{ - return ablkcipher_enqueue_request(queue, &request->creq); -} - -static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt( - struct crypto_queue *queue) -{ - return skcipher_givcrypt_cast(crypto_dequeue_request(queue)); -} - -static inline void *skcipher_givcrypt_reqctx( - struct skcipher_givcrypt_request *req) -{ - return ablkcipher_request_ctx(&req->creq); -} - static inline void ablkcipher_request_complete(struct ablkcipher_request *req, int err) { req->base.complete(&req->base, err); } -static inline void skcipher_givcrypt_complete( - struct skcipher_givcrypt_request *req, int err) -{ - ablkcipher_request_complete(&req->creq, err); 
-} - static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) { return req->base.flags; diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index a381f57ea695..59c8f6c593e6 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -139,82 +139,6 @@ struct skcipher_alg { crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ struct skcipher_request *name = (void *)__##name##_desc -static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm( - struct skcipher_givcrypt_request *req) -{ - return crypto_ablkcipher_reqtfm(&req->creq); -} - -static inline int crypto_skcipher_givencrypt( - struct skcipher_givcrypt_request *req) -{ - struct ablkcipher_tfm *crt = - crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); - return crt->givencrypt(req); -}; - -static inline int crypto_skcipher_givdecrypt( - struct skcipher_givcrypt_request *req) -{ - struct ablkcipher_tfm *crt = - crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); - return crt->givdecrypt(req); -}; - -static inline void skcipher_givcrypt_set_tfm( - struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm) -{ - req->creq.base.tfm = crypto_ablkcipher_tfm(tfm); -} - -static inline struct skcipher_givcrypt_request *skcipher_givcrypt_cast( - struct crypto_async_request *req) -{ - return container_of(ablkcipher_request_cast(req), - struct skcipher_givcrypt_request, creq); -} - -static inline struct skcipher_givcrypt_request *skcipher_givcrypt_alloc( - struct crypto_ablkcipher *tfm, gfp_t gfp) -{ - struct skcipher_givcrypt_request *req; - - req = kmalloc(sizeof(struct skcipher_givcrypt_request) + - crypto_ablkcipher_reqsize(tfm), gfp); - - if (likely(req)) - skcipher_givcrypt_set_tfm(req, tfm); - - return req; -} - -static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req) -{ - kfree(req); -} - -static inline void skcipher_givcrypt_set_callback( - struct skcipher_givcrypt_request *req, u32 flags, - crypto_completion_t compl, void *data) -{ - ablkcipher_request_set_callback(&req->creq, flags, compl, data); -} - -static inline void skcipher_givcrypt_set_crypt( - struct skcipher_givcrypt_request *req, - struct scatterlist *src, struct scatterlist *dst, - unsigned int nbytes, void *iv) -{ - ablkcipher_request_set_crypt(&req->creq, src, dst, nbytes, iv); -} - -static inline void skcipher_givcrypt_set_giv( - struct skcipher_givcrypt_request *req, u8 *giv, u64 seq) -{ - req->giv = giv; - req->seq = seq; -} - /** * DOC: Symmetric Key Cipher API * diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 37a652d1639d..7cee5551625b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -488,8 +488,6 @@ struct ablkcipher_tfm { unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); - int (*givencrypt)(struct skcipher_givcrypt_request *req); - int (*givdecrypt)(struct skcipher_givcrypt_request *req); struct crypto_ablkcipher *base; @@ -714,23 +712,6 @@ static inline u32 crypto_skcipher_mask(u32 mask) * state information is unused by the kernel crypto API. */ -/** - * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle - * @alg_name: is the cra_name / name or cra_driver_name / driver name of the - * ablkcipher cipher - * @type: specifies the type of the cipher - * @mask: specifies the mask for the cipher - * - * Allocate a cipher handle for an ablkcipher. 
The returned struct - * crypto_ablkcipher is the cipher handle that is required for any subsequent - * API invocation for that ablkcipher. - * - * Return: allocated cipher handle in case of success; IS_ERR() is true in case - * of an error, PTR_ERR() returns the error code. - */ -struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, - u32 type, u32 mask); - static inline struct crypto_tfm *crypto_ablkcipher_tfm( struct crypto_ablkcipher *tfm) { -- cgit v1.2.3-70-g09d2 From 5506f53c7cc17c4ad5e69e5512a35faf77182986 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:53 +0800 Subject: crypto: scatterwalk - Remove scatterwalk_bytes_sglen This patch removes the now unused scatterwalk_bytes_sglen. Anyone using this out-of-tree should switch over to sg_nents_for_len. Signed-off-by: Herbert Xu --- crypto/scatterwalk.c | 22 ---------------------- include/crypto/scatterwalk.h | 2 -- 2 files changed, 24 deletions(-) (limited to 'crypto') diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index ea5815c5e128..03ca4aef1b9d 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -125,28 +125,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, } EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy); -int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes) -{ - int offset = 0, n = 0; - - /* num_bytes is too small */ - if (num_bytes < sg->length) - return -1; - - do { - offset += sg->length; - n++; - sg = sg_next(sg); - - /* num_bytes is too large */ - if (unlikely(!sg && (num_bytes < offset))) - return -1; - } while (sg && (num_bytes > offset)); - - return n; -} -EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen); - struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, unsigned int len) diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 35f99b68d037..7e1a33645eb5 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -92,8 +92,6 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more); void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, unsigned int start, unsigned int nbytes, int out); -int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes); - struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, unsigned int len); -- cgit v1.2.3-70-g09d2 From 85eccddee401ae81067e763516889780b5545160 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:55 +0800 Subject: crypto: scatterwalk - Add no-copy support to copychunks The function ablkcipher_done_slow is pretty much identical to scatterwalk_copychunks except that it doesn't actually copy as the processing hasn't been completed yet. This patch allows scatterwalk_copychunks to be used in this case by specifying out == 2. 
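As a minimal sketch of the new convention (the helper below is illustrative and not part of this patch), a caller that only needs to step over data that is processed in place can now do:

	#include <crypto/scatterwalk.h>

	/*
	 * Advance a started scatter_walk by nbytes without touching the
	 * data: with out == 2 the map/memcpy/unmap step is skipped, and
	 * because (out & 1) == 0, scatterwalk_pagedone will not flush any
	 * output pages either.  buf is never dereferenced in this mode,
	 * so NULL is fine.
	 */
	static inline void scatterwalk_skip(struct scatter_walk *walk,
					    size_t nbytes)
	{
		scatterwalk_copychunks(NULL, walk, nbytes, 2);
	}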
Signed-off-by: Herbert Xu --- crypto/scatterwalk.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 03ca4aef1b9d..e124ce26feed 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -87,9 +87,11 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, if (len_this_page > nbytes) len_this_page = nbytes; - vaddr = scatterwalk_map(walk); - memcpy_dir(buf, vaddr, len_this_page, out); - scatterwalk_unmap(vaddr); + if (out != 2) { + vaddr = scatterwalk_map(walk); + memcpy_dir(buf, vaddr, len_this_page, out); + scatterwalk_unmap(vaddr); + } scatterwalk_advance(walk, len_this_page); @@ -99,7 +101,7 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, buf += len_this_page; nbytes -= len_this_page; - scatterwalk_pagedone(walk, out, 1); + scatterwalk_pagedone(walk, out & 1, 1); } } EXPORT_SYMBOL_GPL(scatterwalk_copychunks); -- cgit v1.2.3-70-g09d2 From 5f070e81bee35f1b7bd1477bb223a873ff657803 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:57 +0800 Subject: crypto: scatterwalk - Fix test in scatterwalk_done When there is more data to be processed, the current test in scatterwalk_done may prevent us from calling pagedone even when we should. In particular, if we're on an SG entry spanning multiple pages where the last page is not a full page, we will incorrectly skip calling pagedone on the second last page. This patch fixes this by adding a separate test for whether we've reached the end of a page. Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/scatterwalk.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index e124ce26feed..52aae2974794 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -72,7 +72,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, void scatterwalk_done(struct scatter_walk *walk, int out, int more) { - if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more) + if (!more || walk->offset >= walk->sg->offset + walk->sg->length || + !(walk->offset & (PAGE_SIZE - 1))) scatterwalk_pagedone(walk, out, more); } EXPORT_SYMBOL_GPL(scatterwalk_done); -- cgit v1.2.3-70-g09d2 From 28cf86fafdd663cfcad3c5a5fe9869f1fa01b472 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:58 +0800 Subject: crypto: scatterwalk - Remove unnecessary advance in scatterwalk_pagedone The offset advance in scatterwalk_pagedone not only is unnecessary, but it was also buggy when it was needed by scatterwalk_copychunks. As the latter has long ago been fixed to call scatterwalk_advance directly, we can remove this unnecessary offset adjustment. 
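The division of labour this leaves behind can be sketched as follows (illustrative code, not the exact kernel walker): the caller owns the offset via scatterwalk_advance, and pagedone only has to move on to the next entry once the current one is exhausted.

	#include <crypto/scatterwalk.h>
	#include <linux/scatterlist.h>

	/* One walk step under the new invariant. */
	static void walk_step(struct scatter_walk *walk, unsigned int len)
	{
		scatterwalk_advance(walk, len);	/* walk->offset += len */

		/* Entry exhausted?  Then start on the next sg entry. */
		if (walk->offset >= walk->sg->offset + walk->sg->length)
			scatterwalk_start(walk, sg_next(walk->sg));
	}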
Signed-off-by: Herbert Xu --- crypto/scatterwalk.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 52aae2974794..2ec5368ed649 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -62,12 +62,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, flush_dcache_page(page); } - if (more) { - walk->offset += PAGE_SIZE - 1; - walk->offset &= PAGE_MASK; - if (walk->offset >= walk->sg->offset + walk->sg->length) - scatterwalk_start(walk, sg_next(walk->sg)); - } + if (more && walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); } void scatterwalk_done(struct scatter_walk *walk, int out, int more) -- cgit v1.2.3-70-g09d2 From 2ee732d57496b8365819dfb958bc1ff04fcd4cac Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:17:59 +0800 Subject: crypto: scatterwalk - Remove unnecessary BUG in scatterwalk_start Nothing bad will happen even if sg->length is zero, so there is no point in keeping this BUG_ON in scatterwalk_start. Signed-off-by: Herbert Xu --- crypto/scatterwalk.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'crypto') diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 2ec5368ed649..ddffbb3ee712 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -33,9 +33,6 @@ static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg) { walk->sg = sg; - - BUG_ON(!sg->length); - walk->offset = sg->offset; } EXPORT_SYMBOL_GPL(scatterwalk_start); -- cgit v1.2.3-70-g09d2 From ac02725812cb3a814cfe1fdc2a8a59db073e7e66 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 12 Jul 2016 13:18:00 +0800 Subject: crypto: scatterwalk - Inline start/map/done This patch inlines the functions scatterwalk_start, scatterwalk_map and scatterwalk_done as they're all tiny and mostly used by the block cipher walker. Signed-off-by: Herbert Xu --- crypto/scatterwalk.c | 43 ----------------------------------------- include/crypto/scatterwalk.h | 46 ++++++++++++++++++++++++++++++++++++++------ 2 files changed, 40 insertions(+), 49 deletions(-) (limited to 'crypto') diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index ddffbb3ee712..52ce17a3dd63 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -18,8 +18,6 @@ #include #include #include -#include -#include #include static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) @@ -30,47 +28,6 @@ static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) memcpy(dst, src, nbytes); } -void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg) -{ - walk->sg = sg; - walk->offset = sg->offset; -} -EXPORT_SYMBOL_GPL(scatterwalk_start); - -void *scatterwalk_map(struct scatter_walk *walk) -{ - return kmap_atomic(scatterwalk_page(walk)) + - offset_in_page(walk->offset); -} -EXPORT_SYMBOL_GPL(scatterwalk_map); - -static void scatterwalk_pagedone(struct scatter_walk *walk, int out, - unsigned int more) -{ - if (out) { - struct page *page; - - page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); - /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as - * PageSlab cannot be optimised away per se due to - * use of volatile pointer. 
- */ - if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page)) - flush_dcache_page(page); - } - - if (more && walk->offset >= walk->sg->offset + walk->sg->length) - scatterwalk_start(walk, sg_next(walk->sg)); -} - -void scatterwalk_done(struct scatter_walk *walk, int out, int more) -{ - if (!more || walk->offset >= walk->sg->offset + walk->sg->length || - !(walk->offset & (PAGE_SIZE - 1))) - scatterwalk_pagedone(walk, out, more); -} -EXPORT_SYMBOL_GPL(scatterwalk_done); - void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out) { diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 7e1a33645eb5..880e6be9e95e 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -16,14 +16,10 @@ #ifndef _CRYPTO_SCATTERWALK_H #define _CRYPTO_SCATTERWALK_H -#include #include -#include #include #include -#include #include -#include static inline void scatterwalk_crypto_chain(struct scatterlist *head, struct scatterlist *sg, @@ -83,11 +79,49 @@ static inline void scatterwalk_unmap(void *vaddr) kunmap_atomic(vaddr); } -void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); +static inline void scatterwalk_start(struct scatter_walk *walk, + struct scatterlist *sg) +{ + walk->sg = sg; + walk->offset = sg->offset; +} + +static inline void *scatterwalk_map(struct scatter_walk *walk) +{ + return kmap_atomic(scatterwalk_page(walk)) + + offset_in_page(walk->offset); +} + +static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, + unsigned int more) +{ + if (out) { + struct page *page; + + page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); + /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as + * PageSlab cannot be optimised away per se due to + * use of volatile pointer. + */ + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page)) + flush_dcache_page(page); + } + + if (more && walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); +} + +static inline void scatterwalk_done(struct scatter_walk *walk, int out, + int more) +{ + if (!more || walk->offset >= walk->sg->offset + walk->sg->length || + !(walk->offset & (PAGE_SIZE - 1))) + scatterwalk_pagedone(walk, out, more); +} + void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out); void *scatterwalk_map(struct scatter_walk *walk); -void scatterwalk_done(struct scatter_walk *walk, int out, int more); void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, unsigned int start, unsigned int nbytes, int out); -- cgit v1.2.3-70-g09d2 From a6d7bfd0ff21f258913dd5e626d2bd70ab3942df Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Thu, 14 Jul 2016 20:39:18 -0700 Subject: crypto: rsa-pkcs1pad - fix rsa-pkcs1pad request struct To allow for child request context the struct akcipher_request child_req needs to be at the end of the structure. 
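The reason is the tail-allocation convention for request contexts: the context returned by akcipher_request_ctx() is the memory immediately following the request structure, sized by crypto_akcipher_reqsize(). A sketch of the rule, with illustrative field names mirroring the fixed layout:

	#include <crypto/akcipher.h>
	#include <linux/scatterlist.h>

	/*
	 * Any wrapper embedding a child akcipher request must place it
	 * last, because the child's own request context occupies the
	 * bytes directly after it:
	 *
	 *   [ wrapper members | child_req | child's context ... ]
	 *
	 * and the parent's reqsize must cover that tail, i.e.
	 * sizeof(struct wrapper_request) + crypto_akcipher_reqsize(child).
	 */
	struct wrapper_request {
		struct scatterlist in_sg[2], out_sg[1];
		u8 *in_buf, *out_buf;
		struct akcipher_request child_req;	/* must stay last */
	};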
Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 880d3db57a25..877019a6d3ea 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -101,10 +101,9 @@ struct pkcs1pad_inst_ctx { }; struct pkcs1pad_request { - struct akcipher_request child_req; - struct scatterlist in_sg[2], out_sg[1]; uint8_t *in_buf, *out_buf; + struct akcipher_request child_req; }; static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key, -- cgit v1.2.3-70-g09d2 From 15226e4804e244c2dd51db2a3a2c2b5c9dd65874 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 18 Jul 2016 18:20:10 +0800 Subject: crypto: testmgr - Print akcipher algorithm name When an akcipher test fails, we don't know which algorithm failed because the name is not printed. This patch fixes this. Signed-off-by: Herbert Xu --- crypto/testmgr.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 769cc2a88040..5c9d5a5e7b65 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2034,6 +2034,8 @@ free_xbuf: static int test_akcipher(struct crypto_akcipher *tfm, const char *alg, struct akcipher_testvec *vecs, unsigned int tcount) { + const char *algo = + crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm)); int ret, i; for (i = 0; i < tcount; i++) { @@ -2041,8 +2043,8 @@ static int test_akcipher(struct crypto_akcipher *tfm, const char *alg, if (!ret) continue; - pr_err("alg: akcipher: test failed on vector %d, err=%d\n", - i + 1, ret); + pr_err("alg: akcipher: test %d failed for %s, err=%d\n", + i + 1, algo, ret); return ret; } return 0; -- cgit v1.2.3-70-g09d2
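The name-resolution pattern used here generalizes to other transform types, since each has a cast helper back to the underlying struct crypto_tfm. A minimal sketch, assuming a struct crypto_skcipher handle rather than an akcipher one:

	#include <crypto/skcipher.h>

	/* Illustrative: resolve the driver name (cra_driver_name) of an
	 * skcipher the same way the patch above does for an akcipher. */
	static const char *skcipher_driver_name(struct crypto_skcipher *tfm)
	{
		return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
	}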