-rw-r--r--  Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml | 21
-rw-r--r--  arch/arm/crypto/blake2b-neon-glue.c | 4
-rw-r--r--  arch/powerpc/crypto/sha1-spe-glue.c | 2
-rw-r--r--  crypto/api.c | 2
-rw-r--r--  crypto/ecc.c | 11
-rw-r--r--  crypto/ecc.h | 37
-rw-r--r--  crypto/ecc_curve_defs.h | 17
-rw-r--r--  crypto/ecdh.c | 72
-rw-r--r--  crypto/ecdh_helper.c | 4
-rw-r--r--  crypto/serpent_generic.c | 39
-rw-r--r--  crypto/testmgr.c | 24
-rw-r--r--  crypto/testmgr.h | 34
-rw-r--r--  drivers/char/hw_random/ba431-rng.c | 12
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 27
-rw-r--r--  drivers/char/hw_random/cctrng.c | 4
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 3
-rw-r--r--  drivers/char/hw_random/pic32-rng.c | 3
-rw-r--r--  drivers/char/hw_random/xiphera-trng.c | 4
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 21
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c | 1
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c | 4
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-core.c | 3
-rw-r--r--  drivers/crypto/atmel-ecc.c | 28
-rw-r--r--  drivers/crypto/cavium/cpt/cptpf_main.c | 1
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 6
-rw-r--r--  drivers/crypto/hisilicon/Kconfig | 1
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre.h | 17
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 881
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 12
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 4
-rw-r--r--  drivers/crypto/hisilicon/qm.h | 4
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 4
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 4
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h | 4
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h | 4
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c | 4
-rw-r--r--  drivers/crypto/keembay/keembay-ocs-aes-core.c | 4
-rw-r--r--  drivers/crypto/keembay/keembay-ocs-hcu-core.c | 4
-rw-r--r--  drivers/crypto/nx/nx-common-powernv.c | 4
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf2pf_msg.c | 4
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 32
-rw-r--r--  drivers/crypto/qce/cipher.h | 1
-rw-r--r--  drivers/crypto/qce/common.c | 25
-rw-r--r--  drivers/crypto/qce/common.h | 3
-rw-r--r--  drivers/crypto/qce/sha.c | 143
-rw-r--r--  drivers/crypto/qce/skcipher.c | 69
-rw-r--r--  drivers/crypto/s5p-sss.c | 2
-rw-r--r--  include/crypto/acompress.h | 2
-rw-r--r--  include/crypto/aead.h | 2
-rw-r--r--  include/crypto/akcipher.h | 2
-rw-r--r--  include/crypto/ecc_curve.h | 60
-rw-r--r--  include/crypto/ecdh.h | 2
-rw-r--r--  include/crypto/hash.h | 4
-rw-r--r--  include/crypto/kpp.h | 2
-rw-r--r--  include/crypto/rng.h | 2
-rw-r--r--  include/crypto/skcipher.h | 2
-rw-r--r--  net/bluetooth/ecdh_helper.c | 2
-rw-r--r--  net/bluetooth/selftest.c | 2
-rw-r--r--  net/bluetooth/smp.c | 6
61 files changed, 1355 insertions(+), 351 deletions(-)
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml b/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml
index c147900f9041..6da674666d45 100644
--- a/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml
+++ b/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml
@@ -28,6 +28,12 @@ properties:
clock-names:
const: ipsec
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: ipsec
+
interrupts:
maxItems: 1
@@ -35,6 +41,18 @@ required:
- compatible
- reg
+if:
+ properties:
+ compatible:
+ enum:
+ - brcm,bcm6368-rng
+then:
+ required:
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+
additionalProperties: false
examples:
@@ -58,4 +76,7 @@ examples:
clocks = <&periph_clk 18>;
clock-names = "ipsec";
+
+ resets = <&periph_rst 4>;
+ reset-names = "ipsec";
};
diff --git a/arch/arm/crypto/blake2b-neon-glue.c b/arch/arm/crypto/blake2b-neon-glue.c
index 34d73200e7fa..4b59d027ba4a 100644
--- a/arch/arm/crypto/blake2b-neon-glue.c
+++ b/arch/arm/crypto/blake2b-neon-glue.c
@@ -85,8 +85,8 @@ static int __init blake2b_neon_mod_init(void)
static void __exit blake2b_neon_mod_exit(void)
{
- return crypto_unregister_shashes(blake2b_neon_algs,
- ARRAY_SIZE(blake2b_neon_algs));
+ crypto_unregister_shashes(blake2b_neon_algs,
+ ARRAY_SIZE(blake2b_neon_algs));
}
module_init(blake2b_neon_mod_init);
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index b1e577cbf00c..88e8ea73bfa7 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -107,7 +107,7 @@ static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
src += bytes;
len -= bytes;
- };
+ }
memcpy((char *)sctx->buffer, src, len);
return 0;
diff --git a/crypto/api.c b/crypto/api.c
index ed08cbd5b9d3..c4eda56cff89 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
struct crypto_alg *alg;
- if (unlikely(!mem))
+ if (IS_ERR_OR_NULL(mem))
return;
alg = tfm->__crt_alg;
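
Note: the guard above widens from a plain NULL test to IS_ERR_OR_NULL(), so a caller that hands back a pointer-encoded error (as the crypto_alloc_* helpers can return) is no longer dereferenced. A minimal sketch of the idiom, using a hypothetical destroy() helper:

    #include <linux/err.h>
    #include <linux/slab.h>

    void destroy(void *mem)
    {
            /* One test rejects both NULL and ERR_PTR(-E...) values;
             * anything else is a real object we may touch. */
            if (IS_ERR_OR_NULL(mem))
                    return;
            kfree(mem);
    }
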
diff --git a/crypto/ecc.c b/crypto/ecc.c
index c80aa25994a0..0798a1836e58 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -24,6 +24,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <crypto/ecc_curve.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
@@ -42,7 +43,14 @@ typedef struct {
u64 m_high;
} uint128_t;
-static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
+/* Returns the curve25519 curve parameters */
+const struct ecc_curve *ecc_get_curve25519(void)
+{
+ return &ecc_25519;
+}
+EXPORT_SYMBOL(ecc_get_curve25519);
+
+const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
{
switch (curve_id) {
/* In FIPS mode only allow P256 and higher */
@@ -54,6 +62,7 @@ static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
return NULL;
}
}
+EXPORT_SYMBOL(ecc_get_curve);
static u64 *ecc_alloc_digits_space(unsigned int ndigits)
{
diff --git a/crypto/ecc.h b/crypto/ecc.h
index d4e546b9ad79..38a81d404821 100644
--- a/crypto/ecc.h
+++ b/crypto/ecc.h
@@ -26,6 +26,8 @@
#ifndef _CRYPTO_ECC_H
#define _CRYPTO_ECC_H
+#include <crypto/ecc_curve.h>
+
/* One digit is u64 qword. */
#define ECC_CURVE_NIST_P192_DIGITS 3
#define ECC_CURVE_NIST_P256_DIGITS 4
@@ -33,44 +35,9 @@
#define ECC_DIGITS_TO_BYTES_SHIFT 3
-/**
- * struct ecc_point - elliptic curve point in affine coordinates
- *
- * @x: X coordinate in vli form.
- * @y: Y coordinate in vli form.
- * @ndigits: Length of vlis in u64 qwords.
- */
-struct ecc_point {
- u64 *x;
- u64 *y;
- u8 ndigits;
-};
-
#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
/**
- * struct ecc_curve - definition of elliptic curve
- *
- * @name: Short name of the curve.
- * @g: Generator point of the curve.
- * @p: Prime number, if Barrett's reduction is used for this curve
- * pre-calculated value 'mu' is appended to the @p after ndigits.
- * Use of Barrett's reduction is heuristically determined in
- * vli_mmod_fast().
- * @n: Order of the curve group.
- * @a: Curve parameter a.
- * @b: Curve parameter b.
- */
-struct ecc_curve {
- char *name;
- struct ecc_point g;
- u64 *p;
- u64 *n;
- u64 *a;
- u64 *b;
-};
-
-/**
* ecc_is_key_valid() - Validate a given ECDH private key
*
* @curve_id: id representing the curve to use
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index 69be6c7d228f..d7769ccc4c8e 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -54,4 +54,21 @@ static struct ecc_curve nist_p256 = {
.b = nist_p256_b
};
+/* curve25519 */
+static u64 curve25519_g_x[] = { 0x0000000000000009, 0x0000000000000000,
+ 0x0000000000000000, 0x0000000000000000 };
+static u64 curve25519_p[] = { 0xffffffffffffffed, 0xffffffffffffffff,
+ 0xffffffffffffffff, 0x7fffffffffffffff };
+static u64 curve25519_a[] = { 0x000000000001DB41, 0x0000000000000000,
+ 0x0000000000000000, 0x0000000000000000 };
+static const struct ecc_curve ecc_25519 = {
+ .name = "curve25519",
+ .g = {
+ .x = curve25519_g_x,
+ .ndigits = 4,
+ },
+ .p = curve25519_p,
+ .a = curve25519_a,
+};
+
#endif
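
A note on the constants: 0x1DB41 is 121665 = (486662 - 2) / 4, which appears to be the a24 ladder constant of RFC 7748 rather than the raw Montgomery coefficient A of y^2 = x^3 + 486662*x^2 + x. The X25519 ladder needs only p, a24 and the generator's x-coordinate, which would explain why .b, .n and .g.y are left unset here. The arithmetic, for reference:

    A   = 486662                 /* curve25519: y^2 = x^3 + A*x^2 + x */
    a24 = (A - 2) / 4
        = 486660 / 4
        = 121665 = 0x1DB41       /* the .a value stored above */
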
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 96f80c8f8e30..04a427b8c956 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -23,33 +23,16 @@ static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
return kpp_tfm_ctx(tfm);
}
-static unsigned int ecdh_supported_curve(unsigned int curve_id)
-{
- switch (curve_id) {
- case ECC_CURVE_NIST_P192: return ECC_CURVE_NIST_P192_DIGITS;
- case ECC_CURVE_NIST_P256: return ECC_CURVE_NIST_P256_DIGITS;
- default: return 0;
- }
-}
-
static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
struct ecdh params;
- unsigned int ndigits;
if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
- params.key_size > sizeof(ctx->private_key))
+ params.key_size > sizeof(u64) * ctx->ndigits)
return -EINVAL;
- ndigits = ecdh_supported_curve(params.curve_id);
- if (!ndigits)
- return -EINVAL;
-
- ctx->curve_id = params.curve_id;
- ctx->ndigits = ndigits;
-
if (!params.key || !params.key_size)
return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
ctx->private_key);
@@ -140,13 +123,24 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
}
-static struct kpp_alg ecdh = {
+static int ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
+{
+ struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P192;
+ ctx->ndigits = ECC_CURVE_NIST_P192_DIGITS;
+
+ return 0;
+}
+
+static struct kpp_alg ecdh_nist_p192 = {
.set_secret = ecdh_set_secret,
.generate_public_key = ecdh_compute_value,
.compute_shared_secret = ecdh_compute_value,
.max_size = ecdh_max_size,
+ .init = ecdh_nist_p192_init_tfm,
.base = {
- .cra_name = "ecdh",
+ .cra_name = "ecdh-nist-p192",
.cra_driver_name = "ecdh-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
@@ -154,14 +148,48 @@ static struct kpp_alg ecdh = {
},
};
+static int ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+ struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P256;
+ ctx->ndigits = ECC_CURVE_NIST_P256_DIGITS;
+
+ return 0;
+}
+
+static struct kpp_alg ecdh_nist_p256 = {
+ .set_secret = ecdh_set_secret,
+ .generate_public_key = ecdh_compute_value,
+ .compute_shared_secret = ecdh_compute_value,
+ .max_size = ecdh_max_size,
+ .init = ecdh_nist_p256_init_tfm,
+ .base = {
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "ecdh-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ecdh_ctx),
+ },
+};
+
+static bool ecdh_nist_p192_registered;
+
static int ecdh_init(void)
{
- return crypto_register_kpp(&ecdh);
+ int ret;
+
+ ret = crypto_register_kpp(&ecdh_nist_p192);
+ ecdh_nist_p192_registered = ret == 0;
+
+ return crypto_register_kpp(&ecdh_nist_p256);
}
static void ecdh_exit(void)
{
- crypto_unregister_kpp(&ecdh);
+ if (ecdh_nist_p192_registered)
+ crypto_unregister_kpp(&ecdh_nist_p192);
+ crypto_unregister_kpp(&ecdh_nist_p256);
}
subsys_initcall(ecdh_init);
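
With the curve id gone from the packed secret, the curve is now chosen by algorithm name at tfm allocation time and bound in the .init hook. A hedged usage sketch (error handling trimmed):

    #include <crypto/kpp.h>

    struct crypto_kpp *tfm;

    tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);   /* curve picked by name */
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);
    /* ... crypto_kpp_set_secret(tfm, packed, packed_len); ... */
    crypto_free_kpp(tfm);
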
diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
index fca63b559f65..f18f9028f912 100644
--- a/crypto/ecdh_helper.c
+++ b/crypto/ecdh_helper.c
@@ -10,7 +10,7 @@
#include <crypto/ecdh.h>
#include <crypto/kpp.h>
-#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short))
+#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + sizeof(short))
static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz)
{
@@ -46,7 +46,6 @@ int crypto_ecdh_encode_key(char *buf, unsigned int len,
return -EINVAL;
ptr = ecdh_pack_data(ptr, &secret, sizeof(secret));
- ptr = ecdh_pack_data(ptr, &params->curve_id, sizeof(params->curve_id));
ptr = ecdh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
ecdh_pack_data(ptr, params->key, params->key_size);
@@ -70,7 +69,6 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
if (unlikely(len < secret.len))
return -EINVAL;
- ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
if (secret.len != crypto_ecdh_key_len(params))
return -EINVAL;
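
Dropping curve_id shrinks the packed blob by one short. The layout after this change, assuming the usual two-u16 struct kpp_secret header, is:

    /* type (2 bytes) | len (2) | key_size (2) | key (key_size bytes) */
    /* e.g. a P-256 private key: len = 4 + 2 + 32 = 38 = 0x26 */
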
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 236c87547a17..45f98b750053 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -272,6 +272,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
u32 *k = ctx->expkey;
u8 *k8 = (u8 *)k;
u32 r0, r1, r2, r3, r4;
+ __le32 *lk;
int i;
/* Copy key, add padding */
@@ -283,22 +284,32 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
while (i < SERPENT_MAX_KEY_SIZE)
k8[i++] = 0;
+ lk = (__le32 *)k;
+ k[0] = le32_to_cpu(lk[0]);
+ k[1] = le32_to_cpu(lk[1]);
+ k[2] = le32_to_cpu(lk[2]);
+ k[3] = le32_to_cpu(lk[3]);
+ k[4] = le32_to_cpu(lk[4]);
+ k[5] = le32_to_cpu(lk[5]);
+ k[6] = le32_to_cpu(lk[6]);
+ k[7] = le32_to_cpu(lk[7]);
+
/* Expand key using polynomial */
- r0 = le32_to_cpu(k[3]);
- r1 = le32_to_cpu(k[4]);
- r2 = le32_to_cpu(k[5]);
- r3 = le32_to_cpu(k[6]);
- r4 = le32_to_cpu(k[7]);
-
- keyiter(le32_to_cpu(k[0]), r0, r4, r2, 0, 0);
- keyiter(le32_to_cpu(k[1]), r1, r0, r3, 1, 1);
- keyiter(le32_to_cpu(k[2]), r2, r1, r4, 2, 2);
- keyiter(le32_to_cpu(k[3]), r3, r2, r0, 3, 3);
- keyiter(le32_to_cpu(k[4]), r4, r3, r1, 4, 4);
- keyiter(le32_to_cpu(k[5]), r0, r4, r2, 5, 5);
- keyiter(le32_to_cpu(k[6]), r1, r0, r3, 6, 6);
- keyiter(le32_to_cpu(k[7]), r2, r1, r4, 7, 7);
+ r0 = k[3];
+ r1 = k[4];
+ r2 = k[5];
+ r3 = k[6];
+ r4 = k[7];
+
+ keyiter(k[0], r0, r4, r2, 0, 0);
+ keyiter(k[1], r1, r0, r3, 1, 1);
+ keyiter(k[2], r2, r1, r4, 2, 2);
+ keyiter(k[3], r3, r2, r0, 3, 3);
+ keyiter(k[4], r4, r3, r1, 4, 4);
+ keyiter(k[5], r0, r4, r2, 5, 5);
+ keyiter(k[6], r1, r0, r3, 6, 6);
+ keyiter(k[7], r2, r1, r4, 7, 7);
keyiter(k[0], r3, r2, r0, 8, 8);
keyiter(k[1], r4, r3, r1, 9, 9);
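
The setkey rework converts the padded key to CPU byte order once, in place, instead of wrapping every access in le32_to_cpu(); going through a __le32 * view also keeps sparse happy, since k itself is declared u32 *. The pattern, as a standalone sketch:

    #include <asm/byteorder.h>

    /* Convert nwords little-endian 32-bit words to CPU order in place. */
    static void key_words_to_cpu(u32 *k, int nwords)
    {
            __le32 *lk = (__le32 *)k;
            int i;

            for (i = 0; i < nwords; i++)
                    k[i] = le32_to_cpu(lk[i]);
    }
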
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 93359999c94b..fbe5d6486f8c 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1168,11 +1168,6 @@ static inline int check_shash_op(const char *op, int err,
return err;
}
-static inline const void *sg_data(struct scatterlist *sg)
-{
- return page_address(sg_page(sg)) + sg->offset;
-}
-
/* Test one hash test vector in one configuration, using the shash API */
static int test_shash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
@@ -1230,7 +1225,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
return 0;
if (cfg->nosimd)
crypto_disable_simd_for_test();
- err = crypto_shash_digest(desc, sg_data(&tsgl->sgl[0]),
+ err = crypto_shash_digest(desc, sg_virt(&tsgl->sgl[0]),
tsgl->sgl[0].length, result);
if (cfg->nosimd)
crypto_reenable_simd_for_test();
@@ -1266,7 +1261,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
cfg->finalization_type == FINALIZATION_TYPE_FINUP) {
if (divs[i]->nosimd)
crypto_disable_simd_for_test();
- err = crypto_shash_finup(desc, sg_data(&tsgl->sgl[i]),
+ err = crypto_shash_finup(desc, sg_virt(&tsgl->sgl[i]),
tsgl->sgl[i].length, result);
if (divs[i]->nosimd)
crypto_reenable_simd_for_test();
@@ -1278,7 +1273,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
}
if (divs[i]->nosimd)
crypto_disable_simd_for_test();
- err = crypto_shash_update(desc, sg_data(&tsgl->sgl[i]),
+ err = crypto_shash_update(desc, sg_virt(&tsgl->sgl[i]),
tsgl->sgl[i].length);
if (divs[i]->nosimd)
crypto_reenable_simd_for_test();
@@ -4904,11 +4899,20 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
#endif
- .alg = "ecdh",
+#ifndef CONFIG_CRYPTO_FIPS
+ .alg = "ecdh-nist-p192",
+ .test = alg_test_kpp,
+ .fips_allowed = 1,
+ .suite = {
+ .kpp = __VECS(ecdh_p192_tv_template)
+ }
+ }, {
+#endif
+ .alg = "ecdh-nist-p256",
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
- .kpp = __VECS(ecdh_tv_template)
+ .kpp = __VECS(ecdh_p256_tv_template)
}
}, {
.alg = "ecrdsa",
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index ced56ea0c9b4..6426603597c0 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -2261,19 +2261,17 @@ static const struct kpp_testvec curve25519_tv_template[] = {
}
};
-static const struct kpp_testvec ecdh_tv_template[] = {
- {
#ifndef CONFIG_CRYPTO_FIPS
+static const struct kpp_testvec ecdh_p192_tv_template[] = {
+ {
.secret =
#ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */
- "\x20\x00" /* len */
- "\x01\x00" /* curve_id */
+ "\x1e\x00" /* len */
"\x18\x00" /* key_size */
#else
"\x00\x02" /* type */
- "\x00\x20" /* len */
- "\x00\x01" /* curve_id */
+ "\x00\x1e" /* len */
"\x00\x18" /* key_size */
#endif
"\xb5\x05\xb1\x71\x1e\xbf\x8c\xda"
@@ -2301,18 +2299,20 @@ static const struct kpp_testvec ecdh_tv_template[] = {
.b_public_size = 48,
.expected_a_public_size = 48,
.expected_ss_size = 24
- }, {
+ }
+};
#endif
+
+static const struct kpp_testvec ecdh_p256_tv_template[] = {
+ {
.secret =
#ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */
- "\x28\x00" /* len */
- "\x02\x00" /* curve_id */
+ "\x26\x00" /* len */
"\x20\x00" /* key_size */
#else
"\x00\x02" /* type */
- "\x00\x28" /* len */
- "\x00\x02" /* curve_id */
+ "\x00\x26" /* len */
"\x00\x20" /* key_size */
#endif
"\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"
@@ -2350,25 +2350,21 @@ static const struct kpp_testvec ecdh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */
- "\x08\x00" /* len */
- "\x02\x00" /* curve_id */
+ "\x06\x00" /* len */
"\x00\x00", /* key_size */
#else
"\x00\x02" /* type */
- "\x00\x08" /* len */
- "\x00\x02" /* curve_id */
+ "\x00\x06" /* len */
"\x00\x00", /* key_size */
#endif
.b_secret =
#ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */
- "\x28\x00" /* len */
- "\x02\x00" /* curve_id */
+ "\x26\x00" /* len */
"\x20\x00" /* key_size */
#else
"\x00\x02" /* type */
- "\x00\x28" /* len */
- "\x00\x02" /* curve_id */
+ "\x00\x26" /* len */
"\x00\x20" /* key_size */
#endif
"\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"
diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c
index 410b50b05e21..4f514e24c79b 100644
--- a/drivers/char/hw_random/ba431-rng.c
+++ b/drivers/char/hw_random/ba431-rng.c
@@ -193,7 +193,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ba431);
- ret = hwrng_register(&ba431->rng);
+ ret = devm_hwrng_register(&pdev->dev, &ba431->rng);
if (ret) {
dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
return ret;
@@ -204,15 +204,6 @@ static int ba431_trng_probe(struct platform_device *pdev)
return 0;
}
-static int ba431_trng_remove(struct platform_device *pdev)
-{
- struct ba431_trng *ba431 = platform_get_drvdata(pdev);
-
- hwrng_unregister(&ba431->rng);
-
- return 0;
-}
-
static const struct of_device_id ba431_trng_dt_ids[] = {
{ .compatible = "silex-insight,ba431-rng", .data = NULL },
{ /* sentinel */ }
@@ -225,7 +216,6 @@ static struct platform_driver ba431_trng_driver = {
.of_match_table = ba431_trng_dt_ids,
},
.probe = ba431_trng_probe,
- .remove = ba431_trng_remove,
};
module_platform_driver(ba431_trng_driver);
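
devm_hwrng_register() ties unregistration to device teardown, which is what lets the .remove callback disappear entirely. A minimal sketch of the pattern, using a hypothetical demo_trng driver:

    static int demo_trng_probe(struct platform_device *pdev)
    {
            struct demo_trng *trng;

            trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
            if (!trng)
                    return -ENOMEM;
            /* ... set trng->rng.name, trng->rng.read ... */
            return devm_hwrng_register(&pdev->dev, &trng->rng);
            /* no remove(): devres unregisters the hwrng on unbind */
    }
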
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 1a7c43b43c6b..e7dd457e9b22 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/clk.h>
+#include <linux/reset.h>
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
@@ -32,6 +33,7 @@ struct bcm2835_rng_priv {
void __iomem *base;
bool mask_interrupts;
struct clk *clk;
+ struct reset_control *reset;
};
static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
@@ -88,11 +90,13 @@ static int bcm2835_rng_init(struct hwrng *rng)
int ret = 0;
u32 val;
- if (!IS_ERR(priv->clk)) {
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
- }
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
if (priv->mask_interrupts) {
/* mask the interrupt */
@@ -115,8 +119,7 @@ static void bcm2835_rng_cleanup(struct hwrng *rng)
/* disable rng hardware */
rng_writel(priv, 0, RNG_CTRL);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
}
struct bcm2835_rng_of_data {
@@ -155,9 +158,13 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
/* Clock is optional on most platforms */
- priv->clk = devm_clk_get(dev, NULL);
- if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ priv->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
priv->rng.name = pdev->name;
priv->rng.init = bcm2835_rng_init;
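
devm_clk_get_optional() returns NULL, not an error, when the DT describes no clock, and the clk API treats a NULL clk as a no-op; the same convention holds for devm_reset_control_get_optional_exclusive(). That is what allows the IS_ERR() guards around enable/disable to be dropped. The idiom, in short:

    clk = devm_clk_get_optional(dev, NULL);
    if (IS_ERR(clk))
            return PTR_ERR(clk);          /* real errors, incl. -EPROBE_DEFER */

    ret = clk_prepare_enable(clk);        /* clk == NULL is a silent no-op */
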
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 7a293f2147a0..0efb37adbfa3 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -585,7 +585,7 @@ static int cctrng_probe(struct platform_device *pdev)
atomic_set(&drvdata->pending_hw, 1);
/* registration of the hwrng device */
- rc = hwrng_register(&drvdata->rng);
+ rc = devm_hwrng_register(dev, &drvdata->rng);
if (rc) {
dev_err(dev, "Could not register hwrng device.\n");
goto post_pm_err;
@@ -618,8 +618,6 @@ static int cctrng_remove(struct platform_device *pdev)
dev_dbg(dev, "Releasing cctrng resources...\n");
- hwrng_unregister(&drvdata->rng);
-
cc_trng_pm_fini(drvdata);
cc_trng_clk_fini(drvdata);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 5cc5fc504968..4380c23587be 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -30,8 +30,7 @@
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#define RNG_REG_STATUS_RDY (1 << 0)
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index e8210c1715cf..99c8bd0859a1 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -96,7 +96,7 @@ static int pic32_rng_probe(struct platform_device *pdev)
priv->rng.name = pdev->name;
priv->rng.read = pic32_rng_read;
- ret = hwrng_register(&priv->rng);
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
if (ret)
goto err_register;
@@ -113,7 +113,6 @@ static int pic32_rng_remove(struct platform_device *pdev)
{
struct pic32_rng *rng = platform_get_drvdata(pdev);
- hwrng_unregister(&rng->rng);
writel(0, rng->base + RNGCON);
clk_disable_unprepare(rng->clk);
return 0;
diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c
index 7bdab8c8a6a8..2a9fea72b2e0 100644
--- a/drivers/char/hw_random/xiphera-trng.c
+++ b/drivers/char/hw_random/xiphera-trng.c
@@ -63,14 +63,12 @@ static int xiphera_trng_probe(struct platform_device *pdev)
int ret;
struct xiphera_trng *trng;
struct device *dev = &pdev->dev;
- struct resource *res;
trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
if (!trng)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- trng->mem = devm_ioremap_resource(dev, res);
+ trng->mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->mem))
return PTR_ERR(trng->mem);
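
devm_platform_ioremap_resource() is a straight contraction of the two-call sequence it replaces:

    /* before */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);

    /* after */
    base = devm_platform_ioremap_resource(pdev, 0);
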
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 709905ec4680..ef224d5e4903 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -288,8 +288,7 @@ static int sun4i_ss_pm_suspend(struct device *dev)
{
struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
- if (ss->reset)
- reset_control_assert(ss->reset);
+ reset_control_assert(ss->reset);
clk_disable_unprepare(ss->ssclk);
clk_disable_unprepare(ss->busclk);
@@ -314,12 +313,10 @@ static int sun4i_ss_pm_resume(struct device *dev)
goto err_enable;
}
- if (ss->reset) {
- err = reset_control_deassert(ss->reset);
- if (err) {
- dev_err(ss->dev, "Cannot deassert reset control\n");
- goto err_enable;
- }
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(ss->dev, "Cannot deassert reset control\n");
+ goto err_enable;
}
return err;
@@ -401,12 +398,10 @@ static int sun4i_ss_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
- if (IS_ERR(ss->reset)) {
- if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
- return PTR_ERR(ss->reset);
+ if (IS_ERR(ss->reset))
+ return PTR_ERR(ss->reset);
+ if (!ss->reset)
dev_info(&pdev->dev, "no reset control found\n");
- ss->reset = NULL;
- }
/*
* Check that clock have the correct rates given in the datasheet
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index cfde9ee4356b..cd1baee424a1 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -99,6 +99,7 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, dma_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
+ err = -EFAULT;
goto err_iv;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 11cbcbc83a7b..0b9aa24a5edd 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -438,8 +438,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
kfree(pad);
memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
- kfree(result);
theend:
+ kfree(result);
crypto_finalize_hash_request(engine, breq, err);
return 0;
}
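
Moving kfree(result) below the theend label makes every error path that jumps there free the digest buffer, not just the success path, closing a leak. The general goto-cleanup idiom, sketched with a hypothetical do_step() helper:

    buf = kmalloc(len, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;

    err = do_step(buf);
    if (err)
            goto out;          /* error path still frees buf */
    /* ... use buf ... */
    out:
            kfree(buf);        /* single exit, freed on all paths */
            return err;
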
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
index 08a1473b2145..3191527928e4 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
if (dma_mapping_error(ss->dev, dma_iv)) {
dev_err(ss->dev, "Cannot DMA MAP IV\n");
- return -EFAULT;
+ err = -EFAULT;
+ goto err_free;
}
dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
@@ -167,6 +168,7 @@ err_iv:
memcpy(ctx->seed, d + dlen, ctx->slen);
}
memzero_explicit(d, todo);
+err_free:
kfree(d);
return err;
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 5bbeff433c8c..6e7ae896717c 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -217,9 +217,6 @@ static int meson_crypto_probe(struct platform_device *pdev)
struct meson_dev *mc;
int err, i;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 9bd8e5167be3..515946c99394 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -34,7 +34,6 @@ static struct atmel_ecc_driver_data driver_data;
* of the user to not call set_secret() while
* generate_public_key() or compute_shared_secret() are in flight.
* @curve_id : elliptic curve id
- * @n_sz : size in bytes of the n prime
* @do_fallback: true when the device doesn't support the curve or when the user
* wants to use its own private key.
*/
@@ -43,7 +42,6 @@ struct atmel_ecdh_ctx {
struct crypto_kpp *fallback;
const u8 *public_key;
unsigned int curve_id;
- size_t n_sz;
bool do_fallback;
};
@@ -51,7 +49,6 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
int status)
{
struct kpp_request *req = areq;
- struct atmel_ecdh_ctx *ctx = work_data->ctx;
struct atmel_i2c_cmd *cmd = &work_data->cmd;
size_t copied, n_sz;
@@ -59,7 +56,7 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
goto free_work_data;
/* might want less than we've got */
- n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+ n_sz = min_t(size_t, ATMEL_ECC_NIST_P256_N_SIZE, req->dst_len);
/* copy the shared secret */
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
@@ -73,14 +70,6 @@ free_work_data:
kpp_request_complete(req, status);
}
-static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
-{
- if (curve_id == ECC_CURVE_NIST_P256)
- return ATMEL_ECC_NIST_P256_N_SIZE;
-
- return 0;
-}
-
/*
* A random private key is generated and stored in the device. The device
* returns the pair public key.
@@ -104,8 +93,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
- ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id);
- if (!ctx->n_sz || params.key_size) {
+ if (params.key_size) {
/* fallback to ecdh software implementation */
ctx->do_fallback = true;
return crypto_kpp_set_secret(ctx->fallback, buf, len);
@@ -125,7 +113,6 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
goto free_cmd;
ctx->do_fallback = false;
- ctx->curve_id = params.curve_id;
atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);
@@ -263,6 +250,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
struct crypto_kpp *fallback;
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ ctx->curve_id = ECC_CURVE_NIST_P256;
ctx->client = atmel_ecc_i2c_client_alloc();
if (IS_ERR(ctx->client)) {
pr_err("tfm - i2c_client binding failed\n");
@@ -306,7 +294,7 @@ static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm)
return ATMEL_ECC_PUBKEY_SIZE;
}
-static struct kpp_alg atmel_ecdh = {
+static struct kpp_alg atmel_ecdh_nist_p256 = {
.set_secret = atmel_ecdh_set_secret,
.generate_public_key = atmel_ecdh_generate_public_key,
.compute_shared_secret = atmel_ecdh_compute_shared_secret,
@@ -315,7 +303,7 @@ static struct kpp_alg atmel_ecdh = {
.max_size = atmel_ecdh_max_size,
.base = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
- .cra_name = "ecdh",
+ .cra_name = "ecdh-nist-p256",
.cra_driver_name = "atmel-ecdh",
.cra_priority = ATMEL_ECC_PRIORITY,
.cra_module = THIS_MODULE,
@@ -340,14 +328,14 @@ static int atmel_ecc_probe(struct i2c_client *client,
&driver_data.i2c_client_list);
spin_unlock(&driver_data.i2c_list_lock);
- ret = crypto_register_kpp(&atmel_ecdh);
+ ret = crypto_register_kpp(&atmel_ecdh_nist_p256);
if (ret) {
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
spin_unlock(&driver_data.i2c_list_lock);
dev_err(&client->dev, "%s alg registration failed\n",
- atmel_ecdh.base.cra_driver_name);
+ atmel_ecdh_nist_p256.base.cra_driver_name);
} else {
dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
}
@@ -365,7 +353,7 @@ static int atmel_ecc_remove(struct i2c_client *client)
return -EBUSY;
}
- crypto_unregister_kpp(&atmel_ecdh);
+ crypto_unregister_kpp(&atmel_ecdh_nist_p256);
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 711b1acdd4e0..06ee42e8a245 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -10,7 +10,6 @@
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/printk.h>
-#include <linux/version.h>
#include "cptpf.h"
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index cb9b4c4e371e..da3872c48308 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -21,6 +21,7 @@
#include <linux/ccp.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
+#include <linux/cpufeature.h>
#include <asm/smp.h>
@@ -972,6 +973,11 @@ int sev_dev_init(struct psp_device *psp)
struct sev_device *sev;
int ret = -ENOMEM;
+ if (!boot_cpu_has(X86_FEATURE_SEV)) {
+ dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
+ return 0;
+ }
+
sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
if (!sev)
goto e_err;
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 843192666dc3..c45adb15ce8d 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -65,6 +65,7 @@ config CRYPTO_DEV_HISI_HPRE
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on ACPI
+ select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 181c109b19f7..92892e373cd0 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -10,6 +10,14 @@
#define HPRE_PF_DEF_Q_NUM 64
#define HPRE_PF_DEF_Q_BASE 0
+/*
+ * qp type value used in the qm sqc DW6.
+ * 0 - algorithms already supported in V2, such as RSA and DH;
+ * 1 - ECC algorithms, new in V3.
+ */
+#define HPRE_V2_ALG_TYPE 0
+#define HPRE_V3_ECC_ALG_TYPE 1
+
enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
@@ -75,6 +83,9 @@ enum hpre_alg_type {
HPRE_ALG_KG_CRT = 0x3,
HPRE_ALG_DH_G2 = 0x4,
HPRE_ALG_DH = 0x5,
+ HPRE_ALG_ECC_MUL = 0xD,
+ /* shared by x25519 and x448, but x448 is not supported now */
+ HPRE_ALG_CURVE25519_MUL = 0x10,
};
struct hpre_sqe {
@@ -92,8 +103,8 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
-struct hisi_qp *hpre_create_qp(void);
-int hpre_algs_register(void);
-void hpre_algs_unregister(void);
+struct hisi_qp *hpre_create_qp(u8 type);
+int hpre_algs_register(struct hisi_qm *qm);
+void hpre_algs_unregister(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index a87f9904087a..53068d2a19cf 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
+#include <crypto/curve25519.h>
#include <crypto/dh.h>
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
@@ -36,6 +39,13 @@ struct hpre_ctx;
#define HPRE_DFX_SEC_TO_US 1000000
#define HPRE_DFX_US_TO_NS 1000
+/* size in bytes of the n prime */
+#define HPRE_ECC_NIST_P192_N_SIZE 24
+#define HPRE_ECC_NIST_P256_N_SIZE 32
+
+/* size in bytes */
+#define HPRE_ECC_HW256_KSZ_B 32
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -61,14 +71,35 @@ struct hpre_dh_ctx {
* else if base if the counterpart public key we
* compute the shared secret
* ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
+ * low address: d--->n, please refer to Hisilicon HPRE UM
*/
- char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */
+ char *xa_p;
dma_addr_t dma_xa_p;
char *g; /* m */
dma_addr_t dma_g;
};
+struct hpre_ecdh_ctx {
+ /* low address: p->a->k->b */
+ unsigned char *p;
+ dma_addr_t dma_p;
+
+ /* low address: x->y */
+ unsigned char *g;
+ dma_addr_t dma_g;
+};
+
+struct hpre_curve25519_ctx {
+ /* low address: p->a->k */
+ unsigned char *p;
+ dma_addr_t dma_p;
+
+ /* gx coordinate */
+ unsigned char *g;
+ dma_addr_t dma_g;
+};
+
struct hpre_ctx {
struct hisi_qp *qp;
struct hpre_asym_request **req_list;
@@ -80,7 +111,11 @@ struct hpre_ctx {
union {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
+ struct hpre_ecdh_ctx ecdh;
+ struct hpre_curve25519_ctx curve25519;
};
+ /* for ecc algorithms */
+ unsigned int curve_id;
};
struct hpre_asym_request {
@@ -91,6 +126,8 @@ struct hpre_asym_request {
union {
struct akcipher_request *rsa;
struct kpp_request *dh;
+ struct kpp_request *ecdh;
+ struct kpp_request *curve25519;
} areq;
int err;
int req_id;
@@ -152,12 +189,12 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
}
}
-static struct hisi_qp *hpre_get_qp_and_start(void)
+static struct hisi_qp *hpre_get_qp_and_start(u8 type)
{
struct hisi_qp *qp;
int ret;
- qp = hpre_create_qp();
+ qp = hpre_create_qp(type);
if (!qp) {
pr_err("Can not create hpre qp!\n");
return ERR_PTR(-ENODEV);
@@ -413,7 +450,6 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
struct hpre_sqe *sqe = resp;
struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
-
if (unlikely(!req)) {
atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
return;
@@ -422,11 +458,11 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
req->cb(ctx, resp);
}
-static int hpre_ctx_init(struct hpre_ctx *ctx)
+static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
struct hisi_qp *qp;
- qp = hpre_get_qp_and_start();
+ qp = hpre_get_qp_and_start(type);
if (IS_ERR(qp))
return PTR_ERR(qp);
@@ -674,7 +710,7 @@ static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- return hpre_ctx_init(ctx);
+ return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
}
static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
@@ -1100,7 +1136,7 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->rsa.soft_tfm);
}
- ret = hpre_ctx_init(ctx);
+ ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
if (ret)
crypto_free_akcipher(ctx->rsa.soft_tfm);
@@ -1115,6 +1151,728 @@ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
crypto_free_akcipher(ctx->rsa.soft_tfm);
}
+static void hpre_key_to_big_end(u8 *data, int len)
+{
+ int i, j;
+ u8 tmp;
+
+ for (i = 0; i < len / 2; i++) {
+ j = len - i - 1;
+ tmp = data[j];
+ data[j] = data[i];
+ data[i] = tmp;
+ }
+}
+
+static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
+ bool is_ecdh)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+ unsigned int shift = sz << 1;
+
+ if (is_clear_all)
+ hisi_qm_stop_qp(ctx->qp);
+
+ if (is_ecdh && ctx->ecdh.p) {
+ /* ecdh: p->a->k->b */
+ memzero_explicit(ctx->ecdh.p + shift, sz);
+ dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
+ ctx->ecdh.p = NULL;
+ } else if (!is_ecdh && ctx->curve25519.p) {
+ /* curve25519: p->a->k */
+ memzero_explicit(ctx->curve25519.p + shift, sz);
+ dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
+ ctx->curve25519.dma_p);
+ ctx->curve25519.p = NULL;
+ }
+
+ hpre_ctx_clear(ctx, is_clear_all);
+}
+
+static unsigned int hpre_ecdh_supported_curve(unsigned short id)
+{
+ switch (id) {
+ case ECC_CURVE_NIST_P192:
+ case ECC_CURVE_NIST_P256:
+ return HPRE_ECC_HW256_KSZ_B;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
+{
+ unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
+ u8 i = 0;
+
+ while (i < ndigits - 1) {
+ memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
+ i++;
+ }
+
+ memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
+ hpre_key_to_big_end((u8 *)addr, cur_sz);
+}
+
+static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
+ unsigned int cur_sz)
+{
+ unsigned int shifta = ctx->key_sz << 1;
+ unsigned int shiftb = ctx->key_sz << 2;
+ void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
+ void *a = ctx->ecdh.p + shifta - cur_sz;
+ void *b = ctx->ecdh.p + shiftb - cur_sz;
+ void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
+ void *y = ctx->ecdh.g + shifta - cur_sz;
+ const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
+ char *n;
+
+ if (unlikely(!curve))
+ return -EINVAL;
+
+ n = kzalloc(ctx->key_sz, GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
+ fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
+ fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
+ fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
+ fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
+ fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
+
+ if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
+ kfree(n);
+ return -EINVAL;
+ }
+
+ kfree(n);
+ return 0;
+}
+
+static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
+{
+ switch (id) {
+ case ECC_CURVE_NIST_P192:
+ return HPRE_ECC_NIST_P192_N_SIZE;
+ case ECC_CURVE_NIST_P256:
+ return HPRE_ECC_NIST_P256_N_SIZE;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz, shift, curve_sz;
+ int ret;
+
+ ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
+ if (!ctx->key_sz)
+ return -EINVAL;
+
+ curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+ if (!curve_sz || params->key_size > curve_sz)
+ return -EINVAL;
+
+ sz = ctx->key_sz;
+
+ if (!ctx->ecdh.p) {
+ ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
+ GFP_KERNEL);
+ if (!ctx->ecdh.p)
+ return -ENOMEM;
+ }
+
+ shift = sz << 2;
+ ctx->ecdh.g = ctx->ecdh.p + shift;
+ ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
+
+ ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
+ if (ret) {
+ dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
+ dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
+ ctx->ecdh.p = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool hpre_key_is_zero(char *key, unsigned short key_sz)
+{
+ int i;
+
+ for (i = 0; i < key_sz; i++)
+ if (key[i])
+ return false;
+
+ return true;
+}
+
+static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz, sz_shift;
+ struct ecdh params;
+ int ret;
+
+ if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
+ dev_err(dev, "failed to decode ecdh key!\n");
+ return -EINVAL;
+ }
+
+ if (hpre_key_is_zero(params.key, params.key_size)) {
+ dev_err(dev, "Invalid hpre key!\n");
+ return -EINVAL;
+ }
+
+ hpre_ecc_clear_ctx(ctx, false, true);
+
+ ret = hpre_ecdh_set_param(ctx, &params);
+ if (ret < 0) {
+ dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
+ return ret;
+ }
+
+ sz = ctx->key_sz;
+ sz_shift = (sz << 1) + sz - params.key_size;
+ memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
+
+ return 0;
+}
+
+static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t dma;
+
+ dma = le64_to_cpu(sqe->in);
+ if (unlikely(!dma))
+ return;
+
+ if (src && req->src)
+ dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
+
+ dma = le64_to_cpu(sqe->out);
+ if (unlikely(!dma))
+ return;
+
+ if (req->dst)
+ dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
+ if (dst)
+ dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
+}
+
+static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
+{
+ unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *req = NULL;
+ struct kpp_request *areq;
+ u64 overtime_thrhld;
+ char *p;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.ecdh;
+ areq->dst_len = ctx->key_sz << 1;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+ p = sg_virt(areq->dst);
+ memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
+ memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
+
+ hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
+ struct kpp_request *req)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (req->dst_len < ctx->key_sz << 1) {
+ req->dst_len = ctx->key_sz << 1;
+ return -EINVAL;
+ }
+
+ tmp = kpp_request_ctx(req);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_ecdh_cb;
+ h_req->areq.ecdh = req;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64(ctx->ecdh.dma_p);
+
+ msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+ return 0;
+}
+
+static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int tmpshift;
+ dma_addr_t dma = 0;
+ void *ptr;
+ int shift;
+
+ /* Src_data includes gx and gy. */
+ shift = ctx->key_sz - (len >> 1);
+ if (unlikely(shift < 0))
+ return -EINVAL;
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
+ if (unlikely(!ptr))
+ return -ENOMEM;
+
+ tmpshift = ctx->key_sz << 1;
+ scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
+ memcpy(ptr + shift, ptr + tmpshift, len >> 1);
+ memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
+
+ hpre_req->src = ptr;
+ msg->in = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ dma_addr_t dma = 0;
+
+ if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
+ dev_err(dev, "data or data length is illegal!\n");
+ return -EINVAL;
+ }
+
+ hpre_req->dst = NULL;
+ dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ msg->out = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_ecdh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ret;
+
+ ret = hpre_ecdh_msg_request_set(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
+ return ret;
+ }
+
+ if (req->src) {
+ ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init src data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+ } else {
+ msg->in = cpu_to_le64(ctx->ecdh.dma_g);
+ }
+
+ ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
+ ret = hpre_send(ctx, msg);
+ if (likely(!ret))
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+ return ret;
+}
+
+static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ /* max size is the pub_key_size, including x and y */
+ return ctx->key_sz << 1;
+}
+
+static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P192;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P256;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_ecc_clear_ctx(ctx, true, true);
+}
+
+static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
+ unsigned int len)
+{
+ u8 secret[CURVE25519_KEY_SIZE] = { 0 };
+ unsigned int sz = ctx->key_sz;
+ const struct ecc_curve *curve;
+ unsigned int shift = sz << 1;
+ void *p;
+
+ /*
+ * The key from 'buf' is in little-endian; clamp it as described in
+ * RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64"), then convert
+ * it to big endian so the result matches the software curve25519
+ * implementation in crypto/.
+ */
+ memcpy(secret, buf, len);
+ curve25519_clamp_secret(secret);
+ hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
+
+ p = ctx->curve25519.p + sz - len;
+
+ curve = ecc_get_curve25519();
+
+ /* fill curve parameters */
+ fill_curve_param(p, curve->p, len, curve->g.ndigits);
+ fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
+ memcpy(p + shift, secret, len);
+ fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
+ memzero_explicit(secret, CURVE25519_KEY_SIZE);
+}
+
+static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
+ unsigned int len)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+ unsigned int shift = sz << 1;
+
+ /* p->a->k->gx */
+ if (!ctx->curve25519.p) {
+ ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
+ &ctx->curve25519.dma_p,
+ GFP_KERNEL);
+ if (!ctx->curve25519.p)
+ return -ENOMEM;
+ }
+
+ ctx->curve25519.g = ctx->curve25519.p + shift + sz;
+ ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
+
+ hpre_curve25519_fill_curve(ctx, buf, len);
+
+ return 0;
+}
+
+static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ int ret = -EINVAL;
+
+ if (len != CURVE25519_KEY_SIZE ||
+ !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+ dev_err(dev, "key is null or key len is not 32bytes!\n");
+ return ret;
+ }
+
+ /* Free old secret if any */
+ hpre_ecc_clear_ctx(ctx, false, false);
+
+ ctx->key_sz = CURVE25519_KEY_SIZE;
+ ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
+ if (ret) {
+ dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
+ hpre_ecc_clear_ctx(ctx, false, false);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t dma;
+
+ dma = le64_to_cpu(sqe->in);
+ if (unlikely(!dma))
+ return;
+
+ if (src && req->src)
+ dma_free_coherent(dev, ctx->key_sz, req->src, dma);
+
+ dma = le64_to_cpu(sqe->out);
+ if (unlikely(!dma))
+ return;
+
+ if (req->dst)
+ dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
+ if (dst)
+ dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
+}
+
+static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
+{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *req = NULL;
+ struct kpp_request *areq;
+ u64 overtime_thrhld;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.curve25519;
+ areq->dst_len = ctx->key_sz;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+ hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
+
+ hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
+ struct kpp_request *req)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (unlikely(req->dst_len < ctx->key_sz)) {
+ req->dst_len = ctx->key_sz;
+ return -EINVAL;
+ }
+
+ tmp = kpp_request_ctx(req);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_curve25519_cb;
+ h_req->areq.curve25519 = req;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64(ctx->curve25519.dma_p);
+
+ msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+ return 0;
+}
+
+static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ u8 p[CURVE25519_KEY_SIZE] = { 0 };
+ const struct ecc_curve *curve;
+ dma_addr_t dma = 0;
+ u8 *ptr;
+
+ if (len != CURVE25519_KEY_SIZE) {
+ dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
+ return -EINVAL;
+ }
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
+ if (unlikely(!ptr))
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(ptr, data, 0, len, 0);
+
+ if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+ dev_err(dev, "gx is null!\n");
+ goto err;
+ }
+
+ /*
+ * Src_data (gx) is in little-endian order; the MSB of the final byte
+ * must be masked as described in RFC 7748, and the value converted to
+ * big-endian form before hisi_hpre can use it.
+ */
+ ptr[31] &= 0x7f;
+ hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
+
+ curve = ecc_get_curve25519();
+
+ fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
+ if (memcmp(ptr, p, ctx->key_sz) >= 0) {
+ dev_err(dev, "gx is out of p!\n");
+ goto err;
+ }
+
+ hpre_req->src = ptr;
+ msg->in = cpu_to_le64(dma);
+ return 0;
+
+err:
+ dma_free_coherent(dev, ctx->key_sz, ptr, dma);
+ return -EINVAL;
+}
+
+static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ dma_addr_t dma = 0;
+
+ if (!data || !sg_is_last(data) || len != ctx->key_sz) {
+ dev_err(dev, "data or data length is illegal!\n");
+ return -EINVAL;
+ }
+
+ hpre_req->dst = NULL;
+ dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ msg->out = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_curve25519_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ret;
+
+ ret = hpre_curve25519_msg_request_set(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
+ return ret;
+ }
+
+ if (req->src) {
+ ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init src data, ret = %d!\n",
+ ret);
+ goto clear_all;
+ }
+ } else {
+ msg->in = cpu_to_le64(ctx->curve25519.dma_g);
+ }
+
+ ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
+ ret = hpre_send(ctx, msg);
+ if (likely(!ret))
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+ return ret;
+}
+
+static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return ctx->key_sz;
+}
+
+static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_ecc_clear_ctx(ctx, true, false);
+}
+
static struct akcipher_alg rsa = {
.sign = hpre_rsa_dec,
.verify = hpre_rsa_enc,
@@ -1154,7 +1912,82 @@ static struct kpp_alg dh = {
};
#endif
-int hpre_algs_register(void)
+static struct kpp_alg ecdh_nist_p192 = {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p192_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p192",
+ .cra_driver_name = "hpre-ecdh",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct kpp_alg ecdh_nist_p256 = {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p256_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "hpre-ecdh",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct kpp_alg curve25519_alg = {
+ .set_secret = hpre_curve25519_set_secret,
+ .generate_public_key = hpre_curve25519_compute_value,
+ .compute_shared_secret = hpre_curve25519_compute_value,
+ .max_size = hpre_curve25519_max_size,
+ .init = hpre_curve25519_init_tfm,
+ .exit = hpre_curve25519_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "curve25519",
+ .cra_driver_name = "hpre-curve25519",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int hpre_register_ecdh(void)
+{
+ int ret;
+
+ ret = crypto_register_kpp(&ecdh_nist_p192);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_kpp(&ecdh_nist_p256);
+ if (ret) {
+ crypto_unregister_kpp(&ecdh_nist_p192);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hpre_unregister_ecdh(void)
+{
+ crypto_unregister_kpp(&ecdh_nist_p256);
+ crypto_unregister_kpp(&ecdh_nist_p192);
+}
+
+int hpre_algs_register(struct hisi_qm *qm)
{
int ret;
@@ -1164,17 +1997,41 @@ int hpre_algs_register(void)
return ret;
#ifdef CONFIG_CRYPTO_DH
ret = crypto_register_kpp(&dh);
- if (ret)
+ if (ret) {
crypto_unregister_akcipher(&rsa);
+ return ret;
+ }
#endif
+ if (qm->ver >= QM_HW_V3) {
+ ret = hpre_register_ecdh();
+ if (ret)
+ goto reg_err;
+ ret = crypto_register_kpp(&curve25519_alg);
+ if (ret) {
+ hpre_unregister_ecdh();
+ goto reg_err;
+ }
+ }
+ return 0;
+
+reg_err:
+#ifdef CONFIG_CRYPTO_DH
+ crypto_unregister_kpp(&dh);
+#endif
+ crypto_unregister_akcipher(&rsa);
return ret;
}
-void hpre_algs_unregister(void)
+void hpre_algs_unregister(struct hisi_qm *qm)
{
- crypto_unregister_akcipher(&rsa);
+ if (qm->ver >= QM_HW_V3) {
+ crypto_unregister_kpp(&curve25519_alg);
+ hpre_unregister_ecdh();
+ }
+
#ifdef CONFIG_CRYPTO_DH
crypto_unregister_kpp(&dh);
#endif
+ crypto_unregister_akcipher(&rsa);
}
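hpre_algs_register() above layers the registrations and unwinds them in reverse on failure. A condensed, table-driven sketch of the same rollback pattern; the kpp_algs[] array and register_all_kpp() are hypothetical names, not part of this driver:

#include <linux/kernel.h>
#include <crypto/internal/kpp.h>

/* Hypothetical table-driven variant of the register/unwind pattern. */
static struct kpp_alg *kpp_algs[] = { &ecdh_nist_p192, &ecdh_nist_p256 };

static int register_all_kpp(void)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(kpp_algs); i++) {
		ret = crypto_register_kpp(kpp_algs[i]);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* Unregister only what was registered, in reverse order. */
	while (--i >= 0)
		crypto_unregister_kpp(kpp_algs[i]);
	return ret;
}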
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index e7a2c70eb9cf..87e8f4d60474 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -226,13 +226,20 @@ static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
-struct hisi_qp *hpre_create_qp(void)
+struct hisi_qp *hpre_create_qp(u8 type)
{
int node = cpu_to_node(smp_processor_id());
struct hisi_qp *qp = NULL;
int ret;
- ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
+ if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
+ return NULL;
+
+	/*
+	 * type: 0 - RSA/DH algorithms supported in V2,
+	 *       1 - ECC algorithms supported in V3.
+	 */
+ ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
if (!ret)
return qp;
@@ -1075,4 +1082,5 @@ module_exit(hpre_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 13cb4216561a..bc231742ad36 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -4084,7 +4084,7 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
mutex_unlock(&qm_list->lock);
if (flag) {
- ret = qm_list->register_to_crypto();
+ ret = qm_list->register_to_crypto(qm);
if (ret) {
mutex_lock(&qm_list->lock);
list_del(&qm->list);
@@ -4115,7 +4115,7 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
mutex_unlock(&qm_list->lock);
if (list_empty(&qm_list->list))
- qm_list->unregister_from_crypto();
+ qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 54967c6b9c78..f91110fcf6a4 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -199,8 +199,8 @@ struct hisi_qm_err_ini {
struct hisi_qm_list {
struct mutex lock;
struct list_head list;
- int (*register_to_crypto)(void);
- void (*unregister_from_crypto)(void);
+ int (*register_to_crypto)(struct hisi_qm *qm);
+ void (*unregister_from_crypto)(struct hisi_qm *qm);
};
struct hisi_qm {
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 08491912afd5..17ddb20ad7a1 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -183,6 +183,6 @@ struct sec_dev {
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
-int sec_register_to_crypto(void);
-void sec_unregister_from_crypto(void);
+int sec_register_to_crypto(struct hisi_qm *qm);
+void sec_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 2eaa516b3231..f8355140cd46 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -1634,7 +1634,7 @@ static struct aead_alg sec_aeads[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};
-int sec_register_to_crypto(void)
+int sec_register_to_crypto(struct hisi_qm *qm)
{
int ret;
@@ -1651,7 +1651,7 @@ int sec_register_to_crypto(void)
return ret;
}
-void sec_unregister_from_crypto(void)
+void sec_unregister_from_crypto(struct hisi_qm *qm)
{
crypto_unregister_skciphers(sec_skciphers,
ARRAY_SIZE(sec_skciphers));
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index b2786e17d8fe..0e933e7858e7 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -211,6 +211,6 @@ struct sec_sqe {
struct sec_sqe_type2 type2;
};
-int sec_register_to_crypto(void);
-void sec_unregister_from_crypto(void);
+int sec_register_to_crypto(struct hisi_qm *qm);
+void sec_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 92397f993e23..9ed74611f722 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -62,6 +62,6 @@ struct hisi_zip_sqe {
};
int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
-int hisi_zip_register_to_crypto(void);
-void hisi_zip_unregister_from_crypto(void);
+int hisi_zip_register_to_crypto(struct hisi_qm *qm);
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 08b4660b014c..41f69662024a 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -665,7 +665,7 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
}
};
-int hisi_zip_register_to_crypto(void)
+int hisi_zip_register_to_crypto(struct hisi_qm *qm)
{
int ret;
@@ -684,7 +684,7 @@ int hisi_zip_register_to_crypto(void)
return ret;
}
-void hisi_zip_unregister_from_crypto(void)
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
{
crypto_unregister_acomp(&hisi_zip_acomp_gzip);
crypto_unregister_acomp(&hisi_zip_acomp_zlib);
diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
index b6b25d994af3..2ef312866338 100644
--- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c
@@ -1649,8 +1649,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
/* Initialize crypto engine */
aes_dev->engine = crypto_engine_alloc_init(dev, true);
- if (!aes_dev->engine)
+ if (!aes_dev->engine) {
+ rc = -ENOMEM;
goto list_del;
+ }
rc = crypto_engine_start(aes_dev->engine);
if (rc) {
diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
index c4b97b4160e9..322c51a6936f 100644
--- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
@@ -1220,8 +1220,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
/* Initialize crypto engine */
hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
- if (!hcu_dev->engine)
+ if (!hcu_dev->engine) {
+ rc = -ENOMEM;
goto list_del;
+ }
rc = crypto_engine_start(hcu_dev->engine);
if (rc) {
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 13c65deda8e9..446f611726df 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -932,8 +932,10 @@ static int __init nx_powernv_probe_vas(struct device_node *pn)
ret = find_nx_device_tree(dn, chip_id, vasid,
NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);
- if (ret)
+ if (ret) {
+ of_node_put(dn);
return ret;
+ }
}
if (!ct_842 || !ct_gzip) {
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
index 8b090b7ae8c6..a1b77bd7a894 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -169,7 +169,7 @@ out:
* @msg: Message to send
* @vf_nr: VF number to which the message will be sent
*
- * Function sends a messge from the PF to a VF
+ * Function sends a message from the PF to a VF
*
* Return: 0 on success, error code otherwise.
*/
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
index 2c98fb63f7b7..e85bd62d134a 100644
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -8,7 +8,7 @@
* adf_vf2pf_init() - send init msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
- * Function sends an init messge from the VF to a PF
+ * Function sends an init message from the VF to a PF
*
* Return: 0 on success, error code otherwise.
*/
@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(adf_vf2pf_init);
* adf_vf2pf_shutdown() - send shutdown msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
- * Function sends a shutdown messge from the VF to a PF
+ * Function sends a shutdown message from the VF to a PF
*
* Return: void
*/
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index ff78c73c47e3..f998ed58457c 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -718,8 +718,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
int n = sg_nents(sgl);
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
- dma_addr_t blp;
- dma_addr_t bloutp = 0;
+ dma_addr_t blp = DMA_MAPPING_ERROR;
+ dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
@@ -731,9 +731,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(!bufl))
return -ENOMEM;
- blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, blp)))
- goto err_in;
+ for_each_sg(sgl, sg, n, i)
+ bufl->bufers[i].addr = DMA_MAPPING_ERROR;
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
@@ -750,6 +749,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
sg_nctr++;
}
bufl->num_bufs = sg_nctr;
+ blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, blp)))
+ goto err_in;
qat_req->buf.bl = bufl;
qat_req->buf.blp = blp;
qat_req->buf.sz = sz;
@@ -764,10 +766,11 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
goto err_in;
- bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, bloutp)))
- goto err_out;
+
bufers = buflout->bufers;
+ for_each_sg(sglout, sg, n, i)
+ bufers[i].addr = DMA_MAPPING_ERROR;
+
for_each_sg(sglout, sg, n, i) {
int y = sg_nctr;
@@ -784,6 +787,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
}
buflout->num_bufs = sg_nctr;
buflout->num_mapped_bufs = sg_nctr;
+ bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, bloutp)))
+ goto err_out;
qat_req->buf.blout = buflout;
qat_req->buf.bloutp = bloutp;
qat_req->buf.sz_out = sz_out;
@@ -795,17 +801,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
return 0;
err_out:
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
n = sg_nents(sglout);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, bloutp))
- dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
kfree(buflout);
err_in:
+ if (!dma_mapping_error(dev, blp))
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
n = sg_nents(sgl);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
@@ -813,8 +823,6 @@ err_in:
bufl->bufers[i].len,
DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, blp))
- dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n");
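The qat reordering above seeds every DMA handle with DMA_MAPPING_ERROR before any mapping happens, so a single error path can unmap exactly the entries that were mapped. A condensed sketch of that pattern; struct buf and map_all() are illustrative names only:

#include <linux/dma-mapping.h>

struct buf {			/* hypothetical per-entry bookkeeping */
	void *virt;
	size_t len;
	dma_addr_t addr;
};

static int map_all(struct device *dev, struct buf *bufs, int n)
{
	int i;

	/* Seed handles so cleanup can tell mapped from unmapped. */
	for (i = 0; i < n; i++)
		bufs[i].addr = DMA_MAPPING_ERROR;

	for (i = 0; i < n; i++) {
		bufs[i].addr = dma_map_single(dev, bufs[i].virt,
					      bufs[i].len, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, bufs[i].addr))
			goto err;
	}
	return 0;

err:
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufs[i].addr))
			dma_unmap_single(dev, bufs[i].addr, bufs[i].len,
					 DMA_BIDIRECTIONAL);
	return -ENOMEM;
}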
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index cffa9fc628ff..850f257d00f3 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -40,7 +40,6 @@ struct qce_cipher_reqctx {
struct scatterlist result_sg;
struct sg_table dst_tbl;
struct scatterlist *dst_sg;
- struct sg_table src_tbl;
struct scatterlist *src_sg;
unsigned int cryptlen;
struct skcipher_request fallback_req; // keep at the end
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index a73db2a5637f..dceb9579d87a 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
}
-static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
- u32 totallen, u32 offset)
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
@@ -295,19 +294,18 @@ static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
{
u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
enckeylen / 2);
qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+	/*
+	 * Set the data unit size to cryptlen. Anything else causes the
+	 * crypto engine to return incorrect results.
+	 */
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
}
-static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
- u32 totallen, u32 offset)
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
{
struct skcipher_request *req = skcipher_request_cast(async_req);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
@@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
- qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+ qce_write(qce, REG_ENCR_SEG_START, 0);
if (IS_CTR(flags)) {
qce_write(qce, REG_CNTR_MASK, ~0);
@@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
qce_write(qce, REG_CNTR_MASK2, ~0);
}
- qce_write(qce, REG_SEG_SIZE, totallen);
+ qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);
/* get little endianness */
config = qce_config_reg(qce, 1);
@@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
}
#endif
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset)
+int qce_start(struct crypto_async_request *async_req, u32 type)
{
switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
- return qce_setup_regs_skcipher(async_req, totallen, offset);
+ return qce_setup_regs_skcipher(async_req);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
- return qce_setup_regs_ahash(async_req, totallen, offset);
+ return qce_setup_regs_ahash(async_req);
#endif
default:
return -EINVAL;
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 85ba16418a04..3bc244bcca2d 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -94,7 +94,6 @@ struct qce_alg_template {
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
int qce_check_status(struct qce_device *qce, u32 *status);
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset);
+int qce_start(struct crypto_async_request *async_req, u32 type);
#endif /* _COMMON_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 61c418c12345..8e6fcf2c21cc 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -12,9 +12,15 @@
#include "core.h"
#include "sha.h"
-/* crypto hw padding constant for first operation */
-#define SHA_PADDING 64
-#define SHA_PADDING_MASK (SHA_PADDING - 1)
+struct qce_sha_saved_state {
+ u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE];
+ u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE];
+ __be32 byte_count[2];
+ unsigned int pending_buflen;
+ unsigned int flags;
+ u64 count;
+ bool first_blk;
+};
static LIST_HEAD(ahash_algs);
@@ -107,7 +113,7 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
@@ -139,97 +145,37 @@ static int qce_ahash_init(struct ahash_request *req)
static int qce_ahash_export(struct ahash_request *req, void *out)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
- unsigned long flags = rctx->flags;
- unsigned int digestsize = crypto_ahash_digestsize(ahash);
- unsigned int blocksize =
- crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-
- if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
- struct sha1_state *out_state = out;
-
- out_state->count = rctx->count;
- qce_cpu_to_be32p_array((__be32 *)out_state->state,
- rctx->digest, digestsize);
- memcpy(out_state->buffer, rctx->buf, blocksize);
- } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
- struct sha256_state *out_state = out;
-
- out_state->count = rctx->count;
- qce_cpu_to_be32p_array((__be32 *)out_state->state,
- rctx->digest, digestsize);
- memcpy(out_state->buf, rctx->buf, blocksize);
- } else {
- return -EINVAL;
- }
+ struct qce_sha_saved_state *export_state = out;
- return 0;
-}
-
-static int qce_import_common(struct ahash_request *req, u64 in_count,
- const u32 *state, const u8 *buffer, bool hmac)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
- unsigned int digestsize = crypto_ahash_digestsize(ahash);
- unsigned int blocksize;
- u64 count = in_count;
-
- blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
- rctx->count = in_count;
- memcpy(rctx->buf, buffer, blocksize);
-
- if (in_count <= blocksize) {
- rctx->first_blk = 1;
- } else {
- rctx->first_blk = 0;
- /*
- * For HMAC, there is a hardware padding done when first block
- * is set. Therefore the byte_count must be incremened by 64
- * after the first block operation.
- */
- if (hmac)
- count += SHA_PADDING;
- }
-
- rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
- rctx->byte_count[1] = (__force __be32)(count >> 32);
- qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
- digestsize);
- rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+ memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
+ memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest));
+ export_state->byte_count[0] = rctx->byte_count[0];
+ export_state->byte_count[1] = rctx->byte_count[1];
+ export_state->pending_buflen = rctx->buflen;
+ export_state->count = rctx->count;
+ export_state->first_blk = rctx->first_blk;
+ export_state->flags = rctx->flags;
return 0;
}
static int qce_ahash_import(struct ahash_request *req, const void *in)
{
- struct qce_sha_reqctx *rctx;
- unsigned long flags;
- bool hmac;
- int ret;
-
- ret = qce_ahash_init(req);
- if (ret)
- return ret;
-
- rctx = ahash_request_ctx(req);
- flags = rctx->flags;
- hmac = IS_SHA_HMAC(flags);
-
- if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
- const struct sha1_state *state = in;
-
- ret = qce_import_common(req, state->count, state->state,
- state->buffer, hmac);
- } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
- const struct sha256_state *state = in;
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ const struct qce_sha_saved_state *import_state = in;
- ret = qce_import_common(req, state->count, state->state,
- state->buf, hmac);
- }
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->count = import_state->count;
+ rctx->buflen = import_state->pending_buflen;
+ rctx->first_blk = import_state->first_blk;
+ rctx->flags = import_state->flags;
+ rctx->byte_count[0] = import_state->byte_count[0];
+ rctx->byte_count[1] = import_state->byte_count[1];
+ memcpy(rctx->buf, import_state->pending_buf, rctx->buflen);
+ memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest));
- return ret;
+ return 0;
}
static int qce_ahash_update(struct ahash_request *req)
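The export/import pair above now snapshots the driver's private request context wholesale instead of translating it to generic sha1/sha256 state. A minimal sketch of the round trip a generic ahash user performs with it; the tfm and both requests are assumed to be set up already:

#include <linux/slab.h>
#include <crypto/hash.h>

/* Sketch: snapshot in-progress hash state from req and resume on req2. */
static int ahash_export_import_demo(struct crypto_ahash *tfm,
				    struct ahash_request *req,
				    struct ahash_request *req2)
{
	void *state;
	int ret;

	/* statesize now reports sizeof(struct qce_sha_saved_state) */
	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	ret = crypto_ahash_export(req, state);
	if (!ret)
		ret = crypto_ahash_import(req2, state);

	kfree_sensitive(state);
	return ret;
}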
@@ -270,6 +216,25 @@ static int qce_ahash_update(struct ahash_request *req)
/* calculate how many bytes will be hashed later */
hash_later = total % blocksize;
+
+	/*
+	 * At this point there is more than one block size of data. If
+	 * the available data is an exact multiple of the block size,
+	 * hold back the last block so that qce_ahash_final can transfer
+	 * it (with the last-block bit set) if this is indeed the end of
+	 * the data stream; if it is not, the saved block is simply
+	 * transferred as part of the next update. For example, with a
+	 * 64-byte block size and 128 bytes available, hash_later is
+	 * forced to 64 so the final block is held back. Without this,
+	 * the digest obtained would be wrong: qce_ahash_final would see
+	 * rctx->buflen == 0 and return without copying a digest to the
+	 * result buffer. qce_ahash_final cannot simply be allowed to
+	 * proceed when rctx->buflen is 0, because the crypto engine BAM
+	 * does not allow zero-length transfers.
+	 */
+ if (!hash_later)
+ hash_later = blocksize;
+
if (hash_later) {
unsigned int src_offset = req->nbytes - hash_later;
scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
@@ -450,7 +415,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "sha1-qce",
.digestsize = SHA1_DIGEST_SIZE,
.blocksize = SHA1_BLOCK_SIZE,
- .statesize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha1,
},
{
@@ -459,7 +424,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "sha256-qce",
.digestsize = SHA256_DIGEST_SIZE,
.blocksize = SHA256_BLOCK_SIZE,
- .statesize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha256,
},
{
@@ -468,7 +433,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "hmac-sha1-qce",
.digestsize = SHA1_DIGEST_SIZE,
.blocksize = SHA1_BLOCK_SIZE,
- .statesize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha1,
},
{
@@ -477,7 +442,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "hmac-sha256-qce",
.digestsize = SHA256_DIGEST_SIZE,
.blocksize = SHA256_BLOCK_SIZE,
- .statesize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha256,
},
};
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index a2d3da0ad95f..c0a0d8c4fce1 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
+#include <linux/errno.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
@@ -143,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
@@ -167,16 +168,33 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
+ unsigned int __keylen;
int ret;
if (!key || !keylen)
return -EINVAL;
- switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
+	/*
+	 * AES-XTS with key1 == key2 is not supported by the crypto engine.
+	 * Revisit to request a fallback cipher in this case.
+	 */
+ if (IS_XTS(flags)) {
+ __keylen = keylen >> 1;
+ if (!memcmp(key, key + __keylen, __keylen))
+ return -ENOKEY;
+ } else {
+ __keylen = keylen;
+ }
+
+ switch (__keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
memcpy(ctx->enc_key, key, keylen);
break;
+ case AES_KEYSIZE_192:
+ break;
+ default:
+ return -EINVAL;
}
ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
@@ -204,12 +222,27 @@ static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+ u32 _key[6];
int err;
err = verify_skcipher_des3_key(ablk, key);
if (err)
return err;
+	/*
+	 * The crypto engine does not support any two of the three
+	 * keys being the same for triple-DES algorithms, and
+	 * verify_skcipher_des3_key() does not check all of the
+	 * conditions below. Return -ENOKEY if any two keys are the
+	 * same. Revisit whether a fallback cipher is needed to
+	 * handle this condition.
+	 */
+ memcpy(_key, key, DES3_EDE_KEY_SIZE);
+ if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
+ !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
+ !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
+ return -ENOKEY;
+
ctx->enc_keylen = keylen;
memcpy(ctx->enc_key, key, keylen);
return 0;
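The XOR expressions above are branch-light 64-bit equality tests on each key pair (words 0-1 are key1, 2-3 key2, 4-5 key3). They are equivalent to this more readable, hypothetical memcmp() form:

#include <crypto/des.h>

	/* Equivalent check: reject if any two of the three DES keys match. */
	if (!memcmp(key, key + DES_KEY_SIZE, DES_KEY_SIZE) ||
	    !memcmp(key + DES_KEY_SIZE, key + 2 * DES_KEY_SIZE, DES_KEY_SIZE) ||
	    !memcmp(key, key + 2 * DES_KEY_SIZE, DES_KEY_SIZE))
		return -ENOKEY;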
@@ -221,6 +254,7 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ unsigned int blocksize = crypto_skcipher_blocksize(tfm);
int keylen;
int ret;
@@ -228,14 +262,31 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
- * is not a multiple of it; pass such requests to the fallback
+ /* CE does not handle 0 length messages */
+ if (!req->cryptlen)
+ return 0;
+
+ /*
+ * ECB and CBC algorithms require message lengths to be
+ * multiples of block size.
+ */
+ if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+ if (!IS_ALIGNED(req->cryptlen, blocksize))
+ return -EINVAL;
+
+	/*
+	 * Conditions for requesting a fallback cipher:
+	 * - AES-192 (not supported by the crypto engine (CE))
+	 * - AES-XTS request with len <= 512 bytes (not recommended on CE)
+	 * - AES-XTS request with len > QCE_SECTOR_SIZE that is not a
+	 *   multiple of it (revisit this condition to check whether it
+	 *   is needed on all versions of CE)
+	 */
if (IS_AES(rctx->flags) &&
- (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
- req->cryptlen <= aes_sw_max_len) ||
- (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
- req->cryptlen % QCE_SECTOR_SIZE))) {
+ ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
+ (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
+ (req->cryptlen > QCE_SECTOR_SIZE &&
+ req->cryptlen % QCE_SECTOR_SIZE))))) {
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
req->base.flags,
@@ -307,7 +358,7 @@ static const struct qce_skcipher_def skcipher_def[] = {
.name = "ecb(aes)",
.drv_name = "ecb-aes-qce",
.blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = 0,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
},
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 682c8a450a57..8ed08130196f 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -401,7 +401,7 @@ static const struct samsung_aes_variant exynos_aes_data = {
static const struct samsung_aes_variant exynos5433_slim_aes_data = {
.aes_offset = 0x400,
.hash_offset = 0x800,
- .clk_names = { "pclk", "aclk", },
+ .clk_names = { "aclk", "pclk", },
};
static const struct of_device_id s5p_sss_dt_match[] = {
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index fcde59c65a81..cb3d6b1c655d 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
* crypto_free_acomp() -- free ACOMPRESS tfm handle
*
* @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index fcc12c593ef8..e728469c4ccc 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
/**
* crypto_free_aead() - zeroize and free aead handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_aead(struct crypto_aead *tfm)
{
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 1d3aa252caba..5764b46bd1ec 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
* crypto_free_akcipher() - free AKCIPHER tfm handle
*
* @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
{
diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h
new file mode 100644
index 000000000000..70964781eb68
--- /dev/null
+++ b/include/crypto/ecc_curve.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 HiSilicon */
+
+#ifndef _CRYPTO_ECC_CURVE_H
+#define _CRYPTO_ECC_CURVE_H
+
+#include <linux/types.h>
+
+/**
+ * struct ecc_point - elliptic curve point in affine coordinates
+ *
+ * @x: X coordinate in vli form.
+ * @y: Y coordinate in vli form.
+ * @ndigits: Length of vlis in u64 qwords.
+ */
+struct ecc_point {
+ u64 *x;
+ u64 *y;
+ u8 ndigits;
+};
+
+/**
+ * struct ecc_curve - definition of elliptic curve
+ *
+ * @name: Short name of the curve.
+ * @g: Generator point of the curve.
+ * @p:		Prime number; if Barrett's reduction is used for this curve,
+ *		the pre-calculated value 'mu' is appended to @p after ndigits.
+ * Use of Barrett's reduction is heuristically determined in
+ * vli_mmod_fast().
+ * @n: Order of the curve group.
+ * @a: Curve parameter a.
+ * @b: Curve parameter b.
+ */
+struct ecc_curve {
+ char *name;
+ struct ecc_point g;
+ u64 *p;
+ u64 *n;
+ u64 *a;
+ u64 *b;
+};
+
+/**
+ * ecc_get_curve() - get elliptic curve
+ * @curve_id:	Curve ID, as defined in 'include/crypto/ecdh.h'
+ *
+ * Return: the curve on success, NULL otherwise
+ */
+const struct ecc_curve *ecc_get_curve(unsigned int curve_id);
+
+/**
+ * ecc_get_curve25519() - get the curve25519 curve
+ *
+ * Return: the curve25519 curve
+ */
+const struct ecc_curve *ecc_get_curve25519(void);
+
+#endif
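A short sketch of how a driver might consult this new header; ECC_CURVE_NIST_P256 is the curve ID this series defines in include/crypto/ecdh.h, and the function name and return-value handling are illustrative assumptions:

#include <linux/errno.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>

/* Sketch: look up P-256 and report its vli length in u64 words. */
static int p256_ndigits(void)
{
	const struct ecc_curve *curve = ecc_get_curve(ECC_CURVE_NIST_P256);

	if (!curve)
		return -EOPNOTSUPP;

	return curve->g.ndigits;	/* 4 for P-256: 256 bits / 64 */
}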
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index a5b805b5526d..deaaa4822174 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -29,12 +29,10 @@
/**
* struct ecdh - define an ECDH private key
*
- * @curve_id: ECC curve the key is based on.
* @key: Private ECDH key
* @key_size: Size of the private ECDH key
*/
struct ecdh {
- unsigned short curve_id;
char *key;
unsigned short key_size;
};
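With curve_id gone from struct ecdh, the curve is chosen purely by the algorithm name at allocation time (crypto_alloc_kpp("ecdh-nist-p256", 0, 0), as the Bluetooth changes below show). A minimal sketch of installing a private key under the new scheme; the helper name and the 32-byte P-256 key size are illustrative:

#include <linux/slab.h>
#include <crypto/ecdh.h>
#include <crypto/kpp.h>

/* Sketch: pack and install a P-256 private key on an "ecdh-nist-p256" tfm. */
static int ecdh_p256_set_privkey(struct crypto_kpp *tfm, char *private_key)
{
	struct ecdh p = { .key = private_key, .key_size = 32 };
	unsigned int len = crypto_ecdh_key_len(&p);
	char *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = crypto_ecdh_encode_key(buf, len, &p);
	if (!ret)
		ret = crypto_kpp_set_secret(tfm, buf, len);

	kfree_sensitive(buf);
	return ret;
}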
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 13f8a6a54ca8..b2bc1e46e86a 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
/**
* crypto_free_ahash() - zeroize and free the ahash handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
@@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
/**
* crypto_free_shash() - zeroize and free the message digest handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 88b591215d5c..cccceadc164b 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
* crypto_free_kpp() - free KPP tfm handle
*
* @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_kpp(struct crypto_kpp *tfm)
{
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 8b4b844b4eef..17bb3673d3c1 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
/**
* crypto_free_rng() - zeroize and free RNG handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_rng(struct crypto_rng *tfm)
{
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 6a733b171a5d..ef0fc9ed4342 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm(
/**
* crypto_free_skcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
*/
static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
{
diff --git a/net/bluetooth/ecdh_helper.c b/net/bluetooth/ecdh_helper.c
index 3226fe02e875..989401f116e9 100644
--- a/net/bluetooth/ecdh_helper.c
+++ b/net/bluetooth/ecdh_helper.c
@@ -126,8 +126,6 @@ int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32])
int err;
struct ecdh p = {0};
- p.curve_id = ECC_CURVE_NIST_P256;
-
if (private_key) {
tmp = kmalloc(32, GFP_KERNEL);
if (!tmp)
diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c
index f71c6fa65fb3..f49604d44b87 100644
--- a/net/bluetooth/selftest.c
+++ b/net/bluetooth/selftest.c
@@ -205,7 +205,7 @@ static int __init test_ecdh(void)
calltime = ktime_get();
- tfm = crypto_alloc_kpp("ecdh", 0, 0);
+ tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
if (IS_ERR(tfm)) {
BT_ERR("Unable to create ECDH crypto context");
err = PTR_ERR(tfm);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index efc19f98b959..393696f2312a 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -1389,7 +1389,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
goto zfree_smp;
}
- smp->tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
+ smp->tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
if (IS_ERR(smp->tfm_ecdh)) {
bt_dev_err(hcon->hdev, "Unable to create ECDH crypto context");
goto free_shash;
@@ -3296,7 +3296,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
return ERR_CAST(tfm_cmac);
}
- tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
+ tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
if (IS_ERR(tfm_ecdh)) {
bt_dev_err(hdev, "Unable to create ECDH crypto context");
crypto_free_shash(tfm_cmac);
@@ -3821,7 +3821,7 @@ int __init bt_selftest_smp(void)
return PTR_ERR(tfm_cmac);
}
- tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
+ tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
if (IS_ERR(tfm_ecdh)) {
BT_ERR("Unable to create ECDH crypto context");
crypto_free_shash(tfm_cmac);