Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/crypto/nhpoly1305-neon-glue.c  |  9
-rw-r--r--  arch/arm64/crypto/sha1-ce-core.S          |  8
-rw-r--r--  arch/arm64/crypto/sha1-ce-glue.c          | 21
-rw-r--r--  arch/arm64/crypto/sha2-ce-core.S          |  8
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c          | 39
-rw-r--r--  arch/arm64/crypto/sha256-glue.c           | 26
-rw-r--r--  arch/arm64/crypto/sha512-ce-core.S        |  8
-rw-r--r--  arch/arm64/crypto/sha512-ce-glue.c        | 26
-rw-r--r--  arch/arm64/crypto/sha512-glue.c           | 12
9 files changed, 86 insertions(+), 71 deletions(-)
diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c
index cd882c35d925..e4a0b463f080 100644
--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c
@@ -34,6 +34,14 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
return 0;
}
+static int nhpoly1305_neon_digest(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen, u8 *out)
+{
+ return crypto_nhpoly1305_init(desc) ?:
+ nhpoly1305_neon_update(desc, src, srclen) ?:
+ crypto_nhpoly1305_final(desc, out);
+}
+
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-neon",
@@ -44,6 +52,7 @@ static struct shash_alg nhpoly1305_alg = {
.init = crypto_nhpoly1305_init,
.update = nhpoly1305_neon_update,
.final = crypto_nhpoly1305_final,
+ .digest = nhpoly1305_neon_digest,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
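
The new ->digest hook above is built by chaining the existing init/update/final steps with GNU C's binary `?:` ("Elvis") operator, which evaluates its left operand once and short-circuits on the first non-zero value, so the first failing step's error code propagates and later steps are skipped. A minimal userspace sketch of that pattern, not kernel code; the step functions below are hypothetical stand-ins for crypto_nhpoly1305_init/update/final:

	/* Build with: gcc -o chain chain.c (uses the GNU a ?: b extension) */
	#include <stdio.h>

	/* Hypothetical steps; 0 means success, non-zero is an error code. */
	static int step_init(void)   { return 0; }
	static int step_update(void) { return 0; }
	static int step_final(void)  { return 0; }

	static int one_shot_digest(void)
	{
		/* a ?: b is shorthand for a ? a : b with a evaluated once;
		 * the chain stops at the first step returning non-zero. */
		return step_init() ?: step_update() ?: step_final();
	}

	int main(void)
	{
		printf("one_shot_digest() = %d\n", one_shot_digest());
		return 0;
	}
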
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index 889ca0f8972b..9b1f2d82a6fe 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -62,10 +62,10 @@
.endm
/*
- * int sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
- * int blocks)
+ * int __sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+ * int blocks)
*/
-SYM_FUNC_START(sha1_ce_transform)
+SYM_FUNC_START(__sha1_ce_transform)
/* load round constants */
loadrc k0.4s, 0x5a827999, w6
loadrc k1.4s, 0x6ed9eba1, w6
@@ -147,4 +147,4 @@ CPU_LE( rev32 v11.16b, v11.16b )
str dgb, [x0, #16]
mov w0, w2
ret
-SYM_FUNC_END(sha1_ce_transform)
+SYM_FUNC_END(__sha1_ce_transform)
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 71fa4f1122d7..1dd93e1fcb39 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -29,18 +29,19 @@ struct sha1_ce_state {
extern const u32 sha1_ce_offsetof_count;
extern const u32 sha1_ce_offsetof_finalize;
-asmlinkage int sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
- int blocks);
+asmlinkage int __sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+ int blocks);
-static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
- int blocks)
+static void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+ int blocks)
{
while (blocks) {
int rem;
kernel_neon_begin();
- rem = sha1_ce_transform(container_of(sst, struct sha1_ce_state,
- sst), src, blocks);
+ rem = __sha1_ce_transform(container_of(sst,
+ struct sha1_ce_state,
+ sst), src, blocks);
kernel_neon_end();
src += (blocks - rem) * SHA1_BLOCK_SIZE;
blocks = rem;
@@ -59,7 +60,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
return crypto_sha1_update(desc, data, len);
sctx->finalize = 0;
- sha1_base_do_update(desc, data, len, __sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, sha1_ce_transform);
return 0;
}
@@ -79,9 +80,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
*/
sctx->finalize = finalize;
- sha1_base_do_update(desc, data, len, __sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, sha1_ce_transform);
if (!finalize)
- sha1_base_do_finalize(desc, __sha1_ce_transform);
+ sha1_base_do_finalize(desc, sha1_ce_transform);
return sha1_base_finish(desc, out);
}
@@ -93,7 +94,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
return crypto_sha1_finup(desc, NULL, 0, out);
sctx->finalize = 0;
- sha1_base_do_finalize(desc, __sha1_ce_transform);
+ sha1_base_do_finalize(desc, sha1_ce_transform);
return sha1_base_finish(desc, out);
}
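
The rename above reserves the double-underscore name for the assembly routine and frees the plain name for the C wrapper, matching the convention in the other glue files in this patch. The wrapper loops because the assembly function now returns the number of blocks it did not process; re-entering it between kernel_neon_begin()/kernel_neon_end() pairs keeps each non-preemptible NEON section bounded. A userspace sketch of that loop, assuming a back end that deliberately stops after a few blocks; neon_begin()/neon_end() and all other names here are hypothetical stand-ins:

	#include <stdio.h>

	#define BLOCK_SIZE 64

	static void neon_begin(void) { /* stands in for kernel_neon_begin() */ }
	static void neon_end(void)   { /* stands in for kernel_neon_end()   */ }

	/* Pretend assembly back end: handles at most 4 blocks per call and
	 * returns how many blocks remain unprocessed. */
	static int low_level_transform(const unsigned char *src, int blocks)
	{
		int done = blocks < 4 ? blocks : 4;

		(void)src;	/* a real transform would read the data here */
		return blocks - done;
	}

	static void transform(const unsigned char *src, int blocks)
	{
		while (blocks) {
			int rem;

			neon_begin();
			rem = low_level_transform(src, blocks);
			neon_end();
			src += (blocks - rem) * BLOCK_SIZE; /* skip consumed blocks */
			blocks = rem;
		}
	}

	int main(void)
	{
		static unsigned char data[10 * BLOCK_SIZE];

		transform(data, 10);
		puts("processed 10 blocks in bounded batches");
		return 0;
	}
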
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 491179922f49..fce84d88ddb2 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -71,11 +71,11 @@
.word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/*
- * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
- * int blocks)
+ * int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+ * int blocks)
*/
.text
-SYM_FUNC_START(sha2_ce_transform)
+SYM_FUNC_START(__sha256_ce_transform)
/* load round constants */
adr_l x8, .Lsha2_rcon
ld1 { v0.4s- v3.4s}, [x8], #64
@@ -154,4 +154,4 @@ CPU_LE( rev32 v19.16b, v19.16b )
3: st1 {dgav.4s, dgbv.4s}, [x0]
mov w0, w2
ret
-SYM_FUNC_END(sha2_ce_transform)
+SYM_FUNC_END(__sha256_ce_transform)
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index c57a6119fefc..0a44d2e7ee1f 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -30,18 +30,19 @@ struct sha256_ce_state {
extern const u32 sha256_ce_offsetof_count;
extern const u32 sha256_ce_offsetof_finalize;
-asmlinkage int sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
- int blocks);
+asmlinkage int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+ int blocks);
-static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+static void sha256_ce_transform(struct sha256_state *sst, u8 const *src,
int blocks)
{
while (blocks) {
int rem;
kernel_neon_begin();
- rem = sha2_ce_transform(container_of(sst, struct sha256_ce_state,
- sst), src, blocks);
+ rem = __sha256_ce_transform(container_of(sst,
+ struct sha256_ce_state,
+ sst), src, blocks);
kernel_neon_end();
src += (blocks - rem) * SHA256_BLOCK_SIZE;
blocks = rem;
@@ -55,8 +56,8 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
-static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
- int blocks)
+static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src,
+ int blocks)
{
sha256_block_data_order(sst->state, src, blocks);
}
@@ -68,10 +69,10 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- __sha256_block_data_order);
+ sha256_arm64_transform);
sctx->finalize = 0;
- sha256_base_do_update(desc, data, len, __sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, sha256_ce_transform);
return 0;
}
@@ -85,8 +86,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- __sha256_block_data_order);
- sha256_base_do_finalize(desc, __sha256_block_data_order);
+ sha256_arm64_transform);
+ sha256_base_do_finalize(desc, sha256_arm64_transform);
return sha256_base_finish(desc, out);
}
@@ -96,9 +97,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
*/
sctx->finalize = finalize;
- sha256_base_do_update(desc, data, len, __sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, sha256_ce_transform);
if (!finalize)
- sha256_base_do_finalize(desc, __sha2_ce_transform);
+ sha256_base_do_finalize(desc, sha256_ce_transform);
return sha256_base_finish(desc, out);
}
@@ -107,15 +108,22 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
if (!crypto_simd_usable()) {
- sha256_base_do_finalize(desc, __sha256_block_data_order);
+ sha256_base_do_finalize(desc, sha256_arm64_transform);
return sha256_base_finish(desc, out);
}
sctx->finalize = 0;
- sha256_base_do_finalize(desc, __sha2_ce_transform);
+ sha256_base_do_finalize(desc, sha256_ce_transform);
return sha256_base_finish(desc, out);
}
+static int sha256_ce_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ sha256_base_init(desc);
+ return sha256_ce_finup(desc, data, len, out);
+}
+
static int sha256_ce_export(struct shash_desc *desc, void *out)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
@@ -155,6 +163,7 @@ static struct shash_alg algs[] = { {
.update = sha256_ce_update,
.final = sha256_ce_final,
.finup = sha256_ce_finup,
+ .digest = sha256_ce_digest,
.export = sha256_ce_export,
.import = sha256_ce_import,
.descsize = sizeof(struct sha256_ce_state),
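
The new sha256_ce_digest() above implements the one-shot ->digest entry point as init followed by finup, so callers hashing a single buffer avoid the separate init/update/final round-trip. The same init-then-finup composition can be sketched in userspace; the toy context and hash below are hypothetical, not the kernel API:

	#include <stdio.h>
	#include <string.h>

	struct ctx { unsigned long long count; };

	static void hash_init(struct ctx *c)
	{
		memset(c, 0, sizeof(*c));
	}

	/* Toy finup: "absorbs" len bytes and emits an 8-byte digest. */
	static int hash_finup(struct ctx *c, const unsigned char *data,
			      unsigned int len, unsigned char *out)
	{
		(void)data;
		c->count += len;
		memcpy(out, &c->count, sizeof(c->count));
		return 0;
	}

	/* One-shot digest = init + finup, mirroring sha256_ce_digest(). */
	static int hash_digest(const unsigned char *data, unsigned int len,
			       unsigned char *out)
	{
		struct ctx c;

		hash_init(&c);
		return hash_finup(&c, data, len, out);
	}

	int main(void)
	{
		unsigned char out[8];

		printf("digest returned %d\n",
		       hash_digest((const unsigned char *)"abc", 3, out));
		return 0;
	}
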
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 9b5c86e07a9a..35356987cc1e 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -27,8 +27,8 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);
-static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
- int blocks)
+static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src,
+ int blocks)
{
sha256_block_data_order(sst->state, src, blocks);
}
@@ -36,8 +36,8 @@ static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
asmlinkage void sha256_block_neon(u32 *digest, const void *data,
unsigned int num_blks);
-static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
- int blocks)
+static void sha256_neon_transform(struct sha256_state *sst, u8 const *src,
+ int blocks)
{
sha256_block_neon(sst->state, src, blocks);
}
@@ -45,17 +45,15 @@ static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha256_base_do_update(desc, data, len,
- __sha256_block_data_order);
+ return sha256_base_do_update(desc, data, len, sha256_arm64_transform);
}
static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
if (len)
- sha256_base_do_update(desc, data, len,
- __sha256_block_data_order);
- sha256_base_do_finalize(desc, __sha256_block_data_order);
+ sha256_base_do_update(desc, data, len, sha256_arm64_transform);
+ sha256_base_do_finalize(desc, sha256_arm64_transform);
return sha256_base_finish(desc, out);
}
@@ -98,7 +96,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- __sha256_block_data_order);
+ sha256_arm64_transform);
while (len > 0) {
unsigned int chunk = len;
@@ -114,7 +112,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
sctx->count % SHA256_BLOCK_SIZE;
kernel_neon_begin();
- sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
+ sha256_base_do_update(desc, data, chunk, sha256_neon_transform);
kernel_neon_end();
data += chunk;
len -= chunk;
@@ -128,13 +126,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- __sha256_block_data_order);
- sha256_base_do_finalize(desc, __sha256_block_data_order);
+ sha256_arm64_transform);
+ sha256_base_do_finalize(desc, sha256_arm64_transform);
} else {
if (len)
sha256_update_neon(desc, data, len);
kernel_neon_begin();
- sha256_base_do_finalize(desc, __sha256_block_neon);
+ sha256_base_do_finalize(desc, sha256_neon_transform);
kernel_neon_end();
}
return sha256_base_finish(desc, out);
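
Unlike the CE glue, where the assembly routine reports unprocessed blocks, sha256_update_neon() bounds its NEON sections from the caller's side by splitting the input into chunks before each kernel_neon_begin()/kernel_neon_end() window. A userspace sketch of that caller-driven chunking; the cap value and helpers are hypothetical:

	#include <stdio.h>

	#define MAX_CHUNK 4096	/* hypothetical per-window cap, in bytes */

	static void neon_begin(void) { /* kernel_neon_begin() stand-in */ }
	static void neon_end(void)   { /* kernel_neon_end() stand-in   */ }

	static void do_update(const unsigned char *data, unsigned int len)
	{
		(void)data;
		printf("hashed %u bytes in one NEON window\n", len);
	}

	static void update_chunked(const unsigned char *data, unsigned int len)
	{
		while (len > 0) {
			unsigned int chunk = len > MAX_CHUNK ? MAX_CHUNK : len;

			neon_begin();
			do_update(data, chunk);
			neon_end();
			data += chunk;
			len -= chunk;
		}
	}

	int main(void)
	{
		static unsigned char buf[3 * MAX_CHUNK + 123];

		update_chunked(buf, sizeof(buf));
		return 0;
	}
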
diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S
index b6a3a36e15f5..91ef68b15fcc 100644
--- a/arch/arm64/crypto/sha512-ce-core.S
+++ b/arch/arm64/crypto/sha512-ce-core.S
@@ -102,11 +102,11 @@
.endm
/*
- * void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
- * int blocks)
+ * int __sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ * int blocks)
*/
.text
-SYM_FUNC_START(sha512_ce_transform)
+SYM_FUNC_START(__sha512_ce_transform)
/* load state */
ld1 {v8.2d-v11.2d}, [x0]
@@ -203,4 +203,4 @@ CPU_LE( rev64 v19.16b, v19.16b )
3: st1 {v8.2d-v11.2d}, [x0]
mov w0, w2
ret
-SYM_FUNC_END(sha512_ce_transform)
+SYM_FUNC_END(__sha512_ce_transform)
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 94cb7580deb7..f3431fc62315 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -26,27 +26,27 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");
-asmlinkage int sha512_ce_transform(struct sha512_state *sst, u8 const *src,
- int blocks);
+asmlinkage int __sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ int blocks);
asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
-static void __sha512_ce_transform(struct sha512_state *sst, u8 const *src,
- int blocks)
+static void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ int blocks)
{
while (blocks) {
int rem;
kernel_neon_begin();
- rem = sha512_ce_transform(sst, src, blocks);
+ rem = __sha512_ce_transform(sst, src, blocks);
kernel_neon_end();
src += (blocks - rem) * SHA512_BLOCK_SIZE;
blocks = rem;
}
}
-static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
- int blocks)
+static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src,
+ int blocks)
{
sha512_block_data_order(sst->state, src, blocks);
}
@@ -54,8 +54,8 @@ static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- sha512_block_fn *fn = crypto_simd_usable() ? __sha512_ce_transform
- : __sha512_block_data_order;
+ sha512_block_fn *fn = crypto_simd_usable() ? sha512_ce_transform
+ : sha512_arm64_transform;
sha512_base_do_update(desc, data, len, fn);
return 0;
@@ -64,8 +64,8 @@ static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- sha512_block_fn *fn = crypto_simd_usable() ? __sha512_ce_transform
- : __sha512_block_data_order;
+ sha512_block_fn *fn = crypto_simd_usable() ? sha512_ce_transform
+ : sha512_arm64_transform;
sha512_base_do_update(desc, data, len, fn);
sha512_base_do_finalize(desc, fn);
@@ -74,8 +74,8 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha512_ce_final(struct shash_desc *desc, u8 *out)
{
- sha512_block_fn *fn = crypto_simd_usable() ? __sha512_ce_transform
- : __sha512_block_data_order;
+ sha512_block_fn *fn = crypto_simd_usable() ? sha512_ce_transform
+ : sha512_arm64_transform;
sha512_base_do_finalize(desc, fn);
return sha512_base_finish(desc, out);
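
The sha512 glue picks its block function once per call: the Crypto Extensions path when SIMD is usable in the current context, the generic assembly fallback otherwise. A userspace sketch of selecting a block function through a pointer; the predicate and both back ends below are hypothetical stand-ins for crypto_simd_usable() and the two transforms:

	#include <stdbool.h>
	#include <stdio.h>

	typedef void block_fn(const unsigned char *src, int blocks);

	static void fast_transform(const unsigned char *src, int blocks)
	{
		(void)src;
		printf("fast path: %d blocks\n", blocks);
	}

	static void fallback_transform(const unsigned char *src, int blocks)
	{
		(void)src;
		printf("fallback path: %d blocks\n", blocks);
	}

	/* Stand-in for crypto_simd_usable(): may SIMD be used here? */
	static bool simd_usable(void)
	{
		return true;
	}

	int main(void)
	{
		block_fn *fn = simd_usable() ? fast_transform
					     : fallback_transform;

		fn(NULL, 4);
		return 0;
	}
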
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index 2acff1c7df5d..62f129dea83d 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -23,8 +23,8 @@ asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha512_block_data_order);
-static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
- int blocks)
+static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src,
+ int blocks)
{
sha512_block_data_order(sst->state, src, blocks);
}
@@ -32,17 +32,15 @@ static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha512_base_do_update(desc, data, len,
- __sha512_block_data_order);
+ return sha512_base_do_update(desc, data, len, sha512_arm64_transform);
}
static int sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
if (len)
- sha512_base_do_update(desc, data, len,
- __sha512_block_data_order);
- sha512_base_do_finalize(desc, __sha512_block_data_order);
+ sha512_base_do_update(desc, data, len, sha512_arm64_transform);
+ sha512_base_do_finalize(desc, sha512_arm64_transform);
return sha512_base_finish(desc, out);
}