author    Mark Brown <broonie@kernel.org>            2019-12-13 15:49:10 +0000
committer Herbert Xu <herbert@gondor.apana.org.au>   2019-12-20 14:58:35 +0800
commit    0e89640b640d7f726bcbf6903c78257a28e56f3c
tree      89a8f2498ce7b5b2bb92989a7419082e79d00647   /arch/arm64/crypto/aes-neonbs-core.S
parent    crypto: atmel-aes - Fix CTR counter overflow when multiple fragments
crypto: arm64 - Use modern annotations for assembly functions
In an effort to clarify and simplify the annotation of assembly functions in the kernel, new macros have been introduced. These replace ENTRY and ENDPROC and also add a new annotation for static functions, which previously had no ENTRY equivalent. Update the annotations in the crypto code to the new macros.

There are a small number of files imported from OpenSSL where the assembly is generated using perl programs; these are not currently annotated at all and have not been modified.

Signed-off-by: Mark Brown <broonie@kernel.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
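The pattern of the change is uniform throughout the diff below: global entry points move from ENTRY()/ENDPROC() to SYM_FUNC_START()/SYM_FUNC_END(), and file-local functions that previously had to be bare labels gain SYM_FUNC_START_LOCAL(). A minimal sketch of both cases (my_func and my_helper are hypothetical names, not symbols from this patch):

	// Hypothetical illustration of the annotation change; these
	// symbols do not appear in the patch below.

	// Old style: a global function, plus a local helper left as a
	// plain label because ENTRY had no local equivalent.
ENTRY(my_func)
	ret
ENDPROC(my_func)

my_helper:
	ret

	// New style: both cases get proper start/end annotations, so
	// the assembler emits symbol type and size metadata for each.
SYM_FUNC_START(my_func)
	ret
SYM_FUNC_END(my_func)

SYM_FUNC_START_LOCAL(my_helper)
	ret
SYM_FUNC_END(my_helper)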
Diffstat (limited to 'arch/arm64/crypto/aes-neonbs-core.S')
-rw-r--r--  arch/arm64/crypto/aes-neonbs-core.S | 40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index 65982039fa36..b357164379f6 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -380,7 +380,7 @@ ISRM0: .octa 0x0306090c00070a0d01040b0e0205080f
/*
* void aesbs_convert_key(u8 out[], u32 const rk[], int rounds)
*/
-ENTRY(aesbs_convert_key)
+SYM_FUNC_START(aesbs_convert_key)
ld1 {v7.4s}, [x1], #16 // load round 0 key
ld1 {v17.4s}, [x1], #16 // load round 1 key
@@ -425,10 +425,10 @@ ENTRY(aesbs_convert_key)
eor v17.16b, v17.16b, v7.16b
str q17, [x0]
ret
-ENDPROC(aesbs_convert_key)
+SYM_FUNC_END(aesbs_convert_key)
.align 4
-aesbs_encrypt8:
+SYM_FUNC_START_LOCAL(aesbs_encrypt8)
ldr q9, [bskey], #16 // round 0 key
ldr q8, M0SR
ldr q24, SR
@@ -488,10 +488,10 @@ aesbs_encrypt8:
eor v2.16b, v2.16b, v12.16b
eor v5.16b, v5.16b, v12.16b
ret
-ENDPROC(aesbs_encrypt8)
+SYM_FUNC_END(aesbs_encrypt8)
.align 4
-aesbs_decrypt8:
+SYM_FUNC_START_LOCAL(aesbs_decrypt8)
lsl x9, rounds, #7
add bskey, bskey, x9
@@ -553,7 +553,7 @@ aesbs_decrypt8:
eor v3.16b, v3.16b, v12.16b
eor v5.16b, v5.16b, v12.16b
ret
-ENDPROC(aesbs_decrypt8)
+SYM_FUNC_END(aesbs_decrypt8)
/*
* aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
@@ -621,21 +621,21 @@ ENDPROC(aesbs_decrypt8)
.endm
.align 4
-ENTRY(aesbs_ecb_encrypt)
+SYM_FUNC_START(aesbs_ecb_encrypt)
__ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
-ENDPROC(aesbs_ecb_encrypt)
+SYM_FUNC_END(aesbs_ecb_encrypt)
.align 4
-ENTRY(aesbs_ecb_decrypt)
+SYM_FUNC_START(aesbs_ecb_decrypt)
__ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
-ENDPROC(aesbs_ecb_decrypt)
+SYM_FUNC_END(aesbs_ecb_decrypt)
/*
* aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 iv[])
*/
.align 4
-ENTRY(aesbs_cbc_decrypt)
+SYM_FUNC_START(aesbs_cbc_decrypt)
frame_push 6
mov x19, x0
@@ -720,7 +720,7 @@ ENTRY(aesbs_cbc_decrypt)
2: frame_pop
ret
-ENDPROC(aesbs_cbc_decrypt)
+SYM_FUNC_END(aesbs_cbc_decrypt)
.macro next_tweak, out, in, const, tmp
sshr \tmp\().2d, \in\().2d, #63
@@ -736,7 +736,7 @@ ENDPROC(aesbs_cbc_decrypt)
* aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 iv[])
*/
-__xts_crypt8:
+SYM_FUNC_START_LOCAL(__xts_crypt8)
mov x6, #1
lsl x6, x6, x23
subs w23, w23, #8
@@ -789,7 +789,7 @@ __xts_crypt8:
0: mov bskey, x21
mov rounds, x22
br x7
-ENDPROC(__xts_crypt8)
+SYM_FUNC_END(__xts_crypt8)
.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
frame_push 6, 64
@@ -854,13 +854,13 @@ ENDPROC(__xts_crypt8)
ret
.endm
-ENTRY(aesbs_xts_encrypt)
+SYM_FUNC_START(aesbs_xts_encrypt)
__xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
-ENDPROC(aesbs_xts_encrypt)
+SYM_FUNC_END(aesbs_xts_encrypt)
-ENTRY(aesbs_xts_decrypt)
+SYM_FUNC_START(aesbs_xts_decrypt)
__xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
-ENDPROC(aesbs_xts_decrypt)
+SYM_FUNC_END(aesbs_xts_decrypt)
.macro next_ctr, v
mov \v\().d[1], x8
@@ -874,7 +874,7 @@ ENDPROC(aesbs_xts_decrypt)
* aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
* int rounds, int blocks, u8 iv[], u8 final[])
*/
-ENTRY(aesbs_ctr_encrypt)
+SYM_FUNC_START(aesbs_ctr_encrypt)
frame_push 8
mov x19, x0
@@ -1002,4 +1002,4 @@ CPU_LE( rev x8, x8 )
7: cbz x25, 8b
st1 {v5.16b}, [x25]
b 8b
-ENDPROC(aesbs_ctr_encrypt)
+SYM_FUNC_END(aesbs_ctr_encrypt)
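For reference, a simplified sketch of what the new macros expand to, based on the generic definitions in include/linux/linkage.h around the time of this commit (alignment details and overridable variants elided; see that header for the authoritative definitions):

	// Simplified expansion sketch, not a verbatim copy of linkage.h.
	//
	// SYM_FUNC_START(name):
	//	.globl name		// global linkage; the _LOCAL
	//				// variant omits this directive
	//	ALIGN			// default code alignment
	// name:
	//
	// SYM_FUNC_END(name):
	//	.type name, %function	// mark the symbol as a function
	//	.size name, . - name	// record its size

The explicit type and size metadata is the practical gain over bare labels: local helpers such as aesbs_encrypt8 and __xts_crypt8 become proper function symbols that debuggers, profilers and other tooling can recognize.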