mirror of https://github.com/wolfSSL/wolfssl.git
ARM32/Thumb2 ASM: fix WOLFSSL_NO_VAR_ASSIGN_REG
Thumb2 needed constants defined even with no register assignments. ARM32 needed support added for not having registers assigned to variables.

pull/8590/head
parent 61cdcd71e6
commit cfab666369
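The change is mechanical and repeats one shape in every function touched below: the function signature and the register-pinned locals are guarded by WOLFSSL_NO_VAR_ASSIGN_REG, and any asm statement that assumed a parameter's fixed register gets an alternative that reads the value through an operand instead. The following is only a minimal sketch of that shape, assuming a GCC-style compiler; AES_mix_example is a made-up name used for illustration (it is not a wolfSSL function), and the real generated files pin more registers and pass the lookup-table pointers as additional operands.

/* Illustrative sketch only -- not wolfSSL source. Shows the guard pattern
 * this commit applies: without WOLFSSL_NO_VAR_ASSIGN_REG the parameters are
 * pinned to fixed registers and the asm may name those registers directly;
 * with it (e.g. IAR, GHS) the parameters are used as-is and the asm goes
 * through operands so the compiler picks the registers. */
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void AES_mix_example(const unsigned char* in_p, int nr_p)
#else
void AES_mix_example(const unsigned char* in, int nr)
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
    register const unsigned char* in asm ("r0") = (const unsigned char*)in_p;
    register int nr asm ("r1") = (int)nr_p;
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

    __asm__ __volatile__ (
        "mov lr, %[in]\n\t"
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
        "mov r12, r1\n\t"       /* nr is known to live in r1 */
#else
        "mov r12, %[nr]\n\t"    /* compiler chooses nr's register */
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
        : [in] "+r" (in), [nr] "+r" (nr)
        :
        : "memory", "r12", "lr", "cc"
    );
}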
@@ -3850,13 +3850,13 @@ L_AES_ECB_decrypt_end:
     .type AES_CBC_decrypt, %function
 AES_CBC_decrypt:
     push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-    ldr r8, [sp, #36]
-    ldr r4, [sp, #40]
     mov lr, r0
     adr r0, L_AES_ARM32_td_ecb
     ldr r0, [r0]
     mov r12, r2
     adr r2, L_AES_ARM32_td4
+    ldr r8, [sp, #36]
+    ldr r4, [sp, #40]
     push {r3, r4}
     cmp r8, #10
     beq L_AES_CBC_decrypt_loop_block_128
@@ -44,11 +44,15 @@
 #ifdef __IAR_SYSTEMS_ICC__
 #define __asm__ asm
 #define __volatile__ volatile
+#define WOLFSSL_NO_VAR_ASSIGN_REG
 #endif /* __IAR_SYSTEMS_ICC__ */
 #ifdef __KEIL__
 #define __asm__ __asm
 #define __volatile__ volatile
 #endif /* __KEIL__ */
+#ifdef __ghs__
+#define WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* __ghs__ */
 #ifndef NO_AES
 #include <wolfssl/wolfcrypt/aes.h>

@@ -204,12 +208,23 @@ static const word32* L_AES_ARM32_te = L_AES_ARM32_te_data;
  * WOLFSSL_AES_DIRECT || WOLFSSL_AES_COUNTER */
 #ifdef HAVE_AES_DECRYPT
 void AES_invert_key(unsigned char* ks_p, word32 rounds_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void AES_invert_key(unsigned char* ks_p, word32 rounds_p)
+#else
+void AES_invert_key(unsigned char* ks, word32 rounds)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register unsigned char* ks asm ("r0") = (unsigned char*)ks_p;
     register word32 rounds asm ("r1") = (word32)rounds_p;
     register word32* L_AES_ARM32_te_c asm ("r2") = (word32*)L_AES_ARM32_te;
     register word32* L_AES_ARM32_td_c asm ("r3") = (word32*)L_AES_ARM32_td;
+#else
+    register word32* L_AES_ARM32_te_c = (word32*)L_AES_ARM32_te;
+
+    register word32* L_AES_ARM32_td_c = (word32*)L_AES_ARM32_td;
+
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "mov r12, %[L_AES_ARM32_te]\n\t"
@@ -423,15 +438,27 @@ static const word32 L_AES_ARM32_rcon[] = {

 void AES_set_encrypt_key(const unsigned char* key_p, word32 len_p,
     unsigned char* ks_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void AES_set_encrypt_key(const unsigned char* key_p, word32 len_p,
     unsigned char* ks_p)
+#else
+void AES_set_encrypt_key(const unsigned char* key, word32 len,
+    unsigned char* ks)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register const unsigned char* key asm ("r0") = (const unsigned char*)key_p;
     register word32 len asm ("r1") = (word32)len_p;
     register unsigned char* ks asm ("r2") = (unsigned char*)ks_p;
     register word32* L_AES_ARM32_te_c asm ("r3") = (word32*)L_AES_ARM32_te;
     register word32* L_AES_ARM32_rcon_c asm ("r4") =
         (word32*)&L_AES_ARM32_rcon;
+#else
+    register word32* L_AES_ARM32_te_c = (word32*)L_AES_ARM32_te;
+
+    register word32* L_AES_ARM32_rcon_c = (word32*)&L_AES_ARM32_rcon;
+
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "mov r8, %[L_AES_ARM32_te]\n\t"
@@ -939,13 +966,19 @@ void AES_set_encrypt_key(const unsigned char* key_p, word32 len_p,

 void AES_encrypt_block(const word32* te_p, int nr_p, int len_p,
     const word32* ks_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void AES_encrypt_block(const word32* te_p, int nr_p, int len_p,
     const word32* ks_p)
+#else
+void AES_encrypt_block(const word32* te, int nr, int len, const word32* ks)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register const word32* te asm ("r0") = (const word32*)te_p;
     register int nr asm ("r1") = (int)nr_p;
     register int len asm ("r2") = (int)len_p;
     register const word32* ks asm ("r3") = (const word32*)ks_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "\n"
@@ -1595,9 +1628,15 @@ void AES_encrypt_block(const word32* te_p, int nr_p, int len_p,
 static const word32* L_AES_ARM32_te_ecb = L_AES_ARM32_te_data;
 void AES_ECB_encrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void AES_ECB_encrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p)
+#else
+void AES_ECB_encrypt(const unsigned char* in, unsigned char* out,
+    unsigned long len, const unsigned char* ks, int nr)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register const unsigned char* in asm ("r0") = (const unsigned char*)in_p;
     register unsigned char* out asm ("r1") = (unsigned char*)out_p;
     register unsigned long len asm ("r2") = (unsigned long)len_p;
@@ -1605,11 +1644,19 @@ void AES_ECB_encrypt(const unsigned char* in_p, unsigned char* out_p,
     register int nr asm ("r4") = (int)nr_p;
     register word32* L_AES_ARM32_te_ecb_c asm ("r5") =
         (word32*)L_AES_ARM32_te_ecb;
+#else
+    register word32* L_AES_ARM32_te_ecb_c = (word32*)L_AES_ARM32_te_ecb;
+
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "mov lr, %[in]\n\t"
         "mov r0, %[L_AES_ARM32_te_ecb]\n\t"
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
         "mov r12, r4\n\t"
+#else
+        "mov r12, %[nr]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
         "push {%[ks]}\n\t"
         "cmp r12, #10\n\t"
         "beq L_AES_ECB_encrypt_start_block_128_%=\n\t"
@@ -1851,10 +1898,16 @@ static const word32* L_AES_ARM32_te_cbc = L_AES_ARM32_te_data;
 void AES_CBC_encrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p,
     unsigned char* iv_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void AES_CBC_encrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p,
     unsigned char* iv_p)
+#else
+void AES_CBC_encrypt(const unsigned char* in, unsigned char* out,
+    unsigned long len, const unsigned char* ks, int nr, unsigned char* iv)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register const unsigned char* in asm ("r0") = (const unsigned char*)in_p;
     register unsigned char* out asm ("r1") = (unsigned char*)out_p;
     register unsigned long len asm ("r2") = (unsigned long)len_p;
@@ -1863,10 +1916,22 @@ void AES_CBC_encrypt(const unsigned char* in_p, unsigned char* out_p,
     register unsigned char* iv asm ("r5") = (unsigned char*)iv_p;
     register word32* L_AES_ARM32_te_cbc_c asm ("r6") =
         (word32*)L_AES_ARM32_te_cbc;
+#else
+    register word32* L_AES_ARM32_te_cbc_c = (word32*)L_AES_ARM32_te_cbc;
+
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
         "mov r8, r4\n\t"
+#else
+        "mov r8, %[nr]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
         "mov r9, r5\n\t"
+#else
+        "mov r9, %[iv]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
         "mov lr, %[in]\n\t"
         "mov r0, %[L_AES_ARM32_te_cbc]\n\t"
         "ldm r9, {r4, r5, r6, r7}\n\t"
@@ -2124,10 +2189,16 @@ static const word32* L_AES_ARM32_te_ctr = L_AES_ARM32_te_data;
 void AES_CTR_encrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p,
     unsigned char* ctr_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void AES_CTR_encrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p,
     unsigned char* ctr_p)
+#else
+void AES_CTR_encrypt(const unsigned char* in, unsigned char* out,
+    unsigned long len, const unsigned char* ks, int nr, unsigned char* ctr)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register const unsigned char* in asm ("r0") = (const unsigned char*)in_p;
     register unsigned char* out asm ("r1") = (unsigned char*)out_p;
     register unsigned long len asm ("r2") = (unsigned long)len_p;
@@ -2136,10 +2207,22 @@ void AES_CTR_encrypt(const unsigned char* in_p, unsigned char* out_p,
     register unsigned char* ctr asm ("r5") = (unsigned char*)ctr_p;
     register word32* L_AES_ARM32_te_ctr_c asm ("r6") =
         (word32*)L_AES_ARM32_te_ctr;
+#else
+    register word32* L_AES_ARM32_te_ctr_c = (word32*)L_AES_ARM32_te_ctr;
+
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
         "mov r12, r4\n\t"
+#else
+        "mov r12, %[nr]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
         "mov r8, r5\n\t"
+#else
+        "mov r8, %[ctr]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
         "mov lr, %[in]\n\t"
         "mov r0, %[L_AES_ARM32_te_ctr]\n\t"
         "ldm r8, {r4, r5, r6, r7}\n\t"
@@ -2398,11 +2481,17 @@ void AES_CTR_encrypt(const unsigned char* in_p, unsigned char* out_p,
 #if defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER) || \
     defined(HAVE_AES_CBC)
 void AES_decrypt_block(const word32* td_p, int nr_p, const byte* td4_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void AES_decrypt_block(const word32* td_p, int nr_p, const byte* td4_p)
+#else
+void AES_decrypt_block(const word32* td, int nr, const byte* td4)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register const word32* td asm ("r0") = (const word32*)td_p;
     register int nr asm ("r1") = (int)nr_p;
     register const byte* td4 asm ("r2") = (const byte*)td4_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "\n"
@@ -3086,9 +3175,15 @@ static const byte L_AES_ARM32_td4[] = {
 #if defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER)
 void AES_ECB_decrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void AES_ECB_decrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p)
+#else
+void AES_ECB_decrypt(const unsigned char* in, unsigned char* out,
+    unsigned long len, const unsigned char* ks, int nr)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register const unsigned char* in asm ("r0") = (const unsigned char*)in_p;
     register unsigned char* out asm ("r1") = (unsigned char*)out_p;
     register unsigned long len asm ("r2") = (unsigned long)len_p;
@@ -3097,9 +3192,19 @@ void AES_ECB_decrypt(const unsigned char* in_p, unsigned char* out_p,
     register word32* L_AES_ARM32_td_ecb_c asm ("r5") =
         (word32*)L_AES_ARM32_td_ecb;
     register byte* L_AES_ARM32_td4_c asm ("r6") = (byte*)&L_AES_ARM32_td4;
+#else
+    register word32* L_AES_ARM32_td_ecb_c = (word32*)L_AES_ARM32_td_ecb;
+
+    register byte* L_AES_ARM32_td4_c = (byte*)&L_AES_ARM32_td4;
+
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
         "mov r8, r4\n\t"
+#else
+        "mov r8, %[nr]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
         "mov lr, %[in]\n\t"
         "mov r0, %[L_AES_ARM32_td_ecb]\n\t"
         "mov r12, %[len]\n\t"
@@ -3339,10 +3444,16 @@ void AES_ECB_decrypt(const unsigned char* in_p, unsigned char* out_p,
 void AES_CBC_decrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p,
     unsigned char* iv_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void AES_CBC_decrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p,
     unsigned char* iv_p)
+#else
+void AES_CBC_decrypt(const unsigned char* in, unsigned char* out,
+    unsigned long len, const unsigned char* ks, int nr, unsigned char* iv)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register const unsigned char* in asm ("r0") = (const unsigned char*)in_p;
     register unsigned char* out asm ("r1") = (unsigned char*)out_p;
     register unsigned long len asm ("r2") = (unsigned long)len_p;
@@ -3352,14 +3463,28 @@ void AES_CBC_decrypt(const unsigned char* in_p, unsigned char* out_p,
     register word32* L_AES_ARM32_td_ecb_c asm ("r6") =
         (word32*)L_AES_ARM32_td_ecb;
     register byte* L_AES_ARM32_td4_c asm ("r7") = (byte*)&L_AES_ARM32_td4;
+#else
+    register word32* L_AES_ARM32_td_ecb_c = (word32*)L_AES_ARM32_td_ecb;
+
+    register byte* L_AES_ARM32_td4_c = (byte*)&L_AES_ARM32_td4;
+
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
-        "mov r8, r4\n\t"
-        "mov r4, r5\n\t"
         "mov lr, %[in]\n\t"
         "mov r0, %[L_AES_ARM32_td_ecb]\n\t"
         "mov r12, %[len]\n\t"
         "mov r2, %[L_AES_ARM32_td4]\n\t"
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+        "mov r8, r4\n\t"
+#else
+        "mov r8, %[nr]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+        "mov r4, r5\n\t"
+#else
+        "mov r4, %[iv]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
         "push {%[ks]-r4}\n\t"
         "cmp r8, #10\n\t"
         "beq L_AES_CBC_decrypt_loop_block_128_%=\n\t"
@@ -3983,9 +4108,15 @@ static const word32 L_GCM_gmult_len_r[] = {

 void GCM_gmult_len(unsigned char* x_p, const unsigned char** m_p,
     const unsigned char* data_p, unsigned long len_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void GCM_gmult_len(unsigned char* x_p, const unsigned char** m_p,
     const unsigned char* data_p, unsigned long len_p)
+#else
+void GCM_gmult_len(unsigned char* x, const unsigned char** m,
+    const unsigned char* data, unsigned long len)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register unsigned char* x asm ("r0") = (unsigned char*)x_p;
     register const unsigned char** m asm ("r1") = (const unsigned char**)m_p;
     register const unsigned char* data asm ("r2") =
@@ -3993,6 +4124,10 @@ void GCM_gmult_len(unsigned char* x_p, const unsigned char** m_p,
     register unsigned long len asm ("r3") = (unsigned long)len_p;
     register word32* L_GCM_gmult_len_r_c asm ("r4") =
         (word32*)&L_GCM_gmult_len_r;
+#else
+    register word32* L_GCM_gmult_len_r_c = (word32*)&L_GCM_gmult_len_r;
+
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "mov lr, %[L_GCM_gmult_len_r]\n\t"
@@ -4578,10 +4713,16 @@ static const word32* L_AES_ARM32_te_gcm = L_AES_ARM32_te_data;
 void AES_GCM_encrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p,
     unsigned char* ctr_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void AES_GCM_encrypt(const unsigned char* in_p, unsigned char* out_p,
     unsigned long len_p, const unsigned char* ks_p, int nr_p,
     unsigned char* ctr_p)
+#else
+void AES_GCM_encrypt(const unsigned char* in, unsigned char* out,
+    unsigned long len, const unsigned char* ks, int nr, unsigned char* ctr)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register const unsigned char* in asm ("r0") = (const unsigned char*)in_p;
     register unsigned char* out asm ("r1") = (unsigned char*)out_p;
     register unsigned long len asm ("r2") = (unsigned long)len_p;
@@ -4590,10 +4731,22 @@ void AES_GCM_encrypt(const unsigned char* in_p, unsigned char* out_p,
     register unsigned char* ctr asm ("r5") = (unsigned char*)ctr_p;
     register word32* L_AES_ARM32_te_gcm_c asm ("r6") =
         (word32*)L_AES_ARM32_te_gcm;
+#else
+    register word32* L_AES_ARM32_te_gcm_c = (word32*)L_AES_ARM32_te_gcm;
+
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
         "mov r12, r4\n\t"
+#else
+        "mov r12, %[nr]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
         "mov r8, r5\n\t"
+#else
+        "mov r8, %[ctr]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
         "mov lr, %[in]\n\t"
         "mov r0, %[L_AES_ARM32_te_gcm]\n\t"
         "ldm r8, {r4, r5, r6, r7}\n\t"
@@ -44,19 +44,29 @@
 #ifdef __IAR_SYSTEMS_ICC__
 #define __asm__ asm
 #define __volatile__ volatile
+#define WOLFSSL_NO_VAR_ASSIGN_REG
 #endif /* __IAR_SYSTEMS_ICC__ */
 #ifdef __KEIL__
 #define __asm__ __asm
 #define __volatile__ volatile
 #endif /* __KEIL__ */
+#ifdef __ghs__
+#define WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* __ghs__ */
 #ifdef HAVE_CHACHA
 #include <wolfssl/wolfcrypt/chacha.h>

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void wc_chacha_setiv(word32* x_p, const byte* iv_p, word32 counter_p)
+#else
+void wc_chacha_setiv(word32* x, const byte* iv, word32 counter)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register word32* x asm ("r0") = (word32*)x_p;
     register const byte* iv asm ("r1") = (const byte*)iv_p;
     register word32 counter asm ("r2") = (word32)counter_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "add r3, %[x], #52\n\t"
@@ -81,15 +91,26 @@ static const word32 L_chacha_arm32_constants[] = {
     0x61707865, 0x3320646e, 0x79622d32, 0x6b206574,
 };

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void wc_chacha_setkey(word32* x_p, const byte* key_p, word32 keySz_p)
+#else
+void wc_chacha_setkey(word32* x, const byte* key, word32 keySz)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register word32* x asm ("r0") = (word32*)x_p;
     register const byte* key asm ("r1") = (const byte*)key_p;
     register word32 keySz asm ("r2") = (word32)keySz_p;
     register word32* L_chacha_arm32_constants_c asm ("r3") =
         (word32*)&L_chacha_arm32_constants;
+#else
+    register word32* L_chacha_arm32_constants_c =
+        (word32*)&L_chacha_arm32_constants;
+
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
+        "mov r3, %[L_chacha_arm32_constants]\n\t"
         "subs %[keySz], %[keySz], #16\n\t"
         "add r3, r3, %[keySz]\n\t"
         /* Start state with constants */
@@ -126,13 +147,19 @@ void wc_chacha_setkey(word32* x_p, const byte* key_p, word32 keySz_p)
 }

 #ifdef WOLFSSL_ARMASM_NO_NEON
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void wc_chacha_crypt_bytes(ChaCha* ctx_p, byte* c_p, const byte* m_p,
     word32 len_p)
+#else
+void wc_chacha_crypt_bytes(ChaCha* ctx, byte* c, const byte* m, word32 len)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register ChaCha* ctx asm ("r0") = (ChaCha*)ctx_p;
     register byte* c asm ("r1") = (byte*)c_p;
     register const byte* m asm ("r2") = (const byte*)m_p;
     register word32 len asm ("r3") = (word32)len_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "sub sp, sp, #52\n\t"
@@ -490,13 +517,19 @@ void wc_chacha_crypt_bytes(ChaCha* ctx_p, byte* c_p, const byte* m_p,
     );
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void wc_chacha_use_over(byte* over_p, byte* output_p, const byte* input_p,
     word32 len_p)
+#else
+void wc_chacha_use_over(byte* over, byte* output, const byte* input, word32 len)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register byte* over asm ("r0") = (byte*)over_p;
     register byte* output asm ("r1") = (byte*)output_p;
     register const byte* input asm ("r2") = (const byte*)input_p;
     register word32 len asm ("r3") = (word32)len_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "\n"
@@ -44,11 +44,15 @@
 #ifdef __IAR_SYSTEMS_ICC__
 #define __asm__ asm
 #define __volatile__ volatile
+#define WOLFSSL_NO_VAR_ASSIGN_REG
 #endif /* __IAR_SYSTEMS_ICC__ */
 #ifdef __KEIL__
 #define __asm__ __asm
 #define __volatile__ volatile
 #endif /* __KEIL__ */
+#ifdef __ghs__
+#define WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* __ghs__ */
 /* Based on work by: Emil Lenngren
  * https://github.com/pornin/X25519-Cortex-M4
  */
@@ -60,8 +64,14 @@
 #if defined(HAVE_CURVE25519) || defined(HAVE_ED25519)
 #if !defined(CURVE25519_SMALL) || !defined(ED25519_SMALL)

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_init()
+#else
+void fe_init()
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
     __asm__ __volatile__ (
         "\n\t"
         :
@@ -71,8 +81,14 @@ void fe_init()
 }

 void fe_add_sub_op(void);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_add_sub_op()
+#else
+void fe_add_sub_op()
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
     __asm__ __volatile__ (
         /* Add-Sub */
 #if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
@@ -279,8 +295,14 @@ void fe_add_sub_op()
 }

 void fe_sub_op(void);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_sub_op()
+#else
+void fe_sub_op()
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
     __asm__ __volatile__ (
         /* Sub */
         "ldm r2!, {r6, r7, r8, r9, r10, r11, r12, lr}\n\t"
@@ -320,11 +342,17 @@ void fe_sub_op()
     );
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_sub(fe r_p, const fe a_p, const fe b_p)
+#else
+void fe_sub(fe r, const fe a, const fe b)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register const sword32* a asm ("r1") = (const sword32*)a_p;
     register const sword32* b asm ("r2") = (const sword32*)b_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "bl fe_sub_op\n\t"
@@ -336,8 +364,14 @@ void fe_sub(fe r_p, const fe a_p, const fe b_p)
 }

 void fe_add_op(void);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_add_op()
+#else
+void fe_add_op()
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
     __asm__ __volatile__ (
         /* Add */
         "ldm r2!, {r6, r7, r8, r9, r10, r11, r12, lr}\n\t"
@@ -378,11 +412,17 @@ void fe_add_op()
     );
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_add(fe r_p, const fe a_p, const fe b_p)
+#else
+void fe_add(fe r, const fe a, const fe b)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register const sword32* a asm ("r1") = (const sword32*)a_p;
     register const sword32* b asm ("r2") = (const sword32*)b_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "bl fe_add_op\n\t"
@@ -394,10 +434,16 @@ void fe_add(fe r_p, const fe a_p, const fe b_p)
 }

 #ifdef HAVE_ED25519
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_frombytes(fe out_p, const unsigned char* in_p)
+#else
+void fe_frombytes(fe out, const unsigned char* in)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* out asm ("r0") = (sword32*)out_p;
     register const unsigned char* in asm ("r1") = (const unsigned char*)in_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "ldr r2, [%[in]]\n\t"
@@ -427,10 +473,16 @@ void fe_frombytes(fe out_p, const unsigned char* in_p)
     );
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_tobytes(unsigned char* out_p, const fe n_p)
+#else
+void fe_tobytes(unsigned char* out, const fe n)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register unsigned char* out asm ("r0") = (unsigned char*)out_p;
     register const sword32* n asm ("r1") = (const sword32*)n_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "ldm %[n], {r2, r3, r4, r5, r6, r7, r8, r9}\n\t"
@@ -471,9 +523,15 @@ void fe_tobytes(unsigned char* out_p, const fe n_p)
     );
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_1(fe n_p)
+#else
+void fe_1(fe n)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* n asm ("r0") = (sword32*)n_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         /* Set one */
@@ -492,9 +550,15 @@ void fe_1(fe n_p)
     );
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_0(fe n_p)
+#else
+void fe_0(fe n)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* n asm ("r0") = (sword32*)n_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         /* Set zero */
@@ -513,10 +577,16 @@ void fe_0(fe n_p)
     );
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_copy(fe r_p, const fe a_p)
+#else
+void fe_copy(fe r, const fe a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register const sword32* a asm ("r1") = (const sword32*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         /* Copy */
@@ -572,10 +642,16 @@ void fe_copy(fe r_p, const fe a_p)
     );
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_neg(fe r_p, const fe a_p)
+#else
+void fe_neg(fe r, const fe a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register const sword32* a asm ("r1") = (const sword32*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "mvn lr, #0\n\t"
@@ -599,9 +675,15 @@ void fe_neg(fe r_p, const fe a_p)
     );
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 int fe_isnonzero(const fe a_p)
+#else
+int fe_isnonzero(const fe a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register const sword32* a asm ("r0") = (const sword32*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "ldm %[a], {r2, r3, r4, r5, r6, r7, r8, r9}\n\t"
@@ -643,9 +725,15 @@ int fe_isnonzero(const fe a_p)
     return (word32)(size_t)a;
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 int fe_isnegative(const fe a_p)
+#else
+int fe_isnegative(const fe a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register const sword32* a asm ("r0") = (const sword32*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "ldm %[a]!, {r2, r3, r4, r5}\n\t"
@@ -671,11 +759,17 @@ int fe_isnegative(const fe a_p)

 #if defined(HAVE_ED25519_MAKE_KEY) || defined(HAVE_ED25519_SIGN)
 #ifndef WC_NO_CACHE_RESISTANT
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_cmov_table(fe* r_p, fe* base_p, signed char b_p)
+#else
+void fe_cmov_table(fe* r, fe* base, signed char b)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register fe* r asm ("r0") = (fe*)r_p;
     register fe* base asm ("r1") = (fe*)base_p;
     register signed char b asm ("r2") = (signed char)b_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
 #if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
@@ -2205,11 +2299,17 @@ void fe_cmov_table(fe* r_p, fe* base_p, signed char b_p)
 }

 #else
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_cmov_table(fe* r_p, fe* base_p, signed char b_p)
+#else
+void fe_cmov_table(fe* r, fe* base, signed char b)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register fe* r asm ("r0") = (fe*)r_p;
     register fe* base asm ("r1") = (fe*)base_p;
     register signed char b asm ("r2") = (signed char)b_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
 #if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
@@ -2330,8 +2430,14 @@ void fe_cmov_table(fe* r_p, fe* base_p, signed char b_p)
 #endif /* HAVE_ED25519 */
 #if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
 void fe_mul_op(void);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_mul_op()
+#else
+void fe_mul_op()
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
     __asm__ __volatile__ (
         "sub sp, sp, #40\n\t"
         "str r0, [sp, #36]\n\t"
@@ -2714,8 +2820,14 @@ void fe_mul_op()

 #else
 void fe_mul_op(void);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_mul_op()
+#else
+void fe_mul_op()
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
     __asm__ __volatile__ (
         "sub sp, sp, #44\n\t"
 #if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
@@ -2856,11 +2968,17 @@ void fe_mul_op()
 }

 #endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_mul(fe r_p, const fe a_p, const fe b_p)
+#else
+void fe_mul(fe r, const fe a, const fe b)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register const sword32* a asm ("r1") = (const sword32*)a_p;
     register const sword32* b asm ("r2") = (const sword32*)b_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "bl fe_mul_op\n\t"
@@ -2873,8 +2991,14 @@ void fe_mul(fe r_p, const fe a_p, const fe b_p)

 #if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
 void fe_sq_op(void);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_sq_op()
+#else
+void fe_sq_op()
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
     __asm__ __volatile__ (
         "sub sp, sp, #0x44\n\t"
         "str r0, [sp, #64]\n\t"
@@ -3150,8 +3274,14 @@ void fe_sq_op()

 #else
 void fe_sq_op(void);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_sq_op()
+#else
+void fe_sq_op()
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
     __asm__ __volatile__ (
         "sub sp, sp, #32\n\t"
         "str r0, [sp, #28]\n\t"
@@ -3278,10 +3408,16 @@ void fe_sq_op()
 }

 #endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_sq(fe r_p, const fe a_p)
+#else
+void fe_sq(fe r, const fe a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register const sword32* a asm ("r1") = (const sword32*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "bl fe_sq_op\n\t"
@@ -3294,10 +3430,16 @@ void fe_sq(fe r_p, const fe a_p)

 #ifdef HAVE_CURVE25519
 #if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_mul121666(fe r_p, fe a_p)
+#else
+void fe_mul121666(fe r, fe a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register sword32* a asm ("r1") = (sword32*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         /* Multiply by 121666 */
@@ -3367,10 +3509,16 @@ void fe_mul121666(fe r_p, fe a_p)
 }

 #else
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_mul121666(fe r_p, fe a_p)
+#else
+void fe_mul121666(fe r, fe a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register sword32* a asm ("r1") = (sword32*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         /* Multiply by 121666 */
@@ -3428,11 +3576,17 @@ void fe_mul121666(fe r_p, fe a_p)

 #endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
 #ifndef WC_NO_CACHE_RESISTANT
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 int curve25519(byte* r_p, const byte* n_p, const byte* a_p)
+#else
+int curve25519(byte* r, const byte* n, const byte* a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register byte* r asm ("r0") = (byte*)r_p;
     register const byte* n asm ("r1") = (const byte*)n_p;
     register const byte* a asm ("r2") = (const byte*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "sub sp, sp, #0xbc\n\t"
@@ -3819,11 +3973,17 @@ int curve25519(byte* r_p, const byte* n_p, const byte* a_p)
 }

 #else
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 int curve25519(byte* r_p, const byte* n_p, const byte* a_p)
+#else
+int curve25519(byte* r, const byte* n, const byte* a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register byte* r asm ("r0") = (byte*)r_p;
     register const byte* n asm ("r1") = (const byte*)n_p;
     register const byte* a asm ("r2") = (const byte*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "sub sp, sp, #0xc0\n\t"
@@ -4135,10 +4295,16 @@ int curve25519(byte* r_p, const byte* n_p, const byte* a_p)
 #endif /* WC_NO_CACHE_RESISTANT */
 #endif /* HAVE_CURVE25519 */
 #ifdef HAVE_ED25519
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_invert(fe r_p, const fe a_p)
+#else
+void fe_invert(fe r, const fe a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register const sword32* a asm ("r1") = (const sword32*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "sub sp, sp, #0x88\n\t"
@@ -4307,10 +4473,16 @@ void fe_invert(fe r_p, const fe a_p)
 }

 #if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_sq2(fe r_p, const fe a_p)
+#else
+void fe_sq2(fe r, const fe a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register const sword32* a asm ("r1") = (const sword32*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "sub sp, sp, #0x44\n\t"
@@ -4627,10 +4799,16 @@ void fe_sq2(fe r_p, const fe a_p)
 }

 #else
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_sq2(fe r_p, const fe a_p)
+#else
+void fe_sq2(fe r, const fe a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register const sword32* a asm ("r1") = (const sword32*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "sub sp, sp, #36\n\t"
@@ -4806,10 +4984,16 @@ void fe_sq2(fe r_p, const fe a_p)
 }

 #endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void fe_pow22523(fe r_p, const fe a_p)
+#else
+void fe_pow22523(fe r, const fe a)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register sword32* r asm ("r0") = (sword32*)r_p;
     register const sword32* a asm ("r1") = (const sword32*)a_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "sub sp, sp, #0x68\n\t"
@@ -4977,10 +5161,16 @@ void fe_pow22523(fe r_p, const fe a_p)
     );
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void ge_p1p1_to_p2(ge_p2 * r_p, const ge_p1p1 * p_p)
+#else
+void ge_p1p1_to_p2(ge_p2 * r, const ge_p1p1 * p)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
     register ge_p2 * r asm ("r0") = (ge_p2 *)r_p;
     register const ge_p1p1 * p asm ("r1") = (const ge_p1p1 *)p_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

     __asm__ __volatile__ (
         "sub sp, sp, #8\n\t"
@@ -5008,10 +5198,16 @@ void ge_p1p1_to_p2(ge_p2 * r_p, const ge_p1p1 * p_p)
     );
 }

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
 void ge_p1p1_to_p3(ge_p3 * r_p, const ge_p1p1 * p_p)
+#else
+void ge_p1p1_to_p3(ge_p3 * r, const ge_p1p1 * p)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
 {
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
register ge_p3 * r asm ("r0") = (ge_p3 *)r_p;
|
register ge_p3 * r asm ("r0") = (ge_p3 *)r_p;
|
||||||
register const ge_p1p1 * p asm ("r1") = (const ge_p1p1 *)p_p;
|
register const ge_p1p1 * p asm ("r1") = (const ge_p1p1 *)p_p;
|
||||||
|
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
|
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"sub sp, sp, #8\n\t"
|
"sub sp, sp, #8\n\t"
|
||||||
|
@ -5044,10 +5240,16 @@ void ge_p1p1_to_p3(ge_p3 * r_p, const ge_p1p1 * p_p)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
void ge_p2_dbl(ge_p1p1 * r_p, const ge_p2 * p_p)
|
void ge_p2_dbl(ge_p1p1 * r_p, const ge_p2 * p_p)
|
||||||
|
#else
|
||||||
|
void ge_p2_dbl(ge_p1p1 * r, const ge_p2 * p)
|
||||||
|
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
{
|
{
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
register ge_p1p1 * r asm ("r0") = (ge_p1p1 *)r_p;
|
register ge_p1p1 * r asm ("r0") = (ge_p1p1 *)r_p;
|
||||||
register const ge_p2 * p asm ("r1") = (const ge_p2 *)p_p;
|
register const ge_p2 * p asm ("r1") = (const ge_p2 *)p_p;
|
||||||
|
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
|
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"sub sp, sp, #8\n\t"
|
"sub sp, sp, #8\n\t"
|
||||||
|
@ -5092,11 +5294,17 @@ void ge_p2_dbl(ge_p1p1 * r_p, const ge_p2 * p_p)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
void ge_madd(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_precomp * q_p)
|
void ge_madd(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_precomp * q_p)
|
||||||
|
#else
|
||||||
|
void ge_madd(ge_p1p1 * r, const ge_p3 * p, const ge_precomp * q)
|
||||||
|
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
{
|
{
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
register ge_p1p1 * r asm ("r0") = (ge_p1p1 *)r_p;
|
register ge_p1p1 * r asm ("r0") = (ge_p1p1 *)r_p;
|
||||||
register const ge_p3 * p asm ("r1") = (const ge_p3 *)p_p;
|
register const ge_p3 * p asm ("r1") = (const ge_p3 *)p_p;
|
||||||
register const ge_precomp * q asm ("r2") = (const ge_precomp *)q_p;
|
register const ge_precomp * q asm ("r2") = (const ge_precomp *)q_p;
|
||||||
|
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
|
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"sub sp, sp, #12\n\t"
|
"sub sp, sp, #12\n\t"
|
||||||
|
@ -5179,11 +5387,17 @@ void ge_madd(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_precomp * q_p)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
void ge_msub(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_precomp * q_p)
|
void ge_msub(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_precomp * q_p)
|
||||||
|
#else
|
||||||
|
void ge_msub(ge_p1p1 * r, const ge_p3 * p, const ge_precomp * q)
|
||||||
|
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
{
|
{
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
register ge_p1p1 * r asm ("r0") = (ge_p1p1 *)r_p;
|
register ge_p1p1 * r asm ("r0") = (ge_p1p1 *)r_p;
|
||||||
register const ge_p3 * p asm ("r1") = (const ge_p3 *)p_p;
|
register const ge_p3 * p asm ("r1") = (const ge_p3 *)p_p;
|
||||||
register const ge_precomp * q asm ("r2") = (const ge_precomp *)q_p;
|
register const ge_precomp * q asm ("r2") = (const ge_precomp *)q_p;
|
||||||
|
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
|
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"sub sp, sp, #12\n\t"
|
"sub sp, sp, #12\n\t"
|
||||||
|
@ -5267,11 +5481,17 @@ void ge_msub(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_precomp * q_p)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
void ge_add(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_cached* q_p)
|
void ge_add(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_cached* q_p)
|
||||||
|
#else
|
||||||
|
void ge_add(ge_p1p1 * r, const ge_p3 * p, const ge_cached* q)
|
||||||
|
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
{
|
{
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
register ge_p1p1 * r asm ("r0") = (ge_p1p1 *)r_p;
|
register ge_p1p1 * r asm ("r0") = (ge_p1p1 *)r_p;
|
||||||
register const ge_p3 * p asm ("r1") = (const ge_p3 *)p_p;
|
register const ge_p3 * p asm ("r1") = (const ge_p3 *)p_p;
|
||||||
register const ge_cached* q asm ("r2") = (const ge_cached*)q_p;
|
register const ge_cached* q asm ("r2") = (const ge_cached*)q_p;
|
||||||
|
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
|
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"sub sp, sp, #44\n\t"
|
"sub sp, sp, #44\n\t"
|
||||||
|
@ -5355,11 +5575,17 @@ void ge_add(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_cached* q_p)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
void ge_sub(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_cached* q_p)
|
void ge_sub(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_cached* q_p)
|
||||||
|
#else
|
||||||
|
void ge_sub(ge_p1p1 * r, const ge_p3 * p, const ge_cached* q)
|
||||||
|
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
{
|
{
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
register ge_p1p1 * r asm ("r0") = (ge_p1p1 *)r_p;
|
register ge_p1p1 * r asm ("r0") = (ge_p1p1 *)r_p;
|
||||||
register const ge_p3 * p asm ("r1") = (const ge_p3 *)p_p;
|
register const ge_p3 * p asm ("r1") = (const ge_p3 *)p_p;
|
||||||
register const ge_cached* q asm ("r2") = (const ge_cached*)q_p;
|
register const ge_cached* q asm ("r2") = (const ge_cached*)q_p;
|
||||||
|
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
|
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"sub sp, sp, #44\n\t"
|
"sub sp, sp, #44\n\t"
|
||||||
|
@ -5444,9 +5670,15 @@ void ge_sub(ge_p1p1 * r_p, const ge_p3 * p_p, const ge_cached* q_p)
|
||||||
}
|
}
|
||||||
|
|
||||||
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
|
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
void sc_reduce(byte* s_p)
|
void sc_reduce(byte* s_p)
|
||||||
|
#else
|
||||||
|
void sc_reduce(byte* s)
|
||||||
|
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
{
|
{
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
register byte* s asm ("r0") = (byte*)s_p;
|
register byte* s asm ("r0") = (byte*)s_p;
|
||||||
|
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
|
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"sub sp, sp, #56\n\t"
|
"sub sp, sp, #56\n\t"
|
||||||
|
@ -6233,9 +6465,15 @@ void sc_reduce(byte* s_p)
|
||||||
}
|
}
|
||||||
|
|
||||||
#else
|
#else
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
void sc_reduce(byte* s_p)
|
void sc_reduce(byte* s_p)
|
||||||
|
#else
|
||||||
|
void sc_reduce(byte* s)
|
||||||
|
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
{
|
{
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
register byte* s asm ("r0") = (byte*)s_p;
|
register byte* s asm ("r0") = (byte*)s_p;
|
||||||
|
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
|
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"sub sp, sp, #56\n\t"
|
"sub sp, sp, #56\n\t"
|
||||||
|
@ -6895,12 +7133,18 @@ void sc_reduce(byte* s_p)
|
||||||
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
|
#endif /* WOLFSSL_ARM_ARCH && WOLFSSL_ARM_ARCH < 6 */
|
||||||
#ifdef HAVE_ED25519_SIGN
|
#ifdef HAVE_ED25519_SIGN
|
||||||
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
|
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 6)
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
void sc_muladd(byte* s_p, const byte* a_p, const byte* b_p, const byte* c_p)
|
void sc_muladd(byte* s_p, const byte* a_p, const byte* b_p, const byte* c_p)
|
||||||
|
#else
|
||||||
|
void sc_muladd(byte* s, const byte* a, const byte* b, const byte* c)
|
||||||
|
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
{
|
{
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
register byte* s asm ("r0") = (byte*)s_p;
|
register byte* s asm ("r0") = (byte*)s_p;
|
||||||
register const byte* a asm ("r1") = (const byte*)a_p;
|
register const byte* a asm ("r1") = (const byte*)a_p;
|
||||||
register const byte* b asm ("r2") = (const byte*)b_p;
|
register const byte* b asm ("r2") = (const byte*)b_p;
|
||||||
register const byte* c asm ("r3") = (const byte*)c_p;
|
register const byte* c asm ("r3") = (const byte*)c_p;
|
||||||
|
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
|
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"sub sp, sp, #0x50\n\t"
|
"sub sp, sp, #0x50\n\t"
|
||||||
|
@ -8044,12 +8288,18 @@ void sc_muladd(byte* s_p, const byte* a_p, const byte* b_p, const byte* c_p)
|
||||||
}
|
}
|
||||||
|
|
||||||
#else
|
#else
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
void sc_muladd(byte* s_p, const byte* a_p, const byte* b_p, const byte* c_p)
|
void sc_muladd(byte* s_p, const byte* a_p, const byte* b_p, const byte* c_p)
|
||||||
|
#else
|
||||||
|
void sc_muladd(byte* s, const byte* a, const byte* b, const byte* c)
|
||||||
|
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
{
|
{
|
||||||
|
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
|
||||||
register byte* s asm ("r0") = (byte*)s_p;
|
register byte* s asm ("r0") = (byte*)s_p;
|
||||||
register const byte* a asm ("r1") = (const byte*)a_p;
|
register const byte* a asm ("r1") = (const byte*)a_p;
|
||||||
register const byte* b asm ("r2") = (const byte*)b_p;
|
register const byte* b asm ("r2") = (const byte*)b_p;
|
||||||
register const byte* c asm ("r3") = (const byte*)c_p;
|
register const byte* c asm ("r3") = (const byte*)c_p;
|
||||||
|
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
|
||||||
|
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"sub sp, sp, #0x50\n\t"
|
"sub sp, sp, #0x50\n\t"
|
||||||
|
|
|
@@ -44,11 +44,15 @@
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
#define __volatile__ volatile
+#define WOLFSSL_NO_VAR_ASSIGN_REG
#endif /* __IAR_SYSTEMS_ICC__ */
#ifdef __KEIL__
#define __asm__ __asm
#define __volatile__ volatile
#endif /* __KEIL__ */
+#ifdef __ghs__
+#define WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* __ghs__ */
#include <wolfssl/wolfcrypt/wc_mlkem.h>

#ifdef WOLFSSL_WC_MLKEM
@@ -87,14 +91,25 @@ static const word16 L_mlkem_arm32_ntt_zetas[] = {
0x03be, 0x074d, 0x05f2, 0x065c,
};

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void mlkem_arm32_ntt(sword16* r_p)
+#else
+void mlkem_arm32_ntt(sword16* r)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register sword16* r asm ("r0") = (sword16*)r_p;
register word16* L_mlkem_arm32_ntt_zetas_c asm ("r1") =
(word16*)&L_mlkem_arm32_ntt_zetas;
+#else
+register word16* L_mlkem_arm32_ntt_zetas_c =
+(word16*)&L_mlkem_arm32_ntt_zetas;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"sub sp, sp, #8\n\t"
+"mov r1, %[L_mlkem_arm32_ntt_zetas]\n\t"
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH >= 6)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
"mov r10, #0x1\n\t"
@@ -3123,14 +3138,25 @@ static const word16 L_mlkem_invntt_zetas_inv[] = {
0x05ed, 0x0167, 0x02f6, 0x05a1,
};

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void mlkem_arm32_invntt(sword16* r_p)
+#else
+void mlkem_arm32_invntt(sword16* r)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register sword16* r asm ("r0") = (sword16*)r_p;
register word16* L_mlkem_invntt_zetas_inv_c asm ("r1") =
(word16*)&L_mlkem_invntt_zetas_inv;
+#else
+register word16* L_mlkem_invntt_zetas_inv_c =
+(word16*)&L_mlkem_invntt_zetas_inv;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"sub sp, sp, #8\n\t"
+"mov r1, %[L_mlkem_invntt_zetas_inv]\n\t"
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH >= 6)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
"mov r10, #0x1\n\t"
@@ -7553,16 +7579,27 @@ static const word16 L_mlkem_basemul_mont_zetas[] = {
0x03be, 0x074d, 0x05f2, 0x065c,
};

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void mlkem_arm32_basemul_mont(sword16* r_p, const sword16* a_p,
const sword16* b_p)
+#else
+void mlkem_arm32_basemul_mont(sword16* r, const sword16* a, const sword16* b)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register sword16* r asm ("r0") = (sword16*)r_p;
register const sword16* a asm ("r1") = (const sword16*)a_p;
register const sword16* b asm ("r2") = (const sword16*)b_p;
register word16* L_mlkem_basemul_mont_zetas_c asm ("r3") =
(word16*)&L_mlkem_basemul_mont_zetas;
+#else
+register word16* L_mlkem_basemul_mont_zetas_c =
+(word16*)&L_mlkem_basemul_mont_zetas;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
+"mov r3, %[L_mlkem_basemul_mont_zetas]\n\t"
"add r3, r3, #0x80\n\t"
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH >= 6)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
@@ -7841,16 +7878,28 @@ void mlkem_arm32_basemul_mont(sword16* r_p, const sword16* a_p,
);
}

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void mlkem_arm32_basemul_mont_add(sword16* r_p, const sword16* a_p,
const sword16* b_p)
+#else
+void mlkem_arm32_basemul_mont_add(sword16* r, const sword16* a,
+const sword16* b)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register sword16* r asm ("r0") = (sword16*)r_p;
register const sword16* a asm ("r1") = (const sword16*)a_p;
register const sword16* b asm ("r2") = (const sword16*)b_p;
register word16* L_mlkem_basemul_mont_zetas_c asm ("r3") =
(word16*)&L_mlkem_basemul_mont_zetas;
+#else
+register word16* L_mlkem_basemul_mont_zetas_c =
+(word16*)&L_mlkem_basemul_mont_zetas;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
+"mov r3, %[L_mlkem_basemul_mont_zetas]\n\t"
"add r3, r3, #0x80\n\t"
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH >= 6)
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
@@ -8163,11 +8212,21 @@ void mlkem_arm32_basemul_mont_add(sword16* r_p, const sword16* a_p,
);
}

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void mlkem_arm32_csubq(sword16* p_p)
+#else
+void mlkem_arm32_csubq(sword16* p)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register sword16* p asm ("r0") = (sword16*)p_p;
register word16* L_mlkem_basemul_mont_zetas_c asm ("r1") =
(word16*)&L_mlkem_basemul_mont_zetas;
+#else
+register word16* L_mlkem_basemul_mont_zetas_c =
+(word16*)&L_mlkem_basemul_mont_zetas;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
@@ -8342,15 +8401,26 @@ void mlkem_arm32_csubq(sword16* p_p)
);
}

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
unsigned int mlkem_arm32_rej_uniform(sword16* p_p, unsigned int len_p,
const byte* r_p, unsigned int rLen_p)
+#else
+unsigned int mlkem_arm32_rej_uniform(sword16* p, unsigned int len,
+const byte* r, unsigned int rLen)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register sword16* p asm ("r0") = (sword16*)p_p;
register unsigned int len asm ("r1") = (unsigned int)len_p;
register const byte* r asm ("r2") = (const byte*)r_p;
register unsigned int rLen asm ("r3") = (unsigned int)rLen_p;
register word16* L_mlkem_basemul_mont_zetas_c asm ("r4") =
(word16*)&L_mlkem_basemul_mont_zetas;
+#else
+register word16* L_mlkem_basemul_mont_zetas_c =
+(word16*)&L_mlkem_basemul_mont_zetas;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)

@@ -44,22 +44,33 @@
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
#define __volatile__ volatile
+#define WOLFSSL_NO_VAR_ASSIGN_REG
#endif /* __IAR_SYSTEMS_ICC__ */
#ifdef __KEIL__
#define __asm__ __asm
#define __volatile__ volatile
#endif /* __KEIL__ */
+#ifdef __ghs__
+#define WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* __ghs__ */
#ifdef HAVE_POLY1305
#include <wolfssl/wolfcrypt/poly1305.h>

#ifdef WOLFSSL_ARMASM_NO_NEON
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void poly1305_arm32_blocks_16(Poly1305* ctx_p, const byte* m_p, word32 len_p,
int notLast_p)
+#else
+void poly1305_arm32_blocks_16(Poly1305* ctx, const byte* m, word32 len,
+int notLast)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register Poly1305* ctx asm ("r0") = (Poly1305*)ctx_p;
register const byte* m asm ("r1") = (const byte*)m_p;
register word32 len asm ("r2") = (word32)len_p;
register int notLast asm ("r3") = (int)notLast_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"sub sp, sp, #28\n\t"
@@ -282,12 +293,22 @@ static const word32 L_poly1305_arm32_clamp[] = {
0x0fffffff, 0x0ffffffc, 0x0ffffffc, 0x0ffffffc,
};

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void poly1305_set_key(Poly1305* ctx_p, const byte* key_p)
+#else
+void poly1305_set_key(Poly1305* ctx, const byte* key)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register Poly1305* ctx asm ("r0") = (Poly1305*)ctx_p;
register const byte* key asm ("r1") = (const byte*)key_p;
register word32* L_poly1305_arm32_clamp_c asm ("r2") =
(word32*)&L_poly1305_arm32_clamp;
+#else
+register word32* L_poly1305_arm32_clamp_c =
+(word32*)&L_poly1305_arm32_clamp;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
/* Load mask. */
@@ -328,10 +349,16 @@ void poly1305_set_key(Poly1305* ctx_p, const byte* key_p)
);
}

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void poly1305_final(Poly1305* ctx_p, byte* mac_p)
+#else
+void poly1305_final(Poly1305* ctx, byte* mac)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register Poly1305* ctx asm ("r0") = (Poly1305*)ctx_p;
register byte* mac asm ("r1") = (byte*)mac_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"add r9, %[ctx], #16\n\t"
@@ -385,13 +412,20 @@ void poly1305_final(Poly1305* ctx_p, byte* mac_p)
}

#else
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void poly1305_arm32_blocks_16(Poly1305* ctx_p, const byte* m_p, word32 len_p,
int notLast_p)
+#else
+void poly1305_arm32_blocks_16(Poly1305* ctx, const byte* m, word32 len,
+int notLast)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register Poly1305* ctx asm ("r0") = (Poly1305*)ctx_p;
register const byte* m asm ("r1") = (const byte*)m_p;
register word32 len asm ("r2") = (word32)len_p;
register int notLast asm ("r3") = (int)notLast_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"sub sp, sp, #28\n\t"
@@ -610,12 +644,18 @@ void poly1305_arm32_blocks_16(Poly1305* ctx_p, const byte* m_p, word32 len_p,
);
}

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void poly1305_arm32_blocks(Poly1305* ctx_p, const unsigned char* m_p,
size_t bytes_p)
+#else
+void poly1305_arm32_blocks(Poly1305* ctx, const unsigned char* m, size_t bytes)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register Poly1305* ctx asm ("r0") = (Poly1305*)ctx_p;
register const unsigned char* m asm ("r1") = (const unsigned char*)m_p;
register size_t bytes asm ("r2") = (size_t)bytes_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"cmp %[bytes], #16\n\t"
@@ -1074,12 +1114,22 @@ static const word32 L_poly1305_arm32_clamp[] = {
0x0fffffff, 0x0ffffffc, 0x0ffffffc, 0x0ffffffc,
};

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void poly1305_set_key(Poly1305* ctx_p, const byte* key_p)
+#else
+void poly1305_set_key(Poly1305* ctx, const byte* key)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register Poly1305* ctx asm ("r0") = (Poly1305*)ctx_p;
register const byte* key asm ("r1") = (const byte*)key_p;
register word32* L_poly1305_arm32_clamp_c asm ("r2") =
(word32*)&L_poly1305_arm32_clamp;
+#else
+register word32* L_poly1305_arm32_clamp_c =
+(word32*)&L_poly1305_arm32_clamp;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
/* Load mask. */
@@ -1300,10 +1350,16 @@ void poly1305_set_key(Poly1305* ctx_p, const byte* key_p)
);
}

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void poly1305_final(Poly1305* ctx_p, byte* mac_p)
+#else
+void poly1305_final(Poly1305* ctx, byte* mac)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register Poly1305* ctx asm ("r0") = (Poly1305*)ctx_p;
register byte* mac asm ("r1") = (byte*)mac_p;
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"add r9, %[ctx], #16\n\t"

@@ -44,11 +44,15 @@
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
#define __volatile__ volatile
+#define WOLFSSL_NO_VAR_ASSIGN_REG
#endif /* __IAR_SYSTEMS_ICC__ */
#ifdef __KEIL__
#define __asm__ __asm
#define __volatile__ volatile
#endif /* __KEIL__ */
+#ifdef __ghs__
+#define WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* __ghs__ */
#ifndef NO_SHA256
#include <wolfssl/wolfcrypt/sha256.h>

@@ -74,16 +78,27 @@ static const word32 L_SHA256_transform_len_k[] = {

void Transform_Sha256_Len(wc_Sha256* sha256_p, const byte* data_p,
word32 len_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void Transform_Sha256_Len(wc_Sha256* sha256_p, const byte* data_p, word32 len_p)
+#else
+void Transform_Sha256_Len(wc_Sha256* sha256, const byte* data, word32 len)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register wc_Sha256* sha256 asm ("r0") = (wc_Sha256*)sha256_p;
register const byte* data asm ("r1") = (const byte*)data_p;
register word32 len asm ("r2") = (word32)len_p;
register word32* L_SHA256_transform_len_k_c asm ("r3") =
(word32*)&L_SHA256_transform_len_k;
+#else
+register word32* L_SHA256_transform_len_k_c =
+(word32*)&L_SHA256_transform_len_k;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"sub sp, sp, #0xc0\n\t"
+"mov r3, %[L_SHA256_transform_len_k]\n\t"
/* Copy digest to add in at end */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
"ldm r0, {r4, r5}\n\t"
@@ -1760,13 +1775,23 @@ static const word32 L_SHA256_transform_neon_len_k[] = {

void Transform_Sha256_Len(wc_Sha256* sha256_p, const byte* data_p,
word32 len_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void Transform_Sha256_Len(wc_Sha256* sha256_p, const byte* data_p, word32 len_p)
+#else
+void Transform_Sha256_Len(wc_Sha256* sha256, const byte* data, word32 len)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register wc_Sha256* sha256 asm ("r0") = (wc_Sha256*)sha256_p;
register const byte* data asm ("r1") = (const byte*)data_p;
register word32 len asm ("r2") = (word32)len_p;
register word32* L_SHA256_transform_neon_len_k_c asm ("r3") =
(word32*)&L_SHA256_transform_neon_len_k;
+#else
+register word32* L_SHA256_transform_neon_len_k_c =
+(word32*)&L_SHA256_transform_neon_len_k;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"sub sp, sp, #24\n\t"

@@ -44,11 +44,15 @@
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
#define __volatile__ volatile
+#define WOLFSSL_NO_VAR_ASSIGN_REG
#endif /* __IAR_SYSTEMS_ICC__ */
#ifdef __KEIL__
#define __asm__ __asm
#define __volatile__ volatile
#endif /* __KEIL__ */
+#ifdef __ghs__
+#define WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* __ghs__ */
#ifdef WOLFSSL_SHA3
#ifndef WOLFSSL_ARMASM_NO_NEON
static const word64 L_sha3_arm2_neon_rt[] = {
@@ -68,14 +72,24 @@ static const word64 L_sha3_arm2_neon_rt[] = {

#include <wolfssl/wolfcrypt/sha3.h>

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void BlockSha3(word64* state_p)
+#else
+void BlockSha3(word64* state)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register word64* state asm ("r0") = (word64*)state_p;
register word64* L_sha3_arm2_neon_rt_c asm ("r1") =
(word64*)&L_sha3_arm2_neon_rt;
+#else
+register word64* L_sha3_arm2_neon_rt_c = (word64*)&L_sha3_arm2_neon_rt;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"sub sp, sp, #16\n\t"
+"mov r1, %[L_sha3_arm2_neon_rt]\n\t"
"mov r2, #24\n\t"
"mov r3, sp\n\t"
"vld1.8 {d0-d3}, [%[state]]!\n\t"
@@ -361,13 +375,23 @@ static const word64 L_sha3_arm2_rt[] = {

#include <wolfssl/wolfcrypt/sha3.h>

+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void BlockSha3(word64* state_p)
+#else
+void BlockSha3(word64* state)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register word64* state asm ("r0") = (word64*)state_p;
register word64* L_sha3_arm2_rt_c asm ("r1") = (word64*)&L_sha3_arm2_rt;
+#else
+register word64* L_sha3_arm2_rt_c = (word64*)&L_sha3_arm2_rt;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"sub sp, sp, #0xcc\n\t"
+"mov r1, %[L_sha3_arm2_rt]\n\t"
"mov r2, #12\n\t"
"\n"
"L_sha3_arm32_begin_%=: \n\t"

@@ -44,11 +44,15 @@
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
#define __volatile__ volatile
+#define WOLFSSL_NO_VAR_ASSIGN_REG
#endif /* __IAR_SYSTEMS_ICC__ */
#ifdef __KEIL__
#define __asm__ __asm
#define __volatile__ volatile
#endif /* __KEIL__ */
+#ifdef __ghs__
+#define WOLFSSL_NO_VAR_ASSIGN_REG
+#endif /* __ghs__ */
#if defined(WOLFSSL_SHA512) || defined(WOLFSSL_SHA384)
#include <wolfssl/wolfcrypt/sha512.h>

@@ -98,16 +102,27 @@ static const word64 L_SHA512_transform_len_k[] = {

void Transform_Sha512_Len(wc_Sha512* sha512_p, const byte* data_p,
word32 len_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void Transform_Sha512_Len(wc_Sha512* sha512_p, const byte* data_p, word32 len_p)
+#else
+void Transform_Sha512_Len(wc_Sha512* sha512, const byte* data, word32 len)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register wc_Sha512* sha512 asm ("r0") = (wc_Sha512*)sha512_p;
register const byte* data asm ("r1") = (const byte*)data_p;
register word32 len asm ("r2") = (word32)len_p;
register word64* L_SHA512_transform_len_k_c asm ("r3") =
(word64*)&L_SHA512_transform_len_k;
+#else
+register word64* L_SHA512_transform_len_k_c =
+(word64*)&L_SHA512_transform_len_k;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
"sub sp, sp, #0xc0\n\t"
+"mov r3, %[L_SHA512_transform_len_k]\n\t"
/* Copy digest to add in at end */
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
"ldm r0, {r4, r5}\n\t"
@@ -7576,15 +7591,26 @@ static const word64 L_SHA512_transform_neon_len_k[] = {

void Transform_Sha512_Len(wc_Sha512* sha512_p, const byte* data_p,
word32 len_p);
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
void Transform_Sha512_Len(wc_Sha512* sha512_p, const byte* data_p, word32 len_p)
+#else
+void Transform_Sha512_Len(wc_Sha512* sha512, const byte* data, word32 len)
+#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
{
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
register wc_Sha512* sha512 asm ("r0") = (wc_Sha512*)sha512_p;
register const byte* data asm ("r1") = (const byte*)data_p;
register word32 len asm ("r2") = (word32)len_p;
register word64* L_SHA512_transform_neon_len_k_c asm ("r3") =
(word64*)&L_SHA512_transform_neon_len_k;
+#else
+register word64* L_SHA512_transform_neon_len_k_c =
+(word64*)&L_SHA512_transform_neon_len_k;

+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
+"mov r3, %[L_SHA512_transform_neon_len_k]\n\t"
/* Load digest into working vars */
"vldm.64 %[sha512], {d0-d7}\n\t"
/* Start of loop processing a block */

@@ -2290,12 +2290,12 @@ L_AES_ECB_decrypt_end:
.type AES_CBC_decrypt, %function
AES_CBC_decrypt:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-LDR r8, [sp, #36]
-LDR r4, [sp, #40]
MOV lr, r0
LDR r0, L_AES_Thumb2_td_ecb
MOV r12, r2
ADR r2, L_AES_Thumb2_td4
+LDR r8, [sp, #36]
+LDR r4, [sp, #40]
PUSH {r3, r4}
CMP r8, #0xa
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)

@@ -214,6 +214,11 @@ void AES_invert_key(unsigned char* ks, word32 rounds)
register word32* L_AES_Thumb2_td_c __asm__ ("r3") =
(word32*)L_AES_Thumb2_td;

+#else
+register word32* L_AES_Thumb2_te_c = (word32*)L_AES_Thumb2_te;

+register word32* L_AES_Thumb2_td_c = (word32*)L_AES_Thumb2_td;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
@@ -359,6 +364,11 @@ void AES_set_encrypt_key(const unsigned char* key, word32 len,
register word32* L_AES_Thumb2_rcon_c __asm__ ("r4") =
(word32*)&L_AES_Thumb2_rcon;

+#else
+register word32* L_AES_Thumb2_te_c = (word32*)L_AES_Thumb2_te;

+register word32* L_AES_Thumb2_rcon_c = (word32*)&L_AES_Thumb2_rcon;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
@@ -893,6 +903,9 @@ void AES_ECB_encrypt(const unsigned char* in, unsigned char* out,
register word32* L_AES_Thumb2_te_ecb_c __asm__ ("r5") =
(word32*)L_AES_Thumb2_te_ecb;

+#else
+register word32* L_AES_Thumb2_te_ecb_c = (word32*)L_AES_Thumb2_te_ecb;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
@@ -901,7 +914,7 @@ void AES_ECB_encrypt(const unsigned char* in, unsigned char* out,
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
"MOV r12, r4\n\t"
#else
-"LDR r12, [sp, #36]\n\t"
+"MOV r12, %[nr]\n\t"
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
"PUSH {%[ks]}\n\t"
"CMP r12, #0xa\n\t"
@@ -1115,18 +1128,21 @@ void AES_CBC_encrypt(const unsigned char* in, unsigned char* out,
register word32* L_AES_Thumb2_te_ecb_c __asm__ ("r6") =
(word32*)L_AES_Thumb2_te_ecb;

+#else
+register word32* L_AES_Thumb2_te_ecb_c = (word32*)L_AES_Thumb2_te_ecb;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
"MOV r8, r4\n\t"
#else
-"LDR r8, [sp, #36]\n\t"
+"MOV r8, %[nr]\n\t"
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
"MOV r9, r5\n\t"
#else
-"LDR r9, [sp, #40]\n\t"
+"MOV r9, %[iv]\n\t"
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
"MOV lr, %[in]\n\t"
"MOV r0, %[L_AES_Thumb2_te_ecb]\n\t"
@@ -1356,18 +1372,21 @@ void AES_CTR_encrypt(const unsigned char* in, unsigned char* out,
register word32* L_AES_Thumb2_te_ecb_c __asm__ ("r6") =
(word32*)L_AES_Thumb2_te_ecb;

+#else
+register word32* L_AES_Thumb2_te_ecb_c = (word32*)L_AES_Thumb2_te_ecb;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
"MOV r12, r4\n\t"
#else
-"LDR r12, [sp, #36]\n\t"
+"MOV r12, %[nr]\n\t"
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
"MOV r8, r5\n\t"
#else
-"LDR r8, [sp, #40]\n\t"
+"MOV r8, %[ctr]\n\t"
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
"MOV lr, %[in]\n\t"
"MOV r0, %[L_AES_Thumb2_te_ecb]\n\t"
@@ -1889,8 +1908,12 @@ void AES_ECB_decrypt(const unsigned char* in, unsigned char* out,
register word32* L_AES_Thumb2_td_ecb_c __asm__ ("r5") =
(word32*)L_AES_Thumb2_td_ecb;

-register byte* L_AES_Thumb2_td4_c __asm__ ("r6") =
-(byte*)&L_AES_Thumb2_td4;
+register byte* L_AES_Thumb2_td4_c __asm__ ("r6") = (byte*)&L_AES_Thumb2_td4;
+#else
+register word32* L_AES_Thumb2_td_ecb_c = (word32*)L_AES_Thumb2_td_ecb;

+register byte* L_AES_Thumb2_td4_c = (byte*)&L_AES_Thumb2_td4;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

@@ -1898,7 +1921,7 @@ void AES_ECB_decrypt(const unsigned char* in, unsigned char* out,
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
"MOV r8, r4\n\t"
#else
-"LDR r8, [sp, #36]\n\t"
+"MOV r8, %[nr]\n\t"
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
"MOV lr, %[in]\n\t"
"MOV r0, %[L_AES_Thumb2_td_ecb]\n\t"
@@ -2111,26 +2134,30 @@ void AES_CBC_decrypt(const unsigned char* in, unsigned char* out,
register word32* L_AES_Thumb2_td_ecb_c __asm__ ("r6") =
(word32*)L_AES_Thumb2_td_ecb;

-register byte* L_AES_Thumb2_td4_c __asm__ ("r7") =
-(byte*)&L_AES_Thumb2_td4;
+register byte* L_AES_Thumb2_td4_c __asm__ ("r7") = (byte*)&L_AES_Thumb2_td4;
+#else
+register word32* L_AES_Thumb2_td_ecb_c = (word32*)L_AES_Thumb2_td_ecb;

+register byte* L_AES_Thumb2_td4_c = (byte*)&L_AES_Thumb2_td4;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
-#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
-"MOV r8, r4\n\t"
-#else
-"LDR r8, [sp, #36]\n\t"
-#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
-#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
-"MOV r4, r5\n\t"
-#else
-"LDR r4, [sp, #40]\n\t"
-#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
"MOV lr, %[in]\n\t"
"MOV r0, %[L_AES_Thumb2_td_ecb]\n\t"
"MOV r12, %[len]\n\t"
"MOV r2, %[L_AES_Thumb2_td4]\n\t"
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+"MOV r8, r4\n\t"
+#else
+"MOV r8, %[nr]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
+#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
+"MOV r4, r5\n\t"
+#else
+"MOV r4, %[iv]\n\t"
+#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
"PUSH {%[ks], r4}\n\t"
"CMP r8, #0xa\n\t"
#if defined(__GNUC__)
@@ -2524,6 +2551,9 @@ void GCM_gmult_len(unsigned char* x, const unsigned char** m,
register word32* L_GCM_gmult_len_r_c __asm__ ("r4") =
(word32*)&L_GCM_gmult_len_r;

+#else
+register word32* L_GCM_gmult_len_r_c = (word32*)&L_GCM_gmult_len_r;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
@@ -3117,18 +3147,21 @@ void AES_GCM_encrypt(const unsigned char* in, unsigned char* out,
register word32* L_AES_Thumb2_te_gcm_c __asm__ ("r6") =
(word32*)L_AES_Thumb2_te_gcm;

+#else
+register word32* L_AES_Thumb2_te_gcm_c = (word32*)L_AES_Thumb2_te_gcm;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
"MOV r12, r4\n\t"
#else
-"LDR r12, [sp, #36]\n\t"
+"MOV r12, %[nr]\n\t"
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
"MOV r8, r5\n\t"
#else
-"LDR r8, [sp, #40]\n\t"
+"MOV r8, %[ctr]\n\t"
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
"MOV lr, %[in]\n\t"
"MOV r0, %[L_AES_Thumb2_te_gcm]\n\t"

@@ -95,6 +95,10 @@ void wc_chacha_setkey(word32* x, const byte* key, word32 keySz)
register word32* L_chacha_thumb2_constants_c __asm__ ("r3") =
(word32*)&L_chacha_thumb2_constants;

+#else
+register word32* L_chacha_thumb2_constants_c =
+(word32*)&L_chacha_thumb2_constants;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (

@@ -77,6 +77,10 @@ void mlkem_thumb2_ntt(sword16* r)
register word16* L_mlkem_thumb2_ntt_zetas_c __asm__ ("r1") =
    (word16*)&L_mlkem_thumb2_ntt_zetas;

#else
register word16* L_mlkem_thumb2_ntt_zetas_c =
    (word16*)&L_mlkem_thumb2_ntt_zetas;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
@@ -1396,6 +1400,10 @@ void mlkem_thumb2_invntt(sword16* r)
register word16* L_mlkem_invntt_zetas_inv_c __asm__ ("r1") =
    (word16*)&L_mlkem_invntt_zetas_inv;

#else
register word16* L_mlkem_invntt_zetas_inv_c =
    (word16*)&L_mlkem_invntt_zetas_inv;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
@@ -3085,6 +3093,10 @@ void mlkem_thumb2_basemul_mont(sword16* r, const sword16* a, const sword16* b)
register word16* L_mlkem_basemul_mont_zetas_c __asm__ ("r3") =
    (word16*)&L_mlkem_basemul_mont_zetas;

#else
register word16* L_mlkem_basemul_mont_zetas_c =
    (word16*)&L_mlkem_basemul_mont_zetas;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
@@ -3232,6 +3244,10 @@ void mlkem_thumb2_basemul_mont_add(sword16* r, const sword16* a,
register word16* L_mlkem_basemul_mont_zetas_c __asm__ ("r3") =
    (word16*)&L_mlkem_basemul_mont_zetas;

#else
register word16* L_mlkem_basemul_mont_zetas_c =
    (word16*)&L_mlkem_basemul_mont_zetas;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
@@ -3387,6 +3403,10 @@ void mlkem_thumb2_csubq(sword16* p)
register word16* L_mlkem_basemul_mont_zetas_c __asm__ ("r1") =
    (word16*)&L_mlkem_basemul_mont_zetas;

#else
register word16* L_mlkem_basemul_mont_zetas_c =
    (word16*)&L_mlkem_basemul_mont_zetas;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (
@@ -3501,6 +3521,10 @@ unsigned int mlkem_thumb2_rej_uniform(sword16* p, unsigned int len,
register word16* L_mlkem_basemul_mont_zetas_c __asm__ ("r4") =
    (word16*)&L_mlkem_basemul_mont_zetas;

#else
register word16* L_mlkem_basemul_mont_zetas_c =
    (word16*)&L_mlkem_basemul_mont_zetas;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (

@@ -315,6 +315,10 @@ void poly1305_set_key(Poly1305* ctx, const byte* key)
register word32* L_poly1305_thumb2_clamp_c __asm__ ("r2") =
    (word32*)&L_poly1305_thumb2_clamp;

#else
register word32* L_poly1305_thumb2_clamp_c =
    (word32*)&L_poly1305_thumb2_clamp;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (

@@ -81,6 +81,10 @@ void Transform_Sha256_Len(wc_Sha256* sha256, const byte* data, word32 len)
register word32* L_SHA256_transform_len_k_c __asm__ ("r3") =
    (word32*)&L_SHA256_transform_len_k;

#else
register word32* L_SHA256_transform_len_k_c =
    (word32*)&L_SHA256_transform_len_k;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (

@@ -73,6 +73,9 @@ void BlockSha3(word64* state)
register word64* L_sha3_thumb2_rt_c __asm__ ("r1") =
    (word64*)&L_sha3_thumb2_rt;

#else
register word64* L_sha3_thumb2_rt_c = (word64*)&L_sha3_thumb2_rt;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (

@@ -105,6 +105,10 @@ void Transform_Sha512_Len(wc_Sha512* sha512, const byte* data, word32 len)
register word64* L_SHA512_transform_len_k_c __asm__ ("r3") =
    (word64*)&L_SHA512_transform_len_k;

#else
register word64* L_SHA512_transform_len_k_c =
    (word64*)&L_SHA512_transform_len_k;

#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */

__asm__ __volatile__ (