diff --git a/src/ssl.c b/src/ssl.c
index 5db36244d..76333b423 100644
--- a/src/ssl.c
+++ b/src/ssl.c
@@ -11703,37 +11703,37 @@ int wolfSSL_EVP_MD_type(const WOLFSSL_EVP_MD *md)
         if (XSTRNCMP(type, "SHA256", 6) == 0) {
             ctx->macType = SHA256;
-            wolfSSL_SHA256_Init((SHA256_CTX*)&ctx->hash);
+            wolfSSL_SHA256_Init(&(ctx->hash.sha256));
        }
    #ifdef WOLFSSL_SHA224
        else if (XSTRNCMP(type, "SHA224", 6) == 0) {
             ctx->macType = SHA224;
-            wolfSSL_SHA224_Init((SHA224_CTX*)&ctx->hash);
+            wolfSSL_SHA224_Init(&(ctx->hash.sha224));
        }
    #endif
    #ifdef WOLFSSL_SHA384
        else if (XSTRNCMP(type, "SHA384", 6) == 0) {
             ctx->macType = SHA384;
-            wolfSSL_SHA384_Init((SHA384_CTX*)&ctx->hash);
+            wolfSSL_SHA384_Init(&(ctx->hash.sha384));
        }
    #endif
    #ifdef WOLFSSL_SHA512
        else if (XSTRNCMP(type, "SHA512", 6) == 0) {
             ctx->macType = SHA512;
-            wolfSSL_SHA512_Init((SHA512_CTX*)&ctx->hash);
+            wolfSSL_SHA512_Init(&(ctx->hash.sha512));
        }
    #endif
    #ifndef NO_MD5
        else if (XSTRNCMP(type, "MD5", 3) == 0) {
            ctx->macType = MD5;
-           wolfSSL_MD5_Init((MD5_CTX*)&ctx->hash);
+           wolfSSL_MD5_Init(&(ctx->hash.md5));
        }
    #endif
    #ifndef NO_SHA
        /* has to be last since would pick or 224, 256, 384, or 512 too */
        else if (XSTRNCMP(type, "SHA", 3) == 0) {
            ctx->macType = SHA;
-           wolfSSL_SHA_Init((SHA_CTX*)&ctx->hash);
+           wolfSSL_SHA_Init(&(ctx->hash.sha));
        }
    #endif /* NO_SHA */
        else
diff --git a/wolfssl/openssl/sha.h b/wolfssl/openssl/sha.h
index a881a0bd0..632862089 100644
--- a/wolfssl/openssl/sha.h
+++ b/wolfssl/openssl/sha.h
@@ -46,8 +46,11 @@ typedef WOLFSSL_SHA_CTX SHA_CTX;
 
 #ifdef WOLFSSL_SHA224
 
+/* Using ALIGN16 because when AES-NI is enabled the digest and buffer in the
+ * Sha256 struct are 16-byte aligned. Any dereference of those elements after
+ * casting to Sha224 is expected to also be a 16-byte aligned address. */
 typedef struct WOLFSSL_SHA224_CTX {
-    long long holder[28];   /* big enough, but check on init */
+    ALIGN16 long long holder[28];   /* big enough, but check on init */
 } WOLFSSL_SHA224_CTX;
 
 WOLFSSL_API void wolfSSL_SHA224_Init(WOLFSSL_SHA224_CTX*);
@@ -69,8 +72,11 @@ typedef WOLFSSL_SHA224_CTX SHA224_CTX;
 
 #endif /* WOLFSSL_SHA224 */
 
+/* Using ALIGN16 because when AES-NI is enabled the digest and buffer in the
+ * Sha256 struct are 16-byte aligned. Any dereference of those elements after
+ * casting to Sha256 is expected to also be a 16-byte aligned address. */
 typedef struct WOLFSSL_SHA256_CTX {
-    int holder[28];   /* big enough to hold wolfcrypt sha, but check on init */
+    ALIGN16 int holder[28];   /* big enough to hold wolfcrypt sha, but check on init */
 } WOLFSSL_SHA256_CTX;
 
 WOLFSSL_API void wolfSSL_SHA256_Init(WOLFSSL_SHA256_CTX*);
diff --git a/wolfssl/wolfcrypt/sha256.h b/wolfssl/wolfcrypt/sha256.h
index 790d87c94..997b0c1e1 100644
--- a/wolfssl/wolfcrypt/sha256.h
+++ b/wolfssl/wolfcrypt/sha256.h
@@ -63,11 +63,12 @@ typedef struct Sha256 {
 #ifdef FREESCALE_LTC_SHA
     ltc_hash_ctx_t ctx;
 #else
+    /* alignment on digest and buffer speeds up ARMv8 crypto operations */
+    ALIGN16 word32 digest[SHA256_DIGEST_SIZE / sizeof(word32)];
+    ALIGN16 word32 buffer[SHA256_BLOCK_SIZE  / sizeof(word32)];
     word32 buffLen;   /* in bytes        */
     word32 loLen;     /* length in bytes */
     word32 hiLen;     /* length in bytes */
-    ALIGN16 word32 digest[SHA256_DIGEST_SIZE / sizeof(word32)];
-    ALIGN16 word32 buffer[SHA256_BLOCK_SIZE  / sizeof(word32)];
 #ifdef WOLFSSL_PIC32MZ_HASH
     pic32mz_desc desc ; /* Crypt Engine descriptor */
 #endif
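
Note: the alignment assumption documented in the sha.h comments above can be
sanity-checked with a standalone program along the lines of the sketch below.
It assumes ALIGN16 expands to a 16-byte alignment attribute
(gcc/clang __attribute__((aligned(16))), MSVC __declspec(align(16)));
Sha256Sketch and Sha256CtxSketch are simplified, hypothetical stand-ins for
wolfcrypt's Sha256 and the compatibility-layer holder struct, not the real
definitions.

    #include <stdio.h>
    #include <stdint.h>

    #if defined(_MSC_VER)
        #define ALIGN16 __declspec(align(16))
    #else
        #define ALIGN16 __attribute__((aligned(16)))
    #endif

    /* simplified stand-in for the reordered wolfcrypt Sha256 struct:
     * digest and buffer first, each forced to 16-byte alignment */
    typedef struct {
        ALIGN16 uint32_t digest[8];
        ALIGN16 uint32_t buffer[16];
        uint32_t buffLen, loLen, hiLen;
    } Sha256Sketch;

    /* simplified stand-in for the compatibility-layer holder; without
     * ALIGN16 on the array, the cast below could hand the AES-NI/ARMv8
     * code paths misaligned digest/buffer addresses */
    typedef struct {
        ALIGN16 int holder[28];
    } Sha256CtxSketch;

    int main(void)
    {
        Sha256CtxSketch compat;
        Sha256Sketch* native = (Sha256Sketch*)&compat;

        /* both remainders must print 0 for the aligned-load paths */
        printf("digest %% 16 = %u\n",
               (unsigned)((uintptr_t)native->digest % 16));
        printf("buffer %% 16 = %u\n",
               (unsigned)((uintptr_t)native->buffer % 16));

        /* the "big enough, but check on init" part of the comment */
        printf("sizeof native %u <= sizeof holder %u\n",
               (unsigned)sizeof(Sha256Sketch),
               (unsigned)sizeof(Sha256CtxSketch));
        return 0;
    }

Because the first member of each struct carries the alignment attribute, the
holder struct itself becomes 16-byte aligned, so a cast from one to the other
preserves the alignment of digest and buffer at their fixed offsets.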