From 0a725f4d56de42cbe76ca47788291b02c9a88410 Mon Sep 17 00:00:00 2001
From: David Garske
Date: Fri, 7 Dec 2018 16:40:41 -0800
Subject: [PATCH 1/2] Fixes for AES with STM32 crypto hardware:

* Fixes to ensure the "const" input buffer is not modified for AES GCM calls
  with STM32 hardware crypto.
* Improvements to allow AES GCM hardware acceleration for inputs that are not
  a multiple of AES block size.
* Switched the wolfCrypt test for STM32_CRYPTO to use the standard AES GCM
  tests with 12-byte IV and less than 16-byte auth data.
* Fixes for building with the standard peripheral library.
* Fixes for building with `NO_AES_DECRYPT`.
  `./configure --enable-debug --disable-shared --enable-cryptonly
  CFLAGS="-DNO_AES_DECRYPT"`
---
 wolfcrypt/benchmark/benchmark.c |  19 ++-
 wolfcrypt/src/aes.c             | 265 ++++++++++++++++++--------------
 wolfcrypt/src/asn.c             |   2 +-
 wolfcrypt/test/test.c           |  22 +--
 4 files changed, 178 insertions(+), 130 deletions(-)
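Note on the AES-GCM changes below: the approach in patch 1 is to zero-pad the
AAD and the plaintext/ciphertext up to a multiple of AES_BLOCK_SIZE before
handing them to the STM32 CRYP peripheral, and to copy only the original `sz`
bytes back to the caller's buffer so the `const` input is never written. As a
rough illustration of just that padding arithmetic (a standalone sketch, not
wolfCrypt code; `pad_to_block()` is a hypothetical helper):

    #include <stdlib.h>
    #include <string.h>

    #define AES_BLOCK_SIZE 16

    /* Round len up to a non-zero multiple of AES_BLOCK_SIZE, allocate a
     * zero-filled working buffer of that size and copy the input into it.
     * Zero padding is safe for GCM because GHASH already zero-pads partial
     * blocks (see NIST SP 800-38D). Returns NULL on allocation failure. */
    static unsigned char* pad_to_block(const unsigned char* in, size_t len,
                                       size_t* padLen)
    {
        unsigned char* buf;

        *padLen = ((len + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE) * AES_BLOCK_SIZE;
        if (*padLen == 0)
            *padLen = AES_BLOCK_SIZE; /* hardware still consumes one full block */

        buf = (unsigned char*)calloc(1, *padLen); /* zero fill is the padding */
        if (buf != NULL && len > 0)
            memcpy(buf, in, len);
        return buf;
    }

The patch itself applies the same idea with XMALLOC/XMEMSET on aes->heap,
reuses the caller's `out` buffer when `sz` is already block aligned, and frees
the temporary buffer after copying `sz` bytes of the result back out.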
diff --git a/wolfcrypt/benchmark/benchmark.c b/wolfcrypt/benchmark/benchmark.c
index 630ea9ef9..cfc5b6e49 100644
--- a/wolfcrypt/benchmark/benchmark.c
+++ b/wolfcrypt/benchmark/benchmark.c
@@ -1868,7 +1868,9 @@ static void bench_aesgcm_internal(int doAsync, const byte* key, word32 keySz,
 {
     int ret = 0, i, count = 0, times, pending = 0;
     Aes enc[BENCH_MAX_PENDING];
+#ifdef HAVE_AES_DECRYPT
     Aes dec[BENCH_MAX_PENDING];
+#endif
     double start;
     DECLARE_VAR(bench_additional, byte, AES_AUTH_ADD_SZ, HEAP_HINT);
@@ -1876,14 +1878,17 @@ static void bench_aesgcm_internal(int doAsync, const byte* key, word32 keySz,
     /* clear for done cleanup */
     XMEMSET(enc, 0, sizeof(enc));
+#ifdef HAVE_AES_DECRYPT
+    XMEMSET(dec, 0, sizeof(dec));
+#endif
 #ifdef WOLFSSL_ASYNC_CRYPT
     if (bench_additional)
 #endif
-    { XMEMSET(bench_additional, 0, AES_AUTH_ADD_SZ); }
+        XMEMSET(bench_additional, 0, AES_AUTH_ADD_SZ);
 #ifdef WOLFSSL_ASYNC_CRYPT
     if (bench_tag)
 #endif
-    { XMEMSET(bench_tag, 0, AES_AUTH_TAG_SZ); }
+        XMEMSET(bench_tag, 0, AES_AUTH_TAG_SZ);
     /* init keys */
     for (i = 0; i < BENCH_MAX_PENDING; i++) {
@@ -1962,10 +1967,6 @@ exit_aes_gcm:
     } while (bench_stats_sym_check(start));
 exit_aes_gcm_dec:
     bench_stats_sym_finish(decLabel, doAsync, count, bench_size, start, ret);
-
-    for (i = 0; i < BENCH_MAX_PENDING; i++) {
-        wc_AesFree(&dec[i]);
-    }
 #endif /* HAVE_AES_DECRYPT */
     (void)decLabel;
@@ -1975,7 +1976,11 @@ exit:
     if (ret < 0) {
         printf("bench_aesgcm failed: %d\n", ret);
     }
-
+#ifdef HAVE_AES_DECRYPT
+    for (i = 0; i < BENCH_MAX_PENDING; i++) {
+        wc_AesFree(&dec[i]);
+    }
+#endif
     for (i = 0; i < BENCH_MAX_PENDING; i++) {
         wc_AesFree(&enc[i]);
     }
diff --git a/wolfcrypt/src/aes.c b/wolfcrypt/src/aes.c
index 8e1d9677e..9972ad0e1 100644
--- a/wolfcrypt/src/aes.c
+++ b/wolfcrypt/src/aes.c
@@ -421,7 +421,7 @@
         /* enable crypto processor */
         CRYP_Cmd(ENABLE);
-        /* wait until decrypt key has been intialized */
+        /* wait until decrypt key has been initialized */
         while (CRYP_GetFlagStatus(CRYP_FLAG_BUSY) != RESET) {}
         /* set direction and mode */
@@ -2407,6 +2407,7 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
 #else /* STD_PERI_LIB */
     int wc_AesCbcEncrypt(Aes* aes, byte* out, const byte* in, word32 sz)
     {
+        int ret;
         word32 *iv;
         word32 blocks = (sz / AES_BLOCK_SIZE);
         CRYP_InitTypeDef cryptInit;
@@ -2469,12 +2470,13 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
         /* disable crypto processor */
         CRYP_Cmd(DISABLE);
-        return 0;
+        return ret;
     }
     #ifdef HAVE_AES_DECRYPT
     int wc_AesCbcDecrypt(Aes* aes, byte* out, const byte* in, word32 sz)
     {
+        int ret;
         word32 *iv;
         word32 blocks = (sz / AES_BLOCK_SIZE);
         CRYP_InitTypeDef cryptInit;
@@ -2548,7 +2550,7 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
         /* disable crypto processor */
         CRYP_Cmd(DISABLE);
-        return 0;
+        return ret;
     }
     #endif /* HAVE_AES_DECRYPT */
 #endif /* WOLFSSL_STM32_CUBEMX */
@@ -3036,7 +3038,7 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
         hcryp.Init.ChainingMode = CRYP_CHAINMODE_AES_CTR;
         hcryp.Init.KeyWriteFlag = CRYP_KEY_WRITE_ENABLE;
     #endif
-        hcryp.Init.pInitVect = (byte*)aes->reg;
+        hcryp.Init.pInitVect = (uint8_t*)aes->reg;
         HAL_CRYP_Init(&hcryp);
     #ifdef STM32_CRYPTO_AES_ONLY
@@ -8177,32 +8179,60 @@ static WC_INLINE int wc_AesGcmEncrypt_STM32(Aes* aes, byte* out, const byte* in,
                                   const byte* authIn, word32 authInSz)
 {
     int ret;
-    word32 keySize;
-    byte initialCounter[AES_BLOCK_SIZE];
 #ifdef WOLFSSL_STM32_CUBEMX
     CRYP_HandleTypeDef hcryp;
 #else
-    byte keyCopy[AES_BLOCK_SIZE * 2];
+    word32 keyCopy[AES_256_KEY_SIZE/sizeof(word32)];
 #endif
+    word32 keySize;
     int status = 0;
+    int outPadSz, authPadSz;
+    word32 tag[AES_BLOCK_SIZE/sizeof(word32)];
+    word32 initialCounter[AES_BLOCK_SIZE/sizeof(word32)];
+    byte* outPadded = NULL;
     byte* authInPadded = NULL;
-    byte tag[AES_BLOCK_SIZE];
-    int authPadSz;
     ret = wc_AesGetKeySize(aes, &keySize);
     if (ret != 0)
         return ret;
-    XMEMSET(initialCounter, 0, AES_BLOCK_SIZE);
-    XMEMCPY(initialCounter, iv, ivSz);
-    initialCounter[AES_BLOCK_SIZE - 1] = STM32_GCM_IV_START;
+#ifdef WOLFSSL_STM32_CUBEMX
+    ret = wc_Stm32_Aes_Init(aes, &hcryp);
+    if (ret != 0)
+        return ret;
+#endif
+
+    XMEMSET(initialCounter, 0, sizeof(initialCounter));
+    XMEMCPY(initialCounter, iv, ivSz);
+    *((byte*)initialCounter + (AES_BLOCK_SIZE - 1)) = STM32_GCM_IV_START;
+
+    /* Need to pad the AAD and input cipher text to a full block size since
+     * CRYP_AES_GCM will assume these are a multiple of AES_BLOCK_SIZE.
+     * It is okay to pad with zeros because GCM does this before GHASH already.
+     * See NIST SP 800-38D */
+    if ((sz % AES_BLOCK_SIZE) != 0 || sz == 0) {
+        outPadSz = ((sz / AES_BLOCK_SIZE) + 1) * AES_BLOCK_SIZE;
+        outPadded = (byte*)XMALLOC(outPadSz, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
+        if (outPadded == NULL) {
+            return MEMORY_E;
+        }
+        XMEMSET(outPadded, 0, outPadSz);
+    }
+    else {
+        outPadSz = sz;
+        outPadded = out;
+    }
+    XMEMCPY(outPadded, in, sz);
-    /* pad authIn if it is not a block multiple */
     if ((authInSz % AES_BLOCK_SIZE) != 0) {
-        authPadSz = ((authInSz / AES_BLOCK_SIZE) + 1) * AES_BLOCK_SIZE;
         /* Need to pad the AAD to a full block with zeros. */
-        authInPadded = XMALLOC(authPadSz, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
+        authPadSz = ((authInSz / AES_BLOCK_SIZE) + 1) * AES_BLOCK_SIZE;
+        authInPadded = (byte*)XMALLOC(authPadSz, aes->heap,
+            DYNAMIC_TYPE_TMP_BUFFER);
         if (authInPadded == NULL) {
+            if (outPadded != out) {
+                XFREE(outPadded, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
+            }
             return MEMORY_E;
         }
         XMEMSET(authInPadded, 0, authPadSz);
@@ -8214,11 +8244,7 @@ static WC_INLINE int wc_AesGcmEncrypt_STM32(Aes* aes, byte* out, const byte* in,
     }
 #ifdef WOLFSSL_STM32_CUBEMX
-    ret = wc_Stm32_Aes_Init(aes, &hcryp);
-    if (ret != 0)
-        return ret;
-
-    hcryp.Init.pInitVect = initialCounter;
+    hcryp.Init.pInitVect = (uint8_t*)initialCounter;
     hcryp.Init.Header = authInPadded;
     hcryp.Init.HeaderSize = authInSz;
@@ -8238,22 +8264,25 @@ static WC_INLINE int wc_AesGcmEncrypt_STM32(Aes* aes, byte* out, const byte* in,
         if (status == HAL_OK) {
             /* GCM payload phase */
             hcryp.Init.GCMCMACPhase = CRYP_PAYLOAD_PHASE;
-            status = HAL_CRYPEx_AES_Auth(&hcryp, (byte*)in, sz, out, STM32_HAL_TIMEOUT);
+            status = HAL_CRYPEx_AES_Auth(&hcryp, outPadded, sz, outPadded,
+                STM32_HAL_TIMEOUT);
             if (status == HAL_OK) {
                 /* GCM final phase */
                 hcryp.Init.GCMCMACPhase = CRYP_FINAL_PHASE;
-                status = HAL_CRYPEx_AES_Auth(&hcryp, NULL, sz, tag, STM32_HAL_TIMEOUT);
+                status = HAL_CRYPEx_AES_Auth(&hcryp, NULL, sz, (byte*)tag,
+                    STM32_HAL_TIMEOUT);
             }
         }
     }
 #else
     HAL_CRYP_Init(&hcryp);
-    status = HAL_CRYPEx_AESGCM_Encrypt(&hcryp, (byte*)in, sz,
-        out, STM32_HAL_TIMEOUT);
+    status = HAL_CRYPEx_AESGCM_Encrypt(&hcryp, outPadded, sz,
+        outPadded, STM32_HAL_TIMEOUT);
     /* Compute the authTag */
     if (status == HAL_OK) {
-        status = HAL_CRYPEx_AESGCM_Finish(&hcryp, sz, tag, STM32_HAL_TIMEOUT);
+        status = HAL_CRYPEx_AESGCM_Finish(&hcryp, sz, (byte*)tag,
+            STM32_HAL_TIMEOUT);
     }
 #endif
@@ -8262,22 +8291,31 @@ static WC_INLINE int wc_AesGcmEncrypt_STM32(Aes* aes, byte* out, const byte* in,
     HAL_CRYP_DeInit(&hcryp);
 #else /* STD_PERI_LIB */
-    ByteReverseWords((word32*)keyCopy, (word32*)aes->key, keySize);
+    ByteReverseWords(keyCopy, (word32*)aes->key, keySize);
     status = CRYP_AES_GCM(MODE_ENCRYPT, (uint8_t*)initialCounter,
                          (uint8_t*)keyCopy,     keySize * 8,
-                         (uint8_t*)in,          sz,
+                         (uint8_t*)outPadded,   sz,
                          (uint8_t*)authInPadded,authInSz,
-                         (uint8_t*)out,         tag);
+                         (uint8_t*)outPadded,   (byte*)tag);
     if (status != SUCCESS)
         ret = AES_GCM_AUTH_E;
 #endif /* WOLFSSL_STM32_CUBEMX */
-    /* authTag may be shorter than AES_BLOCK_SZ, store separately */
-    if (ret == 0)
+    if (ret == 0) {
+        /* return authTag */
         XMEMCPY(authTag, tag, authTagSz);
-    /* We only allocate extra memory if authInPadded is not a multiple of AES_BLOCK_SZ */
-    if (authInPadded != NULL && authInSz != authPadSz) {
+        /* return output if allocated padded used */
+        if (outPadded != out) {
+            XMEMCPY(out, outPadded, sz);
+        }
+    }
+
+    /* Free memory if not a multiple of AES_BLOCK_SZ */
+    if (outPadded != out) {
+        XFREE(outPadded, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
+    }
+    if (authInPadded != authIn) {
         XFREE(authInPadded, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
     }
@@ -8321,7 +8359,7 @@ int AES_GCM_encrypt_C(Aes* aes, byte* out, const byte* in, word32 sz,
 #ifdef WOLFSSL_PIC32MZ_CRYPT
     if (blocks) {
-        /* use intitial IV for PIC32 HW, but don't use it below */
+        /* use initial IV for PIC32 HW, but don't use it below */
         XMEMCPY(aes->reg, ctr, AES_BLOCK_SIZE);
         ret = wc_Pic32AesCrypt(
@@ -8409,20 +8447,7 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz,
         defined(WOLFSSL_STM32L4))
     /* additional argument checks - STM32 HW only supports 12 byte IV */
-    if (ivSz != GCM_NONCE_MID_SZ) {
-        return BAD_FUNC_ARG;
-    }
-
-    /* STM32 HW AES-GCM requires / assumes inputs are a multiple of block size.
-     * We can avoid this by zero padding (authIn) AAD, but zero-padded plaintext
-     * will be encrypted and output incorrectly, causing a bad authTag.
-     * We will use HW accelerated AES-GCM if plain%AES_BLOCK_SZ==0.
-     * Otherwise, we will use accelerated AES_CTR for encrypt, and then
-     * perform GHASH in software.
-     * See NIST SP 800-38D */
-
-    /* Plain text is a multiple of block size, so use HW-Accelerated AES_GCM */
-    if (sz % AES_BLOCK_SIZE == 0) {
+    if (ivSz == GCM_NONCE_MID_SZ) {
         return wc_AesGcmEncrypt_STM32(aes, out, in, sz, iv, ivSz,
                                       authTag, authTagSz, authIn, authInSz);
     }
@@ -8544,43 +8569,57 @@ static WC_INLINE int wc_AesGcmDecrypt_STM32(Aes* aes, byte* out,
 #ifdef WOLFSSL_STM32_CUBEMX
     CRYP_HandleTypeDef hcryp;
 #else
-    byte keyCopy[AES_BLOCK_SIZE * 2];
+    word32 keyCopy[AES_256_KEY_SIZE/sizeof(word32)];
 #endif
-    int status;
-    int inPadSz, authPadSz;
-    byte tag[AES_BLOCK_SIZE];
-    byte *inPadded = NULL;
-    byte *authInPadded = NULL;
-    byte initialCounter[AES_BLOCK_SIZE];
+    word32 keySize;
+    int status;
+    int outPadSz, authPadSz;
+    word32 tag[AES_BLOCK_SIZE/sizeof(word32)];
+    word32 initialCounter[AES_BLOCK_SIZE/sizeof(word32)];
+    byte* outPadded = NULL;
+    byte* authInPadded = NULL;
-    XMEMSET(initialCounter, 0, AES_BLOCK_SIZE);
+    ret = wc_AesGetKeySize(aes, &keySize);
+    if (ret != 0)
+        return ret;
+
+#ifdef WOLFSSL_STM32_CUBEMX
+    ret = wc_Stm32_Aes_Init(aes, &hcryp);
+    if (ret != 0)
+        return ret;
+#endif
+
+    XMEMSET(initialCounter, 0, sizeof(initialCounter));
     XMEMCPY(initialCounter, iv, ivSz);
-    initialCounter[AES_BLOCK_SIZE - 1] = STM32_GCM_IV_START;
+    *((byte*)initialCounter + (AES_BLOCK_SIZE - 1)) = STM32_GCM_IV_START;
     /* Need to pad the AAD and input cipher text to a full block size since
      * CRYP_AES_GCM will assume these are a multiple of AES_BLOCK_SIZE.
      * It is okay to pad with zeros because GCM does this before GHASH already.
      * See NIST SP 800-38D */
-
-    if ((sz % AES_BLOCK_SIZE) > 0) {
-        inPadSz = ((sz / AES_BLOCK_SIZE) + 1) * AES_BLOCK_SIZE;
-        inPadded = XMALLOC(inPadSz, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
-        if (inPadded == NULL) {
+    if ((sz % AES_BLOCK_SIZE) != 0 || sz == 0) {
+        outPadSz = ((sz / AES_BLOCK_SIZE) + 1) * AES_BLOCK_SIZE;
+        outPadded = (byte*)XMALLOC(outPadSz, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
+        if (outPadded == NULL) {
             return MEMORY_E;
         }
-        XMEMSET(inPadded, 0, inPadSz);
-        XMEMCPY(inPadded, in, sz);
-    } else {
-        inPadSz = sz;
-        inPadded = (byte*)in;
+        XMEMSET(outPadded, 0, outPadSz);
     }
+    else {
+        outPadSz = sz;
+        outPadded = out;
+    }
+    XMEMCPY(outPadded, in, sz);
-    if ((authInSz % AES_BLOCK_SIZE) > 0) {
+    if ((authInSz % AES_BLOCK_SIZE) != 0) {
+        /* Need to pad the AAD to a full block with zeros. */
         authPadSz = ((authInSz / AES_BLOCK_SIZE) + 1) * AES_BLOCK_SIZE;
-        authInPadded = XMALLOC(authPadSz, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
+        authInPadded = (byte*)XMALLOC(authPadSz, aes->heap,
+            DYNAMIC_TYPE_TMP_BUFFER);
         if (authInPadded == NULL) {
-            if (inPadded != NULL && inPadSz != sz)
-                XFREE(inPadded , aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
+            if (outPadded != out) {
+                XFREE(outPadded, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
+            }
             return MEMORY_E;
         }
         XMEMSET(authInPadded, 0, authPadSz);
@@ -8591,11 +8630,7 @@ static WC_INLINE int wc_AesGcmDecrypt_STM32(Aes* aes, byte* out,
     }
 #ifdef WOLFSSL_STM32_CUBEMX
-    ret = wc_Stm32_Aes_Init(aes, &hcryp);
-    if (ret != 0)
-        return ret;
-
-    hcryp.Init.pInitVect = initialCounter;
+    hcryp.Init.pInitVect = (uint8_t*)initialCounter;
     hcryp.Init.Header = authInPadded;
     hcryp.Init.HeaderSize = authInSz;
@@ -8610,30 +8645,31 @@ static WC_INLINE int wc_AesGcmDecrypt_STM32(Aes* aes, byte* out,
     status = HAL_CRYPEx_AES_Auth(&hcryp, NULL, 0, NULL, STM32_HAL_TIMEOUT);
     if (status == HAL_OK) {
         /* GCM header phase */
-        hcryp.Init.GCMCMACPhase  = CRYP_HEADER_PHASE;
+        hcryp.Init.GCMCMACPhase = CRYP_HEADER_PHASE;
         status = HAL_CRYPEx_AES_Auth(&hcryp, NULL, 0, NULL, STM32_HAL_TIMEOUT);
         if (status == HAL_OK) {
             /* GCM payload phase */
-            hcryp.Init.GCMCMACPhase  = CRYP_PAYLOAD_PHASE;
-            status = HAL_CRYPEx_AES_Auth(&hcryp, (byte*)inPadded, sz, inPadded,
+            hcryp.Init.GCMCMACPhase = CRYP_PAYLOAD_PHASE;
+            status = HAL_CRYPEx_AES_Auth(&hcryp, outPadded, sz, outPadded,
                 STM32_HAL_TIMEOUT);
             if (status == HAL_OK) {
                 /* GCM final phase */
-                hcryp.Init.GCMCMACPhase  = CRYP_FINAL_PHASE;
-                status = HAL_CRYPEx_AES_Auth(&hcryp, NULL, sz, tag,
+                hcryp.Init.GCMCMACPhase = CRYP_FINAL_PHASE;
+                status = HAL_CRYPEx_AES_Auth(&hcryp, NULL, sz, (byte*)tag,
                     STM32_HAL_TIMEOUT);
             }
         }
     }
 #else
     HAL_CRYP_Init(&hcryp);
-    /* Use inPadded for output buffer instead of
-     * out so that we don't overflow our size. */
-    status = HAL_CRYPEx_AESGCM_Decrypt(&hcryp, (byte*)inPadded,
-        sz, inPadded, STM32_HAL_TIMEOUT);
+    /* Use outPadded for output buffer instead of out so that we don't overflow
+     * our size. */
+    status = HAL_CRYPEx_AESGCM_Decrypt(&hcryp, outPadded, sz, outPadded,
+        STM32_HAL_TIMEOUT);
     /* Compute the authTag */
     if (status == HAL_OK) {
-        status = HAL_CRYPEx_AESGCM_Finish(&hcryp, sz, tag, STM32_HAL_TIMEOUT);
+        status = HAL_CRYPEx_AESGCM_Finish(&hcryp, sz, (byte*)tag,
+            STM32_HAL_TIMEOUT);
     }
 #endif
@@ -8643,34 +8679,39 @@ static WC_INLINE int wc_AesGcmDecrypt_STM32(Aes* aes, byte* out,
     HAL_CRYP_DeInit(&hcryp);
 #else /* STD_PERI_LIB */
-    ByteReverseWords((word32*)keyCopy, (word32*)aes->key, aes->keylen);
+    ByteReverseWords(keyCopy, (word32*)aes->key, aes->keylen);
     /* Input size and auth size need to be the actual sizes, even though
      * they are not block aligned, because this length (in bits) is used
-     * in the final GHASH. Use inPadded for output buffer instead of
+     * in the final GHASH. Use outPadded for output buffer instead of
      * out so that we don't overflow our size. */
     status = CRYP_AES_GCM(MODE_DECRYPT, (uint8_t*)initialCounter,
                          (uint8_t*)keyCopy,     keySize * 8,
-                         (uint8_t*)inPadded,    sz,
+                         (uint8_t*)outPadded,   sz,
                          (uint8_t*)authInPadded,authInSz,
-                         (uint8_t*)inPadded,    tag);
+                         (uint8_t*)outPadded,   (byte*)tag);
     if (status != SUCCESS)
         ret = AES_GCM_AUTH_E;
 #endif /* WOLFSSL_STM32_CUBEMX */
-    if (ConstantCompare(authTag, tag, authTagSz) != 0) {
+    if (ConstantCompare(authTag, (byte*)tag, authTagSz) != 0) {
        ret = AES_GCM_AUTH_E;
     }
+
     if (ret == 0) {
-        /* Only return the decrypted data if authTag success. */
-        XMEMCPY(out, inPadded, sz);
+        /* return output if allocated padded used */
+        if (outPadded != out) {
+            XMEMCPY(out, outPadded, sz);
+        }
     }
-    /* only allocate padding buffers if the inputs are not a multiple of block sz */
-    if (inPadded != NULL && inPadSz != sz)
-        XFREE(inPadded , aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
-    if (authInPadded != NULL && authPadSz != authInSz)
+    /* Free memory if not a multiple of AES_BLOCK_SZ */
+    if (outPadded != out) {
+        XFREE(outPadded, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
+    }
+    if (authInPadded != authIn) {
         XFREE(authInPadded, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
+    }
     return ret;
 }
@@ -8723,7 +8764,7 @@ int AES_GCM_decrypt_C(Aes* aes, byte* out, const byte* in, word32 sz,
 #ifdef WOLFSSL_PIC32MZ_CRYPT
     if (blocks) {
-        /* use intitial IV for PIC32 HW, but don't use it below */
+        /* use initial IV for PIC32 HW, but don't use it below */
         XMEMCPY(aes->reg, ctr, AES_BLOCK_SIZE);
         ret = wc_Pic32AesCrypt(
@@ -8808,20 +8849,7 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz,
         defined(WOLFSSL_STM32L4))
     /* additional argument checks - STM32 HW only supports 12 byte IV */
-    if (ivSz != GCM_NONCE_MID_SZ) {
-        return BAD_FUNC_ARG;
-    }
-
-    /* STM32 HW AES-GCM requires / assumes inputs are a multiple of block size.
-     * We can avoid this by zero padding (authIn) AAD, but zero-padded plaintext
-     * will be encrypted and output incorrectly, causing a bad authTag.
-     * We will use HW accelerated AES-GCM if plain%AES_BLOCK_SZ==0.
-     * Otherwise, we will use accelerated AES_CTR for encrypt, and then
-     * perform GHASH in software.
-     * See NIST SP 800-38D */
-
-    /* Plain text is a multiple of block size, so use HW-Accelerated AES_GCM */
-    if (sz % AES_BLOCK_SIZE == 0) {
+    if (ivSz == GCM_NONCE_MID_SZ) {
         return wc_AesGcmDecrypt_STM32(aes, out, in, sz, iv, ivSz,
                                       authTag, authTagSz, authIn, authInSz);
     }
@@ -9039,8 +9067,9 @@ int wc_GmacVerify(const byte* key, word32 keySz,
                   const byte* authIn, word32 authInSz,
                   const byte* authTag, word32 authTagSz)
 {
-    Aes aes;
     int ret;
+#ifndef NO_AES_DECRYPT
+    Aes aes;
     if (key == NULL || iv == NULL || (authIn == NULL && authInSz != 0) ||
         authTag == NULL || authTagSz == 0 || authTagSz > AES_BLOCK_SIZE) {
@@ -9057,7 +9086,17 @@ int wc_GmacVerify(const byte* key, word32 keySz,
         wc_AesFree(&aes);
     }
     ForceZero(&aes, sizeof(aes));
-
+#else
+    (void)key;
+    (void)keySz;
+    (void)iv;
+    (void)ivSz;
+    (void)authIn;
+    (void)authInSz;
+    (void)authTag;
+    (void)authTagSz;
+    ret = NOT_COMPILED_IN;
+#endif
     return ret;
 }
diff --git a/wolfcrypt/src/asn.c b/wolfcrypt/src/asn.c
index 11e8f6df6..b11890d9e 100644
--- a/wolfcrypt/src/asn.c
+++ b/wolfcrypt/src/asn.c
@@ -6133,7 +6133,7 @@ static int ConfirmSignature(SignatureCtx* sigCtx,
 #ifdef WOLFSSL_ASYNC_CRYPT
     if (sigCtx->devId != INVALID_DEVID && sigCtx->asyncDev && sigCtx->asyncCtx) {
-        /* make sure event is intialized */
+        /* make sure event is initialized */
         WOLF_EVENT* event = &sigCtx->asyncDev->event;
         ret = wolfAsync_EventInit(event, WOLF_EVENT_TYPE_ASYNC_WOLFSSL,
             sigCtx->asyncCtx, WC_ASYNC_FLAG_CALL_AGAIN);
diff --git a/wolfcrypt/test/test.c b/wolfcrypt/test/test.c
index f86ce1fe5..71127909a 100644
--- a/wolfcrypt/test/test.c
+++ b/wolfcrypt/test/test.c
@@ -769,7 +769,7 @@ initDefaultName();
         printf( "AES256 test passed!\n");
 #endif
 #ifdef HAVE_AESGCM
-    #if !defined(WOLFSSL_AFALG) && !defined(WOLFSSL_DEVCRYPTO)
+    #if !defined(WOLFSSL_AFALG) && !defined(WOLFSSL_DEVCRYPTO) && !defined(STM32_CRYPTO)
     if ( (ret = aesgcm_test()) != 0)
         return err_sys("AES-GCM test failed!\n", ret);
     else
@@ -5934,13 +5934,14 @@ int aes_test(void)
     if (wc_AesInit(&enc, HEAP_HINT, devId) != 0)
         return -5400;
+#if defined(HAVE_AES_DECRYPT) || defined(WOLFSSL_AES_COUNTER)
     if (wc_AesInit(&dec, HEAP_HINT, devId) != 0)
         return -5401;
-
+#endif
     ret = wc_AesSetKey(&enc, key, AES_BLOCK_SIZE, iv, AES_ENCRYPTION);
     if (ret != 0)
         return -5402;
-#ifdef HAVE_AES_DECRYPT
+#if defined(HAVE_AES_DECRYPT) || defined(WOLFSSL_AES_COUNTER)
     ret = wc_AesSetKey(&dec, key, AES_BLOCK_SIZE, iv, AES_DECRYPTION);
     if (ret != 0)
         return -5403;
@@ -7254,9 +7255,9 @@ int aesgcm_test(void)
                                  randIV, sizeof(randIV),
                                  resultT, sizeof(resultT),
                                  a, sizeof(a));
-#if defined(WOLFSSL_ASYNC_CRYPT)
+    #if defined(WOLFSSL_ASYNC_CRYPT)
        result = wc_AsyncWait(result, &enc.asyncDev, WC_ASYNC_FLAG_NONE);
-#endif
+    #endif
        if (result != 0)
            return -8209;
@@ -7270,22 +7271,25 @@ int aesgcm_test(void)
            return -8210;
        }
+#ifdef HAVE_AES_DECRYPT
        result = wc_AesGcmDecrypt(&enc,
                                  resultP, resultC, sizeof(resultC),
                                  randIV, sizeof(randIV),
                                  resultT, sizeof(resultT),
                                  a, sizeof(a));
-#if defined(WOLFSSL_ASYNC_CRYPT)
+    #if defined(WOLFSSL_ASYNC_CRYPT)
        result = wc_AsyncWait(result, &enc.asyncDev, WC_ASYNC_FLAG_NONE);
-#endif
+    #endif
        if (result != 0)
            return -8211;
        if (XMEMCMP(p, resultP, sizeof(resultP)))
            return -8212;
+#endif /* HAVE_AES_DECRYPT */
+
        wc_FreeRng(&rng);
    }
-#endif /* WC_NO_RNG HAVE_SELFTEST */
-#endif
+#endif /* WOLFSSL_AES_256 && !(WC_NO_RNG || HAVE_SELFTEST) */
+#endif /* HAVE_FIPS_VERSION >= 2 */
    wc_AesFree(&enc);

From 6552455968d5397c0e955a0a8bba99e15c05d4ab Mon Sep 17 00:00:00 2001
From: David Garske
Date: Mon, 10 Dec 2018 11:40:06 -0800
Subject: [PATCH 2/2] Minor improvements to the STM32 CubeMX AES-GCM logic.

---
 wolfcrypt/src/aes.c   | 12 ++++++------
 wolfcrypt/test/test.c |  7 ++++---
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/wolfcrypt/src/aes.c b/wolfcrypt/src/aes.c
index 9972ad0e1..76140a721 100644
--- a/wolfcrypt/src/aes.c
+++ b/wolfcrypt/src/aes.c
@@ -8224,7 +8224,7 @@ static WC_INLINE int wc_AesGcmEncrypt_STM32(Aes* aes, byte* out, const byte* in,
     }
     XMEMCPY(outPadded, in, sz);
-    if ((authInSz % AES_BLOCK_SIZE) != 0) {
+    if (authInSz == 0 || (authInSz % AES_BLOCK_SIZE) != 0) {
         /* Need to pad the AAD to a full block with zeros. */
         authPadSz = ((authInSz / AES_BLOCK_SIZE) + 1) * AES_BLOCK_SIZE;
         authInPadded = (byte*)XMALLOC(authPadSz, aes->heap,
@@ -8446,8 +8446,8 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz,
         defined(WOLFSSL_STM32F7) || \
         defined(WOLFSSL_STM32L4))
-    /* additional argument checks - STM32 HW only supports 12 byte IV */
-    if (ivSz == GCM_NONCE_MID_SZ) {
+    /* STM32 HW only supports 12 byte IV and 16 byte auth */
+    if (ivSz == GCM_NONCE_MID_SZ && authInSz == AES_BLOCK_SIZE) {
         return wc_AesGcmEncrypt_STM32(aes, out, in, sz, iv, ivSz,
                                       authTag, authTagSz, authIn, authInSz);
     }
@@ -8611,7 +8611,7 @@ static WC_INLINE int wc_AesGcmDecrypt_STM32(Aes* aes, byte* out,
     }
     XMEMCPY(outPadded, in, sz);
-    if ((authInSz % AES_BLOCK_SIZE) != 0) {
+    if (authInSz == 0 || (authInSz % AES_BLOCK_SIZE) != 0) {
         /* Need to pad the AAD to a full block with zeros. */
         authPadSz = ((authInSz / AES_BLOCK_SIZE) + 1) * AES_BLOCK_SIZE;
         authInPadded = (byte*)XMALLOC(authPadSz, aes->heap,
@@ -8848,8 +8848,8 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz,
         defined(WOLFSSL_STM32F7) || \
         defined(WOLFSSL_STM32L4))
-    /* additional argument checks - STM32 HW only supports 12 byte IV */
-    if (ivSz == GCM_NONCE_MID_SZ) {
+    /* STM32 HW only supports 12 byte IV and 16 byte auth */
+    if (ivSz == GCM_NONCE_MID_SZ && authInSz == AES_BLOCK_SIZE) {
         return wc_AesGcmDecrypt_STM32(aes, out, in, sz, iv, ivSz,
                                       authTag, authTagSz, authIn, authInSz);
     }
diff --git a/wolfcrypt/test/test.c b/wolfcrypt/test/test.c
index 71127909a..617a85823 100644
--- a/wolfcrypt/test/test.c
+++ b/wolfcrypt/test/test.c
@@ -769,7 +769,8 @@ initDefaultName();
         printf( "AES256 test passed!\n");
 #endif
 #ifdef HAVE_AESGCM
-    #if !defined(WOLFSSL_AFALG) && !defined(WOLFSSL_DEVCRYPTO) && !defined(STM32_CRYPTO)
+    #if !defined(WOLFSSL_AFALG) && !defined(WOLFSSL_DEVCRYPTO) && \
+        !defined(STM32_CRYPTO)
     if ( (ret = aesgcm_test()) != 0)
         return err_sys("AES-GCM test failed!\n", ret);
     else
@@ -6630,8 +6631,8 @@ static int aesgcm_default_test_helper(byte* key, int keySz, byte* iv, int ivSz,
                byte* plain, int plainSz, byte* cipher, int cipherSz,
                byte* aad, int aadSz, byte* tag, int tagSz)
 {
-Aes enc;
-Aes dec;
+    Aes enc;
+    Aes dec;
     byte resultT[AES_BLOCK_SIZE];
     byte resultP[AES_BLOCK_SIZE * 3];