
Move to calling the hash abstraction layer, which allows for hardware acceleration.
Signed-off-by: Ben Whitten <ben.whitten@lairdtech.com>
---
 common/image-fit.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/common/image-fit.c b/common/image-fit.c
index 8c15ed1..01ea864 100644
--- a/common/image-fit.c
+++ b/common/image-fit.c
@@ -1082,19 +1082,25 @@ int fit_set_timestamp(void *fit, int noffset, time_t timestamp)
 int calculate_hash(const void *data, int data_len, const char *algo,
 			uint8_t *value, int *value_len)
 {
+	struct hash_algo *hash_algo;
+	int ret;
+
+	ret = hash_lookup_algo(algo, &hash_algo);
+	if (ret)
+		return ret;
+
 	if (IMAGE_ENABLE_CRC32 && strcmp(algo, "crc32") == 0) {
-		*((uint32_t *)value) = crc32_wd(0, data, data_len,
-						CHUNKSZ_CRC32);
-		*((uint32_t *)value) = cpu_to_uimage(*((uint32_t *)value));
-		*value_len = 4;
+		hash_algo->hash_func_ws((unsigned char *)data, data_len,
+					(unsigned char *)value,
+					hash_algo->chunk_size);
+		*value_len = hash_algo->digest_size;
 	} else if (IMAGE_ENABLE_SHA1 && strcmp(algo, "sha1") == 0) {
-		sha1_csum_wd((unsigned char *)data, data_len,
-			     (unsigned char *)value, CHUNKSZ_SHA1);
-		*value_len = 20;
+		hash_algo->hash_func_ws((unsigned char *)data, data_len,
+					(unsigned char *)value,
+					hash_algo->chunk_size);
+		*value_len = hash_algo->digest_size;
 	} else if (IMAGE_ENABLE_SHA256 && strcmp(algo, "sha256") == 0) {
-		sha256_csum_wd((unsigned char *)data, data_len,
-			       (unsigned char *)value, CHUNKSZ_SHA256);
-		*value_len = SHA256_SUM_LEN;
+		hash_algo->hash_func_ws((unsigned char *)data, data_len,
+					(unsigned char *)value,
+					hash_algo->chunk_size);
+		*value_len = hash_algo->digest_size;
 	} else if (IMAGE_ENABLE_MD5 && strcmp(algo, "md5") == 0) {
 		md5_wd((unsigned char *)data, data_len, value, CHUNKSZ_MD5);
 		*value_len = 16;