[U-Boot] [PATCH 1/2] fit: allow fit to call hardware accelerated hash

Move to calling the abstraction which allows for hardware acceleration.
Signed-off-by: Ben Whitten <ben.whitten@lairdtech.com>
---
 common/image-fit.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/common/image-fit.c b/common/image-fit.c
index 8c15ed1..01ea864 100644
--- a/common/image-fit.c
+++ b/common/image-fit.c
@@ -1082,19 +1082,25 @@ int fit_set_timestamp(void *fit, int noffset, time_t timestamp)
 int calculate_hash(const void *data, int data_len, const char *algo,
 			uint8_t *value, int *value_len)
 {
+	struct hash_algo *hash_algo;
+	int ret;
+
+	ret = hash_lookup_algo(algo, &hash_algo);
+	if (ret)
+		return ret;
+
 	if (IMAGE_ENABLE_CRC32 && strcmp(algo, "crc32") == 0) {
-		*((uint32_t *)value) = crc32_wd(0, data, data_len,
-				CHUNKSZ_CRC32);
-		*((uint32_t *)value) = cpu_to_uimage(*((uint32_t *)value));
-		*value_len = 4;
+		hash_algo->hash_func_ws((unsigned char *)data, data_len,
+				(unsigned char *)value, hash_algo->chunk_size);
+		*value_len = hash_algo->digest_size;
 	} else if (IMAGE_ENABLE_SHA1 && strcmp(algo, "sha1") == 0) {
-		sha1_csum_wd((unsigned char *)data, data_len,
-			     (unsigned char *)value, CHUNKSZ_SHA1);
-		*value_len = 20;
+		hash_algo->hash_func_ws((unsigned char *)data, data_len,
+				(unsigned char *)value, hash_algo->chunk_size);
+		*value_len = hash_algo->digest_size;
 	} else if (IMAGE_ENABLE_SHA256 && strcmp(algo, "sha256") == 0) {
-		sha256_csum_wd((unsigned char *)data, data_len,
-			       (unsigned char *)value, CHUNKSZ_SHA256);
-		*value_len = SHA256_SUM_LEN;
+		hash_algo->hash_func_ws((unsigned char *)data, data_len,
+				(unsigned char *)value, hash_algo->chunk_size);
+		*value_len = hash_algo->digest_size;
 	} else if (IMAGE_ENABLE_MD5 && strcmp(algo, "md5") == 0) {
 		md5_wd((unsigned char *)data, data_len, value, CHUNKSZ_MD5);
 		*value_len = 16;
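For context, the abstraction being called into here is the generic hash layer's struct hash_algo from include/hash.h. The sketch below is trimmed to the fields calculate_hash() now relies on, with paraphrased comments, so treat it as illustrative rather than authoritative:

/* Paraphrased from include/hash.h: the fields used by calculate_hash().
 * When CONFIG_SHA_HW_ACCEL is enabled, the table in common/hash.c can point
 * hash_func_ws at hw_sha1()/hw_sha256() instead of the software
 * sha1_csum_wd()/sha256_csum_wd() implementations.
 */
struct hash_algo {
	const char *name;		/* Name of the algorithm ("sha1", ...) */
	int digest_size;		/* Length of the digest in bytes */
	/* One-shot hash with watchdog-friendly chunking */
	void (*hash_func_ws)(const unsigned char *input, unsigned int ilen,
			     unsigned char *output, unsigned int chunk_sz);
	int chunk_size;			/* Watchdog chunk size */
	/* Progressive hooks, used with CONFIG_SHA_PROG_HW_ACCEL */
	int (*hash_init)(struct hash_algo *algo, void **ctxp);
	int (*hash_update)(struct hash_algo *algo, void *ctx, const void *buf,
			   unsigned int size, int is_last);
	int (*hash_finish)(struct hash_algo *algo, void *ctx, void *dest_buf,
			   int size);
};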

We can use the hardware hash block to reduce code size, which is particularly useful when verifying FIT signatures from SPL.
Signed-off-by: Ben Whitten <ben.whitten@lairdtech.com>
---
 drivers/crypto/Kconfig     |   5 +
 drivers/crypto/Makefile    |   1 +
 drivers/crypto/atmel_sha.c | 289 +++++++++++++++++++++++++++++++++++++++++++++
 drivers/crypto/atmel_sha.h |  52 ++++++++
 lib/Makefile               |   2 +
 5 files changed, 349 insertions(+)
 create mode 100644 drivers/crypto/atmel_sha.c
 create mode 100644 drivers/crypto/atmel_sha.h
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1ea116b..7a20edb 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -2,4 +2,9 @@ menu "Hardware crypto devices"
 
 source drivers/crypto/fsl/Kconfig
 
+config ATMEL_SHA
+	bool "Atmel SHA Driver support"
+	help
+	  Enables the Atmel SHA accelerator.
+
 endmenu
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index efbd1d3..07af449 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -4,5 +4,6 @@
 # http://www.samsung.com
 
 obj-$(CONFIG_EXYNOS_ACE_SHA)	+= ace_sha.o
+obj-$(CONFIG_ATMEL_SHA)	+= atmel_sha.o
 obj-y += rsa_mod_exp/
 obj-y += fsl/
diff --git a/drivers/crypto/atmel_sha.c b/drivers/crypto/atmel_sha.c
new file mode 100644
index 0000000..ef969eb
--- /dev/null
+++ b/drivers/crypto/atmel_sha.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Atmel SHA engine
+ * Copyright (c) 2018 Laird
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include "atmel_sha.h"
+
+#ifdef CONFIG_SHA_HW_ACCEL
+#include <u-boot/sha256.h>
+#include <u-boot/sha1.h>
+#include <hw_sha.h>
+
+#include <asm/io.h>
+#include <asm/arch/clk.h>
+#include <asm/arch/at91_pmc.h>
+
+enum atmel_hash_algos {
+	ATMEL_HASH_SHA1,
+	ATMEL_HASH_SHA256
+};
+
+struct sha_ctx {
+	enum atmel_hash_algos algo;
+	u32 length;
+	u8 buffer[64];
+};
+
+const u8 sha256_der_prefix[SHA256_DER_LEN] = {
+	0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
+	0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
+	0x00, 0x04, 0x20
+};
+
+const u8 sha1_der_prefix[SHA1_DER_LEN] = {
+	0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e,
+	0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14
+};
+
+static enum atmel_hash_algos get_hash_type(struct hash_algo *algo)
+{
+	if (!strcmp(algo->name, "sha1"))
+		return ATMEL_HASH_SHA1;
+	else
+		return ATMEL_HASH_SHA256;
+};
+
+static int atmel_sha_process(const u8 *in_addr, u8 buflen)
+{
+	struct atmel_sha *sha = (struct atmel_sha *)ATMEL_BASE_SHA;
+	int i;
+	u32 *addr_buf;
+
+	/* Copy data in */
+	addr_buf = (u32 *)in_addr;
+	for (i = 0; i < (buflen / 4); i++)
+		sha->idatarx[i] = addr_buf[i];
+	debug("Atmel sha, engine is loaded\n");
+
+	/* Wait for hash to complete */
+	while ((readl(&sha->isr) & ATMEL_HASH_ISR_MASK)
+			!= ATMEL_HASH_ISR_DATRDY)
+		;
+	debug("Atmel sha, engine signaled completion\n");
+
+	return 0;
+}
+
+static int atmel_sha_chunk(struct sha_ctx *ctx, const u8 *buf, unsigned int size)
+{
+	u8 remaining, fill;
+
+	/* Chunk to 64 byte blocks */
+	remaining = ctx->length & 0x3F;
+	fill = 64 - remaining;
+
+	/* If we have things in the buffer transfer the remaining into it */
+	if (remaining && size >= fill) {
+		memcpy(ctx->buffer + remaining, buf, fill);
+
+		/* Process 64 byte chunk */
+		atmel_sha_process(ctx->buffer, 64);
+
+		size -= fill;
+		buf += fill;
+		ctx->length += fill;
+		remaining = 0;
+	}
+
+	/* We are aligned take from source for any additional */
+	while (size >= 64) {
+		/* Process 64 byte chunk */
+		atmel_sha_process(buf, 64);
+
+		size -= 64;
+		buf += 64;
+		ctx->length += 64;
+	}
+
+	if (size) {
+		memcpy(ctx->buffer + remaining, buf, size);
+		ctx->length += size;
+	}
+
+	return 0;
+}
+
+static int atmel_sha_fill_padding(struct sha_ctx *ctx)
+{
+	unsigned int index, padlen;
+	u64 size, bits;
+	u8 sha256_padding[64] = {
+		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	};
+
+	size = ctx->length;
+
+	bits = cpu_to_be64(size << 3);
+
+	/* 64 byte, 512 bit block size */
+	index = ctx->length & 0x3F;
+	padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+
+	/* set last entry to be 0x80 then 0's */
+	atmel_sha_chunk(ctx, sha256_padding, padlen);
+	/* Bolt number of bits to the end */
+	atmel_sha_chunk(ctx, (u8 *)&bits, 8);
+
+	if (ctx->length & 0x3F)
+		debug("ERROR, Remainder after PADDING");
+
+	return 0;
+}
+
+/**
+ * Computes hash value of input pbuf using h/w acceleration
+ *
+ * @param in_addr	A pointer to the input buffer
+ * @param buflen	Byte length of input buffer
+ * @param out_addr	A pointer to the output buffer. When complete
+ *			32 bytes are copied to pout[0]...pout[31]. Thus, a user
+ *			should allocate at least 32 bytes at pOut in advance.
+ * @param chunk_size	chunk size for sha256
+ */
+void hw_sha256(const uchar *in_addr, uint buflen,
+	       uchar *out_addr, uint chunk_size)
+{
+	struct hash_algo *algo;
+	struct sha_ctx *ctx;
+
+	hash_lookup_algo("sha256", &algo);
+	hw_sha_init(algo, (void *)&ctx);
+	atmel_sha_chunk((void *)ctx, in_addr, buflen);
+	atmel_sha_fill_padding(ctx);
+	hw_sha_finish(algo, (void *)ctx, out_addr, buflen);
+}
+
+/**
+ * Computes hash value of input pbuf using h/w acceleration
+ *
+ * @param in_addr	A pointer to the input buffer
+ * @param buflen	Byte length of input buffer
+ * @param out_addr	A pointer to the output buffer. When complete
+ *			32 bytes are copied to pout[0]...pout[31]. Thus, a user
+ *			should allocate at least 32 bytes at pOut in advance.
+ * @param chunk_size	chunk_size for sha1
+ */
+void hw_sha1(const uchar *in_addr, uint buflen,
+	     uchar *out_addr, uint chunk_size)
+{
+	struct hash_algo *algo;
+	struct sha_ctx *ctx;
+
+	hash_lookup_algo("sha1", &algo);
+	hw_sha_init(algo, (void *)&ctx);
+	atmel_sha_chunk((void *)ctx, in_addr, buflen);
+	atmel_sha_fill_padding(ctx);
+	hw_sha_finish(algo, (void *)ctx, out_addr, buflen);
+}
+
+/*
+ * Create the context for sha progressive hashing using h/w acceleration
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctxp: Pointer to the pointer of the context for hashing
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_init(struct hash_algo *algo, void **ctxp)
+{
+	struct atmel_sha *sha = (struct atmel_sha *)ATMEL_BASE_SHA;
+	struct sha_ctx *ctx;
+	u32 reg;
+
+	ctx = malloc(sizeof(struct sha_ctx));
+	if (!ctx) {
+		debug("Failed to allocate context\n");
+		return -ENOMEM;
+	}
+	*ctxp = ctx;
+
+	ctx->algo = get_hash_type(algo);
+	ctx->length = 0;
+
+	debug("Atmel sha init\n");
+	at91_periph_clk_enable(ATMEL_ID_SHA);
+
+	/* Reset the SHA engine */
+	writel(ATMEL_HASH_CR_SWRST, &sha->cr);
+
+	/* Set AUTO mode and fastest operation */
+	reg = ATMEL_HASH_MR_SMOD_AUTO | ATMEL_HASH_MR_PROCDLY_SHORT;
+	if (ctx->algo == ATMEL_HASH_SHA1)
+		reg |= ATMEL_HASH_MR_ALGO_SHA1;
+	else
+		reg |= ATMEL_HASH_MR_ALGO_SHA256;
+	writel(reg, &sha->mr);
+
+	/* Set ready to receive first */
+	writel(ATMEL_HASH_CR_FIRST, &sha->cr);
+
+	/* Ready to roll */
+	return 0;
+}
+
+/*
+ * Update buffer for sha progressive hashing using h/w acceleration
+ *
+ * The context is freed by this function if an error occurs.
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctx: Pointer to the context for hashing
+ * @buf: Pointer to the buffer being hashed
+ * @size: Size of the buffer being hashed
+ * @is_last: 1 if this is the last update; 0 otherwise
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
+		  unsigned int size, int is_last)
+{
+	struct sha_ctx *sha_ctx = ctx;
+
+	debug("Atmel sha update: %d bytes\n", size);
+
+	/* Send down in chunks */
+	atmel_sha_chunk(sha_ctx, buf, size);
+
+	if (is_last)
+		atmel_sha_fill_padding(sha_ctx);
+
+	return 0;
+}
+
+/*
+ * Copy sha hash result at destination location
+ *
+ * The context is freed after completion of hash operation or after an error.
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctx: Pointer to the context for hashing
+ * @dest_buf: Pointer to the destination buffer where hash is to be copied
+ * @size: Size of the buffer being hashed
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
+		  int size)
+{
+	struct atmel_sha *sha = (struct atmel_sha *)ATMEL_BASE_SHA;
+	struct sha_ctx *sha_ctx = ctx;
+	unsigned int len, i;
+	u32 *addr_buf;
+
+	/* Copy data back */
+	len = (sha_ctx->algo == ATMEL_HASH_SHA1) ?
+		SHA1_SUM_LEN : SHA256_SUM_LEN;
+	addr_buf = (u32 *)dest_buf;
+	for (i = 0; i < (len / 4); i++)
+		addr_buf[i] = sha->iodatarx[i];
+
+	free(ctx);
+
+	return 0;
+}
+
+#endif /* CONFIG_SHA_HW_ACCEL */
diff --git a/drivers/crypto/atmel_sha.h b/drivers/crypto/atmel_sha.h
new file mode 100644
index 0000000..68ed988
--- /dev/null
+++ b/drivers/crypto/atmel_sha.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Header file for Atmel SHA engine - SFR definitions
+ *
+ * Copyright (c) 2018 Laird
+ */
+
+#ifndef __DRIVERS_ATMEL_SHA_H__
+#define __DRIVERS_ATMEL_SHA_H__
+
+/* SHA register footprint */
+struct atmel_sha {
+	u32 cr;
+	u32 mr;
+	u32 reserved0[2];
+	u32 ier;
+	u32 idr;
+	u32 imr;
+	u32 isr;
+	u32 reserved1[8];
+	u32 idatarx[16];
+	u32 iodatarx[16];
+	u32 reserved2[16];
+};
+
+/* CR */
+#define ATMEL_HASH_CR_MASK		(0xffff << 0)
+#define ATMEL_HASH_CR_START		(1 << 0)
+#define ATMEL_HASH_CR_FIRST		(1 << 4)
+#define ATMEL_HASH_CR_SWRST		(1 << 8)
+
+/* MR */
+#define ATMEL_HASH_MR_MASK		(0xffff << 0)
+#define ATMEL_HASH_MR_SMOD_MANUAL	(0 << 0)
+#define ATMEL_HASH_MR_SMOD_AUTO		(1 << 0)
+#define ATMEL_HASH_MR_SMOD_IDATAR0	(2 << 0)
+#define ATMEL_HASH_MR_PROCDLY_SHORT	(0 << 4)
+#define ATMEL_HASH_MR_PROCDLY_LONG	(1 << 4)
+#define ATMEL_HASH_MR_ALGO_SHA1		(0 << 8)
+#define ATMEL_HASH_MR_ALGO_SHA256	(1 << 8)
+#define ATMEL_HASH_MR_ALGO_SHA384	(2 << 8)
+#define ATMEL_HASH_MR_ALGO_SHA512	(3 << 8)
+#define ATMEL_HASH_MR_ALGO_SHA224	(4 << 8)
+#define ATMEL_HASH_MR_DUALBUFF_INACTIVE	(0 << 16)
+#define ATMEL_HASH_MR_DUALBUFF_ACTIVE	(1 << 16)
+
+/* ISR */
+#define ATMEL_HASH_ISR_MASK		(1 << 0)
+#define ATMEL_HASH_ISR_DATRDY		(1 << 0)
+
+#endif /* __DRIVERS_ATMEL_SHA_H__ */
diff --git a/lib/Makefile b/lib/Makefile
index d531ea5..ff0ad2f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -46,8 +46,10 @@ obj-y += list_sort.o
 endif
 
 obj-$(CONFIG_RSA) += rsa/
+ifneq ($(CONFIG_SHA_PROG_HW_ACCEL),y)
 obj-$(CONFIG_SHA1) += sha1.o
 obj-$(CONFIG_SHA256) += sha256.o
+endif
 
 obj-$(CONFIG_$(SPL_)ZLIB) += zlib/
 obj-$(CONFIG_$(SPL_)GZIP) += gunzip.o
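For illustration only (not part of the patch), a minimal caller of the progressive hooks above might look like the sketch below. The function name is hypothetical, and it assumes hash_lookup_algo() resolves "sha256" while this driver supplies hw_sha_init()/hw_sha_update()/hw_sha_finish():

#include <common.h>
#include <hash.h>
#include <hw_sha.h>
#include <u-boot/sha256.h>

/* Hypothetical helper: hash a buffer through the Atmel engine using the
 * progressive interface implemented by this driver.
 */
static int example_hash_buffer(const void *buf, unsigned int len,
			       u8 digest[SHA256_SUM_LEN])
{
	struct hash_algo *algo;
	void *ctx;
	int ret;

	ret = hash_lookup_algo("sha256", &algo);
	if (ret)
		return ret;

	ret = hw_sha_init(algo, &ctx);
	if (ret)
		return ret;

	/* is_last = 1 makes the driver append the final SHA padding */
	ret = hw_sha_update(algo, ctx, buf, len, 1);
	if (ret)
		return ret;

	/* Reads the digest registers and frees the context */
	return hw_sha_finish(algo, ctx, digest, SHA256_SUM_LEN);
}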

On Thu, May 24, 2018 at 02:43:24PM +0100, Ben Whitten wrote:
> Move to calling the abstraction which allows for hardware acceleration.
>
> Signed-off-by: Ben Whitten <ben.whitten@lairdtech.com>
> ---
>  common/image-fit.c | 26 ++++++++++++++++----------
>  1 file changed, 16 insertions(+), 10 deletions(-)
>
> diff --git a/common/image-fit.c b/common/image-fit.c
> index 8c15ed1..01ea864 100644
> --- a/common/image-fit.c
> +++ b/common/image-fit.c
> @@ -1082,19 +1082,25 @@ int fit_set_timestamp(void *fit, int noffset, time_t timestamp)
>  int calculate_hash(const void *data, int data_len, const char *algo,
>  			uint8_t *value, int *value_len)
>  {
> +	struct hash_algo *hash_algo;
> +	int ret;
> +
> +	ret = hash_lookup_algo(algo, &hash_algo);
> +	if (ret)
> +		return ret;
> +
>  	if (IMAGE_ENABLE_CRC32 && strcmp(algo, "crc32") == 0) {
> -		*((uint32_t *)value) = crc32_wd(0, data, data_len,
> -				CHUNKSZ_CRC32);
> -		*((uint32_t *)value) = cpu_to_uimage(*((uint32_t *)value));
> -		*value_len = 4;
> +		hash_algo->hash_func_ws((unsigned char *)data, data_len,
> +				(unsigned char *)value, hash_algo->chunk_size);
> +		*value_len = hash_algo->digest_size;
>  	} else if (IMAGE_ENABLE_SHA1 && strcmp(algo, "sha1") == 0) {
> -		sha1_csum_wd((unsigned char *)data, data_len,
> -			     (unsigned char *)value, CHUNKSZ_SHA1);
> -		*value_len = 20;
> +		hash_algo->hash_func_ws((unsigned char *)data, data_len,
> +				(unsigned char *)value, hash_algo->chunk_size);
> +		*value_len = hash_algo->digest_size;
>  	} else if (IMAGE_ENABLE_SHA256 && strcmp(algo, "sha256") == 0) {
> -		sha256_csum_wd((unsigned char *)data, data_len,
> -			       (unsigned char *)value, CHUNKSZ_SHA256);
> -		*value_len = SHA256_SUM_LEN;
> +		hash_algo->hash_func_ws((unsigned char *)data, data_len,
> +				(unsigned char *)value, hash_algo->chunk_size);
> +		*value_len = hash_algo->digest_size;
>  	} else if (IMAGE_ENABLE_MD5 && strcmp(algo, "md5") == 0) {
>  		md5_wd((unsigned char *)data, data_len, value, CHUNKSZ_MD5);
>  		*value_len = 16;
I think we can vastly simplify this function to just, roughly:

	struct hash_algo *hash_algo;
	int ret;

	ret = hash_lookup_algo(algo, &hash_algo);
	if (ret)
		return ret;

	ret = hash_algo->hash_func_ws((unsigned char *)data, data_len,
			(unsigned char *)value, hash_algo->chunk_size);
	if (!ret)
		*value_len = hash_algo->digest_size;

	return ret;

But I didn't confirm that md5 will be covered, but I assume it is.
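Spelled out as a complete function, the simplification Tom describes would look roughly like the sketch below. One assumption to check against include/hash.h: there hash_func_ws() returns void, so the return-value check from the rough outline above drops out; it would only reappear if that callback were changed to report errors.

int calculate_hash(const void *data, int data_len, const char *algo,
		   uint8_t *value, int *value_len)
{
	struct hash_algo *hash_algo;
	int ret;

	/* Any algorithm not in the generic table is rejected here */
	ret = hash_lookup_algo(algo, &hash_algo);
	if (ret)
		return ret;

	/* One-shot hash; hardware or software depending on the table entry */
	hash_algo->hash_func_ws((unsigned char *)data, data_len,
				(unsigned char *)value,
				hash_algo->chunk_size);
	*value_len = hash_algo->digest_size;

	return 0;
}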

On 25 May 2018 at 12:46, Tom Rini <trini@konsulko.com> wrote:
> On Thu, May 24, 2018 at 02:43:24PM +0100, Ben Whitten wrote:
>> Move to calling the abstraction which allows for hardware acceleration.
>>
>> Signed-off-by: Ben Whitten <ben.whitten@lairdtech.com>
>> ---
>>  common/image-fit.c | 26 ++++++++++++++++----------
>>  1 file changed, 16 insertions(+), 10 deletions(-)
>>
>> diff --git a/common/image-fit.c b/common/image-fit.c
>> index 8c15ed1..01ea864 100644
>> --- a/common/image-fit.c
>> +++ b/common/image-fit.c
>> @@ -1082,19 +1082,25 @@ int fit_set_timestamp(void *fit, int noffset, time_t timestamp)
>>  int calculate_hash(const void *data, int data_len, const char *algo,
>>  			uint8_t *value, int *value_len)
>>  {
>> +	struct hash_algo *hash_algo;
>> +	int ret;
>> +
>> +	ret = hash_lookup_algo(algo, &hash_algo);
>> +	if (ret)
>> +		return ret;
>> +
>>  	if (IMAGE_ENABLE_CRC32 && strcmp(algo, "crc32") == 0) {
>> -		*((uint32_t *)value) = crc32_wd(0, data, data_len,
>> -				CHUNKSZ_CRC32);
>> -		*((uint32_t *)value) = cpu_to_uimage(*((uint32_t *)value));
>> -		*value_len = 4;
>> +		hash_algo->hash_func_ws((unsigned char *)data, data_len,
>> +				(unsigned char *)value, hash_algo->chunk_size);
>> +		*value_len = hash_algo->digest_size;
>>  	} else if (IMAGE_ENABLE_SHA1 && strcmp(algo, "sha1") == 0) {
>> -		sha1_csum_wd((unsigned char *)data, data_len,
>> -			     (unsigned char *)value, CHUNKSZ_SHA1);
>> -		*value_len = 20;
>> +		hash_algo->hash_func_ws((unsigned char *)data, data_len,
>> +				(unsigned char *)value, hash_algo->chunk_size);
>> +		*value_len = hash_algo->digest_size;
>>  	} else if (IMAGE_ENABLE_SHA256 && strcmp(algo, "sha256") == 0) {
>> -		sha256_csum_wd((unsigned char *)data, data_len,
>> -			       (unsigned char *)value, CHUNKSZ_SHA256);
>> -		*value_len = SHA256_SUM_LEN;
>> +		hash_algo->hash_func_ws((unsigned char *)data, data_len,
>> +				(unsigned char *)value, hash_algo->chunk_size);
>> +		*value_len = hash_algo->digest_size;
>>  	} else if (IMAGE_ENABLE_MD5 && strcmp(algo, "md5") == 0) {
>>  		md5_wd((unsigned char *)data, data_len, value, CHUNKSZ_MD5);
>>  		*value_len = 16;
>
> I think we can vastly simplify this function to just, roughly:
>
> 	struct hash_algo *hash_algo;
> 	int ret;
>
> 	ret = hash_lookup_algo(algo, &hash_algo);
> 	if (ret)
> 		return ret;
>
> 	ret = hash_algo->hash_func_ws((unsigned char *)data, data_len,
> 			(unsigned char *)value, hash_algo->chunk_size);
> 	if (!ret)
> 		*value_len = hash_algo->digest_size;
>
> 	return ret;
>
> But I didn't confirm that md5 will be covered, but I assume it is.
It's not currently, but I can add that and resend. Do you need a resend of
the series, or just this patch individually?

Thanks,
Ben
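For reference, one way md5 could be folded into the generic table is sketched below. The wrapper name and the standalone entry are hypothetical; a real change would extend the hash_algos[] array in common/hash.c, and this assumes CHUNKSZ_MD5 and md5_wd() come from include/image.h and include/u-boot/md5.h respectively.

#include <common.h>
#include <hash.h>
#include <image.h>
#include <u-boot/md5.h>

/* Hypothetical adapter: md5_wd() takes a non-const input pointer, so a thin
 * wrapper is needed to match the const-qualified hash_func_ws prototype.
 */
static void md5_hash_func_ws(const unsigned char *input, unsigned int ilen,
			     unsigned char *output, unsigned int chunk_sz)
{
	md5_wd((unsigned char *)input, ilen, output, chunk_sz);
}

/* Illustrative entry, mirroring the existing sha1/sha256 entries, so that
 * hash_lookup_algo("md5") succeeds for calculate_hash().
 */
static struct hash_algo md5_algo_entry = {
	.name		= "md5",
	.digest_size	= 16,
	.chunk_size	= CHUNKSZ_MD5,
	.hash_func_ws	= md5_hash_func_ws,
};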

On Wed, Jun 06, 2018 at 11:23:05AM +0100, Ben Whitten wrote:
> On 25 May 2018 at 12:46, Tom Rini <trini@konsulko.com> wrote:
>> On Thu, May 24, 2018 at 02:43:24PM +0100, Ben Whitten wrote:
>>> Move to calling the abstraction which allows for hardware acceleration.
>>>
>>> Signed-off-by: Ben Whitten <ben.whitten@lairdtech.com>
>>> ---
>>>  common/image-fit.c | 26 ++++++++++++++++----------
>>>  1 file changed, 16 insertions(+), 10 deletions(-)
>>>
>>> diff --git a/common/image-fit.c b/common/image-fit.c
>>> index 8c15ed1..01ea864 100644
>>> --- a/common/image-fit.c
>>> +++ b/common/image-fit.c
>>> @@ -1082,19 +1082,25 @@ int fit_set_timestamp(void *fit, int noffset, time_t timestamp)
>>>  int calculate_hash(const void *data, int data_len, const char *algo,
>>>  			uint8_t *value, int *value_len)
>>>  {
>>> +	struct hash_algo *hash_algo;
>>> +	int ret;
>>> +
>>> +	ret = hash_lookup_algo(algo, &hash_algo);
>>> +	if (ret)
>>> +		return ret;
>>> +
>>>  	if (IMAGE_ENABLE_CRC32 && strcmp(algo, "crc32") == 0) {
>>> -		*((uint32_t *)value) = crc32_wd(0, data, data_len,
>>> -				CHUNKSZ_CRC32);
>>> -		*((uint32_t *)value) = cpu_to_uimage(*((uint32_t *)value));
>>> -		*value_len = 4;
>>> +		hash_algo->hash_func_ws((unsigned char *)data, data_len,
>>> +				(unsigned char *)value, hash_algo->chunk_size);
>>> +		*value_len = hash_algo->digest_size;
>>>  	} else if (IMAGE_ENABLE_SHA1 && strcmp(algo, "sha1") == 0) {
>>> -		sha1_csum_wd((unsigned char *)data, data_len,
>>> -			     (unsigned char *)value, CHUNKSZ_SHA1);
>>> -		*value_len = 20;
>>> +		hash_algo->hash_func_ws((unsigned char *)data, data_len,
>>> +				(unsigned char *)value, hash_algo->chunk_size);
>>> +		*value_len = hash_algo->digest_size;
>>>  	} else if (IMAGE_ENABLE_SHA256 && strcmp(algo, "sha256") == 0) {
>>> -		sha256_csum_wd((unsigned char *)data, data_len,
>>> -			       (unsigned char *)value, CHUNKSZ_SHA256);
>>> -		*value_len = SHA256_SUM_LEN;
>>> +		hash_algo->hash_func_ws((unsigned char *)data, data_len,
>>> +				(unsigned char *)value, hash_algo->chunk_size);
>>> +		*value_len = hash_algo->digest_size;
>>>  	} else if (IMAGE_ENABLE_MD5 && strcmp(algo, "md5") == 0) {
>>>  		md5_wd((unsigned char *)data, data_len, value, CHUNKSZ_MD5);
>>>  		*value_len = 16;
>>
>> I think we can vastly simplify this function to just, roughly:
>>
>> 	struct hash_algo *hash_algo;
>> 	int ret;
>>
>> 	ret = hash_lookup_algo(algo, &hash_algo);
>> 	if (ret)
>> 		return ret;
>>
>> 	ret = hash_algo->hash_func_ws((unsigned char *)data, data_len,
>> 			(unsigned char *)value, hash_algo->chunk_size);
>> 	if (!ret)
>> 		*value_len = hash_algo->digest_size;
>>
>> 	return ret;
>>
>> But I didn't confirm that md5 will be covered, but I assume it is.
> It's not currently, but I can add that and resend. Do you need a resend of
> the series, or just this patch individually?
Please re-work the series, thanks!