
Add TPM2 functions to support boot measurement. This includes starting up the TPM, initializing or appending to the event log, and measuring the U-Boot version string.
Signed-off-by: Eddie James <eajames@linux.ibm.com>
---
 include/efi_tcg2.h |  44 ---
 include/tpm-v2.h   | 135 +++++++++
 lib/tpm-v2.c       | 700 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 835 insertions(+), 44 deletions(-)
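
A rough usage sketch for reviewers, not part of the patch: the measure_blob() wrapper, its PCR/event-type choice and the surrounding assumptions (a bound TPM2 device plus an event log described by linux,sml-base/linux,sml-size or a memory-region phandle) are illustrative only; the tcg2_* calls follow the declarations this patch adds to include/tpm-v2.h.

#include <common.h>
#include <tpm-v2.h>

/*
 * Illustrative only: measure an arbitrary blob into the given PCR and log
 * the blob itself as the event data. tcg2_measurement_init() also measures
 * the U-Boot version string, and tcg2_measurement_term() caps PCRs 0-7
 * with separator events and unmaps the log.
 */
static int measure_blob(u32 pcr, const u8 *blob, u32 size, u32 ev_type)
{
	int rc;

	rc = tcg2_measurement_init();
	if (rc)
		return rc;

	rc = tcg2_measure_data(pcr, size, blob, ev_type, size, blob);

	tcg2_measurement_term();

	return rc;
}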
diff --git a/include/efi_tcg2.h b/include/efi_tcg2.h
index 874306dc11..23016773f4 100644
--- a/include/efi_tcg2.h
+++ b/include/efi_tcg2.h
@@ -129,50 +129,6 @@ struct efi_tcg2_boot_service_capability {
 #define BOOT_SERVICE_CAPABILITY_MIN \
 	offsetof(struct efi_tcg2_boot_service_capability, number_of_pcr_banks)
-#define TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03 "Spec ID Event03" -#define TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2 2 -#define TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2 0 -#define TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_ERRATA_TPM2 2 - -/** - * struct TCG_EfiSpecIdEventAlgorithmSize - hashing algorithm information - * - * @algorithm_id: algorithm defined in enum tpm2_algorithms - * @digest_size: size of the algorithm - */ -struct tcg_efi_spec_id_event_algorithm_size { - u16 algorithm_id; - u16 digest_size; -} __packed; - -/** - * struct TCG_EfiSpecIDEventStruct - content of the event log header - * - * @signature: signature, set to Spec ID Event03 - * @platform_class: class defined in TCG ACPI Specification - * Client Common Header. - * @spec_version_minor: minor version - * @spec_version_major: major version - * @spec_version_errata: major version - * @uintn_size: size of the efi_uintn_t fields used in various - * data structures used in this specification. - * 0x01 indicates u32 and 0x02 indicates u64 - * @number_of_algorithms: hashing algorithms used in this event log - * @digest_sizes: array of number_of_algorithms pairs - * 1st member defines the algorithm id - * 2nd member defines the algorithm size - */ -struct tcg_efi_spec_id_event { - u8 signature[16]; - u32 platform_class; - u8 spec_version_minor; - u8 spec_version_major; - u8 spec_errata; - u8 uintn_size; - u32 number_of_algorithms; - struct tcg_efi_spec_id_event_algorithm_size digest_sizes[]; -} __packed; - /** * struct tdEFI_TCG2_FINAL_EVENTS_TABLE - log entries after Get Event Log * @version: version number for this structure diff --git a/include/tpm-v2.h b/include/tpm-v2.h index 85feda3e06..62a245e9d9 100644 --- a/include/tpm-v2.h +++ b/include/tpm-v2.h @@ -214,6 +214,50 @@ struct tcg_pcr_event2 { u8 event[]; } __packed;
+/** + * struct TCG_EfiSpecIdEventAlgorithmSize - hashing algorithm information + * + * @algorithm_id: algorithm defined in enum tpm2_algorithms + * @digest_size: size of the algorithm + */ +struct tcg_efi_spec_id_event_algorithm_size { + u16 algorithm_id; + u16 digest_size; +} __packed; + +#define TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03 "Spec ID Event03" +#define TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2 2 +#define TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2 0 +#define TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_ERRATA_TPM2 2 + +/** + * struct TCG_EfiSpecIDEventStruct - content of the event log header + * + * @signature: signature, set to Spec ID Event03 + * @platform_class: class defined in TCG ACPI Specification + * Client Common Header. + * @spec_version_minor: minor version + * @spec_version_major: major version + * @spec_version_errata: major version + * @uintn_size: size of the efi_uintn_t fields used in various + * data structures used in this specification. + * 0x01 indicates u32 and 0x02 indicates u64 + * @number_of_algorithms: hashing algorithms used in this event log + * @digest_sizes: array of number_of_algorithms pairs + * 1st member defines the algorithm id + * 2nd member defines the algorithm size + */ +struct tcg_efi_spec_id_event { + u8 signature[16]; + u32 platform_class; + u8 spec_version_minor; + u8 spec_version_major; + u8 spec_errata; + u8 uintn_size; + u32 number_of_algorithms; + struct tcg_efi_spec_id_event_algorithm_size digest_sizes[]; +} __packed; + /** * TPM2 Structure Tags for command/response buffers. * @@ -340,6 +384,24 @@ enum tpm2_algorithms { TPM2_ALG_SM3_256 = 0x12, };
+static inline u16 tpm2_algorithm_to_len(enum tpm2_algorithms a)
+{
+	switch (a) {
+	case TPM2_ALG_SHA1:
+		return TPM2_SHA1_DIGEST_SIZE;
+	case TPM2_ALG_SHA256:
+		return TPM2_SHA256_DIGEST_SIZE;
+	case TPM2_ALG_SHA384:
+		return TPM2_SHA384_DIGEST_SIZE;
+	case TPM2_ALG_SHA512:
+		return TPM2_SHA512_DIGEST_SIZE;
+	default:
+		return 0;
+	}
+}
+
+#define tpm2_algorithm_to_mask(a)	(1 << (a))
+
 /* NV index attributes */
 enum tpm_index_attrs {
 	TPMA_NV_PPWRITE = 1UL << 0,
@@ -419,6 +481,66 @@ enum {
 	HR_NV_INDEX = TPM_HT_NV_INDEX << HR_SHIFT,
 };
+/** + * Measure data into the TPM PCRs and the platform event log. + * + * @pcr_index Index of the PCR + * @size Size of the data + * @data Pointer to the data + * @event_type Event log type + * @event_size Size of the event + * @event Pointer to the event + * + * Return: zero on success, negative errno otherwise + */ +int tcg2_measure_data(u32 pcr_index, u32 size, const u8 *data, u32 event_type, + u32 event_size, const u8 *event); + +/** + * Measure an event into the platform event log. + * + * @pcr_index Index of the PCR + * @event_type Event log type + * @size Size of the event + * @event Pointer to the event + * + * Return: zero on success, negative errno otherwise + */ +int tcg2_measure_event(u32 pcr_index, u32 event_type, u32 size, + const u8 *event); + +/** + * Begin measurements. + * + * Return: zero on success, negative errno otherwise + */ +int tcg2_measurement_init(void); + +/** + * Stop measurements and record separator events. + */ +void tcg2_measurement_term(void); + +/** + * Get the platform event log address and size. + * + * @dev TPM device + * @addr Address of the log + * @size Size of the log + * + * Return: zero on success, negative errno otherwise + */ +int tcg2_platform_get_log(struct udevice *dev, void **addr, u32 *size); + +/** + * Get the first TPM2 device found. + * + * @dev TPM device + * + * Return: zero on success, negative errno otherwise + */ +int tcg2_platform_get_tpm2(struct udevice **dev); + /** * Issue a TPM2_Startup command. * @@ -538,6 +660,19 @@ u32 tpm2_pcr_read(struct udevice *dev, u32 idx, unsigned int idx_min_sz, u32 tpm2_get_capability(struct udevice *dev, u32 capability, u32 property, void *buf, size_t prop_count);
+/** + * tpm2_get_pcr_info() - get the supported, active PCRs and number of banks + * + * @dev: TPM device + * @supported_pcr: bitmask with the algorithms supported + * @active_pcr: bitmask with the active algorithms + * @pcr_banks: number of PCR banks + * + * @return 0 on success, code of operation or negative errno on failure + */ +int tpm2_get_pcr_info(struct udevice *dev, u32 *supported_pcr, u32 *active_pcr, + u32 *pcr_banks); + /** * Issue a TPM2_DictionaryAttackLockReset command. * diff --git a/lib/tpm-v2.c b/lib/tpm-v2.c index 697b982e07..68291841e5 100644 --- a/lib/tpm-v2.c +++ b/lib/tpm-v2.c @@ -4,13 +4,589 @@ * Author: Miquel Raynal miquel.raynal@bootlin.com */
+#include <asm/io.h>
 #include <common.h>
 #include <dm.h>
+#include <dm/of_access.h>
 #include <tpm-common.h>
 #include <tpm-v2.h>
 #include <linux/bitops.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <u-boot/sha1.h>
+#include <u-boot/sha256.h>
+#include <u-boot/sha512.h>
+#include <version_string.h>
 #include "tpm-utils.h"
+static struct tcg2_measurement_context { + struct udevice *dev; + u8 *log; + u32 log_position; + u32 log_size; +} tcg2mctx; + +static const enum tpm2_algorithms tcg2algos[] = { + TPM2_ALG_SHA1, + TPM2_ALG_SHA256, + TPM2_ALG_SHA384, + TPM2_ALG_SHA512, +}; + +static int tcg2_get_active_pcr_banks(struct udevice *dev, u32 *active_pcr_banks) +{ + u32 supported = 0; + u32 pcr_banks = 0; + u32 active = 0; + int rc; + + rc = tpm2_get_pcr_info(dev, &supported, &active, &pcr_banks); + if (rc) + return rc; + + *active_pcr_banks = active; + + return 0; +} + +static u32 tcg2_event_get_size(struct tpml_digest_values *digest_list) +{ + u32 len; + size_t i; + + len = offsetof(struct tcg_pcr_event2, digests); + len += offsetof(struct tpml_digest_values, digests); + for (i = 0; i < digest_list->count; ++i) { + u16 l = tpm2_algorithm_to_len(digest_list->digests[i].hash_alg); + + if (!l) + continue; + + len += l + offsetof(struct tpmt_ha, digest); + } + len += sizeof(u32); + + return len; +} + +static int tcg2_create_digest(const u8 *input, u32 length, + struct tpml_digest_values *digest_list) +{ + u8 final[sizeof(union tpmu_ha)]; + sha256_context ctx_256; + sha512_context ctx_512; + sha1_context ctx; + u32 active; + size_t i; + u32 len; + int rc; + + rc = tcg2_get_active_pcr_banks(tcg2mctx.dev, &active); + if (rc) + return rc; + + digest_list->count = 0; + for (i = 0; i < ARRAY_SIZE(tcg2algos); ++i) { + u32 mask = tpm2_algorithm_to_mask(tcg2algos[i]); + + if (!(active & mask)) + continue; + + switch (tcg2algos[i]) { + case TPM2_ALG_SHA1: + sha1_starts(&ctx); + sha1_update(&ctx, input, length); + sha1_finish(&ctx, final); + len = TPM2_SHA1_DIGEST_SIZE; + break; + case TPM2_ALG_SHA256: + sha256_starts(&ctx_256); + sha256_update(&ctx_256, input, length); + sha256_finish(&ctx_256, final); + len = TPM2_SHA256_DIGEST_SIZE; + break; + case TPM2_ALG_SHA384: + sha384_starts(&ctx_512); + sha384_update(&ctx_512, input, length); + sha384_finish(&ctx_512, final); + len = TPM2_SHA384_DIGEST_SIZE; + break; + case TPM2_ALG_SHA512: + sha512_starts(&ctx_512); + sha512_update(&ctx_512, input, length); + sha512_finish(&ctx_512, final); + len = TPM2_SHA512_DIGEST_SIZE; + break; + default: + printf("%s: unsupported algorithm %x\n", __func__, + tcg2algos[i]); + return -EINVAL; + } + + digest_list->digests[digest_list->count].hash_alg = + tcg2algos[i]; + memcpy(&digest_list->digests[digest_list->count].digest, final, + len); + digest_list->count++; + } + + return 0; +} + +static int tcg2_log_append(u32 pcr_index, u32 event_type, + struct tpml_digest_values *digest_list, u32 size, + const u8 *event) +{ + u32 event_size; + size_t len; + size_t pos; + u8 *log; + u32 i; + + event_size = size + tcg2_event_get_size(digest_list); + if (tcg2mctx.log_position + event_size > tcg2mctx.log_size) { + printf("%s: log too large: %u + %u > %u\n", __func__, + tcg2mctx.log_position, event_size, tcg2mctx.log_size); + return -ENOBUFS; + } + + log = tcg2mctx.log + tcg2mctx.log_position; + + pos = offsetof(struct tcg_pcr_event2, pcr_index); + put_unaligned_le32(pcr_index, log); + pos = offsetof(struct tcg_pcr_event2, event_type); + put_unaligned_le32(event_type, log + pos); + pos = offsetof(struct tcg_pcr_event2, digests) + + offsetof(struct tpml_digest_values, count); + put_unaligned_le32(digest_list->count, log + pos); + + pos = offsetof(struct tcg_pcr_event2, digests) + + offsetof(struct tpml_digest_values, digests); + for (i = 0; i < digest_list->count; ++i) { + u16 hash_alg = digest_list->digests[i].hash_alg; + + len = 
tpm2_algorithm_to_len(hash_alg); + if (!len) + continue; + + pos += offsetof(struct tpmt_ha, hash_alg); + put_unaligned_le16(hash_alg, log + pos); + pos += offsetof(struct tpmt_ha, digest); + memcpy(log + pos, (u8 *)&digest_list->digests[i].digest, len); + pos += len; + } + + put_unaligned_le32(size, log + pos); + pos += sizeof(u32); + memcpy(log + pos, event, size); + pos += size; + + /* + * Ensure the calculated buffer is what we checked against + * This check should never fail. It checks the code above is + * calculating the right length for the event we are adding + */ + if (pos != event_size) + printf("%s: size miscalculation: %u != %u\n", __func__, pos, + event_size); + else + tcg2mctx.log_position += pos; + + return 0; +} + +static int tcg2_log_init(void) +{ + struct tcg_efi_spec_id_event *ev; + struct tcg_pcr_event *log; + u32 event_size; + u32 count = 0; + u32 log_size; + u32 active; + u32 mask; + size_t i; + u16 len; + int rc; + + rc = tcg2_get_active_pcr_banks(tcg2mctx.dev, &active); + if (rc) + return rc; + + event_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes); + for (i = 0; i < ARRAY_SIZE(tcg2algos); ++i) { + mask = tpm2_algorithm_to_mask(tcg2algos[i]); + + if (!(active & mask)) + continue; + + switch (tcg2algos[i]) { + case TPM2_ALG_SHA1: + case TPM2_ALG_SHA256: + case TPM2_ALG_SHA384: + case TPM2_ALG_SHA512: + count++; + break; + default: + continue; + } + } + + event_size += 1 + + (sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count); + log_size = offsetof(struct tcg_pcr_event, event) + event_size; + + if (log_size > tcg2mctx.log_size) { + printf("%s: log too large: %u > %u\n", __func__, log_size, + tcg2mctx.log_size); + return -ENOBUFS; + } + + log = (struct tcg_pcr_event *)tcg2mctx.log; + put_unaligned_le32(0, &log->pcr_index); + put_unaligned_le32(EV_NO_ACTION, &log->event_type); + memset(&log->digest, 0, sizeof(log->digest)); + put_unaligned_le32(event_size, &log->event_size); + + ev = (struct tcg_efi_spec_id_event *)log->event; + strncpy((char *)ev->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03, + sizeof(ev->signature)); + put_unaligned_le32(0, &ev->platform_class); + ev->spec_version_minor = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2; + ev->spec_version_major = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2; + ev->spec_errata = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_ERRATA_TPM2; + ev->uintn_size = sizeof(size_t) / sizeof(u32); + put_unaligned_le32(count, &ev->number_of_algorithms); + + count = 0; + for (i = 0; i < ARRAY_SIZE(tcg2algos); ++i) { + mask = tpm2_algorithm_to_mask(tcg2algos[i]); + + if (!(active & mask)) + continue; + + len = tpm2_algorithm_to_len(tcg2algos[i]); + if (!len) + continue; + + put_unaligned_le16(tcg2algos[i], + &ev->digest_sizes[count].algorithm_id); + put_unaligned_le16(len, &ev->digest_sizes[count].digest_size); + count++; + } + + *((u8 *)ev + (event_size - 1)) = 0; + tcg2mctx.log_position = log_size; + + return 0; +} + +static void tcg2_log_find_end(void) +{ + const u32 offset = offsetof(struct tcg_pcr_event2, digests) + + offsetof(struct tpml_digest_values, digests); + u32 event_size; + u32 count; + u16 algo; + u32 pos; + u16 len; + u8 *log; + u32 i; + + while (tcg2mctx.log_position + offset < tcg2mctx.log_size) { + log = tcg2mctx.log + tcg2mctx.log_position; + + pos = offsetof(struct tcg_pcr_event2, event_type); + if (!get_unaligned_le32(log + pos)) + return; + + pos = offsetof(struct tcg_pcr_event2, digests) + + offsetof(struct tpml_digest_values, count); + count = get_unaligned_le32(log + pos); + if (count > 
ARRAY_SIZE(tcg2algos)) + return; + + pos = offsetof(struct tcg_pcr_event2, digests) + + offsetof(struct tpml_digest_values, digests); + for (i = 0; i < count; ++i) { + pos += offsetof(struct tpmt_ha, hash_alg); + if (tcg2mctx.log_position + pos + sizeof(u16) >= + tcg2mctx.log_size) + return; + + algo = get_unaligned_le16(log + pos); + pos += offsetof(struct tpmt_ha, digest); + switch (algo) { + case TPM2_ALG_SHA1: + case TPM2_ALG_SHA256: + case TPM2_ALG_SHA384: + case TPM2_ALG_SHA512: + len = tpm2_algorithm_to_len(algo); + break; + default: + return; + } + + pos += len; + } + + if (tcg2mctx.log_position + pos + sizeof(u32) >= + tcg2mctx.log_size) + return; + + event_size = get_unaligned_le32(log + pos); + pos += event_size + sizeof(u32); + if (tcg2mctx.log_position + pos >= tcg2mctx.log_size) + return; + + tcg2mctx.log_position += pos; + } +} + +static int tcg2_log_parse(void) +{ + struct tcg_efi_spec_id_event *event; + struct tcg_pcr_event *log; + u32 calc_size; + u32 active; + u32 count; + u32 evsz; + u32 mask; + u16 algo; + u16 len; + int rc; + u32 i; + + if (tcg2mctx.log_size <= offsetof(struct tcg_pcr_event, event)) + return 0; + + log = (struct tcg_pcr_event *)tcg2mctx.log; + if (get_unaligned_le32(&log->pcr_index) != 0 || + get_unaligned_le32(&log->event_type) != EV_NO_ACTION) + return 0; + + for (i = 0; i < sizeof(log->digest); i++) { + if (log->digest[i]) + return 0; + } + + evsz = get_unaligned_le32(&log->event_size); + if (evsz < offsetof(struct tcg_efi_spec_id_event, digest_sizes) || + evsz + offsetof(struct tcg_pcr_event, event) > tcg2mctx.log_size) + return 0; + + event = (struct tcg_efi_spec_id_event *)log->event; + if (memcmp(event->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03, + sizeof(TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03))) + return 0; + + if (event->spec_version_minor != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2 || + event->spec_version_major != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2) + return 0; + + count = get_unaligned_le32(&event->number_of_algorithms); + if (count > ARRAY_SIZE(tcg2algos)) + return 0; + + calc_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes) + + (sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count) + + 1; + if (evsz != calc_size) + return 0; + + rc = tcg2_get_active_pcr_banks(tcg2mctx.dev, &active); + if (rc) + return rc; + + for (i = 0; i < count; ++i) { + algo = get_unaligned_le16(&event->digest_sizes[i].algorithm_id); + mask = tpm2_algorithm_to_mask(algo); + + if (!(active & mask)) + return 0; + + switch (algo) { + case TPM2_ALG_SHA1: + case TPM2_ALG_SHA256: + case TPM2_ALG_SHA384: + case TPM2_ALG_SHA512: + len = get_unaligned_le16(&event->digest_sizes[i].digest_size); + if (tpm2_algorithm_to_len(algo) != len) + return 0; + break; + default: + return 0; + } + } + + tcg2mctx.log_position = offsetof(struct tcg_pcr_event, event) + evsz; + + tcg2_log_find_end(); + + return 1; +} + +static int tcg2_pcr_extend(u32 pcr_index, + struct tpml_digest_values *digest_list) +{ + u32 rc; + size_t i; + + for (i = 0; i < digest_list->count; i++) { + u32 alg = digest_list->digests[i].hash_alg; + + rc = tpm2_pcr_extend(tcg2mctx.dev, pcr_index, alg, + (u8 *)&digest_list->digests[i].digest, + tpm2_algorithm_to_len(alg)); + if (rc) { + printf("%s: error pcr:%u alg:%08x\n", __func__, + pcr_index, alg); + return rc; + } + } + + return 0; +} + +int tcg2_measure_data(u32 pcr_index, u32 size, const u8 *data, u32 event_type, + u32 event_size, const u8 *event) +{ + struct tpml_digest_values digest_list; + int rc; + + rc = tcg2_create_digest(data, 
size, &digest_list); + if (rc) + return rc; + + rc = tcg2_pcr_extend(pcr_index, &digest_list); + if (rc) + return rc; + + return tcg2_log_append(pcr_index, event_type, &digest_list, event_size, + event); +} + +int tcg2_measure_event(u32 pcr_index, u32 event_type, u32 size, + const u8 *event) +{ + struct tpml_digest_values digest_list; + int rc; + + rc = tcg2_create_digest(event, size, &digest_list); + if (rc) + return rc; + + rc = tcg2_pcr_extend(pcr_index, &digest_list); + if (rc) + return rc; + + return tcg2_log_append(pcr_index, event_type, &digest_list, size, + event); +} + +int tcg2_measurement_init(void) +{ + int rc; + + rc = tcg2_platform_get_tpm2(&tcg2mctx.dev); + if (rc) + return rc; + + rc = tcg2_platform_get_log(tcg2mctx.dev, (void **)&tcg2mctx.log, + &tcg2mctx.log_size); + if (rc) + return rc; + + tcg2mctx.log_position = 0; + + rc = tpm_init(tcg2mctx.dev); + if (rc) + return rc; + + rc = tpm2_startup(tcg2mctx.dev, TPM2_SU_CLEAR); + if (rc) + return rc; + + rc = tpm2_self_test(tcg2mctx.dev, TPMI_YES); + if (rc) + return rc; + + rc = tcg2_log_parse(); + if (rc < 0) + return rc; + + if (!rc) { + rc = tcg2_log_init(); + if (rc) + return rc; + } + + return tcg2_measure_event(0, EV_S_CRTM_VERSION, + strlen(version_string) + 1, + (u8 *)version_string); +} + +void tcg2_measurement_term(void) +{ + u32 event = 0xffffffff; + int rc; + int i; + + for (i = 0; i < 8; ++i) { + rc = tcg2_measure_event(i, EV_SEPARATOR, sizeof(event), + (const u8 *)&event); + if (rc) + return; + } + + unmap_physmem(tcg2mctx.log, MAP_NOCACHE); +} + +int tcg2_platform_get_log(struct udevice *dev, void **addr, u32 *size) +{ + const __be32 *addr_prop; + const __be32 *size_prop; + int asize; + int ssize; + + addr_prop = dev_read_prop(dev, "linux,sml-base", &asize); + size_prop = dev_read_prop(dev, "linux,sml-size", &ssize); + if (addr_prop && size_prop) { + u64 a = of_read_number(addr_prop, asize / sizeof(__be32)); + u64 s = of_read_number(size_prop, ssize / sizeof(__be32)); + + *addr = map_physmem(a, s, MAP_NOCACHE); + *size = (u32)s; + } else { + struct ofnode_phandle_args args; + phys_addr_t a; + phys_size_t s; + + if (dev_read_phandle_with_args(dev, "memory-region", NULL, 0, + 0, &args)) + return -ENODEV; + + a = ofnode_get_addr_size(args.node, "reg", &s); + if (a == FDT_ADDR_T_NONE) + return -ENOMEM; + + *addr = map_physmem(a, s, MAP_NOCACHE); + *size = (u32)s; + } + + return 0; +} + +int tcg2_platform_get_tpm2(struct udevice **dev) +{ + for_each_tpm_device(*dev) { + if (tpm_get_version(*dev) == TPM_V2) + return 0; + } + + return -ENODEV; +} + u32 tpm2_startup(struct udevice *dev, enum tpm2_startup_types mode) { const u8 command_v2[12] = { @@ -342,6 +918,130 @@ u32 tpm2_get_capability(struct udevice *dev, u32 capability, u32 property, return 0; }
+static int tpm2_get_num_pcr(struct udevice *dev, u32 *num_pcr) +{ + u8 response[(sizeof(struct tpms_capability_data) - + offsetof(struct tpms_capability_data, data))]; + u32 properties_offset = + offsetof(struct tpml_tagged_tpm_property, tpm_property) + + offsetof(struct tpms_tagged_property, value); + u32 ret; + + memset(response, 0, sizeof(response)); + ret = tpm2_get_capability(dev, TPM2_CAP_TPM_PROPERTIES, + TPM2_PT_PCR_COUNT, response, 1); + if (ret) + return ret; + + *num_pcr = get_unaligned_be32(response + properties_offset); + if (*num_pcr > TPM2_MAX_PCRS) { + printf("%s: too many pcrs: %u\n", __func__, *num_pcr); + return -E2BIG; + } + + return 0; +} + +static bool tpm2_is_active_pcr(struct tpms_pcr_selection *selection) +{ + int i; + + /* + * check the pcr_select. If at least one of the PCRs supports the + * algorithm add it on the active ones + */ + for (i = 0; i < selection->size_of_select; i++) { + if (selection->pcr_select[i]) + return true; + } + + return false; +} + +int tpm2_get_pcr_info(struct udevice *dev, u32 *supported_pcr, u32 *active_pcr, + u32 *pcr_banks) +{ + u8 response[(sizeof(struct tpms_capability_data) - + offsetof(struct tpms_capability_data, data))]; + struct tpml_pcr_selection pcrs; + u32 num_pcr; + size_t i; + u32 ret; + + *supported_pcr = 0; + *active_pcr = 0; + *pcr_banks = 0; + memset(response, 0, sizeof(response)); + ret = tpm2_get_capability(dev, TPM2_CAP_PCRS, 0, response, 1); + if (ret) + return ret; + + pcrs.count = get_unaligned_be32(response); + /* + * We only support 5 algorithms for now so check against that + * instead of TPM2_NUM_PCR_BANKS + */ + if (pcrs.count > ARRAY_SIZE(tcg2algos) || pcrs.count < 1) { + printf("%s: too many pcrs: %u\n", __func__, pcrs.count); + return -EMSGSIZE; + } + + ret = tpm2_get_num_pcr(dev, &num_pcr); + if (ret) + return ret; + + for (i = 0; i < pcrs.count; i++) { + /* + * Definition of TPMS_PCR_SELECTION Structure + * hash: u16 + * size_of_select: u8 + * pcr_select: u8 array + * + * The offsets depend on the number of the device PCRs + * so we have to calculate them based on that + */ + u32 hash_offset = offsetof(struct tpml_pcr_selection, selection) + + i * offsetof(struct tpms_pcr_selection, pcr_select) + + i * ((num_pcr + 7) / 8); + u32 size_select_offset = + hash_offset + offsetof(struct tpms_pcr_selection, + size_of_select); + u32 pcr_select_offset = + hash_offset + offsetof(struct tpms_pcr_selection, + pcr_select); + + pcrs.selection[i].hash = + get_unaligned_be16(response + hash_offset); + pcrs.selection[i].size_of_select = + __get_unaligned_be(response + size_select_offset); + if (pcrs.selection[i].size_of_select > TPM2_PCR_SELECT_MAX) { + printf("%s: pcrs selection too large: %u\n", __func__, + pcrs.selection[i].size_of_select); + return -ENOBUFS; + } + /* copy the array of pcr_select */ + memcpy(pcrs.selection[i].pcr_select, response + pcr_select_offset, + pcrs.selection[i].size_of_select); + } + + for (i = 0; i < pcrs.count; i++) { + u32 hash_mask = tpm2_algorithm_to_mask(pcrs.selection[i].hash); + + if (hash_mask) { + *supported_pcr |= hash_mask; + if (tpm2_is_active_pcr(&pcrs.selection[i])) + *active_pcr |= hash_mask; + } else { + printf("%s: unknown algorithm %x\n", __func__, + pcrs.selection[i].hash); + } + } + + *pcr_banks = pcrs.count; + + return 0; +} + u32 tpm2_dam_reset(struct udevice *dev, const char *pw, const ssize_t pw_sz) { u8 command_v2[COMMAND_BUFFER_SIZE] = {
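
For reference, a minimal sketch of exercising the new capability parsing, e.g. from a test or a command handler; the show_active_banks() name is hypothetical and only the tcg2_platform_get_tpm2(), tpm2_get_pcr_info() and tpm2_algorithm_to_mask() helpers come from this patch.

#include <common.h>
#include <dm.h>
#include <tpm-v2.h>

/* Illustrative only: report which PCR banks the TPM says are active. */
static int show_active_banks(void)
{
	struct udevice *dev;
	u32 supported, active, banks;
	int rc;

	rc = tcg2_platform_get_tpm2(&dev);
	if (rc)
		return rc;

	rc = tpm2_get_pcr_info(dev, &supported, &active, &banks);
	if (rc)
		return rc;

	printf("%u PCR bank(s)\n", banks);
	if (active & tpm2_algorithm_to_mask(TPM2_ALG_SHA256))
		printf("SHA-256 bank active\n");
	if (active & tpm2_algorithm_to_mask(TPM2_ALG_SHA384))
		printf("SHA-384 bank active\n");

	return 0;
}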