Diffstat (limited to 'services/std_svc')
-rw-r--r--  services/std_svc/drtm/drtm_cache.c                             119
-rw-r--r--  services/std_svc/drtm/drtm_cache.h                              19
-rw-r--r--  services/std_svc/drtm/drtm_dma_prot.c                          295
-rw-r--r--  services/std_svc/drtm/drtm_dma_prot.h                           71
-rw-r--r--  services/std_svc/drtm/drtm_main.c                              810
-rw-r--r--  services/std_svc/drtm/drtm_main.h                               70
-rw-r--r--  services/std_svc/drtm/drtm_mbedtls_config.h                     69
-rw-r--r--  services/std_svc/drtm/drtm_measurements.c                      259
-rw-r--r--  services/std_svc/drtm/drtm_measurements.h                       50
-rw-r--r--  services/std_svc/drtm/drtm_qemu_virt_cached_resources_init.c  116
-rw-r--r--  services/std_svc/drtm/drtm_remediation.c                        72
-rw-r--r--  services/std_svc/drtm/drtm_remediation.h                        15
-rw-r--r--  services/std_svc/drtm/drtm_res_tcb_hashes.c                    183
-rw-r--r--  services/std_svc/drtm/drtm_res_tcb_hashes.h                     18
-rw-r--r--  services/std_svc/std_svc_setup.c                                12
15 files changed, 2178 insertions, 0 deletions
diff --git a/services/std_svc/drtm/drtm_cache.c b/services/std_svc/drtm/drtm_cache.c
new file mode 100644
index 000000000..d1b2a3b71
--- /dev/null
+++ b/services/std_svc/drtm/drtm_cache.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM protected-resources cache
+ *
+ * Authors:
+ * Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
+ */
+
+#include <common/debug.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <services/drtm_cache.h>
+
+/*
+ * XXX Note: the generic protected DRTM resources are being specialised into
+ * DRTM TCB hashes. Platform resources retrieved through the generic DRTM cache
+ * are going to be retrieved through bespoke interfaces instead.
+ * This file and drtm_qemu_virt_cached_resources_init.c will be removed once the
+ * transition is complete.
+ */
+
+static char cache[1 * 1024];
+static char *cache_free = cache;
+#define CACHE_END ((char *)cache + sizeof(cache))
+
+#include "drtm_qemu_virt_cached_resources_init.c"
+
+
+static struct cached_res *cache_alloc(size_t bytes)
+{
+ struct cached_res *r;
+
+ /* Subtract to avoid pointer overflow; '>' permits an exactly-full cache. */
+ if (bytes > (size_t)(CACHE_END - cache_free)) {
+ return NULL;
+ }
+
+ r = (struct cached_res *)cache_free;
+ cache_free += bytes;
+
+ return r;
+}
+
+
+void drtm_cache_init(void)
+{
+ const struct cached_res *r;
+
+ memset(&cache, 0, sizeof(cache));
+
+ r = CACHED_RESOURCES_INIT;
+ while (r < CACHED_RESOURCES_INIT_END) {
+ int rc;
+
+ if (r->data_ptr) {
+ rc = drtm_cache_resource_ptr(r->id, r->bytes, r->data_ptr);
+ } else {
+ rc = drtm_cache_resource(r->id, r->bytes, r->data);
+ }
+ if (rc) {
+ WARN("%s: drtm_cache_resource_opt() failed rc=%d\n", __func__, rc);
+ break;
+ }
+
+ r = (struct cached_res *)((char *)r + sizeof(*r)
+ + (r->data_ptr ? 0 : r->bytes));
+ }
+}
+
+int drtm_cache_resource_opt(const char *id, size_t bytes, const char *data,
+ bool copy_the_data)
+{
+ struct cached_res *res;
+ size_t bytes_req = sizeof(struct cached_res) + (copy_the_data ? bytes : 0);
+
+ if (strnlen(id, sizeof(res->id)) == sizeof(res->id) || !data) {
+ return -EINVAL;
+ }
+
+ res = cache_alloc(bytes_req);
+ if (!res) {
+ return -ENOMEM;
+ }
+
+ (void)strlcpy(res->id, id, sizeof(res->id));
+
+ res->bytes = bytes;
+ if (copy_the_data) {
+ res->data_ptr = NULL;
+ (void)memcpy((char *)res->data, data, bytes);
+ } else {
+ res->data_ptr = data;
+ }
+
+ return 0;
+}
+
+void drtm_cache_get_resource(const char *id,
+ const char **res_out, size_t *res_out_bytes)
+{
+ struct cached_res *r = (struct cached_res *)cache;
+
+ /* Scan only the allocated part of the cache. */
+ while ((char *)r < cache_free) {
+ if (strncmp(r->id, id, sizeof(r->id)) == 0) {
+ *res_out = r->data_ptr ? r->data_ptr : r->data;
+ *res_out_bytes = r->bytes;
+ return;
+ }
+ r = (struct cached_res *)((char *)r + sizeof(*r)
+ + (r->data_ptr ? 0 : r->bytes));
+ }
+
+ *res_out = NULL;
+ *res_out_bytes = 0;
+}
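+
+/*
+ * Usage sketch, illustrative only ("example-blob" is a hypothetical id;
+ * real callers use well-known ids such as "address-map"):
+ *
+ *	static const char blob[] = { 0x01, 0x02, 0x03, 0x04 };
+ *	const char *data = NULL;
+ *	size_t bytes = 0;
+ *
+ *	// Cache a copy of `blob'; fails with -EINVAL (bad id or NULL data)
+ *	// or -ENOMEM (cache buffer exhausted).
+ *	if (drtm_cache_resource_opt("example-blob", sizeof(blob), blob, true)) {
+ *		// handle the error
+ *	}
+ *
+ *	// Look the resource up again; on a miss, data == NULL and bytes == 0.
+ *	drtm_cache_get_resource("example-blob", &data, &bytes);
+ */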
diff --git a/services/std_svc/drtm/drtm_cache.h b/services/std_svc/drtm/drtm_cache.h
new file mode 100644
index 000000000..67f80ea31
--- /dev/null
+++ b/services/std_svc/drtm/drtm_cache.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef DRTM_CACHE_H
+#define DRTM_CACHE_H
+
+#include <stddef.h> /* size_t */
+
+#pragma pack(push, 1)
+struct cached_res {
+ char id[32];
+ size_t bytes;
+ const char *data_ptr; /* If NULL, then the data follows. */
+ const char data[];
+};
+#pragma pack(pop)
+
+#endif /* DRTM_CACHE_H */
diff --git a/services/std_svc/drtm/drtm_dma_prot.c b/services/std_svc/drtm/drtm_dma_prot.c
new file mode 100644
index 000000000..e41f36073
--- /dev/null
+++ b/services/std_svc/drtm/drtm_dma_prot.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM DMA protection.
+ *
+ * Authors:
+ * Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
+ *
+ */
+#include <stdint.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <drivers/arm/smmu_v3.h>
+#include <services/drtm_svc_plat.h>
+#include <smccc_helpers.h>
+
+#include "drtm_dma_prot.h"
+#include "drtm_remediation.h"
+#include "drtm_main.h"
+
+
+/* Values for DRTM_PROTECT_MEMORY */
+enum dma_prot_type {
+ PROTECT_NONE = -1,
+ PROTECT_MEM_ALL = 0,
+ PROTECT_MEM_REGION = 1,
+};
+
+struct dma_prot {
+ enum dma_prot_type type;
+};
+
+/*
+ * ________________________ LAUNCH success ________________________
+ * | Initial | -------------------> | Prot engaged |
+ * |````````````````````````| |````````````````````````|
+ * | request.type == NONE | | request.type != NONE |
+ * | | <------------------- | |
+ * `________________________' UNPROTECT_MEM `________________________'
+ *
+ * Transitions that are not shown correspond to ABI calls that do not change
+ * state and result in an error being returned to the caller.
+ */
+static struct dma_prot active_prot = {
+ .type = PROTECT_NONE,
+};
+
+/* Version-independent type. */
+typedef struct drtm_dl_dma_prot_args_v1 struct_drtm_dl_dma_prot_args;
+
+
+int drtm_dma_prot_init(void)
+{
+ bool must_init_fail = false;
+ const uintptr_t *smmus;
+ size_t num_smmus = 0;
+ unsigned int num_smmus_total;
+
+ /* Report presence of non-host platforms, for info only. */
+ if (plat_has_non_host_platforms()) {
+ WARN("DRTM: the platform includes trusted DMA-capable devices"
+ " (non-host platforms)\n");
+ }
+
+ /*
+ * DLME protection is uncertain on platforms with peripherals whose
+ * DMA is not managed by an SMMU. DRTM doesn't work on such platforms.
+ */
+ if (plat_has_unmanaged_dma_peripherals()) {
+ ERROR("DRTM: this platform does not provide DMA protection\n");
+ must_init_fail = true;
+ }
+
+ /*
+ * Check that the platform reported all SMMUs.
+ * It is acceptable if the platform doesn't have any SMMUs when it
+ * doesn't have any DMA-capable devices.
+ */
+ num_smmus_total = plat_get_total_num_smmus();
+ plat_enumerate_smmus((const uintptr_t (*)[])&smmus, &num_smmus);
+ if (num_smmus != num_smmus_total) {
+ ERROR("DRTM: could not discover all SMMUs\n");
+ must_init_fail = true;
+ }
+
+ /* Check any SMMUs enumerated. */
+ for (const uintptr_t *smmu = smmus; smmu < smmus + num_smmus; smmu++) {
+ if (*smmu == 0) {
+ WARN("DRTM: SMMU reported at unusual PA 0x0\n");
+ }
+ }
+
+ return (int)must_init_fail;
+}
+
+uint64_t drtm_features_dma_prot(void *ctx)
+{
+ SMC_RET2(ctx, 1ULL, /* DMA protection feature is supported */
+ 1u /* DMA protection support: Complete DMA protection. */
+ );
+}
+
+/*
+ * Checks that the DMA protection arguments are valid and that the given
+ * protected regions would be covered by DMA protection.
+ */
+enum drtm_retc drtm_dma_prot_check_args(const struct_drtm_dl_dma_prot_args *a,
+ int a_dma_prot_type,
+ struct __protected_regions p)
+{
+ switch ((enum dma_prot_type)a_dma_prot_type) {
+ case PROTECT_MEM_ALL:
+ if (a->dma_prot_table_paddr || a->dma_prot_table_size) {
+ ERROR("DRTM: invalid launch due to inconsistent"
+ " DMA protection arguments\n");
+ return MEM_PROTECT_INVALID;
+ }
+ /*
+ * Full DMA protection ought to ensure that the DLME and NWd
+ * DCE regions are protected, no further checks required.
+ */
+ return SUCCESS;
+
+ default:
+ ERROR("DRTM: invalid launch due to unsupported DMA protection type\n");
+ return MEM_PROTECT_INVALID;
+ }
+}
+
+enum drtm_retc drtm_dma_prot_engage(const struct_drtm_dl_dma_prot_args *a,
+ int a_dma_prot_type)
+{
+ const uintptr_t *smmus;
+ size_t num_smmus = 0;
+
+ if (active_prot.type != PROTECT_NONE) {
+ ERROR("DRTM: launch denied as previous DMA protection"
+ " is still engaged\n");
+ return DENIED;
+ }
+
+ if (a_dma_prot_type == PROTECT_NONE) {
+ return SUCCESS;
+ /* Only PROTECT_MEM_ALL is supported currently. */
+ } else if (a_dma_prot_type != PROTECT_MEM_ALL) {
+ ERROR("%s(): unimplemented DMA protection type\n", __func__);
+ panic();
+ }
+
+ /*
+ * Engage SMMUs in accordance with the request we have previously received.
+ * Only PROTECT_MEM_ALL is implemented currently.
+ */
+ plat_enumerate_smmus((const uintptr_t (*)[])&smmus, &num_smmus);
+ for (const uintptr_t *smmu = smmus; smmu < smmus+num_smmus; smmu++) {
+ int rc;
+
+ /*
+ * TODO: Invalidate SMMU's Stage-1 and Stage-2 TLB entries. This ensures
+ * that any outstanding device transactions are completed, see Section
+ * 3.21.1, specification IHI_0070_C_a for an approximate reference.
+ */
+
+ if ((rc = smmuv3_ns_set_abort_all(*smmu))) {
+ ERROR("DRTM: SMMU at PA 0x%lx failed to engage DMA protection"
+ " rc=%d\n", *smmu, rc);
+ return INTERNAL_ERROR;
+ }
+ }
+
+ /*
+ * TODO: Restrict DMA from the GIC.
+ *
+ * Full DMA protection may be achieved as follows:
+ *
+ * With a GICv3:
+ * - Set GICR_CTLR.EnableLPIs to 0, for each GICR;
+ * GICR_CTLR.RWP == 0 must be the case before finishing, for each GICR.
+ * - Set GITS_CTLR.Enabled to 0;
+ * GITS_CTLR.Quiescent == 1 must be the case before finishing.
+ *
+ * In addition, with a GICv4:
+ * - Set GICR_VPENDBASER.Valid to 0, for each GICR;
+ * GICR_CTLR.RWP == 0 must be the case before finishing, for each GICR.
+ *
+ * Alternatively, e.g. if some bit values cannot be changed at runtime,
+ * this procedure should return an error if the LPI Pending and
+ * Configuration tables overlap the regions being protected.
+ */
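+ /*
+ * A minimal sketch of the GICv3 sequence above, for illustration only and
+ * not wired into this function. The GICR frame and GITS base addresses are
+ * assumed to come from a platform-specific enumeration; register offsets
+ * and bit positions are those of the GICv3 architecture specification
+ * (GICR_CTLR and GITS_CTLR both sit at offset 0 of their frames).
+ *
+ *	#include <lib/mmio.h>
+ *
+ *	#define GICR_CTLR_EN_LPIS	BIT_32(0)
+ *	#define GICR_CTLR_RWP		BIT_32(3)
+ *	#define GITS_CTLR_ENABLED	BIT_32(0)
+ *	#define GITS_CTLR_QUIESCENT	BIT_32(31)
+ *
+ *	static void drtm_quiesce_gicv3(const uintptr_t *gicr_frames,
+ *	                               size_t num_gicr, uintptr_t gits_base)
+ *	{
+ *		for (size_t i = 0U; i < num_gicr; i++) {
+ *			mmio_write_32(gicr_frames[i],
+ *				mmio_read_32(gicr_frames[i]) & ~GICR_CTLR_EN_LPIS);
+ *			while ((mmio_read_32(gicr_frames[i]) & GICR_CTLR_RWP) != 0U)
+ *				;
+ *		}
+ *
+ *		mmio_write_32(gits_base,
+ *			mmio_read_32(gits_base) & ~GITS_CTLR_ENABLED);
+ *		while ((mmio_read_32(gits_base) & GITS_CTLR_QUIESCENT) == 0U)
+ *			;
+ *	}
+ */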
+
+ active_prot.type = a_dma_prot_type;
+
+ return SUCCESS;
+}
+
+/*
+ * Undo what has previously been done in drtm_dma_prot_engage(), or enter
+ * remediation if it is not possible.
+ */
+enum drtm_retc drtm_dma_prot_disengage(void)
+{
+ const uintptr_t *smmus;
+ size_t num_smmus = 0;
+
+ if (active_prot.type == PROTECT_NONE) {
+ return SUCCESS;
+ /* Only PROTECT_MEM_ALL is supported currently. */
+ } else if (active_prot.type != PROTECT_MEM_ALL) {
+ ERROR("%s(): unimplemented DMA protection type\n", __func__);
+ panic();
+ }
+
+ /*
+ * For PROTECT_MEM_ALL, undo the SMMU configuration for "abort all" mode
+ * done during engage().
+ */
+ /* Simply enter remediation for now. */
+ (void)smmus;
+ (void)num_smmus;
+ drtm_enter_remediation(1, "cannot undo PROTECT_MEM_ALL SMMU configuration");
+
+ /* TODO: Undo GIC DMA restrictions. */
+
+ active_prot.type = PROTECT_NONE;
+
+ return SUCCESS;
+}
+
+uint64_t drtm_unprotect_mem(void *ctx)
+{
+ enum drtm_retc ret;
+
+ switch (active_prot.type) {
+ case PROTECT_NONE:
+ ERROR("DRTM: invalid UNPROTECT_MEM, no DMA protection has"
+ " previously been engaged\n");
+ ret = DENIED;
+ break;
+
+ case PROTECT_MEM_ALL:
+ /*
+ * UNPROTECT_MEM is a no-op for PROTECT_MEM_ALL: DRTM must not touch
+ * the NS SMMU as it is expected that the DLME has configured it.
+ */
+ active_prot.type = PROTECT_NONE;
+
+ ret = SUCCESS;
+ break;
+
+ default:
+ ret = drtm_dma_prot_disengage();
+ break;
+ }
+
+ SMC_RET1(ctx, ret);
+}
+
+void drtm_dma_prot_serialise_table(char *dst, size_t *size_out)
+{
+ if (active_prot.type == PROTECT_NONE) {
+ if (size_out) {
+ *size_out = 0;
+ }
+ return;
+ } else if (active_prot.type != PROTECT_MEM_ALL) {
+ ERROR("%s(): unimplemented DMA protection type\n", __func__);
+ panic();
+ }
+
+ struct __packed descr_table_1 {
+ struct_drtm_mem_region_descr_table header;
+ struct_drtm_mem_region_descr regions[1];
+ } prot_table = {
+ .header = {
+ .version = 1,
+ .num_regions = sizeof(((struct descr_table_1 *)NULL)->regions) /
+ sizeof(((struct descr_table_1 *)NULL)->regions[0])
+ },
+ #define PAGES_AND_TYPE(pages, type) \
+ .pages_and_type = DRTM_MEM_REGION_PAGES_AND_TYPE(pages, type)
+ .regions = {
+ {.paddr = 0, PAGES_AND_TYPE(UINT64_MAX, 0x3)},
+ }
+ };
+
+ if (dst) {
+ (void)memcpy(dst, &prot_table, sizeof(prot_table));
+ }
+ if (size_out) {
+ *size_out = sizeof(prot_table);
+ }
+}
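+
+/*
+ * Callers follow the usual two-pass idiom: a first call with dst == NULL
+ * only reports the required size, and a second call writes the table.
+ * Minimal sketch (`buf' is a caller-provided buffer, an assumption of this
+ * example):
+ *
+ *	char buf[128];
+ *	size_t bytes = 0;
+ *
+ *	drtm_dma_prot_serialise_table(NULL, &bytes);
+ *	if (bytes != 0 && bytes <= sizeof(buf)) {
+ *		drtm_dma_prot_serialise_table(buf, &bytes);
+ *	}
+ */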
diff --git a/services/std_svc/drtm/drtm_dma_prot.h b/services/std_svc/drtm/drtm_dma_prot.h
new file mode 100644
index 000000000..69519b991
--- /dev/null
+++ b/services/std_svc/drtm/drtm_dma_prot.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef DRTM_DMA_PROT_H
+#define DRTM_DMA_PROT_H
+
+#include <stdint.h>
+
+#include <lib/utils.h>
+
+
+struct __packed drtm_dl_dma_prot_args_v1 {
+ uint64_t dma_prot_table_paddr;
+ uint64_t dma_prot_table_size;
+};
+/* Opaque / encapsulated type. */
+typedef struct drtm_dl_dma_prot_args_v1 drtm_dl_dma_prot_args_v1_t;
+
+struct __protected_regions {
+ struct p_mem_region dlme_region;
+ struct p_mem_region dce_nwd_region;
+};
+
+
+struct __packed drtm_mem_region_descr_v1 {
+ uint64_t paddr;
+ uint64_t pages_and_type;
+};
+#define DRTM_MEM_REGION_PAGES_AND_TYPE(pages, type) \
+ (((uint64_t)(pages) & (((uint64_t)1 << 52) - 1)) \
+ | (((uint64_t)(type) & 0x7) << 52))
+#define DRTM_MEM_REGION_PAGES(pages_and_type) \
+ ((uint64_t)(pages_and_type) & (((uint64_t)1 << 52) - 1))
+#define DRTM_MEM_REGION_TYPE(pages_and_type) \
+ ((uint8_t)((pages_and_type) >> 52 & 0x7))
+enum drtm_mem_region_type {
+ DRTM_MEM_REGION_TYPE_NORMAL = 0,
+ DRTM_MEM_REGION_TYPE_NORMAL_WITH_CACHEABILITY_ATTRS = 1,
+ DRTM_MEM_REGION_TYPE_DEVICE = 2,
+ DRTM_MEM_REGION_TYPE_NON_VOLATILE = 3,
+ DRTM_MEM_REGION_TYPE_RESERVED = 4,
+};
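+
+/*
+ * Example encoding: a 2-MiB block of normal memory spans 512 4-KiB pages, so
+ *
+ *	uint64_t pt = DRTM_MEM_REGION_PAGES_AND_TYPE(512,
+ *				DRTM_MEM_REGION_TYPE_NORMAL);
+ *
+ *	DRTM_MEM_REGION_PAGES(pt);	// == 512, from bits [51:0]
+ *	DRTM_MEM_REGION_TYPE(pt);	// == 0, from bits [54:52]
+ */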
+
+
+struct __packed drtm_mem_region_descr_table_v1 {
+ uint16_t version; /* Must be 1. */
+ uint8_t __res[2];
+ uint32_t num_regions;
+ struct drtm_mem_region_descr_v1 regions[];
+};
+
+
+typedef struct drtm_mem_region_descr_v1 struct_drtm_mem_region_descr;
+typedef struct drtm_mem_region_descr_table_v1 struct_drtm_mem_region_descr_table;
+
+
+int drtm_dma_prot_init(void);
+uint64_t drtm_features_dma_prot(void *ctx);
+enum drtm_retc drtm_dma_prot_check_args(const drtm_dl_dma_prot_args_v1_t *a,
+ int a_dma_prot_type,
+ struct __protected_regions p);
+enum drtm_retc drtm_dma_prot_engage(const drtm_dl_dma_prot_args_v1_t *a,
+ int a_dma_prot_type);
+enum drtm_retc drtm_dma_prot_disengage(void);
+uint64_t drtm_unprotect_mem(void *ctx);
+void drtm_dma_prot_serialise_table(char *dst, size_t *prot_table_size_out);
+
+#endif /* DRTM_DMA_PROT_H */
diff --git a/services/std_svc/drtm/drtm_main.c b/services/std_svc/drtm/drtm_main.c
new file mode 100644
index 000000000..ef0894c22
--- /dev/null
+++ b/services/std_svc/drtm/drtm_main.c
@@ -0,0 +1,810 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM service
+ *
+ * Authors:
+ * Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
+ * Brian Nezvadovitz
+ */
+
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <plat/arm/common/plat_arm.h>
+#include <plat/common/platform.h>
+#include <services/drtm_svc.h>
+#include <services/drtm_cache.h>
+#include <tools_share/uuid.h>
+
+#include "drtm_dma_prot.h"
+#include "drtm_main.h"
+#include "drtm_measurements.h"
+#include "drtm_remediation.h"
+#include "drtm_res_tcb_hashes.h"
+
+#define XLAT_PAGE_SIZE PAGE_SIZE
+#if XLAT_PAGE_SIZE != DRTM_PAGE_SIZE
+#warning "xlat library page size differs from DRTM page size;"\
+ " mmap_add_dynamic_region() calls to the xlat library might fail"
+#endif
+
+
+enum drtm_dlme_el {
+ DLME_AT_EL1,
+ DLME_AT_EL2
+};
+static enum drtm_dlme_el drtm_dlme_el(unsigned int el)
+{
+ return (enum drtm_dlme_el)(el - 1);
+}
+
+struct __packed dlme_data_header_v1 {
+ uint16_t version; /* Must be 1. */
+ uint16_t this_hdr_size;
+ uint8_t __res[4];
+ uint64_t dlme_data_size;
+ uint64_t dlme_prot_regions_size;
+ uint64_t dlme_addr_map_size;
+ uint64_t dlme_tpm_log_size;
+ uint64_t dlme_tcb_hashes_table_size;
+ uint64_t dlme_impdef_region_size;
+} __aligned(__alignof(uint16_t /* First member's type, `uint16_t version'. */));
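+/*
+ * The DLME data region is laid out in the order of the size fields above:
+ * this header, then the protected-regions table, the address map, the TPM
+ * event log, the TCB-hashes table, and finally the (currently empty)
+ * implementation-defined region; see drtm_dl_prepare_dlme_data().
+ */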
+
+typedef struct dlme_data_header_v1 struct_dlme_data_header;
+
+
+static uint64_t boot_pe_aff_value;
+static int locality2, locality3;
+
+
+static unsigned int get_highest_ns_el_implemented(void)
+{
+ return nonsecure_el_implemented(2) != EL_IMPL_NONE ? 2 : 1;
+}
+
+
+int drtm_setup(void)
+{
+ int rc;
+
+ INFO("++ DRTM service setup\n");
+
+ boot_pe_aff_value = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
+
+ if ((rc = drtm_dma_prot_init())) {
+ return rc;
+ }
+
+ if ((rc = drtm_tcb_hashes_init())) {
+ return rc;
+ }
+
+ drtm_cache_init();
+
+ if ((rc = drtm_measurements_init())) {
+ return rc;
+ }
+
+ return 0;
+}
+
+
+static enum drtm_retc drtm_dl_check_caller_el(void *ctx)
+{
+ uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
+ uint64_t dl_caller_el;
+ uint64_t dl_caller_aarch;
+
+ dl_caller_el = spsr_el3 >> MODE_EL_SHIFT & MODE_EL_MASK;
+ dl_caller_aarch = spsr_el3 >> MODE_RW_SHIFT & MODE_RW_MASK;
+
+ if (dl_caller_el == MODE_EL3) {
+ ERROR("DRTM: invalid launch from EL3\n");
+ return DENIED;
+ }
+
+ if (dl_caller_aarch != MODE_RW_64) {
+ ERROR("DRTM: invalid launch from non-AArch64 execution state\n");
+ return DENIED;
+ }
+
+ return SUCCESS;
+}
+
+static enum drtm_retc drtm_dl_check_cores(void)
+{
+ unsigned int core_not_off;
+ uint64_t this_pe_aff_value = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
+
+ if (this_pe_aff_value != boot_pe_aff_value) {
+ ERROR("DRTM: invalid launch on a non-boot PE\n");
+ return DENIED;
+ }
+
+ core_not_off = psci_is_last_on_core_safe();
+ if (core_not_off < PLATFORM_CORE_COUNT) {
+ ERROR("DRTM: invalid launch due to non-boot PE not being turned off\n");
+ return DENIED;
+ }
+
+ return SUCCESS;
+}
+
+static enum drtm_retc drtm_dl_prepare_dlme_data(const struct_drtm_dl_args *args,
+ const drtm_event_log_t *ev_log,
+ size_t *dlme_data_size_out);
+
+/*
+ * Note: accesses to the dynamic launch args, and to the DLME data are
+ * little-endian as required, thanks to TF-A BL31 init requirements.
+ */
+static enum drtm_retc drtm_dl_check_args(uint64_t x1,
+ struct_drtm_dl_args *a_out)
+{
+ uint64_t dlme_start, dlme_end;
+ uint64_t dlme_img_start, dlme_img_ep, dlme_img_end;
+ uint64_t dlme_data_start, dlme_data_end;
+ uintptr_t args_mapping;
+ size_t args_mapping_size;
+ struct_drtm_dl_args *a;
+ struct_drtm_dl_args args_buf;
+ size_t dlme_data_size_req;
+ struct __protected_regions protected_regions;
+ int rc;
+ enum drtm_retc ret;
+
+ if (x1 % DRTM_PAGE_SIZE != 0) {
+ ERROR("DRTM: parameters structure is not "
+ DRTM_PAGE_SIZE_STR "-aligned\n");
+ return INVALID_PARAMETERS;
+ }
+
+ args_mapping_size = ALIGNED_UP(sizeof(struct_drtm_dl_args), DRTM_PAGE_SIZE);
+ rc = mmap_add_dynamic_region_alloc_va(x1, &args_mapping, args_mapping_size,
+ MT_MEMORY | MT_NS | MT_RO | MT_SHAREABILITY_ISH);
+ if (rc) {
+ WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
+ __func__, rc);
+ return INTERNAL_ERROR;
+ }
+ a = (struct_drtm_dl_args *)args_mapping;
+ /*
+ * TODO: invalidate all data cache before reading the data passed by the
+ * DCE Preamble. This is required to avoid / defend against racing with
+ * cache evictions.
+ */
+ args_buf = *a;
+
+ rc = mmap_remove_dynamic_region(args_mapping, args_mapping_size);
+ if (rc) {
+ ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
+ " rc=%d\n", __func__, rc);
+ panic();
+ }
+ a = &args_buf;
+
+ if (a->version != 1) {
+ ERROR("DRTM: parameters structure incompatible with major version %d\n",
+ ARM_DRTM_VERSION_MAJOR);
+ return NOT_SUPPORTED;
+ }
+
+ if (!(a->dlme_img_off < a->dlme_size &&
+ a->dlme_data_off < a->dlme_size)) {
+ ERROR("DRTM: argument offset is outside of the DLME region\n");
+ return INVALID_PARAMETERS;
+ }
+ dlme_start = a->dlme_paddr;
+ dlme_end = a->dlme_paddr + a->dlme_size;
+ dlme_img_start = a->dlme_paddr + a->dlme_img_off;
+ dlme_img_ep = DL_ARGS_GET_DLME_ENTRY_POINT(a);
+ dlme_img_end = dlme_img_start + a->dlme_img_size;
+ dlme_data_start = a->dlme_paddr + a->dlme_data_off;
+ dlme_data_end = dlme_end;
+
+ /*
+ * TODO: validate that the DLME physical address range is all NS memory,
+ * return INVALID_PARAMETERS if it is not.
+ * Note that this check relies on platform-specific information. For
+ * examples, see psci_plat_pm_ops->validate_ns_entrypoint() or
+ * arm_validate_ns_entrypoint().
+ */
+
+ /* Check the DLME regions arguments. */
+ if (dlme_start % DRTM_PAGE_SIZE) {
+ ERROR("DRTM: argument DLME region is not "
+ DRTM_PAGE_SIZE_STR "-aligned\n");
+ return INVALID_PARAMETERS;
+ }
+
+ if (!(dlme_start < dlme_end &&
+ dlme_start <= dlme_img_start && dlme_img_start < dlme_img_end &&
+ dlme_start <= dlme_data_start && dlme_data_start < dlme_data_end)) {
+ ERROR("DRTM: argument DLME region is discontiguous\n");
+ return INVALID_PARAMETERS;
+ }
+
+ if (dlme_img_start < dlme_data_end && dlme_data_start < dlme_img_end) {
+ ERROR("DRTM: argument DLME regions overlap\n");
+ return INVALID_PARAMETERS;
+ }
+
+ /* Check the DLME image region arguments. */
+ if (dlme_img_start % DRTM_PAGE_SIZE) {
+ ERROR("DRTM: argument DLME image region is not "
+ DRTM_PAGE_SIZE_STR "-aligned\n");
+ return INVALID_PARAMETERS;
+ }
+
+ if (!(dlme_img_start <= dlme_img_ep && dlme_img_ep < dlme_img_end)) {
+ ERROR("DRTM: DLME entry point is outside of the DLME image region\n");
+ return INVALID_PARAMETERS;
+ }
+
+ if (dlme_img_ep % 4) {
+ ERROR("DRTM: DLME image entry point is not 4-byte-aligned\n");
+ return INVALID_PARAMETERS;
+ }
+
+ /* Check the DLME data region arguments. */
+ if (dlme_data_start % DRTM_PAGE_SIZE) {
+ ERROR("DRTM: argument DLME data region is not "
+ DRTM_PAGE_SIZE_STR "-aligned\n");
+ return INVALID_PARAMETERS;
+ }
+
+ rc = drtm_dl_prepare_dlme_data(NULL, NULL, &dlme_data_size_req);
+ if (rc) {
+ ERROR("%s: drtm_dl_prepare_dlme_data() failed unexpectedly rc=%d\n",
+ __func__, rc);
+ panic();
+ }
+ if (dlme_data_end - dlme_data_start < dlme_data_size_req) {
+ ERROR("DRTM: argument DLME data region is short of %lu bytes\n",
+ dlme_data_size_req - (size_t)(dlme_data_end - dlme_data_start));
+ return INVALID_PARAMETERS;
+ }
+
+ /* Check the Normal World DCE region arguments. */
+ if (a->dce_nwd_paddr != 0) {
+ uint64_t dce_nwd_start = a->dce_nwd_paddr;
+ uint64_t dce_nwd_end = dce_nwd_start + a->dce_nwd_size;
+
+ if (!(dce_nwd_start < dce_nwd_end)) {
+ ERROR("DRTM: argument Normal World DCE region is dicontiguous\n");
+ return INVALID_PARAMETERS;
+ }
+
+ if (dce_nwd_start < dlme_end && dlme_start < dce_nwd_end) {
+ ERROR("DRTM: argument Normal World DCE regions overlap\n");
+ return INVALID_PARAMETERS;
+ }
+ }
+
+ protected_regions = (struct __protected_regions) {
+ .dlme_region = { a->dlme_paddr, a->dlme_size },
+ .dce_nwd_region = { a->dce_nwd_paddr, a->dce_nwd_size },
+ };
+ if ((ret = drtm_dma_prot_check_args(&a->dma_prot_args,
+ DL_ARGS_GET_DMA_PROT_TYPE(a),
+ protected_regions))) {
+ return ret;
+ }
+
+ *a_out = *a;
+ return SUCCESS;
+}
+
+static enum drtm_retc drtm_dl_prepare_dlme_data(const struct_drtm_dl_args *args,
+ const drtm_event_log_t *drtm_event_log,
+ size_t *dlme_data_size_out)
+{
+ int rc;
+ size_t dlme_data_total_bytes_req = 0;
+ uint64_t dlme_data_paddr;
+ size_t dlme_data_max_size;
+ uintptr_t dlme_data_mapping;
+ size_t dlme_data_mapping_bytes;
+ struct_dlme_data_header *dlme_data_hdr;
+ char *dlme_data_cursor;
+ size_t dlme_prot_tables_bytes;
+ const char *dlme_addr_map;
+ size_t dlme_addr_map_bytes;
+ size_t drtm_event_log_bytes;
+ size_t drtm_tcb_hashes_bytes;
+ size_t serialised_bytes_actual;
+
+ /* Size the DLME protected regions. */
+ drtm_dma_prot_serialise_table(NULL, &dlme_prot_tables_bytes);
+ dlme_data_total_bytes_req += dlme_prot_tables_bytes;
+
+ /* Size the DLME address map. */
+ drtm_cache_get_resource("address-map",
+ &dlme_addr_map, &dlme_addr_map_bytes);
+ dlme_data_total_bytes_req += dlme_addr_map_bytes;
+
+ /* Size the DRTM event log. */
+ drtm_serialise_event_log(NULL, drtm_event_log, &drtm_event_log_bytes);
+ dlme_data_total_bytes_req += drtm_event_log_bytes;
+
+ /* Size the TCB hashes table. */
+ drtm_serialise_tcb_hashes_table(NULL, &drtm_tcb_hashes_bytes);
+ dlme_data_total_bytes_req += drtm_tcb_hashes_bytes;
+
+ /* Size the implementation-specific DLME region. */
+
+ if (args == NULL) {
+ if (dlme_data_size_out) {
+ *dlme_data_size_out = dlme_data_total_bytes_req;
+ }
+ return SUCCESS;
+ }
+
+ dlme_data_paddr = args->dlme_paddr + args->dlme_data_off;
+ dlme_data_max_size = args->dlme_size - args->dlme_data_off;
+
+ /*
+ * The capacity of the given DLME data region is checked when
+ * the other dynamic launch arguments are.
+ */
+ if (dlme_data_max_size < dlme_data_total_bytes_req) {
+ ERROR("%s: assertion failed:"
+ " dlme_data_max_size (%ld) < dlme_data_total_bytes_req (%ld)\n",
+ __func__, dlme_data_max_size, dlme_data_total_bytes_req);
+ panic();
+ }
+
+ /* Map the DLME data region as NS memory. */
+ dlme_data_mapping_bytes = ALIGNED_UP(dlme_data_max_size, DRTM_PAGE_SIZE);
+ rc = mmap_add_dynamic_region_alloc_va(dlme_data_paddr, &dlme_data_mapping,
+ dlme_data_mapping_bytes, MT_RW_DATA | MT_NS | MT_SHAREABILITY_ISH);
+ if (rc) {
+ WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n", __func__, rc);
+ return INTERNAL_ERROR;
+ }
+ dlme_data_hdr = (struct_dlme_data_header *)dlme_data_mapping;
+ dlme_data_cursor = (char *)dlme_data_hdr + sizeof(*dlme_data_hdr);
+
+ /* Set the header version and size. */
+ dlme_data_hdr->version = 1;
+ dlme_data_hdr->this_hdr_size = sizeof(*dlme_data_hdr);
+
+ /* Prepare DLME protected regions. */
+ drtm_dma_prot_serialise_table(dlme_data_cursor, &serialised_bytes_actual);
+ assert(serialised_bytes_actual == dlme_prot_tables_bytes);
+ dlme_data_hdr->dlme_prot_regions_size = dlme_prot_tables_bytes;
+ dlme_data_cursor += dlme_prot_tables_bytes;
+
+ /* Prepare DLME address map. */
+ if (dlme_addr_map) {
+ memcpy(dlme_data_cursor, dlme_addr_map, dlme_addr_map_bytes);
+ } else {
+ WARN("DRTM: DLME address map is not in the cache\n");
+ }
+ dlme_data_hdr->dlme_addr_map_size = dlme_addr_map_bytes;
+ dlme_data_cursor += dlme_addr_map_bytes;
+
+ /* Prepare DRTM event log for DLME. */
+ drtm_serialise_event_log(dlme_data_cursor, drtm_event_log,
+ &serialised_bytes_actual);
+ assert(serialised_bytes_actual <= drtm_event_log_bytes);
+ dlme_data_hdr->dlme_tpm_log_size = serialised_bytes_actual;
+ dlme_data_cursor += serialised_bytes_actual;
+
+ /* Prepare the TCB hashes for DLME. */
+ drtm_serialise_tcb_hashes_table(dlme_data_cursor, &serialised_bytes_actual);
+ assert(serialised_bytes_actual == drtm_tcb_hashes_bytes);
+ dlme_data_hdr->dlme_tcb_hashes_table_size = drtm_tcb_hashes_bytes;
+ dlme_data_cursor += drtm_tcb_hashes_bytes;
+
+ /* Implementation-specific region size is unused. */
+ dlme_data_hdr->dlme_impdef_region_size = 0;
+ dlme_data_cursor += 0;
+
+ /* Prepare DLME data size. */
+ dlme_data_hdr->dlme_data_size = dlme_data_cursor - (char *)dlme_data_hdr;
+
+ /* Unmap the DLME data region. */
+ rc = mmap_remove_dynamic_region(dlme_data_mapping, dlme_data_mapping_bytes);
+ if (rc) {
+ ERROR("%s(): mmap_remove_dynamic_region() failed"
+ " unexpectedly rc=%d\n", __func__, rc);
+ panic();
+ }
+
+ if (dlme_data_size_out) {
+ *dlme_data_size_out = dlme_data_total_bytes_req;
+ }
+ return SUCCESS;
+}
+
+static void drtm_dl_reset_dlme_el_state(enum drtm_dlme_el dlme_el)
+{
+ uint64_t sctlr;
+
+ /*
+ * TODO: Set PE state according to the PSCI's specification of the initial
+ * state after CPU_ON, or to reset values if unspecified, where they exist,
+ * or define sensible values otherwise.
+ */
+
+ switch (dlme_el) {
+ case DLME_AT_EL1:
+ sctlr = read_sctlr_el1();
+ break;
+
+ case DLME_AT_EL2:
+ sctlr = read_sctlr_el2();
+ break;
+
+ default: /* Not reached */
+ ERROR("%s(): dlme_el has the unexpected value %d\n",
+ __func__, dlme_el);
+ panic();
+ }
+
+ sctlr &= ~(
+ /* Disable DLME's EL MMU, since the existing page-tables are untrusted. */
+ SCTLR_M_BIT
+ | SCTLR_EE_BIT /* Little-endian data accesses. */
+ );
+
+ sctlr |=
+ SCTLR_C_BIT | SCTLR_I_BIT /* Allow instruction and data caching. */
+ ;
+
+ switch (dlme_el) {
+ case DLME_AT_EL1:
+ write_sctlr_el1(sctlr);
+ break;
+
+ case DLME_AT_EL2:
+ write_sctlr_el2(sctlr);
+ break;
+ }
+}
+
+static void drtm_dl_reset_dlme_context(enum drtm_dlme_el dlme_el)
+{
+ void *ns_ctx = cm_get_context(NON_SECURE);
+ gp_regs_t *gpregs = get_gpregs_ctx(ns_ctx);
+ uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3);
+
+ /* Reset all gpregs, including SP_EL0. */
+ memset(gpregs, 0, sizeof(*gpregs));
+
+ /* Reset SP_ELx. */
+ switch (dlme_el) {
+ case DLME_AT_EL1:
+ write_sp_el1(0);
+ break;
+
+ case DLME_AT_EL2:
+ write_sp_el2(0);
+ break;
+ }
+
+ /*
+ * DLME's async exceptions are masked to avoid a NWd attacker's timed
+ * interference with any state we established trust in or measured.
+ */
+ spsr_el3 |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;
+
+ write_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3, spsr_el3);
+}
+
+static void drtm_dl_prepare_eret_to_dlme(const struct_drtm_dl_args *args,
+ enum drtm_dlme_el dlme_el)
+{
+ void *ctx = cm_get_context(NON_SECURE);
+ uint64_t dlme_ep = DL_ARGS_GET_DLME_ENTRY_POINT(args);
+ uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
+
+ /* Next ERET is to the DLME's EL. */
+ spsr_el3 &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
+ switch (dlme_el) {
+ case DLME_AT_EL1:
+ spsr_el3 |= MODE_EL1 << MODE_EL_SHIFT;
+ break;
+
+ case DLME_AT_EL2:
+ spsr_el3 |= MODE_EL2 << MODE_EL_SHIFT;
+ break;
+ }
+
+ /* Next ERET is to the DLME entry point. */
+ cm_set_elr_spsr_el3(NON_SECURE, dlme_ep, spsr_el3);
+}
+
+/*
+ * TODO:
+ * - Close locality 3;
+ * - See section 4.4 and section 4.5 for other requirements;
+ */
+static uint64_t drtm_dynamic_launch(uint64_t x1, void *handle)
+{
+ enum drtm_retc ret;
+ struct_drtm_dl_args args;
+ enum drtm_dlme_el dlme_el;
+ drtm_event_log_t event_log;
+
+ /*
+ * Non-secure interrupts are masked to avoid a NWd attacker's timed
+ * interference with any state we are establishing trust in or measuring.
+ * Note that in this particular implementation, both Non-secure and Secure
+ * interrupts are automatically masked as a consequence of the SMC call.
+ */
+
+ if ((ret = drtm_dl_check_caller_el(handle))) {
+ SMC_RET1(handle, ret);
+ }
+
+ if ((ret = drtm_dl_check_cores())) {
+ SMC_RET1(handle, ret);
+ }
+
+ if ((ret = drtm_dl_check_args(x1, &args))) {
+ SMC_RET1(handle, ret);
+ }
+
+ drtm_dl_ensure_tcb_hashes_are_final();
+
+ /*
+ * Engage the DMA protections. The launch cannot proceed without the DMA
+ * protections due to potential TOC/TOU vulnerabilities w.r.t. the DLME
+ * region (and to the NWd DCE region).
+ */
+ if ((ret = drtm_dma_prot_engage(&args.dma_prot_args,
+ DL_ARGS_GET_DMA_PROT_TYPE(&args)))) {
+ SMC_RET1(handle, ret);
+ }
+
+ /*
+ * The DMA protection is now engaged. Note that any failure mode that
+ * returns an error to the DRTM-launch caller must now disengage DMA
+ * protections before returning to the caller.
+ */
+
+ if ((ret = drtm_take_measurements(&args, &event_log))) {
+ goto err_undo_dma_prot;
+ }
+
+ if ((ret = drtm_dl_prepare_dlme_data(&args, &event_log, NULL))) {
+ goto err_undo_dma_prot;
+ }
+
+ /*
+ * Note that, at the time of writing, the DRTM spec allows a successful
+ * launch from NS-EL1 to return to a DLME in NS-EL2. The practical risk
+ * of a privilege escalation, e.g. due to a compromised hypervisor, is
+ * considered small enough not to warrant the specification of additional
+ * DRTM conduits that would be necessary to maintain OSs' abstraction from
+ * the presence of EL2 were the dynamic launch only allowed from the
+ * highest NS EL.
+ */
+ dlme_el = drtm_dlme_el(get_highest_ns_el_implemented());
+
+ drtm_dl_reset_dlme_el_state(dlme_el);
+ drtm_dl_reset_dlme_context(dlme_el);
+
+ /*
+ * TODO: Reset all SDEI event handlers, since they are untrusted. Both
+ * private and shared events for all cores must be unregistered.
+ * Note that simply calling SDEI ABIs would not be adequate for this, since
+ * there is currently no SDEI operation that clears private data for all PEs.
+ */
+
+ drtm_dl_prepare_eret_to_dlme(&args, dlme_el);
+
+ /*
+ * TODO: invalidate the instruction cache before jumping to the DLME.
+ * This is required to defend against potentially-malicious cache contents.
+ */
+
+ /* Return the DLME region's address in x0, and the DLME data offset in x1.*/
+ SMC_RET2(handle, args.dlme_paddr, args.dlme_data_off);
+
+err_undo_dma_prot:
+ ;
+ int rc;
+
+ if ((rc = drtm_dma_prot_disengage())) {
+ ERROR("%s(): drtm_dma_prot_disengage() failed unexpectedly"
+ " rc=%d\n", __func__, rc);
+ panic();
+ }
+ SMC_RET1(handle, ret);
+}
+
+
+static uint64_t drtm_features_tpm(void *ctx)
+{
+ SMC_RET2(ctx, 1ULL, /* TPM feature is supported */
+ 1ULL << 33 /* Default PCR usage schema */
+ | 0ULL << 32 /* Firmware-based hashing */
+ /* The firmware hashing algorithm */
+ | (uint32_t)DRTM_TPM_HASH_ALG << 0
+ );
+}
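+
+/*
+ * Sketch of how a caller might unpack the second returned register,
+ * following the bit assignments commented above (`feats' standing in for
+ * the returned x1 value is an assumption of this example):
+ *
+ *	bool default_pcr_schema = ((feats >> 33) & 0x1) != 0;
+ *	bool fw_based_hashing = ((feats >> 32) & 0x1) == 0;
+ *	uint32_t hash_alg = (uint32_t)feats;	// TPM_ALG_* value
+ */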
+
+static uint64_t drtm_features_mem_req(void *ctx)
+{
+ int rc;
+ size_t dlme_data_bytes_req;
+ uint64_t dlme_data_pages_req;
+
+ rc = drtm_dl_prepare_dlme_data(NULL, NULL, &dlme_data_bytes_req);
+ if (rc) {
+ ERROR("%s(): drtm_dl_prepare_dlme_data() failed unexpectedly"
+ " rc=%d\n", __func__, rc);
+ panic();
+ }
+
+ dlme_data_pages_req = ALIGNED_UP(dlme_data_bytes_req, DRTM_PAGE_SIZE)
+ / DRTM_PAGE_SIZE;
+ if (dlme_data_pages_req > UINT32_MAX) {
+ ERROR("%s(): dlme_data_pages_req is unexpectedly large"
+ " (does not fit in the bit-field)\n", __func__);
+ panic();
+ }
+
+ SMC_RET2(ctx, 1ULL, /* Feature is supported */
+ 0ULL << 32 /* Not using a Normal World DCE */
+ /* Minimum amount of space needed for the DLME data */
+ | (dlme_data_pages_req & 0xffffffffULL)
+ );
+}
+
+static uint64_t drtm_features_boot_pe_id(void *ctx)
+{
+ SMC_RET2(ctx, 1ULL, /* Boot PE feature is supported */
+ boot_pe_aff_value /* Boot PE identification */
+ );
+}
+
+
+uint64_t drtm_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ /* Check that the SMC call is from the Normal World. */
+ if (is_caller_secure(flags)) {
+ SMC_RET1(handle, NOT_SUPPORTED);
+ }
+
+ switch (smc_fid) {
+ case ARM_DRTM_SVC_VERSION:
+ INFO("++ DRTM service handler: version\n");
+ /* Return the version of current implementation */
+ SMC_RET1(handle, ARM_DRTM_VERSION);
+
+ case ARM_DRTM_SVC_FEATURES:
+ if ((x1 >> 63 & 0x1U) == 0) {
+ uint32_t func_id = x1;
+
+ /* Dispatch function-based queries. */
+ switch (func_id) {
+ case ARM_DRTM_SVC_VERSION:
+ INFO("++ DRTM service handler: DRTM_VERSION feature\n");
+ SMC_RET1(handle, SUCCESS);
+
+ case ARM_DRTM_SVC_FEATURES:
+ INFO("++ DRTM service handler: DRTM_FEATURES feature\n");
+ SMC_RET1(handle, SUCCESS);
+
+ case ARM_DRTM_SVC_UNPROTECT_MEM:
+ INFO("++ DRTM service handler: DRTM_UNPROTECT_MEMORY feature\n");
+ SMC_RET1(handle, SUCCESS);
+
+ case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
+ INFO("++ DRTM service handler: DRTM_DYNAMIC_LAUNCH feature\n");
+ SMC_RET1(handle, SUCCESS);
+
+ case ARM_DRTM_SVC_CLOSE_LOCALITY:
+ INFO("++ DRTM service handler: DRTM_CLOSE_LOCALITY feature\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+
+ case ARM_DRTM_SVC_GET_ERROR:
+ INFO("++ DRTM service handler: DRTM_GET_ERROR feature\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+
+ case ARM_DRTM_SVC_SET_ERROR:
+ INFO("++ DRTM service handler: DRTM_SET_ERROR feature\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+
+ case ARM_DRTM_SVC_SET_TCB_HASH:
+ INFO("++ DRTM service handler: DRTM_SET_TCB_HASH feature\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+
+ case ARM_DRTM_SVC_LOCK_TCB_HASHES:
+ INFO("++ DRTM service handler: DRTM_LOCK_TCB_HASHES feature\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+
+ default:
+ ERROR("Unknown ARM DRTM service function feature\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+ }
+ } else {
+ uint8_t feat_id = x1;
+
+ /* Dispatch feature-based queries. */
+ switch (feat_id) {
+ case ARM_DRTM_FEATURES_TPM:
+ INFO("++ DRTM service handler: TPM features\n");
+ return drtm_features_tpm(handle);
+
+ case ARM_DRTM_FEATURES_MEM_REQ:
+ INFO("++ DRTM service handler: Min. mem."
+ " requirement features\n");
+ return drtm_features_mem_req(handle);
+
+ case ARM_DRTM_FEATURES_DMA_PROT:
+ INFO("++ DRTM service handler: DMA protection features\n");
+ return drtm_features_dma_prot(handle);
+
+ case ARM_DRTM_FEATURES_BOOT_PE_ID:
+ INFO("++ DRTM service handler: Boot PE ID features\n");
+ return drtm_features_boot_pe_id(handle);
+
+ case ARM_DRTM_FEATURES_TCB_HASHES:
+ INFO("++ DRTM service handler: TCB-hashes features\n");
+ return drtm_features_tcb_hashes(handle);
+
+ default:
+ ERROR("Unknown ARM DRTM service feature\n");
+ SMC_RET1(handle, NOT_SUPPORTED);
+ }
+ }
+
+ case ARM_DRTM_SVC_UNPROTECT_MEM:
+ INFO("++ DRTM service handler: unprotect mem\n");
+ return drtm_unprotect_mem(handle);
+
+ case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
+ INFO("++ DRTM service handler: dynamic launch\n");
+ //locality2 = 1;
+ //locality3 = 1;
+ return drtm_dynamic_launch(x1, handle);
+
+ case ARM_DRTM_SVC_CLOSE_LOCALITY:
+ INFO("++ DRTM service handler: close locality\n");
+ if (x1 == 2) {
+ if (locality2 == 1) {
+ locality2 = 0;
+ SMC_RET1(handle, SMC_OK);
+ }
+ SMC_RET1(handle, DENIED);
+ }
+ if (x1 == 3) {
+ if (locality3 == 1) {
+ locality3 = 0;
+ SMC_RET1(handle, SMC_OK);
+ }
+ SMC_RET1(handle, DENIED);
+ }
+ SMC_RET1(handle, INVALID_PARAMETERS);
+
+ case ARM_DRTM_SVC_GET_ERROR:
+ INFO("++ DRTM service handler: get error\n");
+ return drtm_get_error(handle);
+
+ case ARM_DRTM_SVC_SET_ERROR:
+ INFO("++ DRTM service handler: set error\n");
+ return drtm_set_error(x1, handle);
+
+ default:
+ ERROR("Unknown ARM DRTM service call: 0x%x \n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
diff --git a/services/std_svc/drtm/drtm_main.h b/services/std_svc/drtm/drtm_main.h
new file mode 100644
index 000000000..713199a71
--- /dev/null
+++ b/services/std_svc/drtm/drtm_main.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef DRTM_MAIN_H
+#define DRTM_MAIN_H
+
+#include <stdint.h>
+
+#include <lib/smccc.h>
+
+#include "drtm_dma_prot.h"
+
+#define ALIGNED_UP(x, a) __extension__ ({ \
+ __typeof__(a) _a = (a); \
+ __typeof__(a) _one = 1; \
+ assert(IS_POWER_OF_TWO(_a)); \
+ ((x) + (_a - _one)) & ~(_a - _one); \
+})
+
+#define ALIGNED_DOWN(x, a) __extension__ ({ \
+ __typeof__(a) _a = (a); \
+ __typeof__(a) _one = 1; \
+ assert(IS_POWER_OF_TWO(_a)); \
+ (x) & ~(_a - _one); \
+})
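+/*
+ * For example, with a 4-KiB alignment: ALIGNED_UP(4097, 4096) == 8192 and
+ * ALIGNED_DOWN(4097, 4096) == 4096. `a' must be a power of two.
+ */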
+
+
+#define DRTM_PAGE_SIZE (4 * (1 << 10))
+#define DRTM_PAGE_SIZE_STR "4-KiB"
+
+
+enum drtm_retc {
+ SUCCESS = SMC_OK,
+ NOT_SUPPORTED = SMC_UNK,
+ INVALID_PARAMETERS = -2,
+ DENIED = -3,
+ NOT_FOUND = -4,
+ INTERNAL_ERROR = -5,
+ MEM_PROTECT_INVALID = -6,
+};
+
+struct __packed drtm_dl_args_v1 {
+ uint16_t version; /* Must be 1. */
+ uint8_t __res[2];
+ uint32_t features;
+ uint64_t dlme_paddr;
+ uint64_t dlme_size;
+ uint64_t dlme_img_off;
+ uint64_t dlme_img_ep_off;
+ uint64_t dlme_img_size;
+ uint64_t dlme_data_off;
+ uint64_t dce_nwd_paddr;
+ uint64_t dce_nwd_size;
+ drtm_dl_dma_prot_args_v1_t dma_prot_args;
+} __aligned(__alignof(uint16_t /* First member's type, `uint16_t version' */));
+#define DL_ARGS_GET_DMA_PROT_TYPE(a) (((a)->features >> 3) & 0x7U)
+#define DL_ARGS_GET_PCR_SCHEMA(a) (((a)->features >> 1) & 0x3U)
+#define DL_ARGS_GET_DLME_ENTRY_POINT(a) \
+ (((a)->dlme_paddr + (a)->dlme_img_off + (a)->dlme_img_ep_off))
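+/*
+ * Example: features == 0x8 selects DMA protection type 1 (bits [5:3]) and
+ * PCR schema 0 (bits [2:1]):
+ *
+ *	DL_ARGS_GET_DMA_PROT_TYPE(a);	// == 1
+ *	DL_ARGS_GET_PCR_SCHEMA(a);	// == 0
+ */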
+
+/*
+ * Version-independent type. May be used to avoid excessive code churn when
+ * migrating to new struct versions.
+ */
+typedef struct drtm_dl_args_v1 struct_drtm_dl_args;
+
+#endif /* DRTM_MAIN_H */
diff --git a/services/std_svc/drtm/drtm_mbedtls_config.h b/services/std_svc/drtm/drtm_mbedtls_config.h
new file mode 100644
index 000000000..56492ead8
--- /dev/null
+++ b/services/std_svc/drtm/drtm_mbedtls_config.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2015-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef MBEDTLS_CONFIG_H
+#define MBEDTLS_CONFIG_H
+
+/*
+ * Key algorithms currently supported on mbed TLS libraries
+ */
+#define TF_MBEDTLS_RSA 1
+#define TF_MBEDTLS_ECDSA 2
+#define TF_MBEDTLS_RSA_AND_ECDSA 3
+
+#define TF_MBEDTLS_USE_RSA (TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA \
+ || TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA_AND_ECDSA)
+#define TF_MBEDTLS_USE_ECDSA (TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_ECDSA \
+ || TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA_AND_ECDSA)
+
+/*
+ * Hash algorithms currently supported on mbed TLS libraries
+ */
+#define TF_MBEDTLS_SHA256 1
+#define TF_MBEDTLS_SHA384 2
+#define TF_MBEDTLS_SHA512 3
+
+/*
+ * Configuration file to build mbed TLS with the required features for
+ * Trusted Boot
+ */
+
+#define MBEDTLS_PLATFORM_MEMORY
+#define MBEDTLS_PLATFORM_NO_STD_FUNCTIONS
+/* Prevent mbed TLS from using snprintf so that it can use tf_snprintf. */
+#define MBEDTLS_PLATFORM_SNPRINTF_ALT
+#define MBEDTLS_PLATFORM_C
+
+#define MBEDTLS_MEMORY_BUFFER_ALLOC_C
+
+#if DRTM_SHA_ALG == 256
+#define MBEDTLS_SHA256_C
+#else
+/* Both the 384- and 512-bit digests are provided by the SHA-512 module. */
+#define MBEDTLS_SHA512_C
+#endif
+#define MBEDTLS_MD_C
+#define MBEDTLS_ERROR_C
+#define MBEDTLS_VERSION_C
+
+/* Memory buffer allocator options */
+#define MBEDTLS_MEMORY_ALIGN_MULTIPLE 8
+
+/*
+ * Prevent the use of 128-bit division which
+ * creates dependency on external libraries.
+ */
+#define MBEDTLS_NO_UDBL_DIVISION
+
+#ifndef __ASSEMBLER__
+/* System headers required to build mbed TLS with the current configuration */
+#include <stdlib.h>
+#include <mbedtls/check_config.h>
+#endif
+
+#define TF_MBEDTLS_HEAP_SIZE U(4 * 1024)
+
+#endif /* MBEDTLS_CONFIG_H */
diff --git a/services/std_svc/drtm/drtm_measurements.c b/services/std_svc/drtm/drtm_measurements.c
new file mode 100644
index 000000000..cfc0fbbd3
--- /dev/null
+++ b/services/std_svc/drtm/drtm_measurements.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM measurements into TPM PCRs.
+ *
+ * Authors:
+ * Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
+ *
+ */
+#include <assert.h>
+
+#include <mbedtls/md.h>
+
+#include <common/debug.h>
+#include <drivers/auth/mbedtls/mbedtls_common.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include "drtm_main.h"
+#include "drtm_measurements.h"
+
+#define XLAT_PAGE_SIZE PAGE_SIZE
+#if XLAT_PAGE_SIZE != DRTM_PAGE_SIZE
+#warning "xlat library page size differs from DRTM page size;"\
+ " mmap_add_dynamic_region() calls to the xlat library might fail"
+#endif
+
+
+#define DRTM_EVENT_ARM_BASE 0x9000U
+#define DRTM_EVENT_TYPE(n) (DRTM_EVENT_ARM_BASE + (unsigned int)(n))
+
+#define DRTM_EVENT_ARM_PCR_SCHEMA DRTM_EVENT_TYPE(1)
+#define DRTM_EVENT_ARM_DCE DRTM_EVENT_TYPE(2)
+#define DRTM_EVENT_ARM_DCE_PUBKEY DRTM_EVENT_TYPE(3)
+#define DRTM_EVENT_ARM_DLME DRTM_EVENT_TYPE(4)
+#define DRTM_EVENT_ARM_DLME_EP DRTM_EVENT_TYPE(5)
+#define DRTM_EVENT_ARM_DEBUG_CONFIG DRTM_EVENT_TYPE(6)
+#define DRTM_EVENT_ARM_NONSECURE_CONFIG DRTM_EVENT_TYPE(7)
+#define DRTM_EVENT_ARM_DCE_SECONDARY DRTM_EVENT_TYPE(8)
+#define DRTM_EVENT_ARM_TZFW DRTM_EVENT_TYPE(9)
+#define DRTM_EVENT_ARM_SEPARATOR DRTM_EVENT_TYPE(10)
+
+#define DRTM_NULL_DATA ((unsigned char []){ 0 })
+#define DRTM_EVENT_ARM_SEP_DATA \
+ (const unsigned char []){'A', 'R', 'M', '_', 'D', 'R', 'T', 'M' }
+
+#if !defined(DRTM_TPM_HASH_ALG)
+/*
+ * This is an error condition. However, avoid emitting a further error message,
+ * since an explanatory one will have already been emitted by the header file.
+ */
+#define DRTM_TPM_HASH_ALG TPM_ALG_NONE
+#define DRTM_MBEDTLS_HASH_ALG MBEDTLS_MD_NONE
+#else
+#define DRTM_MBEDTLS_HASH_ALG \
+ EXPAND_AND_COMBINE(MBEDTLS_MD_SHA, DRTM_SHA_ALG)
+#endif
+
+
+#define CHECK_RC(rc, func_call) { \
+ if ((rc)) { \
+ ERROR("%s(): " #func_call "failed unexpectedly rc=%d\n", \
+ __func__, rc); \
+ panic(); \
+ } \
+}
+
+
+int drtm_measurements_init(void)
+{
+ mbedtls_init();
+
+ return 0;
+}
+
+#define calc_hash(data_ptr, data_len, output) \
+ mbedtls_md(mbedtls_md_info_from_type((mbedtls_md_type_t)DRTM_MBEDTLS_HASH_ALG),\
+ data_ptr, data_len, output)
+
+enum drtm_retc drtm_take_measurements(const struct_drtm_dl_args *a,
+ struct drtm_event_log *log)
+{
+ struct tpm_log_1digest_shaX {
+ struct tpm_log_digests digests_1;
+ struct tpm_log_digest d;
+ unsigned char digest[MBEDTLS_MD_MAX_SIZE];
+ } __packed __aligned(__alignof(struct tpm_log_digests));
+ struct tpm_log_1digest_shaX digests_buf = {
+ .digests_1 = {
+ .count = 1,
+ },
+ .d = (struct tpm_log_digest) {
+ .h_alg = DRTM_TPM_HASH_ALG,
+ .buf_bytes = sizeof(((struct tpm_log_1digest_shaX *)0)->digest),
+ },
+ {0}
+ };
+ int rc;
+ uint8_t pcr_schema;
+ tpm_log_info_t *const tpm_log_info = &log->tpm_log_info;
+
+ rc = tpm_log_init(log->tpm_log_mem, sizeof(log->tpm_log_mem),
+ (enum tpm_hash_alg[]){ DRTM_TPM_HASH_ALG }, 1,
+ tpm_log_info);
+ CHECK_RC(rc, tpm_log_init);
+
+ /**
+ * Measurements extended into PCR-17.
+ *
+ * PCR-17: Measure the DCE image. Extend digest of (char)0 into PCR-17
+ * since the D-CRTM and the DCE are not separate.
+ */
+ rc = calc_hash(DRTM_NULL_DATA, sizeof(DRTM_NULL_DATA), digests_buf.digest);
+ CHECK_RC(rc, calc_hash(NULL_DATA_1));
+
+ rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_DCE, TPM_PCR_17,
+ &digests_buf.digests_1, NULL, 0);
+ CHECK_RC(rc, tpm_log_add_event_arm_dce);
+
+ /* PCR-17: Measure the PCR schema DRTM launch argument. */
+ pcr_schema = DL_ARGS_GET_PCR_SCHEMA(a);
+ rc = calc_hash(&pcr_schema, sizeof(pcr_schema), digests_buf.digest);
+ CHECK_RC(rc, calc_hash(pcr_schema));
+
+ rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_PCR_SCHEMA, TPM_PCR_17,
+ &digests_buf.digests_1, NULL, 0);
+ CHECK_RC(rc, tpm_log_add_event(ARM_PCR_SCHEMA_17));
+
+ /* PCR-17: Measure the enable state of external-debug, and trace. */
+ /*
+ * TODO: Measure the enable state of external-debug and trace. This should
+ * be returned through a platform-specific hook.
+ */
+
+ /* PCR-17: Measure the security lifecycle state. */
+ /*
+ * TODO: Measure the security lifecycle state. This is an implementation-
+ * defined value, retrieved through an implementation-defined mechanisms.
+ */
+
+ /*
+ * PCR-17: Optionally measure the NWd DCE.
+ * It is expected that such subsequent DCE stages are signed and verified.
+ * Whether they are measured in addition to signing is implementation
+ * -defined.
+ * Here the choice is to not measure any NWd DCE, in favour of PCR value
+ * resilience to any NWd DCE updates.
+ */
+
+ /* PCR-17: End of DCE measurements. */
+ rc = calc_hash(DRTM_EVENT_ARM_SEP_DATA, sizeof(DRTM_EVENT_ARM_SEP_DATA),
+ digests_buf.digest);
+ CHECK_RC(rc, calc_hash(ARM_SEP_DATA_17));
+
+ rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_SEPARATOR, TPM_PCR_17,
+ &digests_buf.digests_1,
+ DRTM_EVENT_ARM_SEP_DATA, sizeof(DRTM_EVENT_ARM_SEP_DATA));
+ CHECK_RC(rc, tpm_log_add_event(ARM_SEPARATOR_17));
+
+ /**
+ * Measurements extended into PCR-18.
+ *
+ * PCR-18: Measure the PCR schema DRTM launch argument.
+ */
+ pcr_schema = DL_ARGS_GET_PCR_SCHEMA(a);
+ rc = calc_hash(&pcr_schema, sizeof(pcr_schema), digests_buf.digest);
+ CHECK_RC(rc, calc_hash(pcr_schema));
+
+ rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_PCR_SCHEMA, TPM_PCR_18,
+ &digests_buf.digests_1, NULL, 0);
+ CHECK_RC(rc, tpm_log_add_event(ARM_PCR_SCHEMA_18));
+
+ /*
+ * PCR-18: Measure the public key used to verify DCE image(s) signatures.
+ * Extend digest of (char)0, since we do not expect the NWd DCE to be
+ * present.
+ */
+ assert(a->dce_nwd_size == 0);
+ rc = calc_hash(DRTM_NULL_DATA, sizeof(DRTM_NULL_DATA), digests_buf.digest);
+ CHECK_RC(rc, calc_hash(NULL_DATA_2));
+
+ rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_DCE_PUBKEY, TPM_PCR_18,
+ &digests_buf.digests_1, NULL, 0);
+ CHECK_RC(rc, tpm_log_add_event(ARM_DCE_PUBKEY));
+
+ /* PCR-18: Measure the DLME image. */
+ uintptr_t dlme_img_mapping;
+ size_t dlme_img_mapping_bytes;
+
+ dlme_img_mapping_bytes = ALIGNED_UP(a->dlme_img_size, DRTM_PAGE_SIZE);
+ rc = mmap_add_dynamic_region_alloc_va(a->dlme_paddr + a->dlme_img_off,
+ &dlme_img_mapping,
+ dlme_img_mapping_bytes, MT_RO_DATA | MT_NS);
+ if (rc) {
+ WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n", __func__, rc);
+ return INTERNAL_ERROR;
+ }
+
+ rc = calc_hash((void *)dlme_img_mapping, a->dlme_img_size,
+ digests_buf.digest);
+ CHECK_RC(rc, calc_hash(dlme_img));
+
+ rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_DLME, TPM_PCR_18,
+ &digests_buf.digests_1, NULL, 0);
+ CHECK_RC(rc, tpm_log_add_event(ARM_DLME));
+
+ rc = mmap_remove_dynamic_region(dlme_img_mapping, dlme_img_mapping_bytes);
+ CHECK_RC(rc, mmap_remove_dynamic_region);
+
+ /* PCR-18: Measure the DLME image entry point. */
+ uint64_t dlme_img_ep = DL_ARGS_GET_DLME_ENTRY_POINT(a);
+
+ rc = calc_hash((unsigned char *)&dlme_img_ep, sizeof(dlme_img_ep),
+ digests_buf.digest);
+ CHECK_RC(rc, calc_hash(dlme_img_ep_off));
+
+ rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_DLME_EP, TPM_PCR_18,
+ &digests_buf.digests_1, NULL, 0);
+ CHECK_RC(rc, tpm_log_add_event(ARM_DLME_EP));
+
+ /* PCR-18: End of DCE measurements. */
+ rc = calc_hash(DRTM_EVENT_ARM_SEP_DATA, sizeof(DRTM_EVENT_ARM_SEP_DATA),
+ digests_buf.digest);
+ CHECK_RC(rc, calc_hash(ARM_SEP_DATA_18));
+
+ rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_SEPARATOR, TPM_PCR_18,
+ &digests_buf.digests_1,
+ DRTM_EVENT_ARM_SEP_DATA, sizeof(DRTM_EVENT_ARM_SEP_DATA));
+ CHECK_RC(rc, tpm_log_add_event(ARM_SEPARATOR_18));
+
+ /*
+ * If the DCE is unable to log a measurement because there is no available
+ * space in the event log region, the DCE must extend a hash of the value
+ * 0xFF (1 byte in size) into PCR[17] and PCR[18] and enter remediation.
+ */
+ return SUCCESS;
+}
+
+void drtm_serialise_event_log(char *dst, const struct drtm_event_log *src,
+ size_t *event_log_size_out)
+{
+ if (src) {
+ tpm_log_serialise(dst, &src->tpm_log_info, event_log_size_out);
+ } else {
+ if (dst != NULL) {
+ ERROR("%s(): cannot serialise the unexpected NULL event log\n",
+ __func__);
+ panic();
+ }
+ if (event_log_size_out) {
+ /*
+ * DRTM Beta0: Note that the advertised minimum required size ought
+ * to be 64KiB, rather than a more economical size of our choosing.
+ */
+ *event_log_size_out = DRTM_EVENT_LOG_INIT_SIZE;
+ }
+ }
+}
diff --git a/services/std_svc/drtm/drtm_measurements.h b/services/std_svc/drtm/drtm_measurements.h
new file mode 100644
index 000000000..127df9f79
--- /dev/null
+++ b/services/std_svc/drtm/drtm_measurements.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef DRTM_MEASUREMENTS_H
+#define DRTM_MEASUREMENTS_H
+
+#include <stdint.h>
+
+#include <lib/tpm/tpm_log.h>
+
+#include "drtm_main.h"
+
+#define DRTM_EVENT_LOG_INIT_SIZE ((size_t)(768))
+
+#if !defined(DRTM_SHA_ALG)
+#error "The DRTM service requires definition of the DRTM_SHA_ALG macro"
+#else
+#define COMBINE(a, b) a ## b
+#define EXPAND_AND_COMBINE(a, b) COMBINE(a, b)
+#define DRTM_TPM_HASH_ALG EXPAND_AND_COMBINE(TPM_ALG_SHA, DRTM_SHA_ALG)
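+/*
+ * For example, DRTM_SHA_ALG=256 expands DRTM_TPM_HASH_ALG to TPM_ALG_SHA256
+ * (and, in drtm_measurements.c, DRTM_MBEDTLS_HASH_ALG to MBEDTLS_MD_SHA256).
+ */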
+
+#if DRTM_SHA_ALG == 256
+#define DRTM_TPM_HASH_ALG_DSIZE 32
+#elif DRTM_SHA_ALG == 384
+#define DRTM_TPM_HASH_ALG_DSIZE 48
+#elif DRTM_SHA_ALG == 512
+#define DRTM_TPM_HASH_ALG_DSIZE 64
+#endif
+
+#endif
+
+
+struct drtm_event_log {
+ tpm_log_info_t tpm_log_info;
+ uint32_t tpm_log_mem[DRTM_EVENT_LOG_INIT_SIZE / sizeof(uint32_t)];
+};
+/* Opaque / encapsulated type. */
+typedef struct drtm_event_log drtm_event_log_t;
+
+
+int drtm_measurements_init(void);
+enum drtm_retc drtm_take_measurements(const struct_drtm_dl_args *a,
+ drtm_event_log_t *log);
+void drtm_serialise_event_log(char *dst, const drtm_event_log_t *src_log,
+ size_t *event_log_size_out);
+
+#endif /* DRTM_MEASUREMENTS_H */
diff --git a/services/std_svc/drtm/drtm_qemu_virt_cached_resources_init.c b/services/std_svc/drtm/drtm_qemu_virt_cached_resources_init.c
new file mode 100644
index 000000000..0b91a3059
--- /dev/null
+++ b/services/std_svc/drtm/drtm_qemu_virt_cached_resources_init.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM protected resources
+ */
+
+#include "drtm_main.h"
+#include "drtm_cache.h"
+#include "drtm_dma_prot.h"
+
+/*
+ * XXX Note: the generic protected DRTM resources are being specialised into
+ * DRTM TCB hashes. Platform resources retrieved through the generic DRTM cache
+ * are going to be retrieved through bespoke interfaces instead.
+ * This file and drtm_cache.c will be removed once the transition is complete.
+ */
+
+struct __packed __descr_table_n {
+ struct_drtm_mem_region_descr_table header;
+ struct_drtm_mem_region_descr regions[24];
+};
+
+static const struct __descr_table_n qemu_virt_address_map = {
+ .header = {
+ .version = 1,
+ .num_regions = sizeof(((struct __descr_table_n *)NULL)->regions) /
+ sizeof(((struct __descr_table_n *)NULL)->regions[0])
+ },
+ /* See qemu/hw/arm/virt.c :
+ *
+ * static const MemMapEntry base_memmap[] = {
+ * // Space up to 0x8000000 is reserved for a boot ROM
+ * [VIRT_FLASH] = { 0, 0x08000000 },
+ * [VIRT_CPUPERIPHS] = { 0x08000000, 0x00020000 },
+ * // GIC distributor and CPU interfaces sit inside the CPU peripheral space
+ * [VIRT_GIC_DIST] = { 0x08000000, 0x00010000 },
+ * [VIRT_GIC_CPU] = { 0x08010000, 0x00010000 },
+ * [VIRT_GIC_V2M] = { 0x08020000, 0x00001000 },
+ * [VIRT_GIC_HYP] = { 0x08030000, 0x00010000 },
+ * [VIRT_GIC_VCPU] = { 0x08040000, 0x00010000 },
+ * // The space in between here is reserved for GICv3 CPU/vCPU/HYP
+ * [VIRT_GIC_ITS] = { 0x08080000, 0x00020000 },
+ * // This redistributor space allows up to 2*64kB*123 CPUs
+ * [VIRT_GIC_REDIST] = { 0x080A0000, 0x00F60000 },
+ * [VIRT_UART] = { 0x09000000, 0x00001000 },
+ * [VIRT_RTC] = { 0x09010000, 0x00001000 },
+ * [VIRT_FW_CFG] = { 0x09020000, 0x00000018 },
+ * [VIRT_GPIO] = { 0x09030000, 0x00001000 },
+ * [VIRT_SECURE_UART] = { 0x09040000, 0x00001000 },
+ * [VIRT_SMMU] = { 0x09050000, 0x00020000 },
+ * [VIRT_PCDIMM_ACPI] = { 0x09070000, MEMORY_HOTPLUG_IO_LEN },
+ * [VIRT_ACPI_GED] = { 0x09080000, ACPI_GED_EVT_SEL_LEN },
+ * [VIRT_NVDIMM_ACPI] = { 0x09090000, NVDIMM_ACPI_IO_LEN},
+ * [VIRT_PVTIME] = { 0x090a0000, 0x00010000 },
+ * [VIRT_SECURE_GPIO] = { 0x090b0000, 0x00001000 },
+ * [VIRT_MMIO] = { 0x0a000000, 0x00000200 },
+ * // ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size
+ * [VIRT_PLATFORM_BUS] = { 0x0c000000, 0x02000000 },
+ * [VIRT_SECURE_MEM] = { 0x0e000000, 0x01000000 },
+ * [VIRT_PCIE_MMIO] = { 0x10000000, 0x2eff0000 },
+ * [VIRT_PCIE_PIO] = { 0x3eff0000, 0x00010000 },
+ * [VIRT_PCIE_ECAM] = { 0x3f000000, 0x01000000 },
+ * // Actual RAM size depends on initial RAM and device memory settings
+ * [VIRT_MEM] = { GiB, LEGACY_RAMLIMIT_BYTES },
+ * };
+ *
+ * Note: When adjusting the regions below, please update the array length
+ * in the __descr_table_n structure accordingly.
+ *
+ */
+#define PAGES_AND_TYPE(bytes, type) \
+ .pages_and_type = DRTM_MEM_REGION_PAGES_AND_TYPE( \
+ (size_t)(bytes) / DRTM_PAGE_SIZE + \
+ ((size_t)(bytes) % DRTM_PAGE_SIZE != 0), \
+ DRTM_MEM_REGION_TYPE_##type)
+ .regions = {
+ {.paddr = 0, PAGES_AND_TYPE(0x08000000, NON_VOLATILE)},
+ {.paddr = 0x08000000, PAGES_AND_TYPE(0x00021000, DEVICE)},
+ {.paddr = 0x08030000, PAGES_AND_TYPE(0x00020000, DEVICE)},
+ {.paddr = 0x08080000, PAGES_AND_TYPE(0x00F80000, DEVICE)},
+ {.paddr = 0x09000000, PAGES_AND_TYPE(0x00001000, DEVICE)},
+ {.paddr = 0x09010000, PAGES_AND_TYPE(0x00001000, DEVICE)},
+ {.paddr = 0x09020000, PAGES_AND_TYPE(0x00000018, DEVICE)},
+ {.paddr = 0x09030000, PAGES_AND_TYPE(0x00001000, DEVICE)},
+ /* {.paddr = 0x09040000, PAGES_AND_TYPE(0x00001000, RESERVED)}, */
+ {.paddr = 0x09050000, PAGES_AND_TYPE(0x00020000 + DRTM_PAGE_SIZE, DEVICE)},
+ {.paddr = 0x09080000, PAGES_AND_TYPE(DRTM_PAGE_SIZE, DEVICE)},
+ {.paddr = 0x09090000, PAGES_AND_TYPE(DRTM_PAGE_SIZE, DEVICE)},
+ {.paddr = 0x090a0000, PAGES_AND_TYPE(0x00010000, DEVICE)},
+ /* {.paddr = 0x090b0000, PAGES_AND_TYPE(0x00001000, RESERVED)}, */
+ {.paddr = 0x0a000000, PAGES_AND_TYPE(0x00000200, DEVICE)},
+ {.paddr = 0x0c000000, PAGES_AND_TYPE(0x02000000, DEVICE)},
+ /* {.paddr = 0x0e000000, PAGES_AND_TYPE(0x01000000, RESERVED)}, */
+ {.paddr = 0x10000000, PAGES_AND_TYPE(0x30000000, DEVICE)},
+ /*
+ * At most 3 GiB RAM, to align with TF-A's max PA on ARM QEMU.
+ * Actual RAM size depends on initial RAM and device memory settings.
+ */
+ {.paddr = 0x40000000, PAGES_AND_TYPE(0xc0000000 /* 3 GiB */, NORMAL)},
+ },
+#undef PAGES_AND_TYPE
+};
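+/*
+ * The commented-out entries above correspond to QEMU's Secure-world
+ * resources (secure UART, secure GPIO, secure memory); presumably they are
+ * deliberately left out of the map handed to the Normal World.
+ */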
+
+
+static const struct cached_res CACHED_RESOURCES_INIT[] = {
+ {
+ .id = "address-map",
+ .bytes = sizeof(qemu_virt_address_map),
+ .data_ptr = (char *)&qemu_virt_address_map,
+ },
+};
+
+#define CACHED_RESOURCES_INIT_END (CACHED_RESOURCES_INIT + \
+ sizeof(CACHED_RESOURCES_INIT) / sizeof(CACHED_RESOURCES_INIT[0]))
diff --git a/services/std_svc/drtm/drtm_remediation.c b/services/std_svc/drtm/drtm_remediation.c
new file mode 100644
index 000000000..b896a9381
--- /dev/null
+++ b/services/std_svc/drtm/drtm_remediation.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM support for DRTM error remediation.
+ *
+ */
+#include <stdint.h>
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+
+#include "drtm_main.h"
+
+
+static enum drtm_retc drtm_error_set(long long error_code)
+{
+ /* TODO: Store the error code in non-volatile memory. */
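+	/*
+	 * A minimal sketch, assuming a hypothetical platform hook
+	 * plat_drtm_nv_write_error() and an INTERNAL_ERROR drtm_retc value:
+	 *
+	 *   if (plat_drtm_nv_write_error(error_code) != 0) {
+	 *       return INTERNAL_ERROR;
+	 *   }
+	 */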
+
+ return SUCCESS;
+}
+
+static enum drtm_retc drtm_error_get(long long *error_code)
+{
+ /* TODO: Get error code from non-volatile memory. */
+
+ *error_code = 0;
+
+ return SUCCESS;
+}
+
+void drtm_enter_remediation(long long err_code, const char *err_str)
+{
+ int rc;
+
+ if ((rc = drtm_error_set(err_code))) {
+ ERROR("%s(): drtm_error_set() failed unexpectedly rc=%d\n",
+ __func__, rc);
+ panic();
+ }
+
+	NOTICE("DRTM: entering remediation of error:\n%lld\t'%s'\n",
+	       err_code, err_str);
+
+ /* TODO: Reset the system rather than panic(). */
+ ERROR("%s(): system reset is not yet supported\n", __func__);
+ panic();
+}
+
+uintptr_t drtm_set_error(uint64_t x1, void *ctx)
+{
+ int rc;
+
+ if ((rc = drtm_error_set(x1))) {
+ SMC_RET1(ctx, rc);
+ }
+
+ SMC_RET1(ctx, SUCCESS);
+}
+
+uintptr_t drtm_get_error(void *ctx)
+{
+ long long error_code;
+ int rc;
+
+ if ((rc = drtm_error_get(&error_code))) {
+ SMC_RET1(ctx, rc);
+ }
+
+ SMC_RET2(ctx, SUCCESS, error_code);
+}
diff --git a/services/std_svc/drtm/drtm_remediation.h b/services/std_svc/drtm/drtm_remediation.h
new file mode 100644
index 000000000..b4a11f23e
--- /dev/null
+++ b/services/std_svc/drtm/drtm_remediation.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+#ifndef DRTM_REMEDIATION_H
+#define DRTM_REMEDIATION_H
+
+#include <stdint.h>
+
+uintptr_t drtm_set_error(uint64_t x1, void *ctx);
+uintptr_t drtm_get_error(void *ctx);
+
+void drtm_enter_remediation(long long error_code, const char *error_str);
+
+#endif /* DRTM_REMEDIATION_H */
diff --git a/services/std_svc/drtm/drtm_res_tcb_hashes.c b/services/std_svc/drtm/drtm_res_tcb_hashes.c
new file mode 100644
index 000000000..afa49da6f
--- /dev/null
+++ b/services/std_svc/drtm/drtm_res_tcb_hashes.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM resource: TCB hashes.
+ *
+ */
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <common/runtime_svc.h>
+#include <services/drtm_svc_plat.h>
+
+#include "drtm_measurements.h" /* DRTM_TPM_HASH_ALG and _DSIZE */
+#include "drtm_remediation.h"
+
+
+struct __packed drtm_tcb_hash_v1 {
+ uint32_t hash_id;
+ uint8_t hash_val[DRTM_TPM_HASH_ALG_DSIZE];
+};
+
+struct __packed drtm_tcb_hash_table_hdr_v1 {
+ uint16_t version; /* Must be 1. */
+ uint16_t num_hashes;
+ uint32_t hashing_alg;
+};
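+/*
+ * Wire format: one drtm_tcb_hash_table_hdr_v1, immediately followed by
+ * `num_hashes' packed drtm_tcb_hash_v1 entries, each a hash_id plus a
+ * digest of `hashing_alg'.
+ */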
+
+/* Version-agnostic types. */
+typedef struct drtm_tcb_hash_table_hdr_v1 struct_drtm_tcb_hash_table_hdr;
+typedef struct drtm_tcb_hash_v1 struct_drtm_tcb_hash;
+
+CASSERT(sizeof(((struct plat_drtm_tcb_hash *)NULL)->hash_val)
+ == sizeof(((struct_drtm_tcb_hash *)NULL)->hash_val),
+ bad_plat_drtm_tcb_digest_buffer_size
+);
+
+
+static bool tcb_hashes_set_at_runtime;
+static bool tcb_hashes_locked;
+
+
+/*
+ * Default enumeration of the platform's DRTM TCB hashes -- no hashes.
+ * Platforms override this weak definition to supply their own.
+ */
+#pragma weak plat_enumerate_drtm_tcb_hashes
+void plat_enumerate_drtm_tcb_hashes(const struct plat_drtm_tcb_hash **hashes_out,
+				    size_t *hashes_count_out)
+{
+	*hashes_out = NULL;
+	*hashes_count_out = 0;
+}
+
+
+int drtm_tcb_hashes_init(void)
+{
+ const struct plat_drtm_tcb_hash *init_hashes;
+ size_t num_init_hashes;
+ bool init_hashes_invalid = false;
+
+ plat_enumerate_drtm_tcb_hashes(&init_hashes, &num_init_hashes);
+ if (!init_hashes) {
+ return 0;
+ }
+
+ /* Validate the platform DRTM TCB hashes. */
+ for (size_t j = 0; j < num_init_hashes; j++) {
+ const struct plat_drtm_tcb_hash *plat_h = init_hashes + j;
+
+		if (plat_h->hash_bytes != DRTM_TPM_HASH_ALG_DSIZE) {
+			ERROR("DRTM: invalid hash value size of platform TCB hash"
+			      " at index %zu\n", j);
+			init_hashes_invalid = true;
+		}
+
+		for (size_t i = 0; i < j; i++) {
+			const struct plat_drtm_tcb_hash *prev_h = init_hashes + i;
+
+			if (plat_h->hash_id.uint32 == prev_h->hash_id.uint32) {
+				ERROR("DRTM: duplicate hash value ID of platform TCB hash"
+				      " at index %zu (duplicates ID at index %zu)\n", j, i);
+				init_hashes_invalid = true;
+			}
+ }
+ }
+ if (init_hashes_invalid) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+uint64_t drtm_features_tcb_hashes(void *ctx)
+{
+ SMC_RET2(ctx, 1, /* TCB hashes supported. */
+ (uint64_t)0 << 8 /* MBZ */
+ | (uint8_t)0 /* TCB hashes may not be recorded at runtime. */
+ );
+}
+
+void drtm_dl_ensure_tcb_hashes_are_final(void)
+{
+ if (!tcb_hashes_set_at_runtime || tcb_hashes_locked) {
+ return;
+ }
+
+ /*
+ * Some runtime TCB hashes were set, but the set of TCB hashes hasn't been
+ * locked / frozen by trusted Normal World firmware. Therefore there is no
+ * way to guarantee that the set of TCB hashes doesn't contain malicious
+ * ones from an untrusted Normal World component.
+ * Refuse to complete the dynamic launch, and reboot the system.
+ */
+ drtm_enter_remediation(0x4, "TCB hashes are still open (missing LOCK call)");
+}
+
+/*
+ * enum drtm_retc drtm_set_tcb_hash(uint64_t x1)
+ * {
+ * // Sets `tcb_hashes_set_at_runtime' when it succeeds
+ * }
+ */
+
+/*
+ * enum drtm_retc drtm_lock_tcb_hashes(void)
+ * {
+ * // Sets `tcb_hashes_locked' when it succeeds
+ * }
+ */
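+/*
+ * A rough sketch of the intended semantics of the two calls above (neither
+ * is implemented in this patch): SET would be refused once
+ * `tcb_hashes_locked' is set and would record `tcb_hashes_set_at_runtime'
+ * on success; LOCK would simply set `tcb_hashes_locked'.
+ */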
+
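+/*
+ * Serialise the TCB hashes table at `dst'. When `dst' is NULL nothing is
+ * copied and only the required size is computed, so a typical caller makes
+ * two passes (a sketch; `buf' must be at least `bytes' big):
+ *
+ *   size_t bytes;
+ *   drtm_serialise_tcb_hashes_table(NULL, &bytes);
+ *   drtm_serialise_tcb_hashes_table(buf, NULL);
+ */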
+void drtm_serialise_tcb_hashes_table(char *dst, size_t *size_out)
+{
+ const struct plat_drtm_tcb_hash *init_hashes;
+ size_t num_init_hashes;
+ size_t num_hashes_total = 0;
+ uintptr_t table_cur = (uintptr_t)dst;
+
+ /* Enumerate all available TCB hashes. */
+ plat_enumerate_drtm_tcb_hashes(&init_hashes, &num_init_hashes);
+ num_hashes_total += num_init_hashes;
+
+ if (num_hashes_total == 0) {
+ goto serialise_tcb_hashes_table_done;
+ }
+
+ /* Serialise DRTM TCB_HASHES_TABLE header. */
+ struct_drtm_tcb_hash_table_hdr hdr;
+ hdr.version = 1;
+	hdr.num_hashes = num_hashes_total;
+ hdr.hashing_alg = DRTM_TPM_HASH_ALG;
+
+ if (dst) {
+ memcpy((char *)table_cur, &hdr, sizeof(hdr));
+ }
+ table_cur += sizeof(hdr);
+
+ /* Serialise platform DRTM TCB hashes. */
+ for (const struct plat_drtm_tcb_hash *plat_h = init_hashes;
+ plat_h < init_hashes + num_init_hashes;
+ plat_h++) {
+ struct_drtm_tcb_hash drtm_h;
+
+ drtm_h.hash_id = plat_h->hash_id.uint32;
+ /* This assertion follows from the init-time check. */
+ assert(plat_h->hash_bytes == sizeof(drtm_h.hash_val));
+ /* This assertion follows from the one above and the compile-time one.*/
+ assert(plat_h->hash_bytes <= sizeof(plat_h->hash_val));
+ memcpy(&drtm_h.hash_val, plat_h->hash_val, plat_h->hash_bytes);
+
+ if (dst) {
+ memcpy((char *)table_cur, &drtm_h, sizeof(drtm_h));
+ }
+ table_cur += sizeof(drtm_h);
+ }
+
+serialise_tcb_hashes_table_done:
+ /* Return the number of bytes serialised. */
+ if (size_out) {
+ *size_out = table_cur - (uintptr_t)dst;
+ }
+}
diff --git a/services/std_svc/drtm/drtm_res_tcb_hashes.h b/services/std_svc/drtm/drtm_res_tcb_hashes.h
new file mode 100644
index 000000000..725f078e8
--- /dev/null
+++ b/services/std_svc/drtm/drtm_res_tcb_hashes.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * DRTM resource: TCB hashes.
+ *
+ */
+#ifndef DRTM_RES_TCB_HASHES_H
+#define DRTM_RES_TCB_HASHES_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+int drtm_tcb_hashes_init(void);
+uint64_t drtm_features_tcb_hashes(void *ctx);
+void drtm_dl_ensure_tcb_hashes_are_final(void);
+void drtm_serialise_tcb_hashes_table(char *dst,
+ size_t *tcb_hashes_table_size_out);
+
+#endif /* DRTM_RES_TCB_HASHES_H */
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 23f13ab82..62b90a94e 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -13,6 +13,7 @@
#include <lib/pmf/pmf.h>
#include <lib/psci/psci.h>
#include <lib/runtime_instr.h>
+#include <services/drtm_svc.h>
#include <services/sdei.h>
#include <services/spm_mm_svc.h>
#include <services/spmd_svc.h>
@@ -66,6 +67,12 @@ static int32_t std_svc_setup(void)
trng_setup();
+#if DRTM_SUPPORT
+ if (drtm_setup() != 0) {
+ ret = 1;
+ }
+#endif
+
return ret;
}
@@ -145,6 +152,13 @@ static uintptr_t std_svc_smc_handler(uint32_t smc_fid,
 #if TRNG_SUPPORT
 	if (is_trng_fid(smc_fid)) {
 		return trng_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
 					flags);
 	}
 #endif
+
+#if DRTM_SUPPORT
+	if (is_drtm_fid(smc_fid)) {
+		return drtm_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
+					flags);
+	}
+#endif