Diffstat (limited to 'services')
-rw-r--r--  services/std_svc/spm/README.rst                                   3
-rw-r--r--  services/std_svc/spm/spci.c                                     774
-rw-r--r--  services/std_svc/spm/spm.mk                                      14
-rw-r--r--  services/std_svc/spm/spm_buffers.c                              108
-rw-r--r--  services/std_svc/spm/spm_main.c                                 364
-rw-r--r--  services/std_svc/spm/spm_private.h                               58
-rw-r--r--  services/std_svc/spm/spm_setup.c                                145
-rw-r--r--  services/std_svc/spm/spm_xlat.c                                 312
-rw-r--r--  services/std_svc/spm/sprt.c                                     215
-rw-r--r--  services/std_svc/spm_deprecated/aarch64/spm_helpers.S            74
-rw-r--r--  services/std_svc/spm_deprecated/aarch64/spm_shim_exceptions.S   128
-rw-r--r--  services/std_svc/spm_deprecated/spm.mk                           23
-rw-r--r--  services/std_svc/spm_deprecated/spm_main.c                      353
-rw-r--r--  services/std_svc/spm_deprecated/spm_private.h                    70
-rw-r--r--  services/std_svc/spm_deprecated/spm_setup.c (renamed from services/std_svc/spm/sp_setup.c)    0
-rw-r--r--  services/std_svc/spm_deprecated/spm_shim_private.h               25
-rw-r--r--  services/std_svc/spm_deprecated/spm_xlat.c (renamed from services/std_svc/spm/sp_xlat.c)      0
-rw-r--r--  services/std_svc/std_svc_setup.c                                  2
18 files changed, 2478 insertions, 190 deletions
diff --git a/services/std_svc/spm/README.rst b/services/std_svc/spm/README.rst
new file mode 100644
index 00000000..63406a3b
--- /dev/null
+++ b/services/std_svc/spm/README.rst
@@ -0,0 +1,3 @@
+This is a prototype loosely based on the SPCI Alpha and SPRT pre-alpha
+specifications. Any interface / platform API introduced for this is subject to
+change as it evolves.
diff --git a/services/std_svc/spm/spci.c b/services/std_svc/spm/spci.c
new file mode 100644
index 00000000..5e4ff918
--- /dev/null
+++ b/services/std_svc/spm/spci.c
@@ -0,0 +1,774 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <errno.h>
+#include <smccc.h>
+#include <smccc_helpers.h>
+#include <spci_svc.h>
+#include <spinlock.h>
+#include <sprt_host.h>
+#include <sprt_svc.h>
+#include <string.h>
+#include <utils.h>
+
+#include "spm_private.h"
+
+/*******************************************************************************
+ * Macros to print UUIDs.
+ ******************************************************************************/
+#define PRINT_UUID_FORMAT "%08x-%08x-%08x-%08x"
+#define PRINT_UUID_ARGS(x) x[0], x[1], x[2], x[3]
+
+/*******************************************************************************
+ * Array of structs that contains information about all currently open
+ * Secure Service handles.
+ ******************************************************************************/
+typedef enum spci_handle_status {
+ HANDLE_STATUS_CLOSED = 0,
+ HANDLE_STATUS_OPEN,
+} spci_handle_status_t;
+
+typedef struct spci_handle {
+ /* 16-bit value used as reference in all SPCI calls */
+ uint16_t handle;
+
+ /* Client ID of the client that requested the handle */
+ uint16_t client_id;
+
+ /* Current status of the handle */
+ spci_handle_status_t status;
+
+ /*
+ * Context of the Secure Partition that provides the Secure Service
+ * referenced by this handle.
+ */
+ sp_context_t *sp_ctx;
+
+ /*
+ * The same handle might be used for multiple requests, keep a reference
+ * counter of them.
+ */
+ unsigned int num_active_requests;
+} spci_handle_t;
+
+static spci_handle_t spci_handles[PLAT_SPCI_HANDLES_MAX_NUM];
+static spinlock_t spci_handles_lock;
+
+/*
+ * Given a handle and a client ID, return the element of the spci_handles
+ * array that contains the information of the handle. It can only return open
+ * handles. It returns NULL if it couldn't find the element in the array.
+ */
+static spci_handle_t *spci_handle_info_get(uint16_t handle, uint16_t client_id)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(spci_handles); i++) {
+ spci_handle_t *h = &(spci_handles[i]);
+
+ /* Only check for open handles */
+ if (h->status == HANDLE_STATUS_CLOSED) {
+ continue;
+ }
+
+ /* Check if either the handle or the client ID are different */
+ if ((h->handle != handle) || (h->client_id != client_id)) {
+ continue;
+ }
+
+ return h;
+ }
+
+ return NULL;
+}
+
+/*
+ * Returns a unique value for a handle. This function must be called while
+ * spci_handles_lock is locked. It returns 0 on success, -1 on error.
+ */
+static int spci_create_handle_value(uint16_t *handle)
+{
+ /*
+ * Trivial implementation that relies on the fact that any handle will
+ * be closed before 2^16 more handles have been opened.
+ */
+ static uint16_t handle_count;
+
+ *handle = handle_count;
+
+ handle_count++;
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Returns a unique token for a Secure Service request.
+ ******************************************************************************/
+static uint32_t spci_create_token_value(void)
+{
+ /*
+ * Trivial implementation that relies on the fact that any response will
+ * be read before 2^32 more service requests have been done.
+ */
+ static uint32_t token_count;
+
+ return token_count++;
+}
+
+/*******************************************************************************
+ * This function looks for a Secure Partition that has a Secure Service
+ * identified by the given UUID. It returns a handle that the client can use to
+ * access the service, and an SPCI_*** error code.
+ ******************************************************************************/
+static uint64_t spci_service_handle_open_poll(void *handle, u_register_t x1,
+ u_register_t x2, u_register_t x3, u_register_t x4,
+ u_register_t x5, u_register_t x6, u_register_t x7)
+{
+ unsigned int i;
+ sp_context_t *sp_ptr;
+ uint16_t service_handle;
+
+ /* Bits 31:16 of w7 are reserved (MBZ). */
+ assert((x7 & 0xFFFF0000U) == 0);
+
+ uint16_t client_id = x7 & 0x0000FFFFU;
+ uint32_t uuid[4] = { x1, x2, x3, x4 };
+
+ /* Get pointer to the Secure Partition that handles this service */
+ sp_ptr = spm_sp_get_by_uuid(&uuid);
+ if (sp_ptr == NULL) {
+ WARN("SPCI: Service requested by client 0x%04x not found\n",
+ client_id);
+ WARN("SPCI: UUID: " PRINT_UUID_FORMAT "\n",
+ PRINT_UUID_ARGS(uuid));
+
+ SMC_RET2(handle, SPCI_NOT_PRESENT, 0);
+ }
+
+ /* Get lock of the array of handles */
+ spin_lock(&spci_handles_lock);
+
+ /*
+ * We need to record the client ID and Secure Partition that correspond
+ * to this handle. Look for the first free entry in the array.
+ */
+ for (i = 0; i < PLAT_SPCI_HANDLES_MAX_NUM; i++) {
+ if (spci_handles[i].status == HANDLE_STATUS_CLOSED) {
+ break;
+ }
+ }
+
+ if (i == PLAT_SPCI_HANDLES_MAX_NUM) {
+ spin_unlock(&spci_handles_lock);
+
+ WARN("SPCI: Can't open more handles. Client 0x%04x\n",
+ client_id);
+ WARN("SPCI: UUID: " PRINT_UUID_FORMAT "\n",
+ PRINT_UUID_ARGS(uuid));
+
+ SMC_RET2(handle, SPCI_NO_MEMORY, 0);
+ }
+
+ /* Create new handle value */
+ if (spci_create_handle_value(&service_handle) != 0) {
+ spin_unlock(&spci_handles_lock);
+
+ WARN("SPCI: Can't create a new handle value. Client 0x%04x\n",
+ client_id);
+ WARN("SPCI: UUID: " PRINT_UUID_FORMAT "\n",
+ PRINT_UUID_ARGS(uuid));
+
+ SMC_RET2(handle, SPCI_NO_MEMORY, 0);
+ }
+
+ /* Save all information about this handle */
+ spci_handles[i].status = HANDLE_STATUS_OPEN;
+ spci_handles[i].client_id = client_id;
+ spci_handles[i].handle = service_handle;
+ spci_handles[i].num_active_requests = 0U;
+ spci_handles[i].sp_ctx = sp_ptr;
+
+ /* Release lock of the array of handles */
+ spin_unlock(&spci_handles_lock);
+
+ VERBOSE("SPCI: Service handle request by client 0x%04x: 0x%04x\n",
+ client_id, service_handle);
+ VERBOSE("SPCI: UUID: " PRINT_UUID_FORMAT "\n", PRINT_UUID_ARGS(uuid));
+
+ /* The handle is returned in the top 16 bits of x1 */
+ SMC_RET2(handle, SPCI_SUCCESS, ((uint32_t)service_handle) << 16);
+}
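
A minimal sketch of the w7 packing convention used by the SMCs in this file:
the service handle returned above goes in bits [31:16] and the client ID in
bits [15:0]. The helper name is illustrative and not part of this patch.

    static inline uint32_t spci_pack_x7(uint16_t service_handle,
                                        uint16_t client_id)
    {
        /* Service handle in bits [31:16], client ID in bits [15:0]. */
        return ((uint32_t)service_handle << 16) | client_id;
    }
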
+
+/*******************************************************************************
+ * This function closes a handle that a specific client uses to access a Secure
+ * Service. It returns an SPCI_*** error code.
+ ******************************************************************************/
+static uint64_t spci_service_handle_close(void *handle, u_register_t x1)
+{
+ spci_handle_t *handle_info;
+ uint16_t client_id = x1 & 0x0000FFFFU;
+ uint16_t service_handle = (x1 >> 16) & 0x0000FFFFU;
+
+ spin_lock(&spci_handles_lock);
+
+ handle_info = spci_handle_info_get(service_handle, client_id);
+
+ if (handle_info == NULL) {
+ spin_unlock(&spci_handles_lock);
+
+ WARN("SPCI: Tried to close invalid handle 0x%04x by client 0x%04x\n",
+ service_handle, client_id);
+
+ SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+ }
+
+ if (handle_info->status != HANDLE_STATUS_OPEN) {
+ spin_unlock(&spci_handles_lock);
+
+ WARN("SPCI: Tried to close handle 0x%04x by client 0x%04x in status %d\n",
+ service_handle, client_id, handle_info->status);
+
+ SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+ }
+
+ if (handle_info->num_active_requests != 0U) {
+ spin_unlock(&spci_handles_lock);
+
+ /* A handle can't be closed if there are requests left */
+ WARN("SPCI: Tried to close handle 0x%04x by client 0x%04x with %d requests left\n",
+ service_handle, client_id,
+ handle_info->num_active_requests);
+
+ SMC_RET1(handle, SPCI_BUSY);
+ }
+
+ memset(handle_info, 0, sizeof(spci_handle_t));
+
+ handle_info->status = HANDLE_STATUS_CLOSED;
+
+ spin_unlock(&spci_handles_lock);
+
+ VERBOSE("SPCI: Closed handle 0x%04x by client 0x%04x.\n",
+ service_handle, client_id);
+
+ SMC_RET1(handle, SPCI_SUCCESS);
+}
+
+/*******************************************************************************
+ * This function performs a blocking Secure Service request from a given
+ * handle and client ID.
+ ******************************************************************************/
+static uint64_t spci_service_request_blocking(void *handle,
+ uint32_t smc_fid, u_register_t x1, u_register_t x2,
+ u_register_t x3, u_register_t x4, u_register_t x5,
+ u_register_t x6, u_register_t x7)
+{
+ spci_handle_t *handle_info;
+ sp_context_t *sp_ctx;
+ cpu_context_t *cpu_ctx;
+ uint32_t rx0;
+ u_register_t rx1, rx2, rx3;
+ uint16_t request_handle, client_id;
+
+ /* Get handle array lock */
+ spin_lock(&spci_handles_lock);
+
+ /* Get pointer to struct of this open handle and client ID. */
+ request_handle = (x7 >> 16U) & 0x0000FFFFU;
+ client_id = x7 & 0x0000FFFFU;
+
+ handle_info = spci_handle_info_get(request_handle, client_id);
+ if (handle_info == NULL) {
+ spin_unlock(&spci_handles_lock);
+
+ WARN("SPCI_SERVICE_TUN_REQUEST_BLOCKING: Not found.\n");
+ WARN(" Handle 0x%04x. Client ID 0x%04x\n", request_handle,
+ client_id);
+
+		SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+ }
+
+ /* Get pointer to the Secure Partition that handles the service */
+ sp_ctx = handle_info->sp_ctx;
+ assert(sp_ctx != NULL);
+ cpu_ctx = &(sp_ctx->cpu_ctx);
+
+ /* Blocking requests are only allowed if the queue is empty */
+ if (handle_info->num_active_requests > 0) {
+ spin_unlock(&spci_handles_lock);
+
+ SMC_RET1(handle, SPCI_BUSY);
+ }
+
+ if (spm_sp_request_increase_if_zero(sp_ctx) == -1) {
+ spin_unlock(&spci_handles_lock);
+
+ SMC_RET1(handle, SPCI_BUSY);
+ }
+
+ /* Prevent this handle from being closed */
+ handle_info->num_active_requests += 1;
+
+ /* Release handle lock */
+ spin_unlock(&spci_handles_lock);
+
+ /* Save the Normal world context */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /* Wait until the Secure Partition is idle and set it to busy. */
+ sp_state_wait_switch(sp_ctx, SP_STATE_IDLE, SP_STATE_BUSY);
+
+ /* Pass arguments to the Secure Partition */
+ struct sprt_queue_entry_message message = {
+ .type = SPRT_MSG_TYPE_SERVICE_TUN_REQUEST,
+ .client_id = client_id,
+ .service_handle = request_handle,
+ .session_id = x6,
+ .token = 0, /* No token needed for blocking requests */
+ .args = {smc_fid, x1, x2, x3, x4, x5}
+ };
+
+ spin_lock(&(sp_ctx->spm_sp_buffer_lock));
+ int rc = sprt_push_message((void *)sp_ctx->spm_sp_buffer_base, &message,
+ SPRT_QUEUE_NUM_BLOCKING);
+ spin_unlock(&(sp_ctx->spm_sp_buffer_lock));
+ if (rc != 0) {
+ /*
+ * This shouldn't happen, blocking requests can only be made if
+ * the request queue is empty.
+ */
+ assert(rc == -ENOMEM);
+ ERROR("SPCI_SERVICE_TUN_REQUEST_BLOCKING: Queue is full.\n");
+ panic();
+ }
+
+ /* Jump to the Secure Partition. */
+ rx0 = spm_sp_synchronous_entry(sp_ctx, 0);
+
+ /* Verify returned value */
+ if (rx0 != SPRT_PUT_RESPONSE_AARCH64) {
+ ERROR("SPM: %s: Unexpected x0 value 0x%x\n", __func__, rx0);
+ panic();
+ }
+
+ rx1 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3);
+ rx2 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4);
+ rx3 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X5);
+
+ /* Flag Secure Partition as idle. */
+ assert(sp_ctx->state == SP_STATE_BUSY);
+ sp_state_set(sp_ctx, SP_STATE_IDLE);
+
+ /* Decrease count of requests. */
+ spin_lock(&spci_handles_lock);
+ handle_info->num_active_requests -= 1;
+ spin_unlock(&spci_handles_lock);
+ spm_sp_request_decrease(sp_ctx);
+
+ /* Restore non-secure state */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ SMC_RET4(handle, SPCI_SUCCESS, rx1, rx2, rx3);
+}
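
For illustration, a blocking request from a Normal world client could look
like the sketch below. smc64() is a hypothetical SMC wrapper and the FID name
is illustrative; the real function IDs are defined in spci_svc.h.

    uint64_t ret[4];
    uint32_t x7 = spci_pack_x7(service_handle, client_id);

    /* Does not return until the SP has answered via SPRT_PUT_RESPONSE. */
    smc64(SPCI_SERVICE_REQUEST_BLOCKING_FID,
          arg1, arg2, arg3, arg4, arg5, session_id, x7, ret);
    if (ret[0] == SPCI_SUCCESS) {
        /* ret[1..3] hold the response values read back above as rx1-rx3. */
    }
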
+
+/*******************************************************************************
+ * This function starts a non-blocking Secure Service request from a given
+ * handle and client ID, returning a token that is later used to poll for
+ * the response.
+ ******************************************************************************/
+static uint64_t spci_service_request_start(void *handle,
+ uint32_t smc_fid, u_register_t x1, u_register_t x2,
+ u_register_t x3, u_register_t x4, u_register_t x5,
+ u_register_t x6, u_register_t x7)
+{
+ spci_handle_t *handle_info;
+ sp_context_t *sp_ctx;
+ cpu_context_t *cpu_ctx;
+ uint16_t request_handle, client_id;
+ uint32_t token;
+
+ /* Get handle array lock */
+ spin_lock(&spci_handles_lock);
+
+ /* Get pointer to struct of this open handle and client ID. */
+ request_handle = (x7 >> 16U) & 0x0000FFFFU;
+ client_id = x7 & 0x0000FFFFU;
+
+ handle_info = spci_handle_info_get(request_handle, client_id);
+ if (handle_info == NULL) {
+ spin_unlock(&spci_handles_lock);
+
+ WARN("SPCI_SERVICE_TUN_REQUEST_START: Not found.\n"
+ " Handle 0x%04x. Client ID 0x%04x\n", request_handle,
+ client_id);
+
+ SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+ }
+
+ /* Get pointer to the Secure Partition that handles the service */
+ sp_ctx = handle_info->sp_ctx;
+ assert(sp_ctx != NULL);
+ cpu_ctx = &(sp_ctx->cpu_ctx);
+
+ /* Prevent this handle from being closed */
+ handle_info->num_active_requests += 1;
+
+ spm_sp_request_increase(sp_ctx);
+
+ /* Create new token for this request */
+ token = spci_create_token_value();
+
+ /* Release handle lock */
+ spin_unlock(&spci_handles_lock);
+
+ /* Pass arguments to the Secure Partition */
+ struct sprt_queue_entry_message message = {
+ .type = SPRT_MSG_TYPE_SERVICE_TUN_REQUEST,
+ .client_id = client_id,
+ .service_handle = request_handle,
+ .session_id = x6,
+ .token = token,
+ .args = {smc_fid, x1, x2, x3, x4, x5}
+ };
+
+ spin_lock(&(sp_ctx->spm_sp_buffer_lock));
+ int rc = sprt_push_message((void *)sp_ctx->spm_sp_buffer_base, &message,
+ SPRT_QUEUE_NUM_NON_BLOCKING);
+ spin_unlock(&(sp_ctx->spm_sp_buffer_lock));
+	if (rc != 0) {
+		WARN("SPCI_SERVICE_TUN_REQUEST_START: SPRT queue full.\n"
+		     "  Handle 0x%04x. Client ID 0x%04x\n", request_handle,
+		     client_id);
+		/* Undo the request accounting done above before failing. */
+		spin_lock(&spci_handles_lock);
+		handle_info->num_active_requests -= 1;
+		spin_unlock(&spci_handles_lock);
+		spm_sp_request_decrease(sp_ctx);
+		SMC_RET1(handle, SPCI_NO_MEMORY);
+	}
+
+ /* Try to enter the partition. If it's not possible, simply return. */
+ if (sp_state_try_switch(sp_ctx, SP_STATE_IDLE, SP_STATE_BUSY) != 0) {
+ SMC_RET2(handle, SPCI_SUCCESS, token);
+ }
+
+ /* Save the Normal world context */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /*
+ * This request is non-blocking and needs to be interruptible by
+ * non-secure interrupts. Enable their routing to EL3 during the
+ * processing of the Secure Partition's service on this core.
+ */
+
+ /* Jump to the Secure Partition. */
+ uint64_t ret = spm_sp_synchronous_entry(sp_ctx, 1);
+
+ /* Verify returned values */
+ if (ret == SPRT_PUT_RESPONSE_AARCH64) {
+ uint32_t token;
+ uint64_t rx1, rx2, rx3, x6;
+
+ token = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1);
+ rx1 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3);
+ rx2 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4);
+ rx3 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X5);
+ x6 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X6);
+
+ uint16_t client_id = x6 & 0xFFFFU;
+ uint16_t service_handle = x6 >> 16;
+
+ int rc = spm_response_add(client_id, service_handle, token,
+ rx1, rx2, rx3);
+ if (rc != 0) {
+ /*
+			 * This error is fatal because we can't return to the
+			 * SP from this SMC. The SP has crashed.
+ */
+ panic();
+ }
+ } else if ((ret != SPRT_YIELD_AARCH64) &&
+ (ret != SPM_SECURE_PARTITION_PREEMPTED)) {
+ ERROR("SPM: %s: Unexpected x0 value 0x%llx\n", __func__, ret);
+ panic();
+ }
+
+ /* Flag Secure Partition as idle. */
+ assert(sp_ctx->state == SP_STATE_BUSY);
+ sp_state_set(sp_ctx, SP_STATE_IDLE);
+
+ /* Restore non-secure state */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ SMC_RET2(handle, SPCI_SUCCESS, token);
+}
+
+/*******************************************************************************
+ * This function returns the response of a Secure Service given a handle, a
+ * client ID and a token. If not available, it will schedule a Secure Partition
+ * and give it CPU time.
+ ******************************************************************************/
+static uint64_t spci_service_request_resume(void *handle, u_register_t x1,
+ u_register_t x7)
+{
+ int rc;
+ u_register_t rx1 = 0, rx2 = 0, rx3 = 0;
+ spci_handle_t *handle_info;
+ sp_context_t *sp_ctx;
+ cpu_context_t *cpu_ctx;
+ uint32_t token = (uint32_t) x1;
+ uint16_t client_id = x7 & 0x0000FFFF;
+ uint16_t service_handle = (x7 >> 16) & 0x0000FFFF;
+
+ /* Get pointer to struct of this open handle and client ID. */
+ spin_lock(&spci_handles_lock);
+
+ handle_info = spci_handle_info_get(service_handle, client_id);
+ if (handle_info == NULL) {
+ spin_unlock(&spci_handles_lock);
+ WARN("SPCI_SERVICE_REQUEST_RESUME: Not found.\n"
+ "Handle 0x%04x. Client ID 0x%04x, Token 0x%08x.\n",
+		     service_handle, client_id, token);
+
+ SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+ }
+
+ /* Get pointer to the Secure Partition that handles the service */
+ sp_ctx = handle_info->sp_ctx;
+ assert(sp_ctx != NULL);
+ cpu_ctx = &(sp_ctx->cpu_ctx);
+
+ spin_unlock(&spci_handles_lock);
+
+ /* Look for a valid response in the global queue */
+ rc = spm_response_get(client_id, service_handle, token,
+ &rx1, &rx2, &rx3);
+ if (rc == 0) {
+ /* Decrease request count */
+ spin_lock(&spci_handles_lock);
+ handle_info->num_active_requests -= 1;
+ spin_unlock(&spci_handles_lock);
+ spm_sp_request_decrease(sp_ctx);
+
+ SMC_RET4(handle, SPCI_SUCCESS, rx1, rx2, rx3);
+ }
+
+ /* Try to enter the partition. If it's not possible, simply return. */
+ if (sp_state_try_switch(sp_ctx, SP_STATE_IDLE, SP_STATE_BUSY) != 0) {
+ SMC_RET1(handle, SPCI_QUEUED);
+ }
+
+ /* Save the Normal world context */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /*
+ * This request is non-blocking and needs to be interruptible by
+ * non-secure interrupts. Enable their routing to EL3 during the
+ * processing of the Secure Partition's service on this core.
+ */
+
+ /* Jump to the Secure Partition. */
+ uint64_t ret = spm_sp_synchronous_entry(sp_ctx, 1);
+
+ /* Verify returned values */
+ if (ret == SPRT_PUT_RESPONSE_AARCH64) {
+ uint32_t token;
+ uint64_t rx1, rx2, rx3, x6;
+
+ token = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1);
+ rx1 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3);
+ rx2 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4);
+ rx3 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X5);
+ x6 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X6);
+
+ uint16_t client_id = x6 & 0xFFFFU;
+ uint16_t service_handle = x6 >> 16;
+
+ int rc = spm_response_add(client_id, service_handle, token,
+ rx1, rx2, rx3);
+ if (rc != 0) {
+ /*
+			 * This error is fatal because we can't return to the
+			 * SP from this SMC. The SP has crashed.
+ */
+ panic();
+ }
+ } else if ((ret != SPRT_YIELD_AARCH64) &&
+ (ret != SPM_SECURE_PARTITION_PREEMPTED)) {
+ ERROR("SPM: %s: Unexpected x0 value 0x%llx\n", __func__, ret);
+ panic();
+ }
+
+ /* Flag Secure Partition as idle. */
+ assert(sp_ctx->state == SP_STATE_BUSY);
+ sp_state_set(sp_ctx, SP_STATE_IDLE);
+
+ /* Restore non-secure state */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ /* Look for a valid response in the global queue */
+ rc = spm_response_get(client_id, service_handle, token,
+ &rx1, &rx2, &rx3);
+ if (rc != 0) {
+ SMC_RET1(handle, SPCI_QUEUED);
+ }
+
+ /* Decrease request count */
+ spin_lock(&spci_handles_lock);
+ handle_info->num_active_requests -= 1;
+ spin_unlock(&spci_handles_lock);
+ spm_sp_request_decrease(sp_ctx);
+
+ /* Return response */
+ SMC_RET4(handle, SPCI_SUCCESS, rx1, rx2, rx3);
+}
+
+/*******************************************************************************
+ * This function returns the response of a Secure Service given a handle, a
+ * client ID and a token.
+ ******************************************************************************/
+static uint64_t spci_service_get_response(void *handle, u_register_t x1,
+ u_register_t x7)
+
+{
+ int rc;
+ u_register_t rx1 = 0, rx2 = 0, rx3 = 0;
+ spci_handle_t *handle_info;
+ uint32_t token = (uint32_t) x1;
+ uint16_t client_id = x7 & 0x0000FFFF;
+ uint16_t service_handle = (x7 >> 16) & 0x0000FFFF;
+
+ /* Get pointer to struct of this open handle and client ID. */
+
+ spin_lock(&spci_handles_lock);
+
+ handle_info = spci_handle_info_get(service_handle, client_id);
+ if (handle_info == NULL) {
+ spin_unlock(&spci_handles_lock);
+ WARN("SPCI_SERVICE_GET_RESPONSE: Not found.\n"
+ "Handle 0x%04x. Client ID 0x%04x, Token 0x%08x.\n",
+		     service_handle, client_id, token);
+
+ SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+ }
+
+ spin_unlock(&spci_handles_lock);
+
+ /* Look for a valid response in the global queue */
+ rc = spm_response_get(client_id, service_handle, token,
+ &rx1, &rx2, &rx3);
+
+ if (rc != 0) {
+ SMC_RET1(handle, SPCI_QUEUED);
+ }
+
+ /* Decrease request count */
+ spin_lock(&spci_handles_lock);
+ handle_info->num_active_requests -= 1;
+ sp_context_t *sp_ctx;
+ sp_ctx = handle_info->sp_ctx;
+ spin_unlock(&spci_handles_lock);
+ spm_sp_request_decrease(sp_ctx);
+
+ /* Return response */
+ SMC_RET4(handle, SPCI_SUCCESS, rx1, rx2, rx3);
+}
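
Taken together, REQUEST_START, REQUEST_RESUME and GET_RESPONSE give the
Normal world a token-based polling flow. A sketch of a client loop, reusing
the hypothetical smc64() wrapper and illustrative FID names from above
(GET_RESPONSE would be used instead of RESUME to check for a response
without donating CPU time to the partition):

    uint64_t ret[4];
    uint32_t x7 = spci_pack_x7(service_handle, client_id);

    smc64(SPCI_SERVICE_REQUEST_START_FID,
          arg1, arg2, arg3, arg4, arg5, session_id, x7, ret);
    uint32_t token = (uint32_t)ret[1];

    do {
        /* RESUME schedules the SP if the response isn't ready yet. */
        smc64(SPCI_SERVICE_REQUEST_RESUME_FID,
              token, 0, 0, 0, 0, 0, x7, ret);
    } while (ret[0] == SPCI_QUEUED);
    /* ret[1..3] now hold the response for this token. */
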
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for SPCI.
+ ******************************************************************************/
+uint64_t spci_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4, void *cookie, void *handle,
+ uint64_t flags)
+{
+ uint32_t spci_fid;
+
+ /* SPCI only supported from the Non-secure world for now */
+ if (is_caller_non_secure(flags) == SMC_FROM_SECURE) {
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ if ((smc_fid & SPCI_FID_TUN_FLAG) == 0) {
+
+ /* Miscellaneous calls */
+
+ spci_fid = (smc_fid >> SPCI_FID_MISC_SHIFT) & SPCI_FID_MISC_MASK;
+
+ switch (spci_fid) {
+
+ case SPCI_FID_VERSION:
+ SMC_RET1(handle, SPCI_VERSION_COMPILED);
+
+ case SPCI_FID_SERVICE_HANDLE_OPEN:
+ {
+ if ((smc_fid & SPCI_SERVICE_HANDLE_OPEN_NOTIFY_BIT) != 0) {
+ /* Not supported for now */
+ WARN("SPCI_SERVICE_HANDLE_OPEN_NOTIFY not supported.\n");
+ SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+ }
+
+ uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ uint64_t x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+ return spci_service_handle_open_poll(handle, x1, x2, x3,
+ x4, x5, x6, x7);
+ }
+ case SPCI_FID_SERVICE_HANDLE_CLOSE:
+ return spci_service_handle_close(handle, x1);
+
+ case SPCI_FID_SERVICE_REQUEST_BLOCKING:
+ {
+ uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ uint64_t x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+ return spci_service_request_blocking(handle,
+ smc_fid, x1, x2, x3, x4, x5, x6, x7);
+ }
+
+ case SPCI_FID_SERVICE_REQUEST_START:
+ {
+ uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ uint64_t x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+ return spci_service_request_start(handle,
+ smc_fid, x1, x2, x3, x4, x5, x6, x7);
+ }
+
+ case SPCI_FID_SERVICE_GET_RESPONSE:
+ {
+ uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+ return spci_service_get_response(handle, x1, x7);
+ }
+
+ default:
+ break;
+ }
+
+ } else {
+
+ /* Tunneled calls */
+
+ spci_fid = (smc_fid >> SPCI_FID_TUN_SHIFT) & SPCI_FID_TUN_MASK;
+
+ switch (spci_fid) {
+
+ case SPCI_FID_SERVICE_REQUEST_RESUME:
+ {
+ uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+ return spci_service_request_resume(handle, x1, x7);
+ }
+
+ default:
+ break;
+ }
+ }
+
+ WARN("SPCI: Unsupported call 0x%08x\n", smc_fid);
+ SMC_RET1(handle, SPCI_NOT_SUPPORTED);
+}
diff --git a/services/std_svc/spm/spm.mk b/services/std_svc/spm/spm.mk
index 0e770860..4ba9feb3 100644
--- a/services/std_svc/spm/spm.mk
+++ b/services/std_svc/spm/spm.mk
@@ -11,13 +11,23 @@ ifneq (${ARCH},aarch64)
$(error "Error: SPM is only supported on aarch64.")
endif
+include lib/sprt/sprt_host.mk
+
SPM_SOURCES := $(addprefix services/std_svc/spm/, \
${ARCH}/spm_helpers.S \
${ARCH}/spm_shim_exceptions.S \
+ spci.c \
+ spm_buffers.c \
spm_main.c \
- sp_setup.c \
- sp_xlat.c)
+ spm_setup.c \
+ spm_xlat.c \
+ sprt.c) \
+ ${SPRT_LIB_SOURCES}
+
+INCLUDES += ${SPRT_LIB_INCLUDES}
+# Force SMC Calling Convention 2 when using SPM
+SMCCC_MAJOR_VERSION := 2
# Let the top-level Makefile know that we intend to include a BL32 image
NEED_BL32 := yes
diff --git a/services/std_svc/spm/spm_buffers.c b/services/std_svc/spm/spm_buffers.c
new file mode 100644
index 00000000..747337af
--- /dev/null
+++ b/services/std_svc/spm/spm_buffers.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <platform_def.h>
+#include <spinlock.h>
+#include <utils_def.h>
+
+/*******************************************************************************
+ * Secure Service response global array. All the responses to the requests done
+ * to the Secure Partition are stored here. They are removed from the array as
+ * soon as their value is read.
+ ******************************************************************************/
+struct sprt_response {
+ int is_valid;
+ uint32_t token;
+ uint16_t client_id, handle;
+ u_register_t x1, x2, x3;
+};
+
+static struct sprt_response responses[PLAT_SPM_RESPONSES_MAX];
+
+static spinlock_t responses_lock;
+
+/* Add response to the global response buffer. Returns 0 on success else -1. */
+int spm_response_add(uint16_t client_id, uint16_t handle, uint32_t token,
+ u_register_t x1, u_register_t x2, u_register_t x3)
+{
+ spin_lock(&responses_lock);
+
+ /* Make sure that there isn't any other response with the same token. */
+ for (unsigned int i = 0U; i < ARRAY_SIZE(responses); i++) {
+ struct sprt_response *resp = &(responses[i]);
+
+		if ((resp->is_valid == 1) && (resp->token == token)) {
+			spin_unlock(&responses_lock);
+			return -1;
+		}
+ }
+
+	for (unsigned int i = 0U; i < ARRAY_SIZE(responses); i++) {
+ struct sprt_response *resp = &(responses[i]);
+
+ if (resp->is_valid == 0) {
+ resp->token = token;
+ resp->client_id = client_id;
+ resp->handle = handle;
+ resp->x1 = x1;
+ resp->x2 = x2;
+ resp->x3 = x3;
+
+ dmbish();
+
+ resp->is_valid = 1;
+
+ spin_unlock(&responses_lock);
+
+ return 0;
+ }
+ }
+
+ spin_unlock(&responses_lock);
+
+ return -1;
+}
+
+/*
+ * Returns a response from the requests array and removes it from it. Returns 0
+ * on success, -1 if it wasn't found.
+ */
+int spm_response_get(uint16_t client_id, uint16_t handle, uint32_t token,
+ u_register_t *x1, u_register_t *x2, u_register_t *x3)
+{
+ spin_lock(&responses_lock);
+
+ for (unsigned int i = 0U; i < ARRAY_SIZE(responses); i++) {
+ struct sprt_response *resp = &(responses[i]);
+
+ /* Ignore invalid entries */
+ if (resp->is_valid == 0) {
+ continue;
+ }
+
+ /* Make sure that all the information matches the stored one */
+ if ((resp->token != token) || (resp->client_id != client_id) ||
+ (resp->handle != handle)) {
+ continue;
+ }
+
+ *x1 = resp->x1;
+ *x2 = resp->x2;
+ *x3 = resp->x3;
+
+ dmbish();
+
+ resp->is_valid = 0;
+
+ spin_unlock(&responses_lock);
+
+ return 0;
+ }
+
+ spin_unlock(&responses_lock);
+
+ return -1;
+}
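
The two functions above implement a small mailbox keyed by (client_id,
handle, token): spm_response_add() is called when a partition puts a response
(see spci.c) and spm_response_get() when the Normal world polls for it. A
usage sketch with illustrative values:

    u_register_t x1, x2, x3;

    /* Store a response for client 0x10, handle 0x20, token 7. */
    if (spm_response_add(0x10, 0x20, 7U, 100U, 200U, 300U) != 0) {
        /* Array is full, or a response with this token already exists. */
    }

    /* Retrieve and free the entry; x1/x2/x3 become 100/200/300. */
    if (spm_response_get(0x10, 0x20, 7U, &x1, &x2, &x3) != 0) {
        /* No matching response queued. */
    }
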
diff --git a/services/std_svc/spm/spm_main.c b/services/std_svc/spm/spm_main.c
index 880e86e4..460d1fb3 100644
--- a/services/std_svc/spm/spm_main.c
+++ b/services/std_svc/spm/spm_main.c
@@ -11,14 +11,14 @@
#include <debug.h>
#include <ehf.h>
#include <errno.h>
-#include <mm_svc.h>
+#include <interrupt_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
-#include <secure_partition.h>
#include <smccc.h>
#include <smccc_helpers.h>
#include <spinlock.h>
-#include <spm_svc.h>
+#include <sprt_svc.h>
+#include <string.h>
#include <utils.h>
#include <xlat_tables_v2.h>
@@ -27,7 +27,89 @@
/*******************************************************************************
* Secure Partition context information.
******************************************************************************/
-static sp_context_t sp_ctx;
+sp_context_t sp_ctx_array[PLAT_SPM_MAX_PARTITIONS];
+
+/* Last Secure Partition used by each CPU */
+sp_context_t *cpu_sp_ctx[PLATFORM_CORE_COUNT];
+
+void spm_cpu_set_sp_ctx(unsigned int linear_id, sp_context_t *sp_ctx)
+{
+ assert(linear_id < PLATFORM_CORE_COUNT);
+
+ cpu_sp_ctx[linear_id] = sp_ctx;
+}
+
+sp_context_t *spm_cpu_get_sp_ctx(unsigned int linear_id)
+{
+ assert(linear_id < PLATFORM_CORE_COUNT);
+
+ return cpu_sp_ctx[linear_id];
+}
+
+/*******************************************************************************
+ * Functions to keep track of how many requests a Secure Partition has received
+ * and hasn't finished.
+ ******************************************************************************/
+void spm_sp_request_increase(sp_context_t *sp_ctx)
+{
+ spin_lock(&(sp_ctx->request_count_lock));
+ sp_ctx->request_count++;
+ spin_unlock(&(sp_ctx->request_count_lock));
+}
+
+void spm_sp_request_decrease(sp_context_t *sp_ctx)
+{
+ spin_lock(&(sp_ctx->request_count_lock));
+ sp_ctx->request_count--;
+ spin_unlock(&(sp_ctx->request_count_lock));
+}
+
+/* Returns 0 if it was originally 0, -1 otherwise. */
+int spm_sp_request_increase_if_zero(sp_context_t *sp_ctx)
+{
+ int ret = -1;
+
+ spin_lock(&(sp_ctx->request_count_lock));
+ if (sp_ctx->request_count == 0U) {
+ sp_ctx->request_count++;
+		ret = 0;
+ }
+ spin_unlock(&(sp_ctx->request_count_lock));
+
+ return ret;
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the context of the Secure Partition that
+ * handles the service specified by a UUID. It returns NULL if the UUID wasn't
+ * found.
+ ******************************************************************************/
+sp_context_t *spm_sp_get_by_uuid(const uint32_t (*svc_uuid)[4])
+{
+ unsigned int i;
+
+ for (i = 0U; i < PLAT_SPM_MAX_PARTITIONS; i++) {
+
+ sp_context_t *sp_ctx = &sp_ctx_array[i];
+
+ if (sp_ctx->is_present == 0) {
+ continue;
+ }
+
+ struct sp_rd_sect_service *rdsvc;
+
+ for (rdsvc = sp_ctx->rd.service; rdsvc != NULL;
+ rdsvc = rdsvc->next) {
+ uint32_t *rd_uuid = (uint32_t *)(rdsvc->uuid);
+
+			/* Compare the full 16-byte UUID, not the pointer. */
+			if (memcmp(rd_uuid, svc_uuid, sizeof(*svc_uuid)) == 0) {
+ return sp_ctx;
+ }
+ }
+ }
+
+ return NULL;
+}
/*******************************************************************************
* Set state of a Secure Partition context.
@@ -85,13 +167,15 @@ int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
* This function takes an SP context pointer and performs a synchronous entry
* into it.
******************************************************************************/
-static uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx)
+uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx, int can_preempt)
{
uint64_t rc;
+ unsigned int linear_id = plat_my_core_pos();
assert(sp_ctx != NULL);
/* Assign the context of the SP to this CPU */
+ spm_cpu_set_sp_ctx(linear_id, sp_ctx);
cm_set_context(&(sp_ctx->cpu_ctx), SECURE);
/* Restore the context assigned above */
@@ -102,6 +186,12 @@ static uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx)
tlbivmalle1();
dsbish();
+ if (can_preempt == 1) {
+ enable_intr_rm_local(INTR_TYPE_NS, SECURE);
+ } else {
+ disable_intr_rm_local(INTR_TYPE_NS, SECURE);
+ }
+
/* Enter Secure Partition */
rc = spm_secure_partition_enter(&sp_ctx->c_rt_ctx);
@@ -115,9 +205,11 @@ static uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx)
* This function returns to the place where spm_sp_synchronous_entry() was
* called originally.
******************************************************************************/
-__dead2 static void spm_sp_synchronous_exit(uint64_t rc)
+__dead2 void spm_sp_synchronous_exit(uint64_t rc)
{
- sp_context_t *ctx = &sp_ctx;
+ /* Get context of the SP in use by this CPU. */
+ unsigned int linear_id = plat_my_core_pos();
+ sp_context_t *ctx = spm_cpu_get_sp_ctx(linear_id);
/*
* The SPM must have initiated the original request through a
@@ -130,224 +222,136 @@ __dead2 static void spm_sp_synchronous_exit(uint64_t rc)
}
/*******************************************************************************
- * Jump to each Secure Partition for the first time.
+ * This function is the handler registered for Non-secure interrupts by the
+ * SPM.
+ * It validates the interrupt and upon success arranges entry into the normal
+ * world for handling the interrupt.
******************************************************************************/
-static int32_t spm_init(void)
+static uint64_t spm_ns_interrupt_handler(uint32_t id, uint32_t flags,
+ void *handle, void *cookie)
{
- uint64_t rc;
- sp_context_t *ctx;
-
- INFO("Secure Partition init...\n");
-
- ctx = &sp_ctx;
-
- ctx->state = SP_STATE_RESET;
-
- rc = spm_sp_synchronous_entry(ctx);
- assert(rc == 0);
-
- ctx->state = SP_STATE_IDLE;
+ /* Check the security state when the exception was generated */
+ assert(get_interrupt_src_ss(flags) == SECURE);
- INFO("Secure Partition initialized.\n");
-
- return rc;
+ spm_sp_synchronous_exit(SPM_SECURE_PARTITION_PREEMPTED);
}
/*******************************************************************************
- * Initialize contexts of all Secure Partitions.
+ * Jump to each Secure Partition for the first time.
******************************************************************************/
-int32_t spm_setup(void)
+static int32_t spm_init(void)
{
+ uint64_t rc = 0;
sp_context_t *ctx;
- /* Disable MMU at EL1 (initialized by BL2) */
- disable_mmu_icache_el1();
-
- /* Initialize context of the SP */
- INFO("Secure Partition context setup start...\n");
-
- ctx = &sp_ctx;
-
- /* Assign translation tables context. */
- ctx->xlat_ctx_handle = spm_get_sp_xlat_context();
-
- spm_sp_setup(ctx);
-
- /* Register init function for deferred init. */
- bl31_register_bl32_init(&spm_init);
+ for (unsigned int i = 0U; i < PLAT_SPM_MAX_PARTITIONS; i++) {
- INFO("Secure Partition setup done.\n");
+ ctx = &sp_ctx_array[i];
- return 0;
-}
-
-/*******************************************************************************
- * Function to perform a call to a Secure Partition.
- ******************************************************************************/
-uint64_t spm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3)
-{
- uint64_t rc;
- sp_context_t *sp_ptr = &sp_ctx;
+ if (ctx->is_present == 0) {
+ continue;
+ }
- /* Wait until the Secure Partition is idle and set it to busy. */
- sp_state_wait_switch(sp_ptr, SP_STATE_IDLE, SP_STATE_BUSY);
+ INFO("Secure Partition %u init...\n", i);
- /* Set values for registers on SP entry */
- cpu_context_t *cpu_ctx = &(sp_ptr->cpu_ctx);
+ ctx->state = SP_STATE_RESET;
- write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X0, smc_fid);
- write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1, x1);
- write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X2, x2);
- write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3, x3);
+ rc = spm_sp_synchronous_entry(ctx, 0);
+ if (rc != SPRT_YIELD_AARCH64) {
+ ERROR("Unexpected return value 0x%llx\n", rc);
+ panic();
+ }
- /* Jump to the Secure Partition. */
- rc = spm_sp_synchronous_entry(sp_ptr);
+ ctx->state = SP_STATE_IDLE;
- /* Flag Secure Partition as idle. */
- assert(sp_ptr->state == SP_STATE_BUSY);
- sp_state_set(sp_ptr, SP_STATE_IDLE);
+ INFO("Secure Partition %u initialized.\n", i);
+ }
return rc;
}
/*******************************************************************************
- * MM_COMMUNICATE handler
+ * Initialize contexts of all Secure Partitions.
******************************************************************************/
-static uint64_t mm_communicate(uint32_t smc_fid, uint64_t mm_cookie,
- uint64_t comm_buffer_address,
- uint64_t comm_size_address, void *handle)
+int32_t spm_setup(void)
{
- uint64_t rc;
-
- /* Cookie. Reserved for future use. It must be zero. */
- if (mm_cookie != 0U) {
- ERROR("MM_COMMUNICATE: cookie is not zero\n");
- SMC_RET1(handle, SPM_INVALID_PARAMETER);
- }
-
- if (comm_buffer_address == 0U) {
- ERROR("MM_COMMUNICATE: comm_buffer_address is zero\n");
- SMC_RET1(handle, SPM_INVALID_PARAMETER);
- }
+ int rc;
+ sp_context_t *ctx;
+ void *sp_base, *rd_base;
+ size_t sp_size, rd_size;
+ uint64_t flags = 0U;
- if (comm_size_address != 0U) {
- VERBOSE("MM_COMMUNICATE: comm_size_address is not 0 as recommended.\n");
- }
+ /* Disable MMU at EL1 (initialized by BL2) */
+ disable_mmu_icache_el1();
/*
- * The current secure partition design mandates
- * - at any point, only a single core can be
- * executing in the secure partiton.
- * - a core cannot be preempted by an interrupt
- * while executing in secure partition.
- * Raise the running priority of the core to the
- * interrupt level configured for secure partition
- * so as to block any interrupt from preempting this
- * core.
+ * Non-blocking services can be interrupted by Non-secure interrupts.
+ * Register an interrupt handler for NS interrupts when generated while
+ * the CPU is in secure state. They are routed to EL3.
*/
- ehf_activate_priority(PLAT_SP_PRI);
-
- /* Save the Normal world context */
- cm_el1_sysregs_context_save(NON_SECURE);
-
- rc = spm_sp_call(smc_fid, comm_buffer_address, comm_size_address,
- plat_my_core_pos());
-
- /* Restore non-secure state */
- cm_el1_sysregs_context_restore(NON_SECURE);
- cm_set_next_eret_context(NON_SECURE);
+ set_interrupt_rm_flag(flags, SECURE);
+
+ uint64_t rc_int = register_interrupt_type_handler(INTR_TYPE_NS,
+ spm_ns_interrupt_handler, flags);
+ if (rc_int) {
+ ERROR("SPM: Failed to register NS interrupt handler with rc = %llx\n",
+ rc_int);
+ panic();
+ }
/*
- * Exited from secure partition. This core can take
- * interrupts now.
+ * Setup all Secure Partitions.
*/
- ehf_deactivate_priority(PLAT_SP_PRI);
-
- SMC_RET1(handle, rc);
-}
+ unsigned int i = 0U;
-/*******************************************************************************
- * Secure Partition Manager SMC handler.
- ******************************************************************************/
-uint64_t spm_smc_handler(uint32_t smc_fid,
- uint64_t x1,
- uint64_t x2,
- uint64_t x3,
- uint64_t x4,
- void *cookie,
- void *handle,
- uint64_t flags)
-{
- unsigned int ns;
-
- /* Determine which security state this SMC originated from */
- ns = is_caller_non_secure(flags);
-
- if (ns == SMC_FROM_SECURE) {
-
- /* Handle SMCs from Secure world. */
-
- assert(handle == cm_get_context(SECURE));
-
- /* Make next ERET jump to S-EL0 instead of S-EL1. */
- cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
+ while (1) {
+ rc = plat_spm_sp_get_next_address(&sp_base, &sp_size,
+ &rd_base, &rd_size);
+ if (rc < 0) {
+ /* Reached the end of the package. */
+ break;
+ }
- switch (smc_fid) {
+ if (i >= PLAT_SPM_MAX_PARTITIONS) {
+ ERROR("Too many partitions in the package.\n");
+ panic();
+ }
- case SPM_VERSION_AARCH32:
- SMC_RET1(handle, SPM_VERSION_COMPILED);
+ ctx = &sp_ctx_array[i];
- case SP_EVENT_COMPLETE_AARCH64:
- spm_sp_synchronous_exit(x1);
+ assert(ctx->is_present == 0);
- case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
- INFO("Received SP_MEMORY_ATTRIBUTES_GET_AARCH64 SMC\n");
+ /* Initialize context of the SP */
+ INFO("Secure Partition %u context setup start...\n", i);
- if (sp_ctx.state != SP_STATE_RESET) {
- WARN("SP_MEMORY_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
- SMC_RET1(handle, SPM_NOT_SUPPORTED);
- }
- SMC_RET1(handle,
- spm_memory_attributes_get_smc_handler(
- &sp_ctx, x1));
+ /* Assign translation tables context. */
+ ctx->xlat_ctx_handle = spm_sp_xlat_context_alloc();
- case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
- INFO("Received SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC\n");
+ /* Save location of the image in physical memory */
+ ctx->image_base = (uintptr_t)sp_base;
+ ctx->image_size = sp_size;
- if (sp_ctx.state != SP_STATE_RESET) {
- WARN("SP_MEMORY_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
- SMC_RET1(handle, SPM_NOT_SUPPORTED);
- }
- SMC_RET1(handle,
- spm_memory_attributes_set_smc_handler(
- &sp_ctx, x1, x2, x3));
- default:
- break;
+ rc = plat_spm_sp_rd_load(&ctx->rd, rd_base, rd_size);
+ if (rc < 0) {
+ ERROR("Error while loading RD blob.\n");
+ panic();
}
- } else {
-
- /* Handle SMCs from Non-secure world. */
-
- assert(handle == cm_get_context(NON_SECURE));
- switch (smc_fid) {
+ spm_sp_setup(ctx);
- case MM_VERSION_AARCH32:
- SMC_RET1(handle, MM_VERSION_COMPILED);
+ ctx->is_present = 1;
- case MM_COMMUNICATE_AARCH32:
- case MM_COMMUNICATE_AARCH64:
- return mm_communicate(smc_fid, x1, x2, x3, handle);
+ INFO("Secure Partition %u setup done.\n", i);
- case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
- case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
- /* SMC interfaces reserved for secure callers. */
- SMC_RET1(handle, SPM_NOT_SUPPORTED);
+ i++;
+ }
- default:
- break;
- }
+ if (i == 0U) {
+		ERROR("No partitions found in the package.\n");
+ panic();
}
- SMC_RET1(handle, SMC_UNK);
+ /* Register init function for deferred init. */
+ bl31_register_bl32_init(&spm_init);
+
+ return 0;
}
diff --git a/services/std_svc/spm/spm_private.h b/services/std_svc/spm/spm_private.h
index ec3f48ea..c1aad933 100644
--- a/services/std_svc/spm/spm_private.h
+++ b/services/std_svc/spm/spm_private.h
@@ -29,9 +29,13 @@
#define SP_C_RT_CTX_SIZE 0x60
#define SP_C_RT_CTX_ENTRIES (SP_C_RT_CTX_SIZE >> DWORD_SHIFT)
+/* Value returned by spm_sp_synchronous_entry() when a partition is preempted */
+#define SPM_SECURE_PARTITION_PREEMPTED U(0x1234)
+
#ifndef __ASSEMBLY__
#include <spinlock.h>
+#include <sp_res_desc.h>
#include <stdint.h>
#include <xlat_tables_v2.h>
@@ -42,28 +46,68 @@ typedef enum sp_state {
} sp_state_t;
typedef struct sp_context {
+ /* 1 if the partition is present, 0 otherwise */
+ int is_present;
+
+ /* Location of the image in physical memory */
+ unsigned long long image_base;
+ size_t image_size;
+
uint64_t c_rt_ctx;
cpu_context_t cpu_ctx;
+ struct sp_res_desc rd;
+
+ /* Translation tables context */
xlat_ctx_t *xlat_ctx_handle;
+ spinlock_t xlat_ctx_lock;
sp_state_t state;
spinlock_t state_lock;
+
+ unsigned int request_count;
+ spinlock_t request_count_lock;
+
+ /* Base and size of the shared SPM<->SP buffer */
+ uintptr_t spm_sp_buffer_base;
+ size_t spm_sp_buffer_size;
+ spinlock_t spm_sp_buffer_lock;
} sp_context_t;
+/* Functions used to enter/exit a Secure Partition synchronously */
+uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx, int can_preempt);
+__dead2 void spm_sp_synchronous_exit(uint64_t rc);
+
/* Assembly helpers */
uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
+/* Secure Partition setup */
void spm_sp_setup(sp_context_t *sp_ctx);
-xlat_ctx_t *spm_get_sp_xlat_context(void);
+/* Secure Partition state management helpers */
+void sp_state_set(sp_context_t *sp_ptr, sp_state_t state);
+void sp_state_wait_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to);
+int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to);
+
+/* Functions to keep track of the number of active requests per SP */
+void spm_sp_request_increase(sp_context_t *sp_ctx);
+void spm_sp_request_decrease(sp_context_t *sp_ctx);
+int spm_sp_request_increase_if_zero(sp_context_t *sp_ctx);
+
+/* Functions related to the translation tables management */
+xlat_ctx_t *spm_sp_xlat_context_alloc(void);
+void sp_map_memory_regions(sp_context_t *sp_ctx);
+
+/* Functions to handle Secure Partition contexts */
+void spm_cpu_set_sp_ctx(unsigned int linear_id, sp_context_t *sp_ctx);
+sp_context_t *spm_cpu_get_sp_ctx(unsigned int linear_id);
+sp_context_t *spm_sp_get_by_uuid(const uint32_t (*svc_uuid)[4]);
-int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
- uintptr_t base_va);
-int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
- u_register_t page_address,
- u_register_t pages_count,
- u_register_t smc_attributes);
+/* Functions to manipulate response and requests buffers */
+int spm_response_add(uint16_t client_id, uint16_t handle, uint32_t token,
+ u_register_t x1, u_register_t x2, u_register_t x3);
+int spm_response_get(uint16_t client_id, uint16_t handle, uint32_t token,
+ u_register_t *x1, u_register_t *x2, u_register_t *x3);
#endif /* __ASSEMBLY__ */
diff --git a/services/std_svc/spm/spm_setup.c b/services/std_svc/spm/spm_setup.c
new file mode 100644
index 00000000..aca779f5
--- /dev/null
+++ b/services/std_svc/spm/spm_setup.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common_def.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <platform.h>
+#include <sp_res_desc.h>
+#include <sprt_host.h>
+#include <string.h>
+#include <xlat_tables_v2.h>
+
+#include "spm_private.h"
+#include "spm_shim_private.h"
+
+/* Setup context of the Secure Partition */
+void spm_sp_setup(sp_context_t *sp_ctx)
+{
+ cpu_context_t *ctx = &(sp_ctx->cpu_ctx);
+
+ /*
+ * Initialize CPU context
+ * ----------------------
+ */
+
+ entry_point_info_t ep_info = {0};
+
+ SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
+
+ /* Setup entrypoint and SPSR */
+ ep_info.pc = sp_ctx->rd.attribute.entrypoint;
+ ep_info.spsr = SPSR_64(MODE_EL0, MODE_SP_EL0, DISABLE_ALL_EXCEPTIONS);
+
+ /*
+ * X0: Unused (MBZ).
+ * X1: Unused (MBZ).
+ * X2: cookie value (Implementation Defined)
+ * X3: cookie value (Implementation Defined)
+ * X4 to X7 = 0
+ */
+ ep_info.args.arg0 = 0;
+ ep_info.args.arg1 = 0;
+ ep_info.args.arg2 = PLAT_SPM_COOKIE_0;
+ ep_info.args.arg3 = PLAT_SPM_COOKIE_1;
+
+ cm_setup_context(ctx, &ep_info);
+
+ /*
+ * Setup translation tables
+ * ------------------------
+ */
+
+ sp_map_memory_regions(sp_ctx);
+
+ /*
+ * MMU-related registers
+ * ---------------------
+ */
+ xlat_ctx_t *xlat_ctx = sp_ctx->xlat_ctx_handle;
+
+ uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, 0, xlat_ctx->base_table,
+ xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
+ EL1_EL0_REGIME);
+
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1,
+ mmu_cfg_params[MMU_CFG_MAIR]);
+
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1,
+ mmu_cfg_params[MMU_CFG_TCR]);
+
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
+ mmu_cfg_params[MMU_CFG_TTBR0]);
+
+ /* Setup SCTLR_EL1 */
+ u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);
+
+ sctlr_el1 |=
+ /*SCTLR_EL1_RES1 |*/
+ /* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
+ SCTLR_UCI_BIT |
+ /* RW regions at xlat regime EL1&0 are forced to be XN. */
+ SCTLR_WXN_BIT |
+ /* Don't trap to EL1 execution of WFI or WFE at EL0. */
+ SCTLR_NTWI_BIT | SCTLR_NTWE_BIT |
+ /* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
+ SCTLR_UCT_BIT |
+		/* Don't trap to EL1 execution of DC ZVA at EL0. */
+ SCTLR_DZE_BIT |
+ /* Enable SP Alignment check for EL0 */
+ SCTLR_SA0_BIT |
+ /* Allow cacheable data and instr. accesses to normal memory. */
+ SCTLR_C_BIT | SCTLR_I_BIT |
+ /* Alignment fault checking enabled when at EL1 and EL0. */
+ SCTLR_A_BIT |
+ /* Enable MMU. */
+ SCTLR_M_BIT
+ ;
+
+ sctlr_el1 &= ~(
+ /* Explicit data accesses at EL0 are little-endian. */
+ SCTLR_E0E_BIT |
+ /* Accesses to DAIF from EL0 are trapped to EL1. */
+ SCTLR_UMA_BIT
+ );
+
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
+
+ /*
+ * Setup other system registers
+ * ----------------------------
+ */
+
+ /* Shim Exception Vector Base Address */
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
+ SPM_SHIM_EXCEPTIONS_PTR);
+
+ /*
+ * FPEN: Allow the Secure Partition to access FP/SIMD registers.
+ * Note that SPM will not do any saving/restoring of these registers on
+ * behalf of the SP. This falls under the SP's responsibility.
+ * TTA: Enable access to trace registers.
+ * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
+ */
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
+ CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
+
+ /*
+ * Prepare shared buffers
+ * ----------------------
+ */
+
+ /* Initialize SPRT queues */
+ sprt_initialize_queues((void *)sp_ctx->spm_sp_buffer_base,
+ sp_ctx->spm_sp_buffer_size);
+}
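
Based on the context initialized above, the Secure Partition's first entry
effectively has the following S-EL0 view (a sketch; the entrypoint symbol is
illustrative):

    void sp_entrypoint(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3)
    {
        /* x0 == 0 and x1 == 0 (MBZ). */
        /* x2 == PLAT_SPM_COOKIE_0 and x3 == PLAT_SPM_COOKIE_1. */
        /* MMU and caches are enabled (SCTLR_EL1.M/C/I set above). */
    }
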
diff --git a/services/std_svc/spm/spm_xlat.c b/services/std_svc/spm/spm_xlat.c
new file mode 100644
index 00000000..bbe392dd
--- /dev/null
+++ b/services/std_svc/spm/spm_xlat.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <errno.h>
+#include <object_pool.h>
+#include <platform_def.h>
+#include <platform.h>
+#include <sp_res_desc.h>
+#include <string.h>
+#include <utils.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+
+#include "spm_private.h"
+#include "spm_shim_private.h"
+
+/*******************************************************************************
+ * Instantiation of translation table context
+ ******************************************************************************/
+
+/* Place translation tables by default along with the ones used by BL31. */
+#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
+#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
+#endif
+
+/*
+ * Allocate elements of the translation contexts for the Secure Partitions.
+ */
+
+/* Allocate an array of mmap_region per partition. */
+static struct mmap_region sp_mmap_regions[PLAT_SP_IMAGE_MMAP_REGIONS + 1]
+ [PLAT_SPM_MAX_PARTITIONS];
+static OBJECT_POOL(sp_mmap_regions_pool, sp_mmap_regions,
+ sizeof(mmap_region_t) * (PLAT_SP_IMAGE_MMAP_REGIONS + 1),
+ PLAT_SPM_MAX_PARTITIONS);
+
+/* Allocate individual translation tables. */
+static uint64_t sp_xlat_tables[XLAT_TABLE_ENTRIES]
+ [(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS]
+ __aligned(XLAT_TABLE_SIZE) __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
+static OBJECT_POOL(sp_xlat_tables_pool, sp_xlat_tables,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t),
+ (PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS);
+
+/* Allocate base translation tables. */
+static uint64_t sp_xlat_base_tables
+ [GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)]
+ [PLAT_SPM_MAX_PARTITIONS]
+ __aligned(GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
+ * sizeof(uint64_t))
+ __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
+static OBJECT_POOL(sp_xlat_base_tables_pool, sp_xlat_base_tables,
+ GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE) * sizeof(uint64_t),
+ PLAT_SPM_MAX_PARTITIONS);
+
+/* Allocate arrays. */
+static int sp_xlat_mapped_regions[PLAT_SP_IMAGE_MAX_XLAT_TABLES]
+ [PLAT_SPM_MAX_PARTITIONS];
+static OBJECT_POOL(sp_xlat_mapped_regions_pool, sp_xlat_mapped_regions,
+ sizeof(int) * PLAT_SP_IMAGE_MAX_XLAT_TABLES, PLAT_SPM_MAX_PARTITIONS);
+
+/* Allocate individual contexts. */
+static xlat_ctx_t sp_xlat_ctx[PLAT_SPM_MAX_PARTITIONS];
+static OBJECT_POOL(sp_xlat_ctx_pool, sp_xlat_ctx, sizeof(xlat_ctx_t),
+ PLAT_SPM_MAX_PARTITIONS);
+
+/* Get handle of Secure Partition translation context */
+xlat_ctx_t *spm_sp_xlat_context_alloc(void)
+{
+ xlat_ctx_t *ctx = pool_alloc(&sp_xlat_ctx_pool);
+
+ struct mmap_region *mmap = pool_alloc(&sp_mmap_regions_pool);
+
+ uint64_t *base_table = pool_alloc(&sp_xlat_base_tables_pool);
+ uint64_t **tables = pool_alloc_n(&sp_xlat_tables_pool,
+ PLAT_SP_IMAGE_MAX_XLAT_TABLES);
+
+ int *mapped_regions = pool_alloc(&sp_xlat_mapped_regions_pool);
+
+ xlat_setup_dynamic_ctx(ctx, PLAT_PHY_ADDR_SPACE_SIZE - 1,
+ PLAT_VIRT_ADDR_SPACE_SIZE - 1, mmap,
+ PLAT_SP_IMAGE_MMAP_REGIONS, tables,
+ PLAT_SP_IMAGE_MAX_XLAT_TABLES, base_table,
+ EL1_EL0_REGIME, mapped_regions);
+
+ return ctx;
+}
+
+/*******************************************************************************
+ * Functions to allocate memory for regions.
+ ******************************************************************************/
+
+/*
+ * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
+ * reserved for SPM to use as heap to allocate memory regions of Secure
+ * Partitions. This is only done at boot.
+ */
+static OBJECT_POOL(spm_heap_mem, (void *)PLAT_SPM_HEAP_BASE, 1U,
+ PLAT_SPM_HEAP_SIZE);
+
+static uintptr_t spm_alloc_heap(size_t size)
+{
+ return (uintptr_t)pool_alloc_n(&spm_heap_mem, size);
+}
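
Declaring the pool with an object size of one byte turns pool_alloc_n() into
a simple bump allocator over the heap region: each call advances the pool
cursor by 'size' bytes and panics if the heap is exhausted. A sketch,
assuming an empty pool starting at PLAT_SPM_HEAP_BASE:

    uintptr_t a = spm_alloc_heap(0x1000); /* == PLAT_SPM_HEAP_BASE */
    uintptr_t b = spm_alloc_heap(0x2000); /* == PLAT_SPM_HEAP_BASE + 0x1000 */
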
+
+/*******************************************************************************
+ * Functions to map memory regions described in the resource description.
+ ******************************************************************************/
+static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr)
+{
+ unsigned int index = attr & RD_MEM_MASK;
+
+ const unsigned int mmap_attr_arr[8] = {
+ MT_DEVICE | MT_RW | MT_SECURE, /* RD_MEM_DEVICE */
+ MT_CODE | MT_SECURE, /* RD_MEM_NORMAL_CODE */
+ MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_DATA */
+ MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_BSS */
+ MT_RO_DATA | MT_SECURE, /* RD_MEM_NORMAL_RODATA */
+ MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
+ MT_MEMORY | MT_RW | MT_SECURE, /* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
+ MT_MEMORY | MT_RW | MT_SECURE /* RD_MEM_NORMAL_MISCELLANEOUS */
+ };
+
+ if (index >= ARRAY_SIZE(mmap_attr_arr)) {
+ ERROR("Unsupported RD memory attributes 0x%x\n", attr);
+ panic();
+ }
+
+ return mmap_attr_arr[index];
+}
+
+/*
+ * The data provided in the resource description structure is not directly
+ * compatible with a mmap_region structure. This function handles the conversion
+ * and maps it.
+ */
+static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
+{
+ int rc;
+ mmap_region_t mmap;
+
+ /* Location of the SP image */
+ uintptr_t sp_size = sp_ctx->image_size;
+ uintptr_t sp_base_va = sp_ctx->rd.attribute.load_address;
+ unsigned long long sp_base_pa = sp_ctx->image_base;
+
+ /* Location of the memory region to map */
+ size_t rd_size = rdmem->size;
+ uintptr_t rd_base_va = rdmem->base;
+ unsigned long long rd_base_pa;
+
+ unsigned int memtype = rdmem->attr & RD_MEM_MASK;
+
+ VERBOSE("Adding memory region '%s'\n", rdmem->name);
+
+ mmap.granularity = REGION_DEFAULT_GRANULARITY;
+
+ /* Check if the RD region is inside of the SP image or not */
+ int is_outside = (rd_base_va + rd_size <= sp_base_va) ||
+ (sp_base_va + sp_size <= rd_base_va);
+
+ /* Set to 1 if it is needed to zero this region */
+ int zero_region = 0;
+
+ switch (memtype) {
+ case RD_MEM_DEVICE:
+ /* Device regions are mapped 1:1 */
+ rd_base_pa = rd_base_va;
+ break;
+
+ case RD_MEM_NORMAL_CODE:
+ case RD_MEM_NORMAL_RODATA:
+ {
+ if (is_outside == 1) {
+			ERROR("Code and rodata sections must be fully contained in the image.\n");
+ panic();
+ }
+
+ /* Get offset into the image */
+ rd_base_pa = sp_base_pa + rd_base_va - sp_base_va;
+ break;
+ }
+ case RD_MEM_NORMAL_DATA:
+ {
+ if (is_outside == 1) {
+			ERROR("Data sections must be fully contained in the image.\n");
+ panic();
+ }
+
+ rd_base_pa = spm_alloc_heap(rd_size);
+
+ /* Get offset into the image */
+ void *img_pa = (void *)(sp_base_pa + rd_base_va - sp_base_va);
+
+ VERBOSE(" Copying data from %p to 0x%llx\n", img_pa, rd_base_pa);
+
+ /* Map destination */
+ rc = mmap_add_dynamic_region(rd_base_pa, rd_base_pa,
+ rd_size, MT_MEMORY | MT_RW | MT_SECURE);
+ if (rc != 0) {
+ ERROR("Unable to map data region at EL3: %d\n", rc);
+ panic();
+ }
+
+ /* Copy original data to destination */
+ memcpy((void *)rd_base_pa, img_pa, rd_size);
+
+ /* Unmap destination region */
+ rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
+ if (rc != 0) {
+ ERROR("Unable to remove data region at EL3: %d\n", rc);
+ panic();
+ }
+
+ break;
+ }
+ case RD_MEM_NORMAL_MISCELLANEOUS:
+ /* Allow SPM to change the attributes of the region. */
+ mmap.granularity = PAGE_SIZE;
+ rd_base_pa = spm_alloc_heap(rd_size);
+ zero_region = 1;
+ break;
+
+ case RD_MEM_NORMAL_SPM_SP_SHARED_MEM:
+ if ((sp_ctx->spm_sp_buffer_base != 0) ||
+ (sp_ctx->spm_sp_buffer_size != 0)) {
+ ERROR("A partition must have only one SPM<->SP buffer.\n");
+ panic();
+ }
+ rd_base_pa = spm_alloc_heap(rd_size);
+ zero_region = 1;
+ /* Save location of this buffer, it is needed by SPM */
+ sp_ctx->spm_sp_buffer_base = rd_base_pa;
+ sp_ctx->spm_sp_buffer_size = rd_size;
+ break;
+
+ case RD_MEM_NORMAL_CLIENT_SHARED_MEM:
+ /* Fallthrough */
+ case RD_MEM_NORMAL_BSS:
+ rd_base_pa = spm_alloc_heap(rd_size);
+ zero_region = 1;
+ break;
+
+ default:
+ panic();
+ }
+
+ mmap.base_pa = rd_base_pa;
+ mmap.base_va = rd_base_va;
+ mmap.size = rd_size;
+
+ /* Only S-EL0 mappings supported for now */
+ mmap.attr = rdmem_attr_to_mmap_attr(rdmem->attr) | MT_USER;
+
+ VERBOSE(" VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
+ mmap.base_va, mmap.base_pa, mmap.size, mmap.attr);
+
+ /* Map region in the context of the Secure Partition */
+ mmap_add_region_ctx(sp_ctx->xlat_ctx_handle, &mmap);
+
+ if (zero_region == 1) {
+ VERBOSE(" Zeroing region...\n");
+
+ rc = mmap_add_dynamic_region(mmap.base_pa, mmap.base_pa,
+ mmap.size, MT_MEMORY | MT_RW | MT_SECURE);
+ if (rc != 0) {
+ ERROR("Unable to map memory at EL3 to zero: %d\n",
+ rc);
+ panic();
+ }
+
+ zeromem((void *)mmap.base_pa, mmap.size);
+
+		/*
+		 * Unmap the destination region unless it is the SPM<->SP
+		 * buffer, which SPM needs to keep mapped for its own use.
+		 */
+ if (memtype != RD_MEM_NORMAL_SPM_SP_SHARED_MEM) {
+ rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
+ if (rc != 0) {
+ ERROR("Unable to remove region at EL3: %d\n", rc);
+ panic();
+ }
+ }
+ }
+}
+
+void sp_map_memory_regions(sp_context_t *sp_ctx)
+{
+ /* This region contains the exception vectors used at S-EL1. */
+ const mmap_region_t sel1_exception_vectors =
+ MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
+ SPM_SHIM_EXCEPTIONS_SIZE,
+ MT_CODE | MT_SECURE | MT_PRIVILEGED);
+
+ mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
+ &sel1_exception_vectors);
+
+ struct sp_rd_sect_mem_region *rdmem;
+
+ for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
+ map_rdmem(sp_ctx, rdmem);
+ }
+
+ init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
+}
diff --git a/services/std_svc/spm/sprt.c b/services/std_svc/spm/sprt.c
new file mode 100644
index 00000000..034dced7
--- /dev/null
+++ b/services/std_svc/spm/sprt.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <errno.h>
+#include <limits.h>
+#include <platform.h>
+#include <smccc.h>
+#include <smccc_helpers.h>
+#include <sprt_svc.h>
+#include <utils.h>
+
+#include "spm_private.h"
+
+/*******************************************************************************
+ * Functions to manipulate memory regions
+ ******************************************************************************/
+
+/*
+ * Attributes are encoded using a different format in the SMC interface than in
+ * the Trusted Firmware, where the mmap_attr_t enum type is used. This function
+ * converts an attributes value from the SMC format to the mmap_attr_t format by
+ * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
+ * The other fields are left as 0 because they are ignored by the function
+ * xlat_change_mem_attributes_ctx().
+ */
+static unsigned int smc_attr_to_mmap_attr(unsigned int attributes)
+{
+ unsigned int perm = attributes & SPRT_MEMORY_PERM_ATTR_MASK;
+
+ if (perm == SPRT_MEMORY_PERM_ATTR_RW) {
+ return MT_RW | MT_EXECUTE_NEVER | MT_USER;
+ } else if (perm == SPRT_MEMORY_PERM_ATTR_RO) {
+ return MT_RO | MT_EXECUTE_NEVER | MT_USER;
+ } else if (perm == SPRT_MEMORY_PERM_ATTR_RO_EXEC) {
+ return MT_RO | MT_USER;
+ } else {
+ return UINT_MAX;
+ }
+}
+
+/*
+ * This function converts attributes from the Trusted Firmware format into the
+ * SMC interface format.
+ */
+static unsigned int mmap_attr_to_smc_attr(unsigned int attr)
+{
+ unsigned int perm;
+
+ /* No access from EL0. */
+ if ((attr & MT_USER) == 0U)
+ return UINT_MAX;
+
+ if ((attr & MT_RW) != 0) {
+ assert(MT_TYPE(attr) != MT_DEVICE);
+ perm = SPRT_MEMORY_PERM_ATTR_RW;
+ } else {
+ if ((attr & MT_EXECUTE_NEVER) != 0U) {
+ perm = SPRT_MEMORY_PERM_ATTR_RO;
+ } else {
+ perm = SPRT_MEMORY_PERM_ATTR_RO_EXEC;
+ }
+ }
+
+ return perm << SPRT_MEMORY_PERM_ATTR_SHIFT;
+}
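+
+/*
+ * Note that the conversion is deliberately lossy in both directions: the SMC
+ * encoding carries only the S-EL0 permission, and the remaining mmap fields
+ * are either left as 0 or ignored, as described above.
+ */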
+
+static int32_t sprt_memory_perm_attr_get(sp_context_t *sp_ctx, uintptr_t base_va)
+{
+ uint32_t attributes;
+
+ spin_lock(&(sp_ctx->xlat_ctx_lock));
+
+ int ret = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+ base_va, &attributes);
+
+ spin_unlock(&(sp_ctx->xlat_ctx_lock));
+
+	/* Convert error codes of xlat_get_mem_attributes_ctx() into SPRT codes. */
+ assert((ret == 0) || (ret == -EINVAL));
+
+ if (ret != 0)
+ return SPRT_INVALID_PARAMETER;
+
+ unsigned int perm = mmap_attr_to_smc_attr(attributes);
+
+ if (perm == UINT_MAX)
+ return SPRT_INVALID_PARAMETER;
+
+ return SPRT_SUCCESS | perm;
+}
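+
+/*
+ * For example, querying a read-only executable page returns
+ * SPRT_SUCCESS | (SPRT_MEMORY_PERM_ATTR_RO_EXEC << SPRT_MEMORY_PERM_ATTR_SHIFT).
+ */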
+
+static int32_t sprt_memory_perm_attr_set(sp_context_t *sp_ctx,
+ u_register_t page_address, u_register_t pages_count,
+ u_register_t smc_attributes)
+{
+ int ret;
+ uintptr_t base_va = (uintptr_t) page_address;
+ size_t size = pages_count * PAGE_SIZE;
+
+ VERBOSE(" Start address : 0x%lx\n", base_va);
+ VERBOSE(" Number of pages: %i (%zi bytes)\n", (int) pages_count, size);
+ VERBOSE(" Attributes : 0x%lx\n", smc_attributes);
+
+ uint32_t mmap_attr = smc_attr_to_mmap_attr(smc_attributes);
+
+ if (mmap_attr == UINT_MAX) {
+ WARN("%s: Invalid memory attributes: 0x%lx\n", __func__,
+ smc_attributes);
+ return SPRT_INVALID_PARAMETER;
+ }
+
+ /*
+ * Perform some checks before actually trying to change the memory
+ * attributes.
+ */
+
+ spin_lock(&(sp_ctx->xlat_ctx_lock));
+
+ uint32_t attributes;
+
+ ret = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+ base_va, &attributes);
+
+ if (ret != 0) {
+ spin_unlock(&(sp_ctx->xlat_ctx_lock));
+ return SPRT_INVALID_PARAMETER;
+ }
+
+ if ((attributes & MT_USER) == 0U) {
+ /* Prohibit changing attributes of S-EL1 regions */
+ spin_unlock(&(sp_ctx->xlat_ctx_lock));
+ return SPRT_INVALID_PARAMETER;
+ }
+
+ ret = xlat_change_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+ base_va, size, mmap_attr);
+
+ spin_unlock(&(sp_ctx->xlat_ctx_lock));
+
+	/* Convert error codes of xlat_change_mem_attributes_ctx() into SPRT codes. */
+ assert((ret == 0) || (ret == -EINVAL));
+
+ return (ret == 0) ? SPRT_SUCCESS : SPRT_INVALID_PARAMETER;
+}
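+
+/*
+ * Hypothetical caller sketch (the conduit and wrapper are assumptions, not
+ * part of this patch): a secure partition runtime that has just written code
+ * into a page and wants it read-only and executable could issue
+ *
+ *   sprt_svc(SPRT_MEMORY_PERM_ATTR_SET_AARCH64, page_va, num_pages, attrs);
+ *
+ * with x1 = base VA, x2 = page count and x3 encoding
+ * SPRT_MEMORY_PERM_ATTR_RO_EXEC, where sprt_svc() stands in for whatever
+ * SVC/SMC conduit the runtime uses.
+ */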
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for SPRT.
+ ******************************************************************************/
+uint64_t sprt_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4, void *cookie, void *handle,
+ uint64_t flags)
+{
+ /* SPRT only supported from the Secure world */
+ if (is_caller_non_secure(flags) == SMC_FROM_NON_SECURE) {
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ assert(handle == cm_get_context(SECURE));
+
+ /*
+ * Only S-EL0 partitions are supported for now. Make the next ERET into
+ * the partition jump directly to S-EL0 instead of S-EL1.
+ */
+ cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
+
+ switch (smc_fid) {
+ case SPRT_VERSION:
+ SMC_RET1(handle, SPRT_VERSION_COMPILED);
+
+ case SPRT_PUT_RESPONSE_AARCH64:
+ /*
+ * Registers x1-x3 aren't saved by default to the context,
+ * but they are needed after spm_sp_synchronous_exit() because
+ * they hold return values.
+ */
+ SMC_SET_GP(handle, CTX_GPREG_X1, x1);
+ SMC_SET_GP(handle, CTX_GPREG_X2, x2);
+ SMC_SET_GP(handle, CTX_GPREG_X3, x3);
+ spm_sp_synchronous_exit(SPRT_PUT_RESPONSE_AARCH64);
+
+ case SPRT_YIELD_AARCH64:
+ spm_sp_synchronous_exit(SPRT_YIELD_AARCH64);
+
+ case SPRT_MEMORY_PERM_ATTR_GET_AARCH64:
+ {
+ /* Get context of the SP in use by this CPU. */
+ unsigned int linear_id = plat_my_core_pos();
+ sp_context_t *sp_ctx = spm_cpu_get_sp_ctx(linear_id);
+
+ SMC_RET1(handle, sprt_memory_perm_attr_get(sp_ctx, x1));
+ }
+
+ case SPRT_MEMORY_PERM_ATTR_SET_AARCH64:
+ {
+ /* Get context of the SP in use by this CPU. */
+ unsigned int linear_id = plat_my_core_pos();
+ sp_context_t *sp_ctx = spm_cpu_get_sp_ctx(linear_id);
+
+ SMC_RET1(handle, sprt_memory_perm_attr_set(sp_ctx, x1, x2, x3));
+ }
+
+ default:
+ break;
+ }
+
+ WARN("SPRT: Unsupported call 0x%08x\n", smc_fid);
+ SMC_RET1(handle, SPRT_NOT_SUPPORTED);
+}
diff --git a/services/std_svc/spm_deprecated/aarch64/spm_helpers.S b/services/std_svc/spm_deprecated/aarch64/spm_helpers.S
new file mode 100644
index 00000000..aa35811f
--- /dev/null
+++ b/services/std_svc/spm_deprecated/aarch64/spm_helpers.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "../spm_private.h"
+
+ .global spm_secure_partition_enter
+ .global spm_secure_partition_exit
+
+ /* ---------------------------------------------------------------------
+ * This function is called with SP_EL0 as stack. Here we stash our EL3
+	 * callee-saved registers onto the stack as part of saving the C
+ * runtime and enter the secure payload.
+ * 'x0' contains a pointer to the memory where the address of the C
+ * runtime context is to be saved.
+ * ---------------------------------------------------------------------
+ */
+func spm_secure_partition_enter
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #SP_C_RT_CTX_SIZE
+
+	/* Save callee-saved registers onto the stack */
+ stp x19, x20, [sp, #SP_C_RT_CTX_X19]
+ stp x21, x22, [sp, #SP_C_RT_CTX_X21]
+ stp x23, x24, [sp, #SP_C_RT_CTX_X23]
+ stp x25, x26, [sp, #SP_C_RT_CTX_X25]
+ stp x27, x28, [sp, #SP_C_RT_CTX_X27]
+ stp x29, x30, [sp, #SP_C_RT_CTX_X29]
+
+ /* ---------------------------------------------------------------------
+	 * Everything is set up now. el3_exit() will use the secure context to
+	 * restore the general purpose and EL3 system registers and ERET into
+	 * the secure payload.
+ * ---------------------------------------------------------------------
+ */
+ b el3_exit
+endfunc spm_secure_partition_enter
+
+ /* ---------------------------------------------------------------------
+ * This function is called with 'x0' pointing to a C runtime context
+ * saved in spm_secure_partition_enter().
+	 * It restores the saved registers and returns to that runtime using
+	 * 'x0' as the new stack pointer. This destroys the C runtime context
+	 * that had been built on the stack below the saved context by the
+	 * caller. The second parameter 'x1' is passed to the caller as the
+	 * return value.
+ * ---------------------------------------------------------------------
+ */
+func spm_secure_partition_exit
+ /* Restore the previous stack */
+ mov sp, x0
+
+	/* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(SP_C_RT_CTX_X19 - SP_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(SP_C_RT_CTX_X21 - SP_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(SP_C_RT_CTX_X23 - SP_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(SP_C_RT_CTX_X25 - SP_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(SP_C_RT_CTX_X27 - SP_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(SP_C_RT_CTX_X29 - SP_C_RT_CTX_SIZE)]
+
+ /* ---------------------------------------------------------------------
+ * This should take us back to the instruction after the call to the
+	 * last spm_secure_partition_enter(). Place the second parameter in x0
+ * so that the caller will see it as a return value from the original
+ * entry call.
+ * ---------------------------------------------------------------------
+ */
+ mov x0, x1
+ ret
+endfunc spm_secure_partition_exit
diff --git a/services/std_svc/spm_deprecated/aarch64/spm_shim_exceptions.S b/services/std_svc/spm_deprecated/aarch64/spm_shim_exceptions.S
new file mode 100644
index 00000000..9c218dfe
--- /dev/null
+++ b/services/std_svc/spm_deprecated/aarch64/spm_shim_exceptions.S
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <context.h>
+
+/* -----------------------------------------------------------------------------
+ * Very simple stackless exception handlers used by the spm shim layer.
+ * -----------------------------------------------------------------------------
+ */
+ .globl spm_shim_exceptions_ptr
+
+vector_base spm_shim_exceptions_ptr, .spm_shim_exceptions
+
+ /* -----------------------------------------------------
+ * Current EL with SP0 : 0x0 - 0x200
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionSP0, .spm_shim_exceptions
+ b .
+end_vector_entry SynchronousExceptionSP0
+
+vector_entry IrqSP0, .spm_shim_exceptions
+ b .
+end_vector_entry IrqSP0
+
+vector_entry FiqSP0, .spm_shim_exceptions
+ b .
+end_vector_entry FiqSP0
+
+vector_entry SErrorSP0, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorSP0
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x400
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionSPx, .spm_shim_exceptions
+ b .
+end_vector_entry SynchronousExceptionSPx
+
+vector_entry IrqSPx, .spm_shim_exceptions
+ b .
+end_vector_entry IrqSPx
+
+vector_entry FiqSPx, .spm_shim_exceptions
+ b .
+end_vector_entry FiqSPx
+
+vector_entry SErrorSPx, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorSPx
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600. No exceptions
+	 * are handled since the secure partition does not implement
+ * a lower EL
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionA64, .spm_shim_exceptions
+ msr tpidr_el1, x30
+ mrs x30, esr_el1
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ cmp x30, #EC_AARCH64_SVC
+ b.eq do_smc
+
+ cmp x30, #EC_AARCH32_SVC
+ b.eq do_smc
+
+ cmp x30, #EC_AARCH64_SYS
+ b.eq handle_sys_trap
+
+ /* Fail in all the other cases */
+ b panic
+
+ /* ---------------------------------------------
+ * Tell SPM that we are done initialising
+ * ---------------------------------------------
+ */
+do_smc:
+ mrs x30, tpidr_el1
+ smc #0
+ eret
+
+	/* AArch64 system instruction traps are handled as a panic for now */
+handle_sys_trap:
+panic:
+ b panic
+end_vector_entry SynchronousExceptionA64
+
+vector_entry IrqA64, .spm_shim_exceptions
+ b .
+end_vector_entry IrqA64
+
+vector_entry FiqA64, .spm_shim_exceptions
+ b .
+end_vector_entry FiqA64
+
+vector_entry SErrorA64, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorA64
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionA32, .spm_shim_exceptions
+ b .
+end_vector_entry SynchronousExceptionA32
+
+vector_entry IrqA32, .spm_shim_exceptions
+ b .
+end_vector_entry IrqA32
+
+vector_entry FiqA32, .spm_shim_exceptions
+ b .
+end_vector_entry FiqA32
+
+vector_entry SErrorA32, .spm_shim_exceptions
+ b .
+end_vector_entry SErrorA32
diff --git a/services/std_svc/spm_deprecated/spm.mk b/services/std_svc/spm_deprecated/spm.mk
new file mode 100644
index 00000000..ed36812f
--- /dev/null
+++ b/services/std_svc/spm_deprecated/spm.mk
@@ -0,0 +1,23 @@
+#
+# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${SPD},none)
+ $(error "Error: SPD and SPM are incompatible build options.")
+endif
+ifneq (${ARCH},aarch64)
+ $(error "Error: SPM is only supported on aarch64.")
+endif
+
+SPM_SOURCES := $(addprefix services/std_svc/spm_deprecated/, \
+ ${ARCH}/spm_helpers.S \
+ ${ARCH}/spm_shim_exceptions.S \
+ spm_main.c \
+ spm_setup.c \
+ spm_xlat.c)
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
diff --git a/services/std_svc/spm_deprecated/spm_main.c b/services/std_svc/spm_deprecated/spm_main.c
new file mode 100644
index 00000000..880e86e4
--- /dev/null
+++ b/services/std_svc/spm_deprecated/spm_main.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl31.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <ehf.h>
+#include <errno.h>
+#include <mm_svc.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <secure_partition.h>
+#include <smccc.h>
+#include <smccc_helpers.h>
+#include <spinlock.h>
+#include <spm_svc.h>
+#include <utils.h>
+#include <xlat_tables_v2.h>
+
+#include "spm_private.h"
+
+/*******************************************************************************
+ * Secure Partition context information.
+ ******************************************************************************/
+static sp_context_t sp_ctx;
+
+/*******************************************************************************
+ * Set state of a Secure Partition context.
+ ******************************************************************************/
+void sp_state_set(sp_context_t *sp_ptr, sp_state_t state)
+{
+ spin_lock(&(sp_ptr->state_lock));
+ sp_ptr->state = state;
+ spin_unlock(&(sp_ptr->state_lock));
+}
+
+/*******************************************************************************
+ * Wait until the state of a Secure Partition is the specified one and change it
+ * to the desired state.
+ ******************************************************************************/
+void sp_state_wait_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
+{
+ int success = 0;
+
+ while (success == 0) {
+ spin_lock(&(sp_ptr->state_lock));
+
+ if (sp_ptr->state == from) {
+ sp_ptr->state = to;
+
+ success = 1;
+ }
+
+ spin_unlock(&(sp_ptr->state_lock));
+ }
+}
+
+/*******************************************************************************
+ * Check if the state of a Secure Partition is the specified one and, if so,
+ * change it to the desired state. Returns 0 on success, -1 on error.
+ ******************************************************************************/
+int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
+{
+ int ret = -1;
+
+ spin_lock(&(sp_ptr->state_lock));
+
+ if (sp_ptr->state == from) {
+ sp_ptr->state = to;
+
+ ret = 0;
+ }
+
+ spin_unlock(&(sp_ptr->state_lock));
+
+ return ret;
+}
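+
+/*
+ * Usage note: sp_state_wait_switch() spins until the transition succeeds, so
+ * it suits callers that must eventually enter the partition (see
+ * spm_sp_call() below), whereas sp_state_try_switch() returns immediately
+ * for callers that prefer to back off rather than block.
+ */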
+
+/*******************************************************************************
+ * This function takes an SP context pointer and performs a synchronous entry
+ * into it.
+ ******************************************************************************/
+static uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx)
+{
+ uint64_t rc;
+
+ assert(sp_ctx != NULL);
+
+ /* Assign the context of the SP to this CPU */
+ cm_set_context(&(sp_ctx->cpu_ctx), SECURE);
+
+ /* Restore the context assigned above */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ /* Invalidate TLBs at EL1. */
+ tlbivmalle1();
+ dsbish();
+
+ /* Enter Secure Partition */
+ rc = spm_secure_partition_enter(&sp_ctx->c_rt_ctx);
+
+ /* Save secure state */
+ cm_el1_sysregs_context_save(SECURE);
+
+ return rc;
+}
+
+/*******************************************************************************
+ * This function returns to the place where spm_sp_synchronous_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 static void spm_sp_synchronous_exit(uint64_t rc)
+{
+ sp_context_t *ctx = &sp_ctx;
+
+ /*
+ * The SPM must have initiated the original request through a
+ * synchronous entry into the secure partition. Jump back to the
+	 * original C runtime context with the value of rc in x0.
+ */
+ spm_secure_partition_exit(ctx->c_rt_ctx, rc);
+
+ panic();
+}
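+
+/*
+ * Together, spm_sp_synchronous_entry() and spm_sp_synchronous_exit() act as
+ * a coroutine pair: entry saves the EL3 callee-saved registers and ERETs
+ * into the partition, and the partition's next SMC back into the SPM (e.g.
+ * SP_EVENT_COMPLETE_AARCH64) triggers exit, which restores the saved frame
+ * so that spm_secure_partition_enter() appears to return with 'rc'.
+ */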
+
+/*******************************************************************************
+ * Jump to each Secure Partition for the first time.
+ ******************************************************************************/
+static int32_t spm_init(void)
+{
+ uint64_t rc;
+ sp_context_t *ctx;
+
+ INFO("Secure Partition init...\n");
+
+ ctx = &sp_ctx;
+
+ ctx->state = SP_STATE_RESET;
+
+ rc = spm_sp_synchronous_entry(ctx);
+ assert(rc == 0);
+
+ ctx->state = SP_STATE_IDLE;
+
+ INFO("Secure Partition initialized.\n");
+
+ return rc;
+}
+
+/*******************************************************************************
+ * Initialize contexts of all Secure Partitions.
+ ******************************************************************************/
+int32_t spm_setup(void)
+{
+ sp_context_t *ctx;
+
+ /* Disable MMU at EL1 (initialized by BL2) */
+ disable_mmu_icache_el1();
+
+ /* Initialize context of the SP */
+ INFO("Secure Partition context setup start...\n");
+
+ ctx = &sp_ctx;
+
+ /* Assign translation tables context. */
+ ctx->xlat_ctx_handle = spm_get_sp_xlat_context();
+
+ spm_sp_setup(ctx);
+
+ /* Register init function for deferred init. */
+ bl31_register_bl32_init(&spm_init);
+
+ INFO("Secure Partition setup done.\n");
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Function to perform a call to a Secure Partition.
+ ******************************************************************************/
+uint64_t spm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3)
+{
+ uint64_t rc;
+ sp_context_t *sp_ptr = &sp_ctx;
+
+ /* Wait until the Secure Partition is idle and set it to busy. */
+ sp_state_wait_switch(sp_ptr, SP_STATE_IDLE, SP_STATE_BUSY);
+
+ /* Set values for registers on SP entry */
+ cpu_context_t *cpu_ctx = &(sp_ptr->cpu_ctx);
+
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X0, smc_fid);
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1, x1);
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X2, x2);
+ write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3, x3);
+
+ /* Jump to the Secure Partition. */
+ rc = spm_sp_synchronous_entry(sp_ptr);
+
+ /* Flag Secure Partition as idle. */
+ assert(sp_ptr->state == SP_STATE_BUSY);
+ sp_state_set(sp_ptr, SP_STATE_IDLE);
+
+ return rc;
+}
+
+/*******************************************************************************
+ * MM_COMMUNICATE handler
+ ******************************************************************************/
+static uint64_t mm_communicate(uint32_t smc_fid, uint64_t mm_cookie,
+ uint64_t comm_buffer_address,
+ uint64_t comm_size_address, void *handle)
+{
+ uint64_t rc;
+
+ /* Cookie. Reserved for future use. It must be zero. */
+ if (mm_cookie != 0U) {
+ ERROR("MM_COMMUNICATE: cookie is not zero\n");
+ SMC_RET1(handle, SPM_INVALID_PARAMETER);
+ }
+
+ if (comm_buffer_address == 0U) {
+ ERROR("MM_COMMUNICATE: comm_buffer_address is zero\n");
+ SMC_RET1(handle, SPM_INVALID_PARAMETER);
+ }
+
+ if (comm_size_address != 0U) {
+ VERBOSE("MM_COMMUNICATE: comm_size_address is not 0 as recommended.\n");
+ }
+
+	/*
+	 * The current secure partition design mandates that:
+	 * - at any point, only a single core can be executing in the secure
+	 *   partition.
+	 * - a core cannot be preempted by an interrupt while executing in the
+	 *   secure partition.
+	 * Raise the running priority of the core to the interrupt level
+	 * configured for the secure partition so as to block any interrupt
+	 * from preempting this core.
+	 */
+ ehf_activate_priority(PLAT_SP_PRI);
+
+ /* Save the Normal world context */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ rc = spm_sp_call(smc_fid, comm_buffer_address, comm_size_address,
+ plat_my_core_pos());
+
+ /* Restore non-secure state */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ /*
+ * Exited from secure partition. This core can take
+ * interrupts now.
+ */
+ ehf_deactivate_priority(PLAT_SP_PRI);
+
+ SMC_RET1(handle, rc);
+}
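+
+/*
+ * Illustrative normal-world usage (assumed client behaviour, not defined in
+ * this file): an MM client issues MM_COMMUNICATE_AARCH64 with x1 = 0
+ * (cookie), x2 = the physical address of its communication buffer and
+ * x3 = 0 as recommended, and receives the Secure Partition's return code
+ * in x0.
+ */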
+
+/*******************************************************************************
+ * Secure Partition Manager SMC handler.
+ ******************************************************************************/
+uint64_t spm_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ unsigned int ns;
+
+ /* Determine which security state this SMC originated from */
+ ns = is_caller_non_secure(flags);
+
+ if (ns == SMC_FROM_SECURE) {
+
+ /* Handle SMCs from Secure world. */
+
+ assert(handle == cm_get_context(SECURE));
+
+ /* Make next ERET jump to S-EL0 instead of S-EL1. */
+ cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
+
+ switch (smc_fid) {
+
+ case SPM_VERSION_AARCH32:
+ SMC_RET1(handle, SPM_VERSION_COMPILED);
+
+ case SP_EVENT_COMPLETE_AARCH64:
+ spm_sp_synchronous_exit(x1);
+
+ case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
+ INFO("Received SP_MEMORY_ATTRIBUTES_GET_AARCH64 SMC\n");
+
+ if (sp_ctx.state != SP_STATE_RESET) {
+ WARN("SP_MEMORY_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
+ SMC_RET1(handle, SPM_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle,
+ spm_memory_attributes_get_smc_handler(
+ &sp_ctx, x1));
+
+ case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
+ INFO("Received SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC\n");
+
+ if (sp_ctx.state != SP_STATE_RESET) {
+ WARN("SP_MEMORY_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
+ SMC_RET1(handle, SPM_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle,
+ spm_memory_attributes_set_smc_handler(
+ &sp_ctx, x1, x2, x3));
+ default:
+ break;
+ }
+ } else {
+
+ /* Handle SMCs from Non-secure world. */
+
+ assert(handle == cm_get_context(NON_SECURE));
+
+ switch (smc_fid) {
+
+ case MM_VERSION_AARCH32:
+ SMC_RET1(handle, MM_VERSION_COMPILED);
+
+ case MM_COMMUNICATE_AARCH32:
+ case MM_COMMUNICATE_AARCH64:
+ return mm_communicate(smc_fid, x1, x2, x3, handle);
+
+ case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
+ case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
+ /* SMC interfaces reserved for secure callers. */
+ SMC_RET1(handle, SPM_NOT_SUPPORTED);
+
+ default:
+ break;
+ }
+ }
+
+ SMC_RET1(handle, SMC_UNK);
+}
diff --git a/services/std_svc/spm_deprecated/spm_private.h b/services/std_svc/spm_deprecated/spm_private.h
new file mode 100644
index 00000000..ec3f48ea
--- /dev/null
+++ b/services/std_svc/spm_deprecated/spm_private.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_PRIVATE_H
+#define SPM_PRIVATE_H
+
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SP_C_RT_CTX_X19 0x0
+#define SP_C_RT_CTX_X20 0x8
+#define SP_C_RT_CTX_X21 0x10
+#define SP_C_RT_CTX_X22 0x18
+#define SP_C_RT_CTX_X23 0x20
+#define SP_C_RT_CTX_X24 0x28
+#define SP_C_RT_CTX_X25 0x30
+#define SP_C_RT_CTX_X26 0x38
+#define SP_C_RT_CTX_X27 0x40
+#define SP_C_RT_CTX_X28 0x48
+#define SP_C_RT_CTX_X29 0x50
+#define SP_C_RT_CTX_X30 0x58
+
+#define SP_C_RT_CTX_SIZE 0x60
+#define SP_C_RT_CTX_ENTRIES (SP_C_RT_CTX_SIZE >> DWORD_SHIFT)
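+
+/*
+ * These offsets must match the stp/ldp pairs in aarch64/spm_helpers.S: for
+ * instance, "stp x19, x20, [sp, #SP_C_RT_CTX_X19]" saves x19 at offset 0x0
+ * and x20 at offset 0x8 of the saved C runtime context.
+ */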
+
+#ifndef __ASSEMBLY__
+
+#include <spinlock.h>
+#include <stdint.h>
+#include <xlat_tables_v2.h>
+
+typedef enum sp_state {
+ SP_STATE_RESET = 0,
+ SP_STATE_IDLE,
+ SP_STATE_BUSY
+} sp_state_t;
+
+typedef struct sp_context {
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+ xlat_ctx_t *xlat_ctx_handle;
+
+ sp_state_t state;
+ spinlock_t state_lock;
+} sp_context_t;
+
+/* Assembly helpers */
+uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
+void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+void spm_sp_setup(sp_context_t *sp_ctx);
+
+xlat_ctx_t *spm_get_sp_xlat_context(void);
+
+int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
+ uintptr_t base_va);
+int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
+ u_register_t page_address,
+ u_register_t pages_count,
+ u_register_t smc_attributes);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* SPM_PRIVATE_H */
diff --git a/services/std_svc/spm/sp_setup.c b/services/std_svc/spm_deprecated/spm_setup.c
index 0d61306f..0d61306f 100644
--- a/services/std_svc/spm/sp_setup.c
+++ b/services/std_svc/spm_deprecated/spm_setup.c
diff --git a/services/std_svc/spm_deprecated/spm_shim_private.h b/services/std_svc/spm_deprecated/spm_shim_private.h
new file mode 100644
index 00000000..f2a7e052
--- /dev/null
+++ b/services/std_svc/spm_deprecated/spm_shim_private.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_SHIM_PRIVATE_H
+#define SPM_SHIM_PRIVATE_H
+
+#include <stdint.h>
+#include <utils_def.h>
+
+/* Assembly source */
+IMPORT_SYM(uintptr_t, spm_shim_exceptions_ptr, SPM_SHIM_EXCEPTIONS_PTR);
+
+/* Linker symbols */
+IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_START__, SPM_SHIM_EXCEPTIONS_START);
+IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_END__, SPM_SHIM_EXCEPTIONS_END);
+
+/* Definitions */
+
+#define SPM_SHIM_EXCEPTIONS_SIZE \
+ (SPM_SHIM_EXCEPTIONS_END - SPM_SHIM_EXCEPTIONS_START)
+
+#endif /* SPM_SHIM_PRIVATE_H */
diff --git a/services/std_svc/spm/sp_xlat.c b/services/std_svc/spm_deprecated/spm_xlat.c
index 35271386..35271386 100644
--- a/services/std_svc/spm/sp_xlat.c
+++ b/services/std_svc/spm_deprecated/spm_xlat.c
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 1a81a0a3..86ecdf7b 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -102,7 +102,7 @@ static uintptr_t std_svc_smc_handler(uint32_t smc_fid,
SMC_RET1(handle, ret);
}
-#if ENABLE_SPM
+#if ENABLE_SPM && SPM_DEPRECATED
/*
* Dispatch SPM calls to SPM SMC handler and return its return
* value