author     Jeenu Viswambharan <jeenu.viswambharan@arm.com>   2017-02-16 14:55:15 +0000
committer  Jeenu Viswambharan <jeenu.viswambharan@arm.com>   2017-05-04 11:00:34 +0100
commit     b10d44995eb652675863c2cc6a7726683613da0d (patch)
tree       1002687a7916dd0b8ff7dbed54f65be526b3532c /plat/arm
parent     f4c8aa905414fb021c08370306bd516f678a58bd (diff)
Introduce ARM SiP service to switch execution state
In AArch64, privileged exception levels control the execution state
(a.k.a. register width) of the immediately lower exception level, i.e.
whether the lower exception level executes in AArch64 or AArch32 state.
For an exception level to have its execution state changed at run time,
it must request the change by raising a synchronous exception to the
higher exception level.

This patch implements and adds such a provision to the ARM SiP service,
by which an immediately lower exception level can request to switch its
execution state. The execution state is switched only if the request is:

  - raised from the non-secure world;

  - raised on the primary CPU, before any secondaries are brought online
    with the CPU_ON PSCI call;

  - raised from the exception level immediately below EL3: EL2, if
    implemented; otherwise NS EL1.

If successful, the SMC doesn't return to the caller, but instead to the
entry point supplied with the call. Otherwise, the caller will observe
the SMC returning with the STATE_SW_E_DENIED code. If ARM Trusted
Firmware is built for AArch32, the feature is not supported, and the
call will always fail.

For the ARM SiP service:

  - Add SMC function IDs for both AArch32 and AArch64;
  - Increment the SiP service minor version to 2;
  - Adjust the number of supported SiP service calls.

Add documentation for the ARM SiP service.

Fixes ARM-software/tf-issues#436

Change-Id: I4347f2d6232e69fbfbe333b340fcd0caed0a4cea
Signed-off-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
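To make the call interface concrete, a non-secure AArch64 caller at EL2
might request a switch to AArch32 roughly as sketched below. This is a
hedged illustration, not part of the patch: ARM_SIP_SVC_STATE_SWITCH_64
and the STATE_SW_E_* codes are defined by this patch's arm_sip_svc.h,
while the smc() wrapper is a hypothetical helper assumed here.

/*
 * Hedged caller sketch. smc() is a hypothetical wrapper around the SMC
 * instruction, taking the function ID in w0 and arguments in x1-x4.
 */
#include <stdint.h>
#include <arm_sip_svc.h>	/* ARM_SIP_SVC_STATE_SWITCH_64, STATE_SW_E_* */

extern uint64_t smc(uint32_t fid, uint64_t x1, uint64_t x2,
		    uint64_t x3, uint64_t x4);

static int switch_to_aarch32(uint32_t entry, uint32_t cookie)
{
	/*
	 * From an AArch64 caller, pc_hi and cookie_hi must be zero when
	 * switching to AArch32; bit 0 of the entry point selects Thumb
	 * state, per the handler's 'thumb = pc & 1' check.
	 */
	uint64_t ret = smc(ARM_SIP_SVC_STATE_SWITCH_64,
			   0,		/* pc_hi: must be 0 from AArch64 */
			   entry,	/* pc_lo: AArch32 entry point */
			   0,		/* cookie_hi: must be 0 from AArch64 */
			   cookie);	/* cookie_lo */

	/*
	 * Reached only on failure (STATE_SW_E_PARAM or STATE_SW_E_DENIED):
	 * on success, execution resumes at 'entry' instead.
	 */
	return (int) ret;
}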
Diffstat (limited to 'plat/arm')
-rw-r--r--   plat/arm/common/arm_common.mk              1
-rw-r--r--   plat/arm/common/arm_sip_svc.c             35
-rw-r--r--   plat/arm/common/execution_state_switch.c  197
3 files changed, 228 insertions(+), 5 deletions(-)
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index 9cf2b7e0..c7db9d86 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -164,6 +164,7 @@ BL2U_SOURCES += plat/arm/common/arm_bl2u_setup.c
BL31_SOURCES += plat/arm/common/arm_bl31_setup.c \
plat/arm/common/arm_pm.c \
plat/arm/common/arm_topology.c \
+ plat/arm/common/execution_state_switch.c \
plat/common/plat_psci_common.c
ifeq (${ENABLE_PMF}, 1)
diff --git a/plat/arm/common/arm_sip_svc.c b/plat/arm/common/arm_sip_svc.c
index eb8ec9ee..62a2ef55 100644
--- a/plat/arm/common/arm_sip_svc.c
+++ b/plat/arm/common/arm_sip_svc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -30,6 +30,7 @@
#include <arm_sip_svc.h>
#include <debug.h>
+#include <plat_arm.h>
#include <pmf.h>
#include <runtime_svc.h>
#include <stdint.h>
@@ -60,6 +61,8 @@ static uintptr_t arm_sip_handler(unsigned int smc_fid,
void *handle,
u_register_t flags)
{
+ int call_count = 0;
+
/*
* Dispatch PMF calls to PMF SMC handler and return its return
* value
@@ -70,12 +73,34 @@ static uintptr_t arm_sip_handler(unsigned int smc_fid,
}
switch (smc_fid) {
- case ARM_SIP_SVC_CALL_COUNT:
+ case ARM_SIP_SVC_EXE_STATE_SWITCH: {
+ u_register_t pc;
+
+ /* Allow calls from non-secure only */
+ if (!is_caller_non_secure(flags))
+ SMC_RET1(handle, STATE_SW_E_DENIED);
+
+ /* Validate supplied entry point */
+ pc = (u_register_t) ((x1 << 32) | (uint32_t) x2);
+ if (arm_validate_ns_entrypoint(pc))
+ SMC_RET1(handle, STATE_SW_E_PARAM);
+
/*
- * Return the number of SiP Service Calls. PMF is the only
- * SiP service implemented; so return number of PMF calls
+ * Pointers used in execution state switch are all 32 bits wide
*/
- SMC_RET1(handle, PMF_NUM_SMC_CALLS);
+ return arm_execution_state_switch(smc_fid, (uint32_t) x1,
+ (uint32_t) x2, (uint32_t) x3, (uint32_t) x4,
+ handle);
+ }
+
+ case ARM_SIP_SVC_CALL_COUNT:
+ /* PMF calls */
+ call_count += PMF_NUM_SMC_CALLS;
+
+ /* State switch call */
+ call_count += 1;
+
+ SMC_RET1(handle, call_count);
case ARM_SIP_SVC_UID:
/* Return UID to the caller */
diff --git a/plat/arm/common/execution_state_switch.c b/plat/arm/common/execution_state_switch.c
new file mode 100644
index 00000000..5068cdfc
--- /dev/null
+++ b/plat/arm/common/execution_state_switch.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <arm_sip_svc.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <plat_arm.h>
+#include <psci.h>
+#include <smcc_helpers.h>
+#include <string.h>
+#include <utils.h>
+
+/*
+ * Handle SMC from a lower exception level to switch its execution state
+ * (either from AArch64 to AArch32, or vice versa).
+ *
+ * smc_fid:
+ * SMC function ID - either ARM_SIP_SVC_STATE_SWITCH_64 or
+ * ARM_SIP_SVC_STATE_SWITCH_32.
+ * pc_hi, pc_lo:
+ * PC upon re-entry to the calling exception level; width dependent on the
+ * calling exception level.
+ * cookie_hi, cookie_lo:
+ * Opaque pointer pair received from the caller, to be passed back to it
+ * upon re-entry.
+ * handle:
+ * Handle to saved context.
+ */
+int arm_execution_state_switch(unsigned int smc_fid,
+ uint32_t pc_hi,
+ uint32_t pc_lo,
+ uint32_t cookie_hi,
+ uint32_t cookie_lo,
+ void *handle)
+{
+ /* Execution state can be switched only if EL3 is AArch64 */
+#ifdef AARCH64
+ int caller_64, from_el2, el, endianness, thumb = 0;
+ u_register_t spsr, pc, scr, sctlr;
+ entry_point_info_t ep;
+ cpu_context_t *ctx = (cpu_context_t *) handle;
+ el3_state_t *el3_ctx = get_el3state_ctx(ctx);
+
+ /* That the SMC originated from NS is already validated by the caller */
+
+ /*
+ * Disallow state switch if any of the secondaries have been brought up.
+ */
+ if (psci_secondaries_brought_up())
+ goto exec_denied;
+
+ spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
+ caller_64 = (GET_RW(spsr) == MODE_RW_64);
+
+ if (caller_64) {
+ /*
+ * If the call originated from AArch64, expect 32-bit pointers when
+ * switching to AArch32.
+ */
+ if ((pc_hi != 0) || (cookie_hi != 0))
+ goto invalid_param;
+
+ pc = pc_lo;
+
+ /* Instruction state when entering AArch32 */
+ thumb = pc & 1;
+ } else {
+ /* Construct AArch64 PC */
+ pc = (((u_register_t) pc_hi) << 32) | pc_lo;
+ }
+
+ /* Make sure PC is 4-byte aligned, except for Thumb */
+ if ((pc & 0x3) && !thumb)
+ goto invalid_param;
+
+ /*
+ * EL3 controls the register width of the immediately lower EL only.
+ * Expect this request from EL2/Hyp unless:
+ *
+ * - EL2 is not implemented;
+ * - EL2 is implemented, but disabled. This can be inferred from
+ * SCR_EL3.HCE.
+ */
+ from_el2 = caller_64 ? (GET_EL(spsr) == MODE_EL2) :
+ (GET_M32(spsr) == MODE32_hyp);
+ scr = read_ctx_reg(el3_ctx, CTX_SCR_EL3);
+ if (!from_el2) {
+ /* The call is from NS privilege level other than HYP */
+
+ /*
+ * Disallow switching state if there's a Hypervisor in place;
+ * this request must be taken up with the Hypervisor instead.
+ */
+ if (scr & SCR_HCE_BIT)
+ goto exec_denied;
+ }
+
+ /*
+ * Return to the caller using the same endianness. Extract
+ * endianness bit from the respective system control register
+ * directly.
+ */
+ sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
+ endianness = !!(sctlr & SCTLR_EE_BIT);
+
+ /* Construct SPSR for the exception state we're about to switch to */
+ if (caller_64) {
+ int impl;
+
+ /*
+ * Switching from AArch64 to AArch32. Ensure this CPU implements
+ * the target EL in AArch32.
+ */
+ impl = from_el2 ? EL_IMPLEMENTED(2) : EL_IMPLEMENTED(1);
+ if (impl != EL_IMPL_A64_A32)
+ goto exec_denied;
+
+ /* Return to the equivalent AArch32 privilege level */
+ el = from_el2 ? MODE32_hyp : MODE32_svc;
+ spsr = SPSR_MODE32(el, thumb ? SPSR_T_THUMB : SPSR_T_ARM,
+ endianness, DISABLE_ALL_EXCEPTIONS);
+ } else {
+ /*
+ * Switching from AArch32 to AArch64. An EL can't be implemented as
+ * AArch32-only, so the EL from which this call was raised must also
+ * implement AArch64.
+ */
+ el = from_el2 ? MODE_EL2 : MODE_EL1;
+ spsr = SPSR_64(el, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ }
+
+ /*
+ * Use the context management library to re-initialize the existing
+ * context with the execution state flipped. Since the library takes
+ * entry_point_info_t pointer as the argument, construct a dummy one
+ * with PC, state width, endianness, security etc. appropriately set.
+ * Other entries in the entry point structure are irrelevant for this
+ * purpose.
+ */
+ zeromem(&ep, sizeof(ep));
+ ep.pc = pc;
+ ep.spsr = spsr;
+ SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
+ ((endianness ? EP_EE_BIG : EP_EE_LITTLE) | NON_SECURE |
+ EP_ST_DISABLE));
+
+ /*
+ * Re-initialize the system register context, and exit EL3 as if for the
+ * first time. State switch is effectively a soft reset of the
+ * calling EL.
+ */
+ cm_init_my_context(&ep);
+ cm_prepare_el3_exit(NON_SECURE);
+
+ /*
+ * State switch success. The caller of the SMC won't see it return;
+ * instead, execution starts at the supplied entry point, with the
+ * context pointers populated in registers 0 and 1.
+ */
+ SMC_RET2(handle, cookie_hi, cookie_lo);
+
+invalid_param:
+ SMC_RET1(handle, STATE_SW_E_PARAM);
+
+exec_denied:
+#endif
+ /* State switch denied */
+ SMC_RET1(handle, STATE_SW_E_DENIED);
+}
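On success the SMC never returns to its call site: per the SMC_RET2
above, execution restarts at the supplied entry point with the two
cookie halves in registers 0 and 1. A hedged sketch of the receiving
AArch32 entry point follows; the function names are illustrative and
not part of this patch.

#include <stdint.h>

/* Hypothetical continuation of the OS boot path; assumed, not in patch */
extern void continue_boot_aarch32(uint32_t cookie_hi, uint32_t cookie_lo);

/*
 * AArch32 entry point named in the state switch SMC. The calling EL has
 * effectively been soft-reset, so nothing beyond r0/r1 (the cookie
 * halves restored by SMC_RET2) should be relied upon here.
 */
void __attribute__((noreturn)) state_switch_entry(uint32_t cookie_hi,
						  uint32_t cookie_lo)
{
	/* Set up a stack, caches, MMU etc. before doing real work */
	continue_boot_aarch32(cookie_hi, cookie_lo);

	for (;;)
		;	/* should not return */
}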