Diffstat (limited to 'lib')
-rw-r--r--  lib/cpus/aarch32/cortex_a53.S                   152
-rw-r--r--  lib/cpus/aarch32/cortex_a57.S                   333
-rw-r--r--  lib/cpus/aarch32/cpu_helpers.S                   13
-rw-r--r--  lib/el3_runtime/aarch32/context_mgmt.c          154
-rw-r--r--  lib/el3_runtime/aarch64/context.S                35
-rw-r--r--  lib/el3_runtime/aarch64/context_mgmt.c          280
-rw-r--r--  lib/xlat_tables/aarch32/xlat_tables.c             2
-rw-r--r--  lib/xlat_tables_v2/aarch32/xlat_tables_arch.c     2
8 files changed, 843 insertions(+), 128 deletions(-)
diff --git a/lib/cpus/aarch32/cortex_a53.S b/lib/cpus/aarch32/cortex_a53.S
index 3d5f833a..bc2c7628 100644
--- a/lib/cpus/aarch32/cortex_a53.S
+++ b/lib/cpus/aarch32/cortex_a53.S
@@ -10,6 +10,11 @@
#include <cpu_macros.S>
#include <debug.h>
+#if A53_DISABLE_NON_TEMPORAL_HINT
+#undef ERRATA_A53_836870
+#define ERRATA_A53_836870 1
+#endif
+
/* ---------------------------------------------
* Disable intra-cluster coherency
* ---------------------------------------------
@@ -23,11 +28,133 @@ func cortex_a53_disable_smp
bx lr
endfunc cortex_a53_disable_smp
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #826319.
+ * This applies only to revision <= r0p2 of Cortex A53.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * --------------------------------------------------
+ */
+func errata_a53_826319_wa
+ /*
+ * Compare r0 against revision r0p2
+ */
+ mov r2, lr
+ bl check_errata_826319
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr r0, CORTEX_A53_L2ACTLR
+ bic r0, #CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN
+ orr r0, #CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH
+ stcopr r0, CORTEX_A53_L2ACTLR
+1:
+ bx lr
+endfunc errata_a53_826319_wa
+
+func check_errata_826319
+ mov r1, #0x02
+ b cpu_rev_var_ls
+endfunc check_errata_826319
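
The check above encodes revision r0p2 as 0x02: cpu_get_rev_var packs the MIDR
variant field (bits [23:20]) into bits [7:4] of r0 and the revision field
(bits [3:0]) into bits [3:0], and cpu_rev_var_ls reports that the erratum
applies when that packed value is at most the reference in r1. A minimal C
model of the convention, with illustrative ERRATA_* values rather than the
ones from the headers:

#include <stdint.h>

#define ERRATA_APPLIES		1	/* assumed values, for illustration */
#define ERRATA_NOT_APPLIES	2

/* Pack MIDR.Variant[23:20] and MIDR.Revision[3:0] so rNpM becomes 0xNM. */
static inline uint32_t pack_rev_var(uint32_t midr)
{
	return (((midr >> 20) & 0xfU) << 4) | (midr & 0xfU);
}

/* cpu_rev_var_ls: the erratum applies to revisions <= the reference. */
static inline uint32_t rev_var_ls(uint32_t rev_var, uint32_t ref)
{
	return (rev_var <= ref) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
}

Under this model rev_var_ls(0x01 /* r0p1 */, 0x02) applies the 826319
workaround, while 0x10 (r1p0) does not.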
+
+ /* ---------------------------------------------------------------------
+ * Disable the cache non-temporal hint.
+ *
+ * This ignores the Transient allocation hint in the MAIR and treats
+ * allocations the same as non-transient allocation types. As a result,
+ * the LDNP and STNP instructions in AArch64 behave the same as the
+ * equivalent LDP and STP instructions.
+ *
+ * This is relevant only for revisions <= r0p3 of Cortex-A53.
+ * From r0p4 onwards, the bit to disable the hint is enabled by
+ * default at reset.
+ *
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------------------------
+ */
+func a53_disable_non_temporal_hint
+ /*
+ * Compare r0 against revision r0p3
+ */
+ mov r2, lr
+ bl check_errata_disable_non_temporal_hint
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A53_ACTLR
+ orr64_imm r0, r1, CORTEX_A53_ACTLR_DTAH
+ stcopr16 r0, r1, CORTEX_A53_ACTLR
+1:
+ bx lr
+endfunc a53_disable_non_temporal_hint
+
+func check_errata_disable_non_temporal_hint
+ mov r1, #0x03
+ b cpu_rev_var_ls
+endfunc check_errata_disable_non_temporal_hint
+
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A53 Errata #855873.
+ *
+ * This applies only to revisions >= r0p3 of Cortex A53.
+ * Earlier revisions of the core are affected as well, but don't
+ * have the chicken bit in the CPUACTLR register. It is expected that
+ * the rich OS takes care of that, especially as the workaround is
+ * shared with other errata in those revisions of the CPU.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * --------------------------------------------------
+ */
+func errata_a53_855873_wa
+ /*
+ * Compare r0 against revision r0p3 and higher
+ */
+ mov r2, lr
+ bl check_errata_855873
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A53_ACTLR
+ orr64_imm r0, r1, CORTEX_A53_ACTLR_ENDCCASCI
+ stcopr16 r0, r1, CORTEX_A53_ACTLR
+1:
+ bx lr
+endfunc errata_a53_855873_wa
+
+func check_errata_855873
+ mov r1, #0x03
+ b cpu_rev_var_hs
+endfunc check_errata_855873
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A53.
+ * Shall clobber: r0-r6
* -------------------------------------------------
*/
func cortex_a53_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+ mov r4, r0
+
+#if ERRATA_A53_826319
+ mov r0, r4
+ bl errata_a53_826319_wa
+#endif
+
+#if ERRATA_A53_836870
+ mov r0, r4
+ bl a53_disable_non_temporal_hint
+#endif
+
+#if ERRATA_A53_855873
+ mov r0, r4
+ bl errata_a53_855873_wa
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
@@ -36,7 +163,7 @@ func cortex_a53_reset_func
orr64_imm r0, r1, CORTEX_A53_ECTLR_SMP_BIT
stcopr16 r0, r1, CORTEX_A53_ECTLR
isb
- bx lr
+ bx r5
endfunc cortex_a53_reset_func
/* ----------------------------------------------------
@@ -111,6 +238,29 @@ func cortex_a53_cluster_pwr_dwn
b cortex_a53_disable_smp
endfunc cortex_a53_cluster_pwr_dwn
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A53. Must follow AAPCS.
+ */
+func cortex_a53_errata_report
+ push {r12, lr}
+
+ bl cpu_get_rev_var
+ mov r4, r0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A53_826319, cortex_a53, 826319
+ report_errata ERRATA_A53_836870, cortex_a53, disable_non_temporal_hint
+ report_errata ERRATA_A53_855873, cortex_a53, 855873
+
+ pop {r12, lr}
+ bx lr
+endfunc cortex_a53_errata_report
+#endif
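
Each report_errata line above calls the matching check_errata_* routine with
the revision-variant saved in r4 and logs the outcome. A rough C sketch of
that flow, reusing the illustrative ERRATA_* values from the earlier sketch;
the function name and message text are illustrative, and only INFO/VERBOSE/
WARN are real macros from debug.h:

#include <debug.h>
#include <stdint.h>

typedef uint32_t (*errata_check_t)(uint32_t rev_var);

static void report_one_erratum(const char *cpu, const char *id,
			       errata_check_t check, uint32_t rev_var)
{
	switch (check(rev_var)) {
	case ERRATA_APPLIES:
		INFO("%s: errata workaround for %s was applied\n", cpu, id);
		break;
	case ERRATA_NOT_APPLIES:
		VERBOSE("%s: errata workaround for %s is not needed\n",
			cpu, id);
		break;
	default:	/* workaround compiled out although erratum applies */
		WARN("%s: errata workaround for %s is missing\n", cpu, id);
		break;
	}
}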
+
declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
cortex_a53_reset_func, \
cortex_a53_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
index ed478463..a791e4ee 100644
--- a/lib/cpus/aarch32/cortex_a57.S
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -50,11 +50,312 @@ func cortex_a57_disable_ext_debug
bx lr
endfunc cortex_a57_disable_ext_debug
+ /* --------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #806969.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * --------------------------------------------------
+ */
+func errata_a57_806969_wa
+ /*
+ * Compare r0 against revision r0p0
+ */
+ mov r2, lr
+ bl check_errata_806969
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_ACTLR
+ orr64_imm r0, r1, CORTEX_A57_ACTLR_NO_ALLOC_WBWA
+ stcopr16 r0, r1, CORTEX_A57_ACTLR
+1:
+ bx lr
+endfunc errata_a57_806969_wa
+
+func check_errata_806969
+ mov r1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_806969
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #813419.
+ * This applies only to revision r0p0 of Cortex A57.
+ * ---------------------------------------------------
+ */
+func check_errata_813419
+ /*
+ * Even though this is only needed for revision r0p0, it
+ * is always applied due to limitations of the current
+ * errata framework.
+ */
+ mov r0, #ERRATA_APPLIES
+ bx lr
+endfunc check_errata_813419
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #813420.
+ * This applies only to revision r0p0 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_813420_wa
+ /*
+ * Compare r0 against revision r0p0
+ */
+ mov r2, lr
+ bl check_errata_813420
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_ACTLR
+ orr64_imm r0, r1, CORTEX_A57_ACTLR_DCC_AS_DCCI
+ stcopr16 r0, r1, CORTEX_A57_ACTLR
+1:
+ bx lr
+endfunc errata_a57_813420_wa
+
+func check_errata_813420
+ mov r1, #0x00
+ b cpu_rev_var_ls
+endfunc check_errata_813420
+
+ /* --------------------------------------------------------------------
+ * Disable the over-read from the LDNP instruction.
+ *
+ * This applies to all revisions <= r1p2. The performance degradation
+ * observed with LDNP/STNP has been fixed from r1p3 onwards.
+ *
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------------------------
+ */
+func a57_disable_ldnp_overread
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_disable_ldnp_overread
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_ACTLR
+ orr64_imm r0, r1, CORTEX_A57_ACTLR_DIS_OVERREAD
+ stcopr16 r0, r1, CORTEX_A57_ACTLR
+1:
+ bx lr
+endfunc a57_disable_ldnp_overread
+
+func check_errata_disable_ldnp_overread
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_disable_ldnp_overread
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #826974.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_826974_wa
+ /*
+ * Compare r0 against revision r1p1
+ */
+ mov r2, lr
+ bl check_errata_826974
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_ACTLR
+ orr64_imm r0, r1, CORTEX_A57_ACTLR_DIS_LOAD_PASS_DMB
+ stcopr16 r0, r1, CORTEX_A57_ACTLR
+1:
+ bx lr
+endfunc errata_a57_826974_wa
+
+func check_errata_826974
+ mov r1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_826974
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #826977.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_826977_wa
+ /*
+ * Compare r0 against revision r1p1
+ */
+ mov r2, lr
+ bl check_errata_826977
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_ACTLR
+ orr64_imm r0, r1, CORTEX_A57_ACTLR_GRE_NGRE_AS_NGNRE
+ stcopr16 r0, r1, CORTEX_A57_ACTLR
+1:
+ bx lr
+endfunc errata_a57_826977_wa
+
+func check_errata_826977
+ mov r1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_826977
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #828024.
+ * This applies only to revision <= r1p1 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_828024_wa
+ /*
+ * Compare r0 against revision r1p1
+ */
+ mov r2, lr
+ bl check_errata_828024
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_ACTLR
+ /*
+ * Setting the relevant bits in CORTEX_A57_ACTLR has to be done in 2
+ * instructions here because the resulting bitmask doesn't fit in a
+ * 16-bit value so it cannot be encoded in a single instruction.
+ */
+ orr64_imm r0, r1, CORTEX_A57_ACTLR_NO_ALLOC_WBWA
+ orr64_imm r0, r1, (CORTEX_A57_ACTLR_DIS_L1_STREAMING | CORTEX_A57_ACTLR_DIS_STREAMING)
+ stcopr16 r0, r1, CORTEX_A57_ACTLR
+1:
+ bx lr
+endfunc errata_a57_828024_wa
+
+func check_errata_828024
+ mov r1, #0x11
+ b cpu_rev_var_ls
+endfunc check_errata_828024
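
The two-ORR note above reflects how 64-bit CP15 registers are handled in
AArch32: the value lives in an r0/r1 pair and orr64_imm ORs one 32-bit word
at a time, each with its own A32 modified immediate (an 8-bit value rotated
by an even amount). A C sketch of the helper's effect, as an assumption about
its behaviour rather than its actual source:

#include <stdint.h>

/* OR a 64-bit constant into a {lo, hi} register pair, word by word. */
static inline void orr64(uint32_t *lo, uint32_t *hi, uint64_t val)
{
	*lo |= (uint32_t)(val & 0xffffffffU);	/* ORR on r0 */
	*hi |= (uint32_t)(val >> 32);		/* ORR on r1 */
}

Splitting the 828024 mask across two orr64_imm invocations keeps each
per-word immediate encodable.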
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #829520.
+ * This applies only to revision <= r1p2 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_829520_wa
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_829520
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_ACTLR
+ orr64_imm r0, r1, CORTEX_A57_ACTLR_DIS_INDIRECT_PREDICTOR
+ stcopr16 r0, r1, CORTEX_A57_ACTLR
+1:
+ bx lr
+endfunc errata_a57_829520_wa
+
+func check_errata_829520
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_829520
+
+ /* ---------------------------------------------------
+ * Errata Workaround for Cortex A57 Errata #833471.
+ * This applies only to revision <= r1p2 of Cortex A57.
+ * Inputs:
+ * r0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: r0-r3
+ * ---------------------------------------------------
+ */
+func errata_a57_833471_wa
+ /*
+ * Compare r0 against revision r1p2
+ */
+ mov r2, lr
+ bl check_errata_833471
+ mov lr, r2
+ cmp r0, #ERRATA_NOT_APPLIES
+ beq 1f
+ ldcopr16 r0, r1, CORTEX_A57_ACTLR
+ orr64_imm r1, r1, CORTEX_A57_ACTLR_FORCE_FPSCR_FLUSH
+ stcopr16 r0, r1, CORTEX_A57_ACTLR
+1:
+ bx lr
+endfunc errata_a57_833471_wa
+
+func check_errata_833471
+ mov r1, #0x12
+ b cpu_rev_var_ls
+endfunc check_errata_833471
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A57.
+ * Shall clobber: r0-r6
* -------------------------------------------------
*/
func cortex_a57_reset_func
+ mov r5, lr
+ bl cpu_get_rev_var
+ mov r4, r0
+
+#if ERRATA_A57_806969
+ mov r0, r4
+ bl errata_a57_806969_wa
+#endif
+
+#if ERRATA_A57_813420
+ mov r0, r4
+ bl errata_a57_813420_wa
+#endif
+
+#if A57_DISABLE_NON_TEMPORAL_HINT
+ mov r0, r4
+ bl a57_disable_ldnp_overread
+#endif
+
+#if ERRATA_A57_826974
+ mov r0, r4
+ bl errata_a57_826974_wa
+#endif
+
+#if ERRATA_A57_826977
+ mov r0, r4
+ bl errata_a57_826977_wa
+#endif
+
+#if ERRATA_A57_828024
+ mov r0, r4
+ bl errata_a57_828024_wa
+#endif
+
+#if ERRATA_A57_829520
+ mov r0, r4
+ bl errata_a57_829520_wa
+#endif
+
+#if ERRATA_A57_833471
+ mov r0, r4
+ bl errata_a57_833471_wa
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
@@ -63,7 +364,7 @@ func cortex_a57_reset_func
orr64_imm r0, r1, CORTEX_A57_ECTLR_SMP_BIT
stcopr16 r0, r1, CORTEX_A57_ECTLR
isb
- bx lr
+ bx r5
endfunc cortex_a57_reset_func
/* ----------------------------------------------------
@@ -162,6 +463,36 @@ func cortex_a57_cluster_pwr_dwn
b cortex_a57_disable_ext_debug
endfunc cortex_a57_cluster_pwr_dwn
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex A57. Must follow AAPCS.
+ */
+func cortex_a57_errata_report
+ push {r12, lr}
+
+ bl cpu_get_rev_var
+ mov r4, r0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_A57_806969, cortex_a57, 806969
+ report_errata ERRATA_A57_813419, cortex_a57, 813419
+ report_errata ERRATA_A57_813420, cortex_a57, 813420
+ report_errata A57_DISABLE_NON_TEMPORAL_HINT, cortex_a57, \
+ disable_ldnp_overread
+ report_errata ERRATA_A57_826974, cortex_a57, 826974
+ report_errata ERRATA_A57_826977, cortex_a57, 826977
+ report_errata ERRATA_A57_828024, cortex_a57, 828024
+ report_errata ERRATA_A57_829520, cortex_a57, 829520
+ report_errata ERRATA_A57_833471, cortex_a57, 833471
+
+ pop {r12, lr}
+ bx lr
+endfunc cortex_a57_errata_report
+#endif
+
declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
cortex_a57_reset_func, \
cortex_a57_core_pwr_dwn, \
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
index dc78f6ee..bfdc1e4f 100644
--- a/lib/cpus/aarch32/cpu_helpers.S
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -182,6 +182,19 @@ func cpu_rev_var_ls
bx lr
endfunc cpu_rev_var_ls
+/*
+ * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
+ * application purposes. If the revision-variant is higher than or equal to
+ * the given value, the errata applies; otherwise it does not.
+ */
+ .globl cpu_rev_var_hs
+func cpu_rev_var_hs
+ cmp r0, r1
+ movge r0, #ERRATA_APPLIES
+ movlt r0, #ERRATA_NOT_APPLIES
+ bx lr
+endfunc cpu_rev_var_hs
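
cpu_rev_var_hs mirrors cpu_rev_var_ls with the comparison reversed. In the
same illustrative C model as above:

/* cpu_rev_var_hs: the erratum applies to revisions >= the reference. */
static inline uint32_t rev_var_hs(uint32_t rev_var, uint32_t ref)
{
	return (rev_var >= ref) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
}

Cortex-A53 erratum 855873 passes 0x03 (r0p3) as the reference, so r0p4 and
r1p0 parts take the workaround in firmware while earlier revisions leave it
to the rich OS, as the comment above errata_a53_855873_wa explains.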
+
#if REPORT_ERRATA
/*
* void print_errata_status(void);
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 020f3a36..3e7a5b73 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -75,36 +75,44 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
if (security_state != SECURE)
scr |= SCR_NS_BIT;
- /*
- * Set up SCTLR for the Non Secure context.
- * EE bit is taken from the entrypoint attributes
- * M, C and I bits must be zero (as required by PSCI specification)
- *
- * The target exception level is based on the spsr mode requested.
- * If execution is requested to hyp mode, HVC is enabled
- * via SCR.HCE.
- *
- * Always compute the SCTLR_EL1 value and save in the cpu_context
- * - the HYP registers are set up by cm_preapre_ns_entry() as they
- * are not part of the stored cpu_context
- *
- * TODO: In debug builds the spsr should be validated and checked
- * against the CPU support, security state, endianness and pc
- */
if (security_state != SECURE) {
- sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
/*
- * In addition to SCTLR_RES1, set the CP15_BEN, nTWI & nTWE
- * bits that architecturally reset to 1.
+ * Set up SCTLR for the Non-secure context.
+ *
+ * SCTLR.EE: Endianness is taken from the entrypoint attributes.
+ *
+ * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
+ * required by PSCI specification)
+ *
+ * Set remaining SCTLR fields to their architecturally defined
+ * values. Some fields reset to an IMPLEMENTATION DEFINED value:
+ *
+ * SCTLR.TE: Set to zero so that exceptions to an Exception
+ * Level executing at PL1 are taken to A32 state.
+ *
+ * SCTLR.V: Set to zero to select the normal exception vectors
+ * with base address held in VBAR.
*/
- sctlr |= SCTLR_RES1 | SCTLR_CP15BEN_BIT |
- SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
+ assert(((ep->spsr >> SPSR_E_SHIFT) & SPSR_E_MASK) ==
+ (EP_GET_EE(ep->h.attr) >> EP_EE_SHIFT));
+
+ sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+ sctlr |= (SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_V_BIT));
write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
}
+ /*
+ * The target exception level is based on the spsr mode requested. If
+ * execution is requested to hyp mode, HVC is enabled via SCR.HCE.
+ */
if (GET_M32(ep->spsr) == MODE32_hyp)
scr |= SCR_HCE_BIT;
+ /*
+ * Store the initialised values for SCTLR and SCR in the cpu_context.
+ * The Hyp mode registers are not part of the saved context and are
+ * set up in cm_prepare_el3_exit().
+ */
write_ctx_reg(reg_ctx, CTX_SCR, scr);
write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);
@@ -151,7 +159,7 @@ void cm_init_my_context(const entry_point_info_t *ep)
******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
- uint32_t sctlr, scr, hcptr;
+ uint32_t hsctlr, scr;
cpu_context_t *ctx = cm_get_context(security_state);
assert(ctx);
@@ -160,9 +168,9 @@ void cm_prepare_el3_exit(uint32_t security_state)
scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
if (scr & SCR_HCE_BIT) {
/* Use SCTLR value to initialize HSCTLR */
- sctlr = read_ctx_reg(get_regs_ctx(ctx),
+ hsctlr = read_ctx_reg(get_regs_ctx(ctx),
CTX_NS_SCTLR);
- sctlr |= HSCTLR_RES1;
+ hsctlr |= HSCTLR_RES1;
/* Temporarily set the NS bit to access HSCTLR */
write_scr(read_scr() | SCR_NS_BIT);
/*
@@ -170,7 +178,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
* we can access HSCTLR
*/
isb();
- write_hsctlr(sctlr);
+ write_hsctlr(hsctlr);
isb();
write_scr(read_scr() & ~SCR_NS_BIT);
@@ -184,48 +192,92 @@ void cm_prepare_el3_exit(uint32_t security_state)
write_scr(read_scr() | SCR_NS_BIT);
isb();
- /* PL2 present but unused, need to disable safely */
- write_hcr(0);
-
- /* HSCTLR : can be ignored when bypassing */
+ /*
+ * Hyp / PL2 present but unused, need to disable safely.
+ * HSCTLR can be ignored in this case.
+ *
+ * Set HCR to its architectural reset value so that
+ * Non-secure operations do not trap to Hyp mode.
+ */
+ write_hcr(HCR_RESET_VAL);
- /* HCPTR : disable all traps TCPAC, TTA, TCP */
- hcptr = read_hcptr();
- hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
- write_hcptr(hcptr);
+ /*
+ * Set HCPTR to its architectural reset value so that
+ * Non-secure access from EL1 or EL0 to trace and to
+ * Advanced SIMD and floating point functionality does
+ * not trap to Hyp mode.
+ */
+ write_hcptr(HCPTR_RESET_VAL);
- /* Enable EL1 access to timer */
- write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);
+ /*
+ * Initialise CNTHCTL. All fields are architecturally
+ * UNKNOWN on reset and are set to zero except for
+ * field(s) listed below.
+ *
+ * CNTHCTL.PL1PCEN: Disable traps to Hyp mode of
+ * Non-secure EL0 and EL1 accesses to the physical
+ * timer registers.
+ *
+ * CNTHCTL.PL1PCTEN: Disable traps to Hyp mode of
+ * Non-secure EL0 and EL1 accesses to the physical
+ * counter registers.
+ */
+ write_cnthctl(CNTHCTL_RESET_VAL |
+ PL1PCEN_BIT | PL1PCTEN_BIT);
- /* Reset CNTVOFF_EL2 */
+ /*
+ * Initialise CNTVOFF to zero as it resets to an
+ * IMPLEMENTATION DEFINED value.
+ */
write64_cntvoff(0);
- /* Set VPIDR, VMPIDR to match MIDR, MPIDR */
+ /*
+ * Set VPIDR and VMPIDR to match MIDR_EL1 and MPIDR
+ * respectively.
+ */
write_vpidr(read_midr());
write_vmpidr(read_mpidr());
/*
- * Reset VTTBR.
- * Needed because cache maintenance operations depend on
- * the VMID even when non-secure EL1&0 stage 2 address
- * translation are disabled.
+ * Initialise VTTBR, setting all fields rather than
+ * relying on the hw. Some fields are architecturally
+ * UNKNOWN at reset.
+ *
+ * VTTBR.VMID: Set to zero which is the architecturally
+ * defined reset value. Even though EL1&0 stage 2
+ * address translation is disabled, cache maintenance
+ * operations depend on the VMID.
+ *
+ * VTTBR.BADDR: Set to zero as EL1&0 stage 2 address
+ * translation is disabled.
*/
- write64_vttbr(0);
+ write64_vttbr(VTTBR_RESET_VAL &
+ ~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
+ | (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));
/*
- * Avoid unexpected debug traps in case where HDCR
- * is not completely reset by the hardware - set
- * HDCR.HPMN to PMCR.N and zero the remaining bits.
- * The HDCR.HPMN and PMCR.N fields are the same size
- * (5 bits) and HPMN is at offset zero within HDCR.
+ * Initialise HDCR, setting all the fields rather than
+ * relying on hw.
+ *
+ * HDCR.HPMN: Set to value of PMCR.N which is the
+ * architecturally-defined reset value.
*/
- write_hdcr((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT);
+ write_hdcr(HDCR_RESET_VAL |
+ ((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT));
/*
- * Reset CNTHP_CTL to disable the EL2 physical timer and
- * therefore prevent timer interrupts.
+ * Set HSTR to its architectural reset value so that
+ * accesses to system registers in the coproc=1111
+ * encoding space do not trap to Hyp mode.
+ */
+ write_hstr(HSTR_RESET_VAL);
+ /*
+ * Set CNTHP_CTL to its architectural reset value to
+ * disable the EL2 physical timer and prevent timer
+ * interrupts. Some fields are architecturally UNKNOWN
+ * on reset and are set to zero.
*/
- write_cnthp_ctl(0);
+ write_cnthp_ctl(CNTHP_CTL_RESET_VAL);
isb();
write_scr(read_scr() & ~SCR_NS_BIT);
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index afe912ab..8a6c11b7 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,7 @@
#include <context.h>
.global el1_sysregs_context_save
+ .global el1_sysregs_context_save_post_ops
.global el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
.global fpregs_context_save
@@ -111,6 +112,36 @@ endfunc el1_sysregs_context_save
/* -----------------------------------------------------
* The following function strictly follows the AArch64
* PCS to use x9-x17 (temporary caller-saved registers)
+ * to do post operations after saving the EL1 system
+ * register context.
+ * -----------------------------------------------------
+ */
+func el1_sysregs_context_save_post_ops
+#if ENABLE_SPE_FOR_LOWER_ELS
+ /* Detect if SPE is implemented */
+ mrs x9, id_aa64dfr0_el1
+ ubfx x9, x9, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
+ cmp x9, #0x1
+ b.ne 1f
+
+ /*
+ * Before switching from normal world to secure world
+ * the profiling buffers need to be drained out to memory. This is
+ * required to avoid an invalid memory access when TTBR is switched
+ * for entry to SEL1.
+ */
+ .arch armv8.2-a+profile
+ psb csync
+ dsb nsh
+ .arch armv8-a
+1:
+#endif
+ ret
+endfunc el1_sysregs_context_save_post_ops
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
* to restore EL1 system register context. It assumes
* that 'x0' is pointing to a 'el1_sys_regs' structure
* from where the register context will be restored
@@ -343,7 +374,7 @@ func restore_gp_registers_callee_eret
ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
- ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
msr sp_el0, x17
ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
eret
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 0104c4ed..5257bf1c 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -71,77 +71,104 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
zeromem(ctx, sizeof(*ctx));
/*
- * Base the context SCR on the current value, adjust for entry point
- * specific requirements and set trap bits from the IMF
- * TODO: provide the base/global SCR bits using another mechanism?
+ * SCR_EL3 was initialised during reset sequence in macro
+ * el3_arch_init_common. This code modifies the SCR_EL3 fields that
+ * affect the next EL.
+ *
+ * The following fields are initially set to zero and then updated to
+ * the required value depending on the state of the SPSR_EL3 and the
+ * Security state and entrypoint attributes of the next EL.
*/
scr_el3 = read_scr();
scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
SCR_ST_BIT | SCR_HCE_BIT);
-
+ /*
+ * SCR_NS: Set the security state of the next EL.
+ */
if (security_state != SECURE)
scr_el3 |= SCR_NS_BIT;
-
+ /*
+ * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next
+ * Exception level as specified by SPSR.
+ */
if (GET_RW(ep->spsr) == MODE_RW_64)
scr_el3 |= SCR_RW_BIT;
-
+ /*
+ * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical
+ * Secure timer registers to EL3, from AArch64 state only, if specified
+ * by the entrypoint attributes.
+ */
if (EP_GET_ST(ep->h.attr))
scr_el3 |= SCR_ST_BIT;
#ifndef HANDLE_EA_EL3_FIRST
- /* Explicitly stop to trap aborts from lower exception levels. */
+ /*
+ * SCR_EL3.EA: Do not route External Abort and SError Interrupt
+ * exceptions to EL3 when executing at a lower EL. When executing at
+ * EL3, External Aborts are taken to EL3.
+ */
scr_el3 &= ~SCR_EA_BIT;
#endif
#ifdef IMAGE_BL31
/*
- * IRQ/FIQ bits only need setting if interrupt routing
- * model has been set up for BL31.
+ * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as
+ * indicated by the interrupt routing model for BL31.
*/
scr_el3 |= get_scr_el3_from_routing_model(security_state);
#endif
/*
- * Set up SCTLR_ELx for the target exception level:
- * EE bit is taken from the entrypoint attributes
- * M, C and I bits must be zero (as required by PSCI specification)
- *
- * The target exception level is based on the spsr mode requested.
- * If execution is requested to EL2 or hyp mode, HVC is enabled
- * via SCR_EL3.HCE.
+ * SCR_EL3.HCE: Enable HVC instructions if next execution state is
+ * AArch64 and next EL is EL2, or if next execution state is AArch32 and
+ * next mode is Hyp.
+ */
+ if ((GET_RW(ep->spsr) == MODE_RW_64
+ && GET_EL(ep->spsr) == MODE_EL2)
+ || (GET_RW(ep->spsr) != MODE_RW_64
+ && GET_M32(ep->spsr) == MODE32_hyp)) {
+ scr_el3 |= SCR_HCE_BIT;
+ }
+
+ /*
+ * Initialise SCTLR_EL1 to the reset value corresponding to the target
+ * execution state, setting all fields rather than relying on the hw.
+ * Some fields have architecturally UNKNOWN reset values and these are
+ * set to zero.
*
- * Always compute the SCTLR_EL1 value and save in the cpu_context
- * - the EL2 registers are set up by cm_preapre_ns_entry() as they
- * are not part of the stored cpu_context
+ * SCTLR.EE: Endianness is taken from the entrypoint attributes.
*
- * TODO: In debug builds the spsr should be validated and checked
- * against the CPU support, security state, endianess and pc
+ * SCTLR.M, SCTLR.C and SCTLR.I: These fields must be zero (as
+ * required by PSCI specification)
*/
sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
if (GET_RW(ep->spsr) == MODE_RW_64)
sctlr_elx |= SCTLR_EL1_RES1;
else {
- sctlr_elx |= SCTLR_AARCH32_EL1_RES1;
/*
- * If lower non-secure EL is AArch32, enable the CP15BEN, nTWI
- * & nTWI bits. This aligns with SCTLR initialization on
- * systems with an AArch32 EL3, where these bits
- * architecturally reset to 1.
+ * If the target execution state is AArch32 then the following
+ * fields need to be set.
+ *
+ * SCTLR_EL1.nTWE: Set to one so that EL0 execution of WFE
+ * instructions is not trapped to EL1.
+ *
+ * SCTLR_EL1.nTWI: Set to one so that EL0 execution of WFI
+ * instructions is not trapped to EL1.
+ *
+ * SCTLR_EL1.CP15BEN: Set to one to enable EL0 execution of the
+ * CP15DMB, CP15DSB, and CP15ISB instructions.
*/
- if (security_state != SECURE)
- sctlr_elx |= SCTLR_CP15BEN_BIT | SCTLR_NTWI_BIT
- | SCTLR_NTWE_BIT;
+ sctlr_elx |= SCTLR_AARCH32_EL1_RES1 | SCTLR_CP15BEN_BIT
+ | SCTLR_NTWI_BIT | SCTLR_NTWE_BIT;
}
+ /*
+ * Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
+ * and other EL2 registers are set up by cm_prepare_el3_exit() as they
+ * are not part of the stored cpu_context.
+ */
write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
- if ((GET_RW(ep->spsr) == MODE_RW_64
- && GET_EL(ep->spsr) == MODE_EL2)
- || (GET_RW(ep->spsr) != MODE_RW_64
- && GET_M32(ep->spsr) == MODE32_hyp)) {
- scr_el3 |= SCR_HCE_BIT;
- }
-
/* Populate EL3 state so that we've the right context before doing ERET */
state = get_el3state_ctx(ctx);
write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
@@ -191,7 +218,7 @@ void cm_init_my_context(const entry_point_info_t *ep)
******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
- uint32_t sctlr_elx, scr_el3, cptr_el2;
+ uint32_t sctlr_elx, scr_el3, mdcr_el2;
cpu_context_t *ctx = cm_get_context(security_state);
assert(ctx);
@@ -206,57 +233,167 @@ void cm_prepare_el3_exit(uint32_t security_state)
sctlr_elx |= SCTLR_EL2_RES1;
write_sctlr_el2(sctlr_elx);
} else if (EL_IMPLEMENTED(2)) {
- /* EL2 present but unused, need to disable safely */
-
- /* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */
+ /*
+ * EL2 present but unused, need to disable safely.
+ * SCTLR_EL2 can be ignored in this case.
+ *
+ * Initialise all fields in HCR_EL2, except HCR_EL2.RW,
+ * to zero so that Non-secure operations do not trap to
+ * EL2.
+ *
+ * HCR_EL2.RW: Set this field to match SCR_EL3.RW
+ */
write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0);
- /* SCTLR_EL2 : can be ignored when bypassing */
-
- /* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */
- cptr_el2 = read_cptr_el2();
- cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT);
- write_cptr_el2(cptr_el2);
+ /*
+ * Initialise CPTR_EL2 setting all fields rather than
+ * relying on the hw. All fields have architecturally
+ * UNKNOWN reset values.
+ *
+ * CPTR_EL2.TCPAC: Set to zero so that Non-secure EL1
+ * accesses to the CPACR_EL1 or CPACR from both
+ * Execution states do not trap to EL2.
+ *
+ * CPTR_EL2.TTA: Set to zero so that Non-secure System
+ * register accesses to the trace registers from both
+ * Execution states do not trap to EL2.
+ *
+ * CPTR_EL2.TFP: Set to zero so that Non-secure accesses
+ * to SIMD and floating-point functionality from both
+ * Execution states do not trap to EL2.
+ */
+ write_cptr_el2(CPTR_EL2_RESET_VAL &
+ ~(CPTR_EL2_TCPAC_BIT | CPTR_EL2_TTA_BIT
+ | CPTR_EL2_TFP_BIT));
- /* Enable EL1 access to timer */
- write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT);
+ /*
+ * Initialise CNTHCTL_EL2. All fields are
+ * architecturally UNKNOWN on reset and are set to zero
+ * except for field(s) listed below.
+ *
+ * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to
+ * Hyp mode of Non-secure EL0 and EL1 accesses to the
+ * physical timer registers.
+ *
+ * CNTHCTL_EL2.EL1PCTEN: Set to one to disable traps to
+ * Hyp mode of Non-secure EL0 and EL1 accesses to the
+ * physical counter registers.
+ */
+ write_cnthctl_el2(CNTHCTL_RESET_VAL |
+ EL1PCEN_BIT | EL1PCTEN_BIT);
- /* Reset CNTVOFF_EL2 */
+ /*
+ * Initialise CNTVOFF_EL2 to zero as it resets to an
+ * architecturally UNKNOWN value.
+ */
write_cntvoff_el2(0);
- /* Set VPIDR, VMPIDR to match MIDR, MPIDR */
+ /*
+ * Set VPIDR_EL2 and VMPIDR_EL2 to match MIDR_EL1 and
+ * MPIDR_EL1 respectively.
+ */
write_vpidr_el2(read_midr_el1());
write_vmpidr_el2(read_mpidr_el1());
/*
- * Reset VTTBR_EL2.
- * Needed because cache maintenance operations depend on
- * the VMID even when non-secure EL1&0 stage 2 address
- * translation are disabled.
+ * Initialise VTTBR_EL2. All fields are architecturally
+ * UNKNOWN on reset.
+ *
+ * VTTBR_EL2.VMID: Set to zero. Even though EL1&0 stage
+ * 2 address translation is disabled, cache maintenance
+ * operations depend on the VMID.
+ *
+ * VTTBR_EL2.BADDR: Set to zero as EL1&0 stage 2 address
+ * translation is disabled.
*/
- write_vttbr_el2(0);
+ write_vttbr_el2(VTTBR_RESET_VAL &
+ ~((VTTBR_VMID_MASK << VTTBR_VMID_SHIFT)
+ | (VTTBR_BADDR_MASK << VTTBR_BADDR_SHIFT)));
+
/*
- * Avoid unexpected debug traps in case where MDCR_EL2
- * is not completely reset by the hardware - set
- * MDCR_EL2.HPMN to PMCR_EL0.N and zero the remaining
- * bits.
- * MDCR_EL2.HPMN and PMCR_EL0.N fields are the same size
- * (5 bits) and HPMN is at offset zero within MDCR_EL2.
+ * Initialise MDCR_EL2, setting all fields rather than
+ * relying on hw. Some fields are architecturally
+ * UNKNOWN on reset.
+ *
+ * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
+ * profiling controls to EL2.
+ *
+ * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in non-secure
+ * state. Accesses to profiling buffer controls at
+ * non-secure EL1 are not trapped to EL2.
+ *
+ * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
+ * EL1 System register accesses to the Debug ROM
+ * registers are not trapped to EL2.
+ *
+ * MDCR_EL2.TDOSA: Set to zero so that Non-secure EL1
+ * System register accesses to the powerdown debug
+ * registers are not trapped to EL2.
+ *
+ * MDCR_EL2.TDA: Set to zero so that System register
+ * accesses to the debug registers do not trap to EL2.
+ *
+ * MDCR_EL2.TDE: Set to zero so that debug exceptions
+ * are not routed to EL2.
+ *
+ * MDCR_EL2.HPME: Set to zero to disable EL2 Performance
+ * Monitors.
+ *
+ * MDCR_EL2.TPM: Set to zero so that Non-secure EL0 and
+ * EL1 accesses to all Performance Monitors registers
+ * are not trapped to EL2.
+ *
+ * MDCR_EL2.TPMCR: Set to zero so that Non-secure EL0
+ * and EL1 accesses to the PMCR_EL0 or PMCR are not
+ * trapped to EL2.
+ *
+ * MDCR_EL2.HPMN: Set to value of PMCR_EL0.N which is the
+ * architecturally-defined reset value.
*/
- write_mdcr_el2((read_pmcr_el0() & PMCR_EL0_N_BITS)
- >> PMCR_EL0_N_SHIFT);
+ mdcr_el2 = ((MDCR_EL2_RESET_VAL |
+ ((read_pmcr_el0() & PMCR_EL0_N_BITS)
+ >> PMCR_EL0_N_SHIFT)) &
+ ~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT
+ | MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT
+ | MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT
+ | MDCR_EL2_TPMCR_BIT));
+
+#if ENABLE_SPE_FOR_LOWER_ELS
+ uint64_t id_aa64dfr0_el1;
+
+ /* Detect if SPE is implemented */
+ id_aa64dfr0_el1 = read_id_aa64dfr0_el1() >>
+ ID_AA64DFR0_PMS_SHIFT;
+ if ((id_aa64dfr0_el1 & ID_AA64DFR0_PMS_MASK) == 1) {
+ /*
+ * Make sure traps to EL2 are not generated if
+ * EL2 is implemented but not used.
+ */
+ mdcr_el2 &= ~MDCR_EL2_TPMS;
+ mdcr_el2 |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
+ }
+#endif
+
+ write_mdcr_el2(mdcr_el2);
+
/*
- * Avoid unexpected traps of non-secure access to
- * certain system registers at EL1 or lower where
- * HSTR_EL2 is not completely reset to zero by the
- * hardware - zero the entire register.
+ * Initialise HSTR_EL2. All fields are architecturally
+ * UNKNOWN on reset.
+ *
+ * HSTR_EL2.T<n>: Set all these fields to zero so that
+ * Non-secure EL0 or EL1 accesses to System registers
+ * do not trap to EL2.
*/
- write_hstr_el2(0);
+ write_hstr_el2(HSTR_EL2_RESET_VAL & ~(HSTR_EL2_T_MASK));
/*
- * Reset CNTHP_CTL_EL2 to disable the EL2 physical timer
- * and therefore prevent timer interrupts.
+ * Initialise CNTHP_CTL_EL2. All fields are
+ * architecturally UNKNOWN on reset.
+ *
+ * CNTHP_CTL_EL2.ENABLE: Set to zero to disable the EL2
+ * physical timer and prevent timer interrupts.
*/
- write_cnthp_ctl_el2(0);
+ write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
+ ~(CNTHP_CTL_ENABLE_BIT));
}
}
@@ -278,6 +415,7 @@ void cm_el1_sysregs_context_save(uint32_t security_state)
assert(ctx);
el1_sysregs_context_save(get_sysregs_ctx(ctx));
+ el1_sysregs_context_save_post_ops();
}
void cm_el1_sysregs_context_restore(uint32_t security_state)
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
index 3c9051c3..9c156240 100644
--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -149,7 +149,7 @@ void enable_mmu_secure(unsigned int flags)
* and translation register writes are committed
* before enabling the MMU
*/
- dsb();
+ dsbish();
isb();
sctlr = read_sctlr();
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index afc65e7d..40fd2d0b 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -141,7 +141,7 @@ void enable_mmu_internal_secure(unsigned int flags, uint64_t *base_table)
* and translation register writes are committed
* before enabling the MMU
*/
- dsb();
+ dsbish();
isb();
sctlr = read_sctlr();
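
Both xlat changes replace a full-system dsb() (DSB SY) with dsbish() (DSB
ISH): the translation-table and CP15 writes only need to be observed within
the Inner Shareable domain before the MMU is enabled. A sketch of the
resulting ordering, assuming the dsbish()/isb() helpers from arch_helpers.h:

#include <arch_helpers.h>

/* Commit translation-table and CP15 writes before enabling the MMU. */
static void commit_mmu_setup(void)
{
	dsbish();	/* order prior writes within the Inner Shareable domain */
	isb();		/* synchronise the pipeline before SCTLR is written */
}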