Diffstat (limited to 'lib')
-rw-r--r--  lib/cpus/aarch64/cortex_a65.S            81
-rw-r--r--  lib/cpus/aarch64/cortex_a65ae.S          81
-rw-r--r--  lib/cpus/aarch64/cortex_hercules_ae.S   100
-rw-r--r--  lib/el3_runtime/aarch32/context_mgmt.c   26
-rw-r--r--  lib/extensions/pauth/pauth_helpers.S     28
5 files changed, 310 insertions, 6 deletions
diff --git a/lib/cpus/aarch64/cortex_a65.S b/lib/cpus/aarch64/cortex_a65.S
new file mode 100644
index 00000000..666324c1
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a65.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a65.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if !HW_ASSISTED_COHERENCY
+#error "Cortex-A65 must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS
+#error "Cortex-A65 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+/* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A65.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_a65_reset_func
+ mov x19, x30
+
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+
+ ret x19
+endfunc cortex_a65_reset_func
+
+func cortex_a65_cpu_pwr_dwn
+ mrs x0, CORTEX_A65_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A65_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_A65_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a65_cpu_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A65. Must follow AAPCS.
+ */
+func cortex_a65_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_DSU_936184, cortex_a65, dsu_936184
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a65_errata_report
+#endif
+
+.section .rodata.cortex_a65_regs, "aS"
+cortex_a65_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a65_cpu_reg_dump
+ adr x6, cortex_a65_regs
+ mrs x8, CORTEX_A65_ECTLR_EL1
+ ret
+endfunc cortex_a65_cpu_reg_dump
+
+declare_cpu_ops cortex_a65, CORTEX_A65_MIDR, \
+ cortex_a65_reset_func, \
+ cortex_a65_cpu_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a65ae.S b/lib/cpus/aarch64/cortex_a65ae.S
new file mode 100644
index 00000000..ac6583eb
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a65ae.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <cortex_a65ae.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if !HW_ASSISTED_COHERENCY
+#error "Cortex-A65AE must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+/* 64-bit only core */
+#if CTX_INCLUDE_AARCH32_REGS
+#error "Cortex-A65AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
+#endif
+
+/* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A65AE.
+ * Shall clobber: x0-x19
+ * -------------------------------------------------
+ */
+func cortex_a65ae_reset_func
+ mov x19, x30
+
+#if ERRATA_DSU_936184
+ bl errata_dsu_936184_wa
+#endif
+
+ ret x19
+endfunc cortex_a65ae_reset_func
+
+func cortex_a65ae_cpu_pwr_dwn
+ mrs x0, CORTEX_A65AE_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_A65AE_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+ msr CORTEX_A65AE_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_a65ae_cpu_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A65AE. Must follow AAPCS.
+ */
+func cortex_a65ae_errata_report
+ stp x8, x30, [sp, #-16]!
+
+ bl cpu_get_rev_var
+ mov x8, x0
+
+ /*
+ * Report all errata. The revision-variant information is passed to
+ * checking functions of each errata.
+ */
+ report_errata ERRATA_DSU_936184, cortex_a65ae, dsu_936184
+
+ ldp x8, x30, [sp], #16
+ ret
+endfunc cortex_a65ae_errata_report
+#endif
+
+.section .rodata.cortex_a65ae_regs, "aS"
+cortex_a65ae_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_a65ae_cpu_reg_dump
+ adr x6, cortex_a65ae_regs
+ mrs x8, CORTEX_A65AE_ECTLR_EL1
+ ret
+endfunc cortex_a65ae_cpu_reg_dump
+
+declare_cpu_ops cortex_a65ae, CORTEX_A65AE_MIDR, \
+ cortex_a65ae_reset_func, \
+ cortex_a65ae_cpu_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_hercules_ae.S b/lib/cpus/aarch64/cortex_hercules_ae.S
new file mode 100644
index 00000000..c4a21635
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_hercules_ae.S
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2019, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_hercules_ae.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "cortex_hercules_ae must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-Hercules-AE
+ * -------------------------------------------------
+ */
+#if ENABLE_AMU
+func cortex_hercules_ae_reset_func
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ bic x0, x0, #CORTEX_HERCULES_ACTLR_TAM_BIT
+ msr actlr_el3, x0
+
+ /* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ bic x0, x0, #CORTEX_HERCULES_ACTLR_TAM_BIT
+ msr actlr_el2, x0
+
+ /* Enable group0 counters */
+ mov x0, #CORTEX_HERCULES_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET0_EL0, x0
+
+ /* Enable group1 counters */
+ mov x0, #CORTEX_HERCULES_AMU_GROUP1_MASK
+ msr CPUAMCNTENSET1_EL0, x0
+ isb
+
+ ret
+endfunc cortex_hercules_ae_reset_func
+#endif
+
+ /* -------------------------------------------------------
+ * HW will do the cache maintenance while powering down
+ * -------------------------------------------------------
+ */
+func cortex_hercules_ae_core_pwr_dwn
+ /* -------------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * -------------------------------------------------------
+ */
+ mrs x0, CORTEX_HERCULES_CPUPWRCTLR_EL1
+ orr x0, x0, #CORTEX_HERCULES_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ msr CORTEX_HERCULES_CPUPWRCTLR_EL1, x0
+ isb
+ ret
+endfunc cortex_hercules_ae_core_pwr_dwn
+
+ /*
+ * Errata printing function for cortex_hercules_ae. Must follow AAPCS.
+ */
+#if REPORT_ERRATA
+func cortex_hercules_ae_errata_report
+ ret
+endfunc cortex_hercules_ae_errata_report
+#endif
+
+ /* -------------------------------------------------------
+ * This function provides cortex_hercules_ae specific
+ * register information for crash reporting.
+ * It needs to return with x6 pointing to
+ * a list of register names in ascii and
+ * x8 - x15 having values of registers to be
+ * reported.
+ * -------------------------------------------------------
+ */
+.section .rodata.cortex_hercules_ae_regs, "aS"
+cortex_hercules_ae_regs: /* The ascii list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_hercules_ae_cpu_reg_dump
+ adr x6, cortex_hercules_ae_regs
+ mrs x8, CORTEX_HERCULES_CPUECTLR_EL1
+ ret
+endfunc cortex_hercules_ae_cpu_reg_dump
+
+#if ENABLE_AMU
+#define HERCULES_AE_RESET_FUNC cortex_hercules_ae_reset_func
+#else
+#define HERCULES_AE_RESET_FUNC CPU_NO_RESET_FUNC
+#endif
+
+declare_cpu_ops cortex_hercules_ae, CORTEX_HERCULES_AE_MIDR, \
+ HERCULES_AE_RESET_FUNC, \
+ cortex_hercules_ae_core_pwr_dwn
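The crash-reporting hook above follows the same contract as the other new CPU files: x6 points at a list of NUL-terminated register names ended by an empty string, and x8-x15 carry the corresponding values. As a purely illustrative aid (not TF-A's actual crash-report consumer), a minimal C sketch of walking such a name list could look like this:

#include <stdio.h>
#include <string.h>

/*
 * Walk a register-name list with the layout produced by the .asciz
 * directives above: NUL-terminated strings, ended by an empty string.
 */
static void print_reg_names(const char *names)
{
        while (*names != '\0') {
                printf("%s\n", names);
                names += strlen(names) + 1;     /* step over the trailing NUL */
        }
}

int main(void)
{
        /* Same byte layout as cortex_hercules_ae_regs: "cpuectlr_el1\0\0" */
        static const char regs[] = "cpuectlr_el1\0";

        print_reg_names(regs);
        return 0;
}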
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index a4702fcc..73d1e354 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -281,10 +281,28 @@ void cm_prepare_el3_exit(uint32_t security_state)
*
* HDCR.HPMN: Set to value of PMCR.N which is the
* architecturally-defined reset value.
+ *
+ * HDCR.HLP: Set to one so that event counter
+ * overflow, which is recorded in PMOVSCLR[0-30],
+ * occurs on the increment that changes
+ * PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU is
+ * implemented. This bit is RES0 in versions of the
+ * architecture earlier than ARMv8.5; setting it to 1
+ * has no effect on them.
+ * This bit is Reserved, UNK/SBZP in ARMv7.
+ *
+ * HDCR.HPME: Set to zero to disable EL2 Event
+ * counters.
*/
- write_hdcr(HDCR_RESET_VAL |
- ((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT));
-
+#if (ARM_ARCH_MAJOR > 7)
+ write_hdcr((HDCR_RESET_VAL | HDCR_HLP_BIT |
+ ((read_pmcr() & PMCR_N_BITS) >>
+ PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
+#else
+ write_hdcr((HDCR_RESET_VAL |
+ ((read_pmcr() & PMCR_N_BITS) >>
+ PMCR_N_SHIFT)) & ~HDCR_HPME_BIT);
+#endif
/*
* Set HSTR to its architectural reset value so that
* access to system registers in the coproc=1111
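For reference, the ARMv8 branch above combines three requirements in one write: keep the architectural reset value plus HLP, copy PMCR.N into HPMN, and clear HPME last so EL2 event counting stays disabled. The sketch below restates that ordering in plain C; the bit positions are assumptions for illustration, not values taken from the TF-A headers:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-ins; the real definitions live in the TF-A AArch32
 * architecture headers. Only the shape of the computation matters here.
 */
#define HDCR_RESET_VAL  0x0u
#define HDCR_HLP_BIT    (1u << 26)      /* assumed position, mirrors MDCR_EL2.HLP */
#define HDCR_HPME_BIT   (1u << 7)       /* assumed position, mirrors MDCR_EL2.HPME */
#define PMCR_N_SHIFT    11u
#define PMCR_N_BITS     (0x1fu << PMCR_N_SHIFT)

static uint32_t hdcr_value(uint32_t pmcr)
{
        /* 1. Start from the architectural reset value and set HLP. */
        uint32_t hdcr = HDCR_RESET_VAL | HDCR_HLP_BIT;

        /* 2. Copy PMCR.N into HDCR.HPMN (the low bits of HDCR). */
        hdcr |= (pmcr & PMCR_N_BITS) >> PMCR_N_SHIFT;

        /* 3. Clear HPME last so EL2 event counters stay disabled even if
         *    the reset value had the bit set. */
        return hdcr & ~HDCR_HPME_BIT;
}

int main(void)
{
        /* Sample PMCR with N = 6 counters, purely for demonstration. */
        printf("HDCR = 0x%08x\n", (unsigned int)hdcr_value(6u << PMCR_N_SHIFT));
        return 0;
}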
diff --git a/lib/extensions/pauth/pauth_helpers.S b/lib/extensions/pauth/pauth_helpers.S
index c6808de5..d483c7df 100644
--- a/lib/extensions/pauth/pauth_helpers.S
+++ b/lib/extensions/pauth/pauth_helpers.S
@@ -13,6 +13,7 @@
.global pauth_init_enable_el3
.global pauth_disable_el3
.globl pauth_load_bl31_apiakey
+ .globl pauth_load_bl1_apiakey_enable
/* -------------------------------------------------------------
* Program APIAKey_EL1 and enable pointer authentication in EL1
@@ -97,9 +98,9 @@ func pauth_disable_el3
endfunc pauth_disable_el3
/* -------------------------------------------------------------
- * The following function strictly follows the AArch64 PCS
+ * The following functions strictly follow the AArch64 PCS
* to use x9-x17 (temporary caller-saved registers) to load
- * the APIAKey_EL1 used by the firmware.
+ * the APIAKey_EL1 and enable pointer authentication.
* -------------------------------------------------------------
*/
func pauth_load_bl31_apiakey
@@ -115,3 +116,26 @@ func pauth_load_bl31_apiakey
isb
ret
endfunc pauth_load_bl31_apiakey
+
+func pauth_load_bl1_apiakey_enable
+ /* Load instruction key A used by the Trusted Firmware */
+ adrp x9, bl1_apiakey
+ add x9, x9, :lo12:bl1_apiakey
+ ldp x10, x11, [x9]
+
+ /* Program instruction key A */
+ msr APIAKeyLo_EL1, x10
+ msr APIAKeyHi_EL1, x11
+
+ /* Enable pointer authentication */
+ mrs x9, sctlr_el3
+ orr x9, x9, #SCTLR_EnIA_BIT
+
+#if ENABLE_BTI
+ /* Enable PAC branch type compatibility */
+ bic x9, x9, #SCTLR_BT_BIT
+#endif
+ msr sctlr_el3, x9
+ isb
+ ret
+endfunc pauth_load_bl1_apiakey_enable
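Since pauth_load_bl1_apiakey_enable follows the AArch64 PCS and only uses caller-saved registers, BL1's C code can call it directly once the key storage is filled in. The sketch below shows one way that could look; bl1_apiakey and pauth_load_bl1_apiakey_enable come from this patch, while the key-source hook is hypothetical and named here only for illustration:

#include <stdint.h>

/*
 * 128-bit APIAKey storage read by pauth_load_bl1_apiakey_enable via
 * adrp/ldp, so it must be a 16-byte object with external linkage.
 */
uint64_t bl1_apiakey[2];

/* Assembly helper added by this patch. */
void pauth_load_bl1_apiakey_enable(void);

/*
 * Hypothetical platform hook returning key material; not part of the
 * patch, named here only for illustration.
 */
extern void platform_get_apiakey(uint64_t key[2]);

void bl1_enable_pauth(void)
{
        platform_get_apiakey(bl1_apiakey);

        /*
         * Programs APIAKeyLo/Hi_EL1, sets SCTLR_EL3.EnIA and, when
         * ENABLE_BTI is set, clears SCTLR_EL3.BT, as done in the helper
         * above.
         */
        pauth_load_bl1_apiakey_enable();
}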