-rw-r--r--  Makefile                                      64
-rw-r--r--  bl1/bl1.mk                                     4
-rw-r--r--  bl2/aarch64/bl2_el3_entrypoint.S               8
-rw-r--r--  bl2/aarch64/bl2_entrypoint.S                   7
-rw-r--r--  bl2/bl2.mk                                     4
-rw-r--r--  bl31/aarch64/bl31_entrypoint.S                14
-rw-r--r--  bl31/bl31.mk                                   4
-rw-r--r--  bl32/tsp/aarch64/tsp_entrypoint.S             10
-rw-r--r--  bl32/tsp/tsp.mk                                5
-rw-r--r--  docs/design/firmware-design.rst                8
-rw-r--r--  docs/getting_started/user-guide.rst           52
-rw-r--r--  include/arch/aarch64/arch.h                   12
-rw-r--r--  include/arch/aarch64/arch_features.h           6
-rw-r--r--  include/arch/aarch64/asm_macros.S             28
-rw-r--r--  include/common/asm_macros_common.S             8
-rw-r--r--  include/lib/xlat_tables/xlat_tables_defs.h     7
-rw-r--r--  lib/aarch64/cache_helpers.S                    8
-rw-r--r--  lib/cpus/aarch64/cpuamu_helpers.S             46
-rw-r--r--  lib/extensions/amu/aarch64/amu_helpers.S     244
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_core.c         15
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_utils.c         9
-rw-r--r--  make_helpers/defaults.mk                      12
22 files changed, 326 insertions(+), 249 deletions(-)
diff --git a/Makefile b/Makefile
index 976f514d..1078ef53 100644
--- a/Makefile
+++ b/Makefile
@@ -117,6 +117,29 @@ ifneq (${GENERATE_COT},0)
FWU_FIP_DEPS += fwu_certificates
endif
+# Process BRANCH_PROTECTION value and set
+# Pointer Authentication and Branch Target Identification flags
+ifeq (${BRANCH_PROTECTION},0)
+ # Default value turns off all types of branch protection
+ BP_OPTION := none
+else ifneq (${ARCH},aarch64)
+ $(error BRANCH_PROTECTION requires AArch64)
+else ifeq (${BRANCH_PROTECTION},1)
+ # Enables all types of branch protection features
+ BP_OPTION := standard
+ ENABLE_BTI := 1
+ ENABLE_PAUTH := 1
+else ifeq (${BRANCH_PROTECTION},2)
+ # Return address signing to its standard level
+ BP_OPTION := pac-ret
+ ENABLE_PAUTH := 1
+else ifeq (${BRANCH_PROTECTION},3)
+ # Extend the signing to include leaf functions
+ BP_OPTION := pac-ret+leaf
+ ENABLE_PAUTH := 1
+else
+ $(error Unknown BRANCH_PROTECTION value ${BRANCH_PROTECTION})
+endif
################################################################################
# Toolchain
@@ -189,6 +212,10 @@ endif
TF_CFLAGS_aarch32 += -mno-unaligned-access
TF_CFLAGS_aarch64 += -mgeneral-regs-only -mstrict-align
+ifneq (${BP_OPTION},none)
+TF_CFLAGS_aarch64 += -mbranch-protection=${BP_OPTION}
+endif
+
ASFLAGS_aarch32 = $(march32-directive)
ASFLAGS_aarch64 = $(march64-directive)
@@ -451,26 +478,30 @@ ifeq ($(DYN_DISABLE_AUTH), 1)
endif
# If pointer authentication is used in the firmware, make sure that all the
-# registers associated to it are also saved and restored. Not doing it would
-# leak the value of the key used by EL3 to EL1 and S-EL1.
+# registers associated to it are also saved and restored.
+# Not doing it would leak the value of the keys used by EL3 to EL1 and S-EL1.
ifeq ($(ENABLE_PAUTH),1)
- ifneq ($(ARCH),aarch64)
- $(error ENABLE_PAUTH=1 requires AArch64)
- else ifeq ($(CTX_INCLUDE_PAUTH_REGS),0)
- $(error ENABLE_PAUTH=1 requires CTX_INCLUDE_PAUTH_REGS=1)
- else
- $(info ENABLE_PAUTH and CTX_INCLUDE_PAUTH_REGS are experimental features)
+ ifeq ($(CTX_INCLUDE_PAUTH_REGS),0)
+ $(error Pointer Authentication requires CTX_INCLUDE_PAUTH_REGS=1)
endif
-else
- ifeq ($(CTX_INCLUDE_PAUTH_REGS),1)
- ifneq ($(ARCH),aarch64)
- $(error CTX_INCLUDE_PAUTH_REGS=1 requires AArch64)
- else
- $(info CTX_INCLUDE_PAUTH_REGS is an experimental feature)
- endif
+endif
+
+ifeq ($(CTX_INCLUDE_PAUTH_REGS),1)
+ ifneq (${ARCH},aarch64)
+ $(error CTX_INCLUDE_PAUTH_REGS requires AArch64)
+ else
+ $(info CTX_INCLUDE_PAUTH_REGS is an experimental feature)
endif
endif
+ifeq ($(ENABLE_PAUTH),1)
+ $(info Pointer Authentication is an experimental feature)
+endif
+
+ifeq ($(ENABLE_BTI),1)
+ $(info Branch Protection is an experimental feature)
+endif
+
################################################################################
# Process platform overrideable behaviour
################################################################################
@@ -599,7 +630,6 @@ $(eval $(call assert_boolean,EL3_EXCEPTION_HANDLING))
$(eval $(call assert_boolean,ENABLE_AMU))
$(eval $(call assert_boolean,ENABLE_ASSERTIONS))
$(eval $(call assert_boolean,ENABLE_MPAM_FOR_LOWER_ELS))
-$(eval $(call assert_boolean,ENABLE_PAUTH))
$(eval $(call assert_boolean,ENABLE_PIE))
$(eval $(call assert_boolean,ENABLE_PMF))
$(eval $(call assert_boolean,ENABLE_PSCI_STAT))
@@ -635,6 +665,7 @@ $(eval $(call assert_boolean,BL2_IN_XIP_MEM))
$(eval $(call assert_numeric,ARM_ARCH_MAJOR))
$(eval $(call assert_numeric,ARM_ARCH_MINOR))
+$(eval $(call assert_numeric,BRANCH_PROTECTION))
################################################################################
# Add definitions to the cpp preprocessor based on the current build options.
@@ -651,6 +682,7 @@ $(eval $(call add_define,CTX_INCLUDE_PAUTH_REGS))
$(eval $(call add_define,EL3_EXCEPTION_HANDLING))
$(eval $(call add_define,ENABLE_AMU))
$(eval $(call add_define,ENABLE_ASSERTIONS))
+$(eval $(call add_define,ENABLE_BTI))
$(eval $(call add_define,ENABLE_MPAM_FOR_LOWER_ELS))
$(eval $(call add_define,ENABLE_PAUTH))
$(eval $(call add_define,ENABLE_PIE))
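
For illustration only, not part of the patch: a typical build invocation that exercises the new option. The platform choice is an assumption; any AArch64 platform and a toolchain that supports ``-mbranch-protection`` (GCC 9 or later) would do, and ARM_ARCH_MINOR=5 satisfies the BTI architecture check added to asm_macros.S later in this patch.

    # Hypothetical example: build TF-A with both PAuth and BTI enabled.
    # BRANCH_PROTECTION=1 selects ENABLE_PAUTH=1 and ENABLE_BTI=1, and
    # ENABLE_PAUTH in turn requires CTX_INCLUDE_PAUTH_REGS=1.
    make PLAT=fvp ARCH=aarch64 ARM_ARCH_MINOR=5 \
         BRANCH_PROTECTION=1 CTX_INCLUDE_PAUTH_REGS=1 all
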
diff --git a/bl1/bl1.mk b/bl1/bl1.mk
index 7f1a8230..b8399907 100644
--- a/bl1/bl1.mk
+++ b/bl1/bl1.mk
@@ -21,10 +21,6 @@ BL1_SOURCES += lib/cpus/aarch64/dsu_helpers.S \
lib/el3_runtime/aarch64/context.S
endif
-ifeq (${ENABLE_PAUTH},1)
-BL1_CFLAGS += -msign-return-address=non-leaf
-endif
-
ifeq (${TRUSTED_BOARD_BOOT},1)
BL1_SOURCES += bl1/bl1_fwu.c
endif
diff --git a/bl2/aarch64/bl2_el3_entrypoint.S b/bl2/aarch64/bl2_el3_entrypoint.S
index d1e42471..261d2957 100644
--- a/bl2/aarch64/bl2_el3_entrypoint.S
+++ b/bl2/aarch64/bl2_el3_entrypoint.S
@@ -10,7 +10,6 @@
#include <el3_common_macros.S>
.globl bl2_entrypoint
- .globl bl2_vector_table
.globl bl2_el3_run_image
.globl bl2_run_next_image
@@ -51,6 +50,13 @@ func bl2_entrypoint
#if ENABLE_PAUTH
mrs x0, sctlr_el3
orr x0, x0, #SCTLR_EnIA_BIT
+#if ENABLE_BTI
+ /* ---------------------------------------------
+ * Enable PAC branch type compatibility
+ * ---------------------------------------------
+ */
+ bic x0, x0, #SCTLR_BT_BIT
+#endif /* ENABLE_BTI */
msr sctlr_el3, x0
isb
#endif /* ENABLE_PAUTH */
diff --git a/bl2/aarch64/bl2_entrypoint.S b/bl2/aarch64/bl2_entrypoint.S
index c820cd13..5e5b83b1 100644
--- a/bl2/aarch64/bl2_entrypoint.S
+++ b/bl2/aarch64/bl2_entrypoint.S
@@ -124,6 +124,13 @@ func bl2_entrypoint
#if ENABLE_PAUTH
mrs x0, sctlr_el1
orr x0, x0, #SCTLR_EnIA_BIT
+#if ENABLE_BTI
+ /* ---------------------------------------------
+ * Enable PAC branch type compatibility
+ * ---------------------------------------------
+ */
+ bic x0, x0, #(SCTLR_BT0_BIT | SCTLR_BT1_BIT)
+#endif /* ENABLE_BTI */
msr sctlr_el1, x0
isb
#endif /* ENABLE_PAUTH */
diff --git a/bl2/bl2.mk b/bl2/bl2.mk
index 9523918b..6dc0f182 100644
--- a/bl2/bl2.mk
+++ b/bl2/bl2.mk
@@ -15,10 +15,6 @@ ifeq (${ARCH},aarch64)
BL2_SOURCES += common/aarch64/early_exceptions.S
endif
-ifeq (${ENABLE_PAUTH},1)
-BL2_CFLAGS += -msign-return-address=non-leaf
-endif
-
ifeq (${BL2_AT_EL3},0)
BL2_SOURCES += bl2/${ARCH}/bl2_entrypoint.S
BL2_LINKERFILE := bl2/bl2.ld.S
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index f24458c1..e7ad5a89 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -105,6 +105,13 @@ func bl31_entrypoint
#if ENABLE_PAUTH
mrs x0, sctlr_el3
orr x0, x0, #SCTLR_EnIA_BIT
+#if ENABLE_BTI
+ /* --------------------------------------------------------------------
+ * Enable PAC branch type compatibility
+ * --------------------------------------------------------------------
+ */
+ bic x0, x0, #SCTLR_BT_BIT
+#endif /* ENABLE_BTI */
msr sctlr_el3, x0
isb
#endif /* ENABLE_PAUTH */
@@ -211,6 +218,13 @@ func bl31_warm_entrypoint
mrs x0, sctlr_el3
orr x0, x0, #SCTLR_EnIA_BIT
+#if ENABLE_BTI
+ /* --------------------------------------------------------------------
+ * Enable PAC branch type compatibility
+ * --------------------------------------------------------------------
+ */
+ bic x0, x0, #SCTLR_BT_BIT
+#endif /* ENABLE_BTI */
msr sctlr_el3, x0
isb
#endif /* ENABLE_PAUTH */
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 10feae16..c9ba926c 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -75,10 +75,6 @@ ifeq (${ENABLE_MPAM_FOR_LOWER_ELS},1)
BL31_SOURCES += lib/extensions/mpam/mpam.c
endif
-ifeq (${ENABLE_PAUTH},1)
-BL31_CFLAGS += -msign-return-address=non-leaf
-endif
-
ifeq (${WORKAROUND_CVE_2017_5715},1)
BL31_SOURCES += lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S \
lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index cd08ce7d..fd6b0fbc 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -136,6 +136,13 @@ func tsp_entrypoint _align=3
#if ENABLE_PAUTH
mrs x0, sctlr_el1
orr x0, x0, #SCTLR_EnIA_BIT
+#if ENABLE_BTI
+ /* ---------------------------------------------
+ * Enable PAC branch type compatibility
+ * ---------------------------------------------
+ */
+ bic x0, x0, #(SCTLR_BT0_BIT | SCTLR_BT1_BIT)
+#endif /* ENABLE_BTI */
msr sctlr_el1, x0
isb
#endif /* ENABLE_PAUTH */
@@ -164,7 +171,7 @@ endfunc tsp_entrypoint
* TSPD for the various entrypoints
* -------------------------------------------
*/
-func tsp_vector_table
+vector_base tsp_vector_table
b tsp_yield_smc_entry
b tsp_fast_smc_entry
b tsp_cpu_on_entry
@@ -175,7 +182,6 @@ func tsp_vector_table
b tsp_system_off_entry
b tsp_system_reset_entry
b tsp_abort_yield_smc_entry
-endfunc tsp_vector_table
/*---------------------------------------------
* This entrypoint is used by the TSPD when this
diff --git a/bl32/tsp/tsp.mk b/bl32/tsp/tsp.mk
index b1fe7ff6..3fd6d995 100644
--- a/bl32/tsp/tsp.mk
+++ b/bl32/tsp/tsp.mk
@@ -17,11 +17,6 @@ BL32_SOURCES += bl32/tsp/tsp_main.c \
BL32_LINKERFILE := bl32/tsp/tsp.ld.S
-# This flag determines whether pointer authentication is used in the TSP or not
-ifeq ($(ENABLE_PAUTH),1)
-BL32_CFLAGS += -msign-return-address=non-leaf
-endif
-
# This flag determines if the TSPD initializes BL32 in tspd_init() (synchronous
# method) or configures BL31 to pass control to BL32 instead of BL33
# (asynchronous method).
diff --git a/docs/design/firmware-design.rst b/docs/design/firmware-design.rst
index 21b82346..e4e2bc1d 100644
--- a/docs/design/firmware-design.rst
+++ b/docs/design/firmware-design.rst
@@ -2564,7 +2564,7 @@ Armv8.3-A
to the context that is saved when doing a world switch.
The TF-A itself has support for pointer authentication at runtime
- that can be enabled by setting both options ``ENABLE_PAUTH`` and
+ that can be enabled by setting the ``BRANCH_PROTECTION`` option to a non-zero value and
``CTX_INCLUDE_PAUTH_REGS`` to 1. This enables pointer authentication in BL1,
BL2, BL31, and the TSP if it is used.
@@ -2577,6 +2577,12 @@ Armv8.3-A
enabling PAuth is lower because the compiler will use the optimized
PAuth instructions rather than the backwards-compatible ones.
+Armv8.5-A
+~~~~~~~~~
+
+- The Branch Target Identification feature is enabled by setting the ``BRANCH_PROTECTION``
+ option to 1. This option defaults to 0 and is an experimental feature.
+
Armv7-A
~~~~~~~
diff --git a/docs/getting_started/user-guide.rst b/docs/getting_started/user-guide.rst
index 60654644..db365488 100644
--- a/docs/getting_started/user-guide.rst
+++ b/docs/getting_started/user-guide.rst
@@ -315,6 +315,34 @@ Common build options
file that contains the BL33 private key in PEM format. If ``SAVE_KEYS=1``,
this file name will be used to save the key.
+- ``BRANCH_PROTECTION``: Numeric value to enable ARMv8.3 Pointer Authentication
+ and ARMv8.5 Branch Target Identification support for TF-A BL images themselves.
+ When enabled, the compiler in use must support the ``-mbranch-protection``
+ option. Selects the branch protection features to use:
+  - 0: Default value, turns off all types of branch protection
+  - 1: Enables all types of branch protection features
+  - 2: Return address signing to its standard level
+  - 3: Extend the signing to include leaf functions
+
+ The table below summarizes ``BRANCH_PROTECTION`` values, GCC compilation options
+ and resulting PAuth/BTI features.
+
+ +-------+--------------+-------+-----+
+ | Value | GCC option | PAuth | BTI |
+ +=======+==============+=======+=====+
+ | 0 | none | N | N |
+ +-------+--------------+-------+-----+
+ | 1 | standard | Y | Y |
+ +-------+--------------+-------+-----+
+ | 2 | pac-ret | Y | N |
+ +-------+--------------+-------+-----+
+ | 3 | pac-ret+leaf | Y | N |
+ +-------+--------------+-------+-----+
+
+ This option defaults to 0 and is an experimental feature.
+ Note that Pointer Authentication is enabled for Non-secure world
+ irrespective of the value of this option if the CPU supports it.
+
- ``BUILD_MESSAGE_TIMESTAMP``: String used to identify the time and date of the
compilation of each build. It must be set to a C string (including quotes
where applicable). Defaults to a string that contains the time and date of
@@ -354,17 +382,12 @@ Common build options
registers to be included when saving and restoring the CPU context. Default
is 0.
-- ``CTX_INCLUDE_PAUTH_REGS``: Boolean option that, when set to 1, allows
- Pointer Authentication for **Secure world**. This will cause the
- Armv8.3-PAuth registers to be included when saving and restoring the CPU
- context as part of a world switch. Default value is 0. Pointer Authentication
- is an experimental feature.
-
- Note that, if the CPU supports it, Pointer Authentication is allowed for
- Non-secure world irrespectively of the value of this flag. "Allowed" means
- that accesses to PAuth-related registers or execution of PAuth-related
- instructions will not be trapped to EL3. As such, usage or not of PAuth in
- Non-secure world images, depends on those images themselves.
+- ``CTX_INCLUDE_PAUTH_REGS``: Boolean option that, when set to 1, enables
+ Pointer Authentication for Secure world. This will cause the ARMv8.3-PAuth
+ registers to be included when saving and restoring the CPU context as
+ part of a world switch. Default value is 0 and this is an experimental feature.
+ Note that Pointer Authentication is enabled for Non-secure world irrespective
+ of the value of this flag if the CPU supports it.
- ``DEBUG``: Chooses between a debug and release build. It can take either 0
(release) or 1 (debug) as values. 0 is the default.
@@ -417,13 +440,6 @@ Common build options
partitioning in EL3, however. Platform initialisation code should configure
and use partitions in EL3 as required. This option defaults to ``0``.
-- ``ENABLE_PAUTH``: Boolean option to enable Armv8.3 Pointer Authentication
- for **TF-A BL images themselves**. If enabled, the compiler must support the
- ``-msign-return-address`` option. This flag defaults to 0. Pointer
- Authentication is an experimental feature.
-
- If this flag is enabled, ``CTX_INCLUDE_PAUTH_REGS`` must also be enabled.
-
- ``ENABLE_PIE``: Boolean option to enable Position Independent Executable(PIE)
support within generic code in TF-A. This option is currently only supported
in BL31. Default is 0.
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index d15851d8..d23d89e3 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -163,16 +163,12 @@
/* ID_AA64ISAR1_EL1 definitions */
#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
#define ID_AA64ISAR1_GPI_SHIFT U(28)
-#define ID_AA64ISAR1_GPI_WIDTH U(4)
#define ID_AA64ISAR1_GPI_MASK ULL(0xf)
#define ID_AA64ISAR1_GPA_SHIFT U(24)
-#define ID_AA64ISAR1_GPA_WIDTH U(4)
#define ID_AA64ISAR1_GPA_MASK ULL(0xf)
#define ID_AA64ISAR1_API_SHIFT U(8)
-#define ID_AA64ISAR1_API_WIDTH U(4)
#define ID_AA64ISAR1_API_MASK ULL(0xf)
#define ID_AA64ISAR1_APA_SHIFT U(4)
-#define ID_AA64ISAR1_APA_WIDTH U(4)
#define ID_AA64ISAR1_APA_MASK ULL(0xf)
/* ID_AA64MMFR0_EL1 definitions */
@@ -217,6 +213,11 @@
#define SSBS_UNAVAILABLE ULL(0) /* No architectural SSBS support */
+#define ID_AA64PFR1_EL1_BT_SHIFT U(0)
+#define ID_AA64PFR1_EL1_BT_MASK ULL(0xf)
+
+#define BTI_IMPLEMENTED ULL(1) /* The BTI mechanism is implemented */
+
/* ID_PFR1_EL1 definitions */
#define ID_PFR1_VIRTEXT_SHIFT U(12)
#define ID_PFR1_VIRTEXT_MASK U(0xf)
@@ -260,6 +261,9 @@
#define SCTLR_EE_BIT (ULL(1) << 25)
#define SCTLR_UCI_BIT (ULL(1) << 26)
#define SCTLR_EnIA_BIT (ULL(1) << 31)
+#define SCTLR_BT0_BIT (ULL(1) << 35)
+#define SCTLR_BT1_BIT (ULL(1) << 36)
+#define SCTLR_BT_BIT (ULL(1) << 36)
#define SCTLR_DSSBS_BIT (ULL(1) << 44)
#define SCTLR_RESET_VAL SCTLR_EL3_RES1
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index 6af1d039..1129b8e4 100644
--- a/include/arch/aarch64/arch_features.h
+++ b/include/arch/aarch64/arch_features.h
@@ -48,4 +48,10 @@ static inline bool is_armv8_4_ttst_present(void)
ID_AA64MMFR2_EL1_ST_MASK) == 1U;
}
+static inline bool is_armv8_5_bti_present(void)
+{
+ return ((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_BT_SHIFT) &
+ ID_AA64PFR1_EL1_BT_MASK) == BTI_IMPLEMENTED;
+}
+
#endif /* ARCH_FEATURES_H */
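
A short usage sketch of the new helper, for illustration only; the wrapper function and log messages are hypothetical, but the same check is what gates the GP bit in the xlat_tables_v2 hunk further down.

    #include <arch_features.h>
    #include <common/debug.h>

    /* Hypothetical illustration: ENABLE_BTI only controls how TF-A itself is
     * built, so code that depends on guarded pages still probes the PE for
     * FEAT_BTI at runtime via ID_AA64PFR1_EL1. */
    static void report_bti_support(void)
    {
    	if (is_armv8_5_bti_present()) {
    		INFO("BTI is implemented by this PE\n");
    	} else {
    		INFO("BTI not implemented; GP bit will not be set\n");
    	}
    }
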
diff --git a/include/arch/aarch64/asm_macros.S b/include/arch/aarch64/asm_macros.S
index 9b121855..79e0ad7f 100644
--- a/include/arch/aarch64/asm_macros.S
+++ b/include/arch/aarch64/asm_macros.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,10 @@
#include <common/asm_macros_common.S>
#include <lib/spinlock.h>
+#if ENABLE_BTI && !ARM_ARCH_AT_LEAST(8, 5)
+#error Branch Target Identification requires ARM_ARCH_MINOR >= 5
+#endif
+
/*
* TLBI instruction with type specifier that implements the workaround for
* errata 813419 of Cortex-A57 or errata 1286807 of Cortex-A76.
@@ -192,4 +196,26 @@
.endm
#endif
+ /*
+ * Helper macro to read system register value into x0
+ */
+ .macro read reg:req
+#if ENABLE_BTI
+ bti j
+#endif
+ mrs x0, \reg
+ ret
+ .endm
+
+ /*
+ * Helper macro to write value from x1 to system register
+ */
+ .macro write reg:req
+#if ENABLE_BTI
+ bti j
+#endif
+ msr \reg, x1
+ ret
+ .endm
+
#endif /* ASM_MACROS_S */
diff --git a/include/common/asm_macros_common.S b/include/common/asm_macros_common.S
index 09742af1..fd0ea81d 100644
--- a/include/common/asm_macros_common.S
+++ b/include/common/asm_macros_common.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -38,6 +38,12 @@
.cfi_startproc
.align \_align
\_name:
+#if ENABLE_BTI
+ /* When Branch Target Identification is enabled, insert "bti jc"
+ * instruction to enable indirect calls and branches
+ */
+ bti jc
+#endif
.endm
/*
diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h
index f9bbe0f6..000811f3 100644
--- a/include/lib/xlat_tables/xlat_tables_defs.h
+++ b/include/lib/xlat_tables/xlat_tables_defs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -62,6 +62,11 @@
#define OSH (U(0x2) << 6)
#define ISH (U(0x3) << 6)
+#ifdef AARCH64
+/* Guarded Page bit */
+#define GP (ULL(1) << 50)
+#endif
+
#define TABLE_ADDR_MASK ULL(0x0000FFFFFFFFF000)
/*
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index 9c40b9db..9ef8ca79 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -91,6 +91,9 @@ func do_dcsw_op
cbz x3, exit
adr x14, dcsw_loop_table // compute inner loop address
add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
+#if ENABLE_BTI
+ add x14, x14, x0, lsl #2 // inner loop is + "bti j" instruction
+#endif
mov x0, x9
mov w8, #1
loop1:
@@ -116,6 +119,9 @@ loop1:
br x14 // jump to DC operation specific loop
.macro dcsw_loop _op
+#if ENABLE_BTI
+ bti j
+#endif
loop2_\_op:
lsl w7, w6, w2 // w7 = aligned max set number
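
The computed-branch pattern above sizes each jump-table entry at assembly time: every dcsw_loop expansion is eight 4-byte instructions (hence lsl #5), and the optional "bti j" landing pad adds one more instruction (hence the extra lsl #2). The cpuamu and AMU helpers below use the same trick with 8-byte mrs/msr + ret entries. A C sketch of the address calculation, purely illustrative:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative only: target address inside a jump table whose entries are
     * fixed-size instruction sequences.  With BTI enabled each entry starts
     * with a 4-byte "bti j" landing pad, so the stride grows by 4 bytes. */
    static uintptr_t jump_table_target(uintptr_t table, unsigned int idx,
                                       size_t entry_bytes, bool bti)
    {
    	size_t stride = entry_bytes + (bti ? 4U : 0U);

    	/* cache_helpers.S:     entry_bytes == 32 (8 instructions) */
    	/* cpuamu/amu helpers:  entry_bytes == 8  (mrs/msr + ret)  */
    	return table + ((uintptr_t)idx * stride);
    }
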
diff --git a/lib/cpus/aarch64/cpuamu_helpers.S b/lib/cpus/aarch64/cpuamu_helpers.S
index 79b72883..5a77fc7b 100644
--- a/lib/cpus/aarch64/cpuamu_helpers.S
+++ b/lib/cpus/aarch64/cpuamu_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -23,21 +23,17 @@
*/
func cpuamu_cnt_read
adr x1, 1f
- lsl x0, x0, #3
- add x1, x1, x0
+ add x1, x1, x0, lsl #3 /* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x1, x1, x0, lsl #2 /* + "bti j" instruction */
+#endif
br x1
-1:
- mrs x0, CPUAMEVCNTR0_EL0
- ret
- mrs x0, CPUAMEVCNTR1_EL0
- ret
- mrs x0, CPUAMEVCNTR2_EL0
- ret
- mrs x0, CPUAMEVCNTR3_EL0
- ret
- mrs x0, CPUAMEVCNTR4_EL0
- ret
+1: read CPUAMEVCNTR0_EL0
+ read CPUAMEVCNTR1_EL0
+ read CPUAMEVCNTR2_EL0
+ read CPUAMEVCNTR3_EL0
+ read CPUAMEVCNTR4_EL0
endfunc cpuamu_cnt_read
/*
@@ -47,21 +43,17 @@ endfunc cpuamu_cnt_read
*/
func cpuamu_cnt_write
adr x2, 1f
- lsl x0, x0, #3
- add x2, x2, x0
+ add x2, x2, x0, lsl #3 /* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x2, x2, x0, lsl #2 /* + "bti j" instruction */
+#endif
br x2
-1:
- msr CPUAMEVCNTR0_EL0, x0
- ret
- msr CPUAMEVCNTR1_EL0, x0
- ret
- msr CPUAMEVCNTR2_EL0, x0
- ret
- msr CPUAMEVCNTR3_EL0, x0
- ret
- msr CPUAMEVCNTR4_EL0, x0
- ret
+1: write CPUAMEVCNTR0_EL0
+ write CPUAMEVCNTR1_EL0
+ write CPUAMEVCNTR2_EL0
+ write CPUAMEVCNTR3_EL0
+ write CPUAMEVCNTR4_EL0
endfunc cpuamu_cnt_write
/*
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
index e0b1f564..89007a3f 100644
--- a/lib/extensions/amu/aarch64/amu_helpers.S
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,35 +21,29 @@
* and return it in `x0`.
*/
func amu_group0_cnt_read_internal
+ adr x1, 1f
#if ENABLE_ASSERTIONS
/*
* It can be dangerous to call this function with an
* out of bounds index. Ensure `idx` is valid.
*/
- mov x1, x0
- lsr x1, x1, #2
- cmp x1, #0
+ tst x0, #~3
ASM_ASSERT(eq)
#endif
-
/*
* Given `idx` calculate address of mrs/ret instruction pair
* in the table below.
*/
- adr x1, 1f
- lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
- add x1, x1, x0
+ add x1, x1, x0, lsl #3 /* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x1, x1, x0, lsl #2 /* + "bti j" instruction */
+#endif
br x1
-1:
- mrs x0, AMEVCNTR00_EL0 /* index 0 */
- ret
- mrs x0, AMEVCNTR01_EL0 /* index 1 */
- ret
- mrs x0, AMEVCNTR02_EL0 /* index 2 */
- ret
- mrs x0, AMEVCNTR03_EL0 /* index 3 */
- ret
+1: read AMEVCNTR00_EL0 /* index 0 */
+ read AMEVCNTR01_EL0 /* index 1 */
+ read AMEVCNTR02_EL0 /* index 2 */
+ read AMEVCNTR03_EL0 /* index 3 */
endfunc amu_group0_cnt_read_internal
/*
@@ -58,35 +52,29 @@ endfunc amu_group0_cnt_read_internal
* Given `idx`, write `val` to the corresponding AMU counter.
*/
func amu_group0_cnt_write_internal
+ adr x2, 1f
#if ENABLE_ASSERTIONS
/*
* It can be dangerous to call this function with an
* out of bounds index. Ensure `idx` is valid.
*/
- mov x2, x0
- lsr x2, x2, #2
- cmp x2, #0
+ tst x0, #~3
ASM_ASSERT(eq)
#endif
-
/*
* Given `idx` calculate address of mrs/ret instruction pair
* in the table below.
*/
- adr x2, 1f
- lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
- add x2, x2, x0
+ add x2, x2, x0, lsl #3 /* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x2, x2, x0, lsl #2 /* + "bti j" instruction */
+#endif
br x2
-1:
- msr AMEVCNTR00_EL0, x1 /* index 0 */
- ret
- msr AMEVCNTR01_EL0, x1 /* index 1 */
- ret
- msr AMEVCNTR02_EL0, x1 /* index 2 */
- ret
- msr AMEVCNTR03_EL0, x1 /* index 3 */
- ret
+1: write AMEVCNTR00_EL0 /* index 0 */
+ write AMEVCNTR01_EL0 /* index 1 */
+ write AMEVCNTR02_EL0 /* index 2 */
+ write AMEVCNTR03_EL0 /* index 3 */
endfunc amu_group0_cnt_write_internal
/*
@@ -96,59 +84,41 @@ endfunc amu_group0_cnt_write_internal
* and return it in `x0`.
*/
func amu_group1_cnt_read_internal
+ adr x1, 1f
#if ENABLE_ASSERTIONS
/*
* It can be dangerous to call this function with an
* out of bounds index. Ensure `idx` is valid.
*/
- mov x1, x0
- lsr x1, x1, #4
- cmp x1, #0
+ tst x0, #~0xF
ASM_ASSERT(eq)
#endif
-
/*
* Given `idx` calculate address of mrs/ret instruction pair
* in the table below.
*/
- adr x1, 1f
- lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
- add x1, x1, x0
+ add x1, x1, x0, lsl #3 /* each mrs/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x1, x1, x0, lsl #2 /* + "bti j" instruction */
+#endif
br x1
-1:
- mrs x0, AMEVCNTR10_EL0 /* index 0 */
- ret
- mrs x0, AMEVCNTR11_EL0 /* index 1 */
- ret
- mrs x0, AMEVCNTR12_EL0 /* index 2 */
- ret
- mrs x0, AMEVCNTR13_EL0 /* index 3 */
- ret
- mrs x0, AMEVCNTR14_EL0 /* index 4 */
- ret
- mrs x0, AMEVCNTR15_EL0 /* index 5 */
- ret
- mrs x0, AMEVCNTR16_EL0 /* index 6 */
- ret
- mrs x0, AMEVCNTR17_EL0 /* index 7 */
- ret
- mrs x0, AMEVCNTR18_EL0 /* index 8 */
- ret
- mrs x0, AMEVCNTR19_EL0 /* index 9 */
- ret
- mrs x0, AMEVCNTR1A_EL0 /* index 10 */
- ret
- mrs x0, AMEVCNTR1B_EL0 /* index 11 */
- ret
- mrs x0, AMEVCNTR1C_EL0 /* index 12 */
- ret
- mrs x0, AMEVCNTR1D_EL0 /* index 13 */
- ret
- mrs x0, AMEVCNTR1E_EL0 /* index 14 */
- ret
- mrs x0, AMEVCNTR1F_EL0 /* index 15 */
- ret
+1: read AMEVCNTR10_EL0 /* index 0 */
+ read AMEVCNTR11_EL0 /* index 1 */
+ read AMEVCNTR12_EL0 /* index 2 */
+ read AMEVCNTR13_EL0 /* index 3 */
+ read AMEVCNTR14_EL0 /* index 4 */
+ read AMEVCNTR15_EL0 /* index 5 */
+ read AMEVCNTR16_EL0 /* index 6 */
+ read AMEVCNTR17_EL0 /* index 7 */
+ read AMEVCNTR18_EL0 /* index 8 */
+ read AMEVCNTR19_EL0 /* index 9 */
+ read AMEVCNTR1A_EL0 /* index 10 */
+ read AMEVCNTR1B_EL0 /* index 11 */
+ read AMEVCNTR1C_EL0 /* index 12 */
+ read AMEVCNTR1D_EL0 /* index 13 */
+ read AMEVCNTR1E_EL0 /* index 14 */
+ read AMEVCNTR1F_EL0 /* index 15 */
endfunc amu_group1_cnt_read_internal
/*
@@ -157,59 +127,41 @@ endfunc amu_group1_cnt_read_internal
* Given `idx`, write `val` to the corresponding AMU counter.
*/
func amu_group1_cnt_write_internal
+ adr x2, 1f
#if ENABLE_ASSERTIONS
/*
* It can be dangerous to call this function with an
* out of bounds index. Ensure `idx` is valid.
*/
- mov x2, x0
- lsr x2, x2, #4
- cmp x2, #0
+ tst x0, #~0xF
ASM_ASSERT(eq)
#endif
-
/*
* Given `idx` calculate address of mrs/ret instruction pair
* in the table below.
*/
- adr x2, 1f
- lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
- add x2, x2, x0
+ add x2, x2, x0, lsl #3 /* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x2, x2, x0, lsl #2 /* + "bti j" instruction */
+#endif
br x2
-1:
- msr AMEVCNTR10_EL0, x1 /* index 0 */
- ret
- msr AMEVCNTR11_EL0, x1 /* index 1 */
- ret
- msr AMEVCNTR12_EL0, x1 /* index 2 */
- ret
- msr AMEVCNTR13_EL0, x1 /* index 3 */
- ret
- msr AMEVCNTR14_EL0, x1 /* index 4 */
- ret
- msr AMEVCNTR15_EL0, x1 /* index 5 */
- ret
- msr AMEVCNTR16_EL0, x1 /* index 6 */
- ret
- msr AMEVCNTR17_EL0, x1 /* index 7 */
- ret
- msr AMEVCNTR18_EL0, x1 /* index 8 */
- ret
- msr AMEVCNTR19_EL0, x1 /* index 9 */
- ret
- msr AMEVCNTR1A_EL0, x1 /* index 10 */
- ret
- msr AMEVCNTR1B_EL0, x1 /* index 11 */
- ret
- msr AMEVCNTR1C_EL0, x1 /* index 12 */
- ret
- msr AMEVCNTR1D_EL0, x1 /* index 13 */
- ret
- msr AMEVCNTR1E_EL0, x1 /* index 14 */
- ret
- msr AMEVCNTR1F_EL0, x1 /* index 15 */
- ret
+1: write AMEVCNTR10_EL0 /* index 0 */
+ write AMEVCNTR11_EL0 /* index 1 */
+ write AMEVCNTR12_EL0 /* index 2 */
+ write AMEVCNTR13_EL0 /* index 3 */
+ write AMEVCNTR14_EL0 /* index 4 */
+ write AMEVCNTR15_EL0 /* index 5 */
+ write AMEVCNTR16_EL0 /* index 6 */
+ write AMEVCNTR17_EL0 /* index 7 */
+ write AMEVCNTR18_EL0 /* index 8 */
+ write AMEVCNTR19_EL0 /* index 9 */
+ write AMEVCNTR1A_EL0 /* index 10 */
+ write AMEVCNTR1B_EL0 /* index 11 */
+ write AMEVCNTR1C_EL0 /* index 12 */
+ write AMEVCNTR1D_EL0 /* index 13 */
+ write AMEVCNTR1E_EL0 /* index 14 */
+ write AMEVCNTR1F_EL0 /* index 15 */
endfunc amu_group1_cnt_write_internal
/*
@@ -219,63 +171,43 @@ endfunc amu_group1_cnt_write_internal
* with the value `val`.
*/
func amu_group1_set_evtype_internal
+ adr x2, 1f
#if ENABLE_ASSERTIONS
/*
* It can be dangerous to call this function with an
* out of bounds index. Ensure `idx` is valid.
*/
- mov x2, x0
- lsr x2, x2, #4
- cmp x2, #0
+ tst x0, #~0xF
ASM_ASSERT(eq)
/* val should be between [0, 65535] */
- mov x2, x1
- lsr x2, x2, #16
- cmp x2, #0
+ tst x1, #~0xFFFF
ASM_ASSERT(eq)
#endif
-
/*
* Given `idx` calculate address of msr/ret instruction pair
* in the table below.
*/
- adr x2, 1f
- lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
- add x2, x2, x0
+ add x2, x2, x0, lsl #3 /* each msr/ret sequence is 8 bytes */
+#if ENABLE_BTI
+ add x2, x2, x0, lsl #2 /* + "bti j" instruction */
+#endif
br x2
-1:
- msr AMEVTYPER10_EL0, x1 /* index 0 */
- ret
- msr AMEVTYPER11_EL0, x1 /* index 1 */
- ret
- msr AMEVTYPER12_EL0, x1 /* index 2 */
- ret
- msr AMEVTYPER13_EL0, x1 /* index 3 */
- ret
- msr AMEVTYPER14_EL0, x1 /* index 4 */
- ret
- msr AMEVTYPER15_EL0, x1 /* index 5 */
- ret
- msr AMEVTYPER16_EL0, x1 /* index 6 */
- ret
- msr AMEVTYPER17_EL0, x1 /* index 7 */
- ret
- msr AMEVTYPER18_EL0, x1 /* index 8 */
- ret
- msr AMEVTYPER19_EL0, x1 /* index 9 */
- ret
- msr AMEVTYPER1A_EL0, x1 /* index 10 */
- ret
- msr AMEVTYPER1B_EL0, x1 /* index 11 */
- ret
- msr AMEVTYPER1C_EL0, x1 /* index 12 */
- ret
- msr AMEVTYPER1D_EL0, x1 /* index 13 */
- ret
- msr AMEVTYPER1E_EL0, x1 /* index 14 */
- ret
- msr AMEVTYPER1F_EL0, x1 /* index 15 */
- ret
+1: write AMEVTYPER10_EL0 /* index 0 */
+ write AMEVTYPER11_EL0 /* index 1 */
+ write AMEVTYPER12_EL0 /* index 2 */
+ write AMEVTYPER13_EL0 /* index 3 */
+ write AMEVTYPER14_EL0 /* index 4 */
+ write AMEVTYPER15_EL0 /* index 5 */
+ write AMEVTYPER16_EL0 /* index 6 */
+ write AMEVTYPER17_EL0 /* index 7 */
+ write AMEVTYPER18_EL0 /* index 8 */
+ write AMEVTYPER19_EL0 /* index 9 */
+ write AMEVTYPER1A_EL0 /* index 10 */
+ write AMEVTYPER1B_EL0 /* index 11 */
+ write AMEVTYPER1C_EL0 /* index 12 */
+ write AMEVTYPER1D_EL0 /* index 13 */
+ write AMEVTYPER1E_EL0 /* index 14 */
+ write AMEVTYPER1F_EL0 /* index 15 */
endfunc amu_group1_set_evtype_internal
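
The reworked assertions above fold the old shift-and-compare sequences into a single tst against the complement of the valid range (group 0 counters use #~3, i.e. indices 0..3). In C terms the group 1 checks amount to the following sketch, not code from the tree:

    #include <assert.h>
    #include <stdint.h>

    /* Equivalent of "tst x0, #~0xF" / "tst x1, #~0xFFFF" followed by
     * ASM_ASSERT(eq): the flags are set from the AND with the inverted
     * mask, so "eq" holds only when no bit outside the range is set. */
    static void check_group1_evtype_args(unsigned int idx, uint64_t val)
    {
    	assert((idx & ~0xFU) == 0U);        /* idx in 0..15    */
    	assert((val & ~0xFFFFULL) == 0ULL); /* val in 0..65535 */
    }
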
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 0e6a6fa8..4f62f469 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,6 +12,7 @@
#include <platform_def.h>
+#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/utils_def.h>
@@ -195,6 +196,18 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
if (mem_type == MT_MEMORY) {
desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
+
+ /* Check if Branch Target Identification is enabled */
+#if ENABLE_BTI
+ /* Set GP bit for block and page code entries
+ * if BTI mechanism is implemented.
+ */
+ if (is_armv8_5_bti_present() &&
+ ((attr & (MT_TYPE_MASK | MT_RW |
+ MT_EXECUTE_NEVER)) == MT_CODE)) {
+ desc |= GP;
+ }
+#endif
} else {
assert(mem_type == MT_NON_CACHEABLE);
desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
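
For context, not part of the patch: any region a platform maps as code (read-only, executable normal memory) now picks up the Guarded Page bit in its block and page descriptors whenever the PE implements BTI. A hedged sketch using the existing xlat_tables_v2 API; BL31_BASE and BL31_LIMIT stand in for whatever the platform actually defines:

    #include <lib/xlat_tables/xlat_tables_v2.h>

    /* Illustrative mapping: MT_CODE (normal memory, read-only, executable)
     * matches the (MT_TYPE_MASK | MT_RW | MT_EXECUTE_NEVER) == MT_CODE test
     * above, so this region's descriptors would carry GP on a BTI-capable
     * CPU, and indirect branches into it must land on BTI instructions. */
    static const mmap_region_t code_region =
    	MAP_REGION_FLAT(BL31_BASE, BL31_LIMIT - BL31_BASE,
    			MT_CODE | MT_SECURE);
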
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index f5848a25..761d00c3 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -96,6 +96,13 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
}
printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
+
+#ifdef AARCH64
+ /* Check Guarded Page bit */
+ if ((desc & GP) != 0ULL) {
+ printf("-GP");
+ }
+#endif
}
static const char * const level_spacers[] = {
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index dc797ed1..6becf808 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -33,6 +33,9 @@ BL2_AT_EL3 := 0
# when BL2_AT_EL3 is 1.
BL2_IN_XIP_MEM := 0
+# Select the branch protection features to use.
+BRANCH_PROTECTION := 0
+
# By default, consider that the platform may release several CPUs out of reset.
# The platform Makefile is free to override this value.
COLD_BOOT_SINGLE_CPU := 0
@@ -90,7 +93,14 @@ ENABLE_STACK_PROTECTOR := 0
# Flag to enable exception handling in EL3
EL3_EXCEPTION_HANDLING := 0
-# Flag to enable Pointer Authentication
+# Flag to enable Branch Target Identification.
+# Internal flag not meant for direct setting.
+# Use BRANCH_PROTECTION to enable BTI.
+ENABLE_BTI := 0
+
+# Flag to enable Pointer Authentication.
+# Internal flag not meant for direct setting.
+# Use BRANCH_PROTECTION to enable PAUTH.
ENABLE_PAUTH := 0
# Build flag to treat usage of deprecated platform and framework APIs as error.