author     Julius Werner <jwerner@chromium.org>  2019-07-09 14:02:43 -0700
committer  Julius Werner <jwerner@chromium.org>  2019-08-01 13:45:03 -0700
commit     402b3cf8766fe2cb4ae462f7ee7761d08a1ba56c (patch)
tree       bf3de0c17a38822188847b7bdaad7f70441637b0 /lib
parent     d5dfdeb65ff5b7f24dded201d2945c7b74565ce8 (diff)
Switch AARCH32/AARCH64 to __aarch64__
NOTE: AARCH32/AARCH64 macros are now deprecated in favor of __aarch64__.

All common C compilers pre-define the same macros to signal which
architecture the code is being compiled for: __arm__ for AArch32 (or
earlier versions) and __aarch64__ for AArch64. There's no need for TF-A
to define its own custom macros for this. In order to unify code with
the export headers (which use __aarch64__ to avoid another dependency),
let's deprecate the AARCH32 and AARCH64 macros and switch the code base
over to the pre-defined standard macro. (Since it is somewhat
unintuitive that __arm__ only means AArch32, let's standardize on only
using __aarch64__.)

Change-Id: Ic77de4b052297d77f38fc95f95f65a8ee70cf200
Signed-off-by: Julius Werner <jwerner@chromium.org>
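For illustration only (this snippet is not part of the patch): a minimal
standalone C sketch of the pre-defined architecture macros the commit
message relies on, as provided by GCC and Clang.

/*
 * Illustration only -- not from this commit. GCC and Clang pre-define
 * __aarch64__ when targeting AArch64 and __arm__ when targeting 32-bit
 * Arm, which is what lets TF-A drop its custom AARCH32/AARCH64 macros.
 */
#include <stdio.h>

int main(void)
{
#if defined(__aarch64__)
	printf("building for AArch64\n");
#elif defined(__arm__)
	printf("building for AArch32\n");
#else
	printf("building for a non-Arm architecture\n");
#endif
	return 0;
}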
Diffstat (limited to 'lib')
-rw-r--r--  lib/cpus/errata_report.c                  |  4
-rw-r--r--  lib/locks/bakery/bakery_lock_normal.c     | 12
-rw-r--r--  lib/optee/optee_utils.c                   |  4
-rw-r--r--  lib/psci/psci_common.c                    | 96
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_context.c  | 34
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_utils.c    |  2
6 files changed, 76 insertions(+), 76 deletions(-)
diff --git a/lib/cpus/errata_report.c b/lib/cpus/errata_report.c
index aeb35600..f43b2176 100644
--- a/lib/cpus/errata_report.c
+++ b/lib/cpus/errata_report.c
@@ -18,9 +18,9 @@
#ifdef IMAGE_BL1
# define BL_STRING "BL1"
-#elif defined(AARCH64) && defined(IMAGE_BL31)
+#elif defined(__aarch64__) && defined(IMAGE_BL31)
# define BL_STRING "BL31"
-#elif defined(AARCH32) && defined(IMAGE_BL32)
+#elif !defined(__aarch64__) && defined(IMAGE_BL32)
# define BL_STRING "BL32"
#elif defined(IMAGE_BL2) && BL2_AT_EL3
# define BL_STRING "BL2"
diff --git a/lib/locks/bakery/bakery_lock_normal.c b/lib/locks/bakery/bakery_lock_normal.c
index cc13fc1b..f906f51e 100644
--- a/lib/locks/bakery/bakery_lock_normal.c
+++ b/lib/locks/bakery/bakery_lock_normal.c
@@ -167,10 +167,10 @@ void bakery_lock_get(bakery_lock_t *lock)
unsigned int their_bakery_data;
me = plat_my_core_pos();
-#ifdef AARCH32
- is_cached = read_sctlr() & SCTLR_C_BIT;
-#else
+#ifdef __aarch64__
is_cached = read_sctlr_el3() & SCTLR_C_BIT;
+#else
+ is_cached = read_sctlr() & SCTLR_C_BIT;
#endif
/* Get a ticket */
@@ -228,10 +228,10 @@ void bakery_lock_get(bakery_lock_t *lock)
void bakery_lock_release(bakery_lock_t *lock)
{
bakery_info_t *my_bakery_info;
-#ifdef AARCH32
- unsigned int is_cached = read_sctlr() & SCTLR_C_BIT;
-#else
+#ifdef __aarch64__
unsigned int is_cached = read_sctlr_el3() & SCTLR_C_BIT;
+#else
+ unsigned int is_cached = read_sctlr() & SCTLR_C_BIT;
#endif
my_bakery_info = get_bakery_info(plat_my_core_pos(), lock);
diff --git a/lib/optee/optee_utils.c b/lib/optee/optee_utils.c
index f7392fda..2a407939 100644
--- a/lib/optee/optee_utils.c
+++ b/lib/optee/optee_utils.c
@@ -176,7 +176,7 @@ int parse_optee_header(entry_point_info_t *header_ep,
*/
if (!tee_validate_header(header)) {
INFO("Invalid OPTEE header, set legacy mode.\n");
-#ifdef AARCH64
+#ifdef __aarch64__
header_ep->args.arg0 = MODE_RW_64;
#else
header_ep->args.arg0 = MODE_RW_32;
@@ -222,7 +222,7 @@ int parse_optee_header(entry_point_info_t *header_ep,
if (header->arch == 0) {
header_ep->args.arg0 = MODE_RW_32;
} else {
-#ifdef AARCH64
+#ifdef __aarch64__
header_ep->args.arg0 = MODE_RW_64;
#else
ERROR("Cannot boot an AArch64 OP-TEE\n");
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index 3f5e9893..5d24356c 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -619,53 +619,7 @@ int psci_validate_mpidr(u_register_t mpidr)
* This function determines the full entrypoint information for the requested
* PSCI entrypoint on power on/resume and returns it.
******************************************************************************/
-#ifdef AARCH32
-static int psci_get_ns_ep_info(entry_point_info_t *ep,
- uintptr_t entrypoint,
- u_register_t context_id)
-{
- u_register_t ep_attr;
- unsigned int aif, ee, mode;
- u_register_t scr = read_scr();
- u_register_t ns_sctlr, sctlr;
-
- /* Switch to non secure state */
- write_scr(scr | SCR_NS_BIT);
- isb();
- ns_sctlr = read_sctlr();
-
- sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
-
- /* Return to original state */
- write_scr(scr);
- isb();
- ee = 0;
-
- ep_attr = NON_SECURE | EP_ST_DISABLE;
- if (sctlr & SCTLR_EE_BIT) {
- ep_attr |= EP_EE_BIG;
- ee = 1;
- }
- SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
-
- ep->pc = entrypoint;
- zeromem(&ep->args, sizeof(ep->args));
- ep->args.arg0 = context_id;
-
- mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
-
- /*
- * TODO: Choose async. exception bits if HYP mode is not
- * implemented according to the values of SCR.{AW, FW} bits
- */
- aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
-
- ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
-
- return PSCI_E_SUCCESS;
-}
-
-#else
+#ifdef __aarch64__
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
@@ -722,7 +676,53 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
return PSCI_E_SUCCESS;
}
-#endif
+#else /* !__aarch64__ */
+static int psci_get_ns_ep_info(entry_point_info_t *ep,
+ uintptr_t entrypoint,
+ u_register_t context_id)
+{
+ u_register_t ep_attr;
+ unsigned int aif, ee, mode;
+ u_register_t scr = read_scr();
+ u_register_t ns_sctlr, sctlr;
+
+ /* Switch to non secure state */
+ write_scr(scr | SCR_NS_BIT);
+ isb();
+ ns_sctlr = read_sctlr();
+
+ sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
+
+ /* Return to original state */
+ write_scr(scr);
+ isb();
+ ee = 0;
+
+ ep_attr = NON_SECURE | EP_ST_DISABLE;
+ if (sctlr & SCTLR_EE_BIT) {
+ ep_attr |= EP_EE_BIG;
+ ee = 1;
+ }
+ SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
+
+ ep->pc = entrypoint;
+ zeromem(&ep->args, sizeof(ep->args));
+ ep->args.arg0 = context_id;
+
+ mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
+
+ /*
+ * TODO: Choose async. exception bits if HYP mode is not
+ * implemented according to the values of SCR.{AW, FW} bits
+ */
+ aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
+
+ ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
+
+ return PSCI_E_SUCCESS;
+}
+
+#endif /* __aarch64__ */
/*******************************************************************************
* This function validates the entrypoint with the platform layer if the
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
index bf3ae1e7..f4b64b33 100644
--- a/lib/xlat_tables_v2/xlat_tables_context.c
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -136,48 +136,48 @@ int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa
#endif
-#ifdef AARCH32
+#ifdef __aarch64__
-void enable_mmu_svc_mon(unsigned int flags)
+void enable_mmu_el1(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
- enable_mmu_direct_svc_mon(flags);
+ enable_mmu_direct_el1(flags);
}
-void enable_mmu_hyp(unsigned int flags)
+void enable_mmu_el2(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL2_REGIME);
- enable_mmu_direct_hyp(flags);
+ enable_mmu_direct_el2(flags);
}
-#else
-
-void enable_mmu_el1(unsigned int flags)
+void enable_mmu_el3(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
- tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
- enable_mmu_direct_el1(flags);
+ tf_xlat_ctx.va_max_address, EL3_REGIME);
+ enable_mmu_direct_el3(flags);
}
-void enable_mmu_el2(unsigned int flags)
+#else /* !__aarch64__ */
+
+void enable_mmu_svc_mon(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
- tf_xlat_ctx.va_max_address, EL2_REGIME);
- enable_mmu_direct_el2(flags);
+ tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
+ enable_mmu_direct_svc_mon(flags);
}
-void enable_mmu_el3(unsigned int flags)
+void enable_mmu_hyp(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
- tf_xlat_ctx.va_max_address, EL3_REGIME);
- enable_mmu_direct_el3(flags);
+ tf_xlat_ctx.va_max_address, EL2_REGIME);
+ enable_mmu_direct_hyp(flags);
}
-#endif /* AARCH32 */
+#endif /* __aarch64__ */
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 761d00c3..232142e8 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -97,7 +97,7 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
-#ifdef AARCH64
+#ifdef __aarch64__
/* Check Guarded Page bit */
if ((desc & GP) != 0ULL) {
printf("-GP");