author     Scott Branden <sbranden@users.noreply.github.com>  2017-04-29 08:36:12 -0700
committer  GitHub <noreply@github.com>                        2017-04-29 08:36:12 -0700
commit     0f22bef31d402e24fab77eb2a3c643d042b7e79c (patch)
tree       6595ed7f87249ccbd4a953ba5cdc00963a130d18 /lib
parent     53d9c9c85bc49845c4c40315e1ab29d627a1f8c3 (diff)
parent     dd454b40dfe42dbf77e2f04a3965295380b4f78d (diff)
Merge branch 'integration' into tf_issue_461
Diffstat (limited to 'lib')
 -rw-r--r--  lib/aarch32/misc_helpers.S                     |   2
 -rw-r--r--  lib/aarch64/misc_helpers.S                     |   8
 -rw-r--r--  lib/cpus/aarch32/aem_generic.S                 |   6
 -rw-r--r--  lib/cpus/aarch32/cortex_a32.S                  |   6
 -rw-r--r--  lib/cpus/aarch32/cortex_a53.S                  | 141
 -rw-r--r--  lib/cpus/aarch32/cortex_a57.S                  | 192
 -rw-r--r--  lib/cpus/aarch32/cortex_a72.S                  | 216
 -rw-r--r--  lib/cpus/aarch32/cpu_helpers.S                 |   6
 -rw-r--r--  lib/cpus/aarch64/cpu_helpers.S                 |   6
 -rw-r--r--  lib/psci/psci_on.c                             |   2
 -rw-r--r--  lib/psci/psci_suspend.c                        |   2
 -rw-r--r--  lib/psci/psci_system_off.c                     |   7
 -rw-r--r--  lib/stdlib/assert.c                            |  20
 -rw-r--r--  lib/xlat_tables/aarch32/xlat_tables.c          |   6
 -rw-r--r--  lib/xlat_tables/aarch64/xlat_tables.c          |   4
 -rw-r--r--  lib/xlat_tables/xlat_tables_common.c           |  17
 -rw-r--r--  lib/xlat_tables_v2/aarch32/xlat_tables_arch.c  |   4
 -rw-r--r--  lib/xlat_tables_v2/aarch64/xlat_tables_arch.c  |   4
 -rw-r--r--  lib/xlat_tables_v2/xlat_tables_common.c        |   4
 -rw-r--r--  lib/xlat_tables_v2/xlat_tables_internal.c      |   4
 20 files changed, 610 insertions(+), 47 deletions(-)
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
index 5b17c21c..03b47eae 100644
--- a/lib/aarch32/misc_helpers.S
+++ b/lib/aarch32/misc_helpers.S
@@ -162,7 +162,7 @@ endfunc zeromem
* --------------------------------------------------------------------------
*/
func memcpy4
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
orr r3, r0, r1
tst r3, #0x3
ASM_ASSERT(eq)
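
Across this diff the ASM_ASSERTION build flag is replaced by ENABLE_ASSERTIONS, so a single option now gates assertion checks in both C and assembly. A rough C-level sketch of what the memcpy4 guard above enforces (the wrapper function and macro are illustrative, not TF-A API):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #if ENABLE_ASSERTIONS
    /* memcpy4 requires source and destination to be 4-byte aligned */
    #define ASSERT_ALIGNED4(p)  assert(((uintptr_t)(p) & 0x3U) == 0U)
    #else
    #define ASSERT_ALIGNED4(p)  ((void)0)
    #endif

    static void *memcpy4_sketch(void *dst, const void *src, size_t n)
    {
        ASSERT_ALIGNED4(dst);
        ASSERT_ALIGNED4(src);
        uint32_t *d = dst;
        const uint32_t *s = src;
        for (size_t i = 0U; i < n / 4U; i++)
            d[i] = s[i];        /* word-wise copy */
        return dst;
    }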
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index 84265e0b..74550aa2 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -215,7 +215,7 @@ func zeromem_dczva
tmp1 .req x4
tmp2 .req x5
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
/*
* Check for M bit (MMU enabled) of the current SCTLR_EL(1|3)
* register value and panic if the MMU is disabled.
@@ -228,7 +228,7 @@ func zeromem_dczva
tst tmp1, #SCTLR_M_BIT
ASM_ASSERT(ne)
-#endif /* ASM_ASSERTION */
+#endif /* ENABLE_ASSERTIONS */
/* stop_address is the address past the last to zero */
add stop_address, cursor, length
@@ -247,7 +247,7 @@ func zeromem_dczva
mov tmp2, #(1 << 2)
lsl block_size, tmp2, block_size
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
/*
* Assumes block size is at least 16 bytes to avoid manual realignment
* of the cursor at the end of the DCZVA loop.
@@ -444,7 +444,7 @@ endfunc zeromem_dczva
* --------------------------------------------------------------------------
*/
func memcpy16
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
orr x3, x0, x1
tst x3, #0xf
ASM_ASSERT(eq)
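
In zeromem_dczva the block size handled by one DC ZVA instruction comes from DCZID_EL0, and the relabelled ENABLE_ASSERTIONS guard checks it is at least 16 bytes so the loop never has to realign its cursor. A sketch of the computation, assuming a hand-rolled accessor (TF-A does this in assembly):

    #include <assert.h>
    #include <stdint.h>

    static inline uint64_t read_dczid_el0(void)   /* illustrative accessor */
    {
        uint64_t v;
        __asm__ volatile("mrs %0, dczid_el0" : "=r"(v));
        return v;
    }

    static uint64_t dczva_block_size(void)
    {
        /* DCZID_EL0[3:0] is log2 of the number of 32-bit words zeroed,
         * so the byte size is 4 << DCZID_EL0[3:0], as in the hunk above. */
        uint64_t block = 4ULL << (read_dczid_el0() & 0xfULL);
        assert(block >= 16ULL);   /* mirrors the ENABLE_ASSERTIONS check */
        return block;
    }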
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
index 3d6064c9..7374e250 100644
--- a/lib/cpus/aarch32/aem_generic.S
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,7 +35,7 @@
func aem_generic_core_pwr_dwn
/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
@@ -51,7 +51,7 @@ endfunc aem_generic_core_pwr_dwn
func aem_generic_cluster_pwr_dwn
/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S
index f631c4cf..8cd79330 100644
--- a/lib/cpus/aarch32/cortex_a32.S
+++ b/lib/cpus/aarch32/cortex_a32.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -76,7 +76,7 @@ func cortex_a32_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
@@ -107,7 +107,7 @@ func cortex_a32_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
diff --git a/lib/cpus/aarch32/cortex_a53.S b/lib/cpus/aarch32/cortex_a53.S
new file mode 100644
index 00000000..a16ead8b
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a53.S
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a53.h>
+#include <cpu_macros.S>
+#include <debug.h>
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a53_disable_smp
+ ldcopr16 r0, r1, CPUECTLR
+ bic64_imm r0, r1, CPUECTLR_SMP_BIT
+ stcopr16 r0, r1, CPUECTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a53_disable_smp
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A53.
+ * -------------------------------------------------
+ */
+func cortex_a53_reset_func
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CPUECTLR
+ orr64_imm r0, r1, CPUECTLR_SMP_BIT
+ stcopr16 r0, r1, CPUECTLR
+ isb
+ bx lr
+endfunc cortex_a53_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A53.
+ * ----------------------------------------------------
+ */
+func cortex_a53_core_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a53_disable_smp
+endfunc cortex_a53_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A53.
+ * Clobbers: r0-r3
+ * -------------------------------------------------------
+ */
+func cortex_a53_cluster_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a53_disable_smp
+endfunc cortex_a53_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
+ cortex_a53_reset_func, \
+ cortex_a53_core_pwr_dwn, \
+ cortex_a53_cluster_pwr_dwn
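
This is the first of three new aarch32 CPU support files. The declare_cpu_ops macro at the end registers the MIDR together with the three handlers so reset_handler and prepare_cpu_pwr_dwn (see cpu_helpers.S below) can find them at runtime. An illustrative C view of the record it emits; the real layout is defined by cpu_macros.S and may differ:

    struct cpu_ops_sketch {
        unsigned long midr;              /* CORTEX_A53_MIDR */
        void (*reset_func)(void);        /* cortex_a53_reset_func */
        void (*core_pwr_dwn)(void);      /* cortex_a53_core_pwr_dwn */
        void (*cluster_pwr_dwn)(void);   /* cortex_a53_cluster_pwr_dwn */
    };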
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
new file mode 100644
index 00000000..3c5c4549
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a57.h>
+#include <cpu_macros.S>
+#include <debug.h>
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * Clobbers: r0-r1
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_smp
+ ldcopr16 r0, r1, CPUECTLR
+ bic64_imm r0, r1, CPUECTLR_SMP_BIT
+ stcopr16 r0, r1, CPUECTLR
+ bx lr
+endfunc cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Disable all types of L2 prefetches.
+ * Clobbers: r0-r2
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_l2_prefetch
+ ldcopr16 r0, r1, CPUECTLR
+ orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
+ bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \
+ CPUECTLR_L2_DPFTCH_DIST_MASK)
+ stcopr16 r0, r1, CPUECTLR
+ isb
+ dsb ish
+ bx lr
+endfunc cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_ext_debug
+ mov r0, #1
+ stcopr r0, DBGOSDLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a57_disable_ext_debug
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A57.
+ * -------------------------------------------------
+ */
+func cortex_a57_reset_func
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CPUECTLR
+ orr64_imm r0, r1, CPUECTLR_SMP_BIT
+ stcopr16 r0, r1, CPUECTLR
+ isb
+ bx lr
+endfunc cortex_a57_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A57.
+ * ----------------------------------------------------
+ */
+func cortex_a57_core_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a57_disable_ext_debug
+endfunc cortex_a57_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A57.
+ * Clobbers: r0-r3
+ * -------------------------------------------------------
+ */
+func cortex_a57_cluster_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L2 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a57_disable_ext_debug
+endfunc cortex_a57_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
+ cortex_a57_reset_func, \
+ cortex_a57_core_pwr_dwn, \
+ cortex_a57_cluster_pwr_dwn
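
Unlike the A53 file, the A57 power-down paths also disable L2 prefetch and quiesce external debug before power is removed. The debug step sets the OS Double Lock; a C-level sketch with inline assembly (TF-A uses the stcopr macro instead):

    /* Setting DBGOSDLR.DLK (CP14 c1, c3, 4 on AArch32) locks out external
     * debug accesses so none can arrive while the core loses power. */
    static inline void set_os_double_lock(void)
    {
        unsigned int one = 1U;
        __asm__ volatile("mcr p14, 0, %0, c1, c3, 4" :: "r"(one));
        __asm__ volatile("isb");
        __asm__ volatile("dsb sy");
    }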
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
new file mode 100644
index 00000000..583c1b58
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a72.h>
+#include <cpu_macros.S>
+#include <debug.h>
+
+ /* ---------------------------------------------
+ * Disable all types of L2 prefetches.
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_l2_prefetch
+ ldcopr16 r0, r1, CPUECTLR
+ orr64_imm r0, r1, CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
+ bic64_imm r0, r1, (CPUECTLR_L2_IPFTCH_DIST_MASK | \
+ CPUECTLR_L2_DPFTCH_DIST_MASK)
+ stcopr16 r0, r1, CPUECTLR
+ isb
+ bx lr
+endfunc cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_hw_prefetcher
+ ldcopr16 r0, r1, CPUACTLR
+ orr64_imm r0, r1, CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH
+ stcopr16 r0, r1, CPUACTLR
+ isb
+ dsb ish
+ bx lr
+endfunc cortex_a72_disable_hw_prefetcher
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * Clobbers: r0-r1
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_smp
+ ldcopr16 r0, r1, CPUECTLR
+ bic64_imm r0, r1, CPUECTLR_SMP_BIT
+ stcopr16 r0, r1, CPUECTLR
+ bx lr
+endfunc cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func cortex_a72_disable_ext_debug
+ mov r0, #1
+ stcopr r0, DBGOSDLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a72_disable_ext_debug
+
+ /* -------------------------------------------------
+ * The CPU Ops reset function for Cortex-A72.
+ * -------------------------------------------------
+ */
+func cortex_a72_reset_func
+ /* ---------------------------------------------
+ * Enable the SMP bit.
+ * ---------------------------------------------
+ */
+ ldcopr16 r0, r1, CPUECTLR
+ orr64_imm r0, r1, CPUECTLR_SMP_BIT
+ stcopr16 r0, r1, CPUECTLR
+ isb
+ bx lr
+endfunc cortex_a72_reset_func
+
+ /* ----------------------------------------------------
+ * The CPU Ops core power down function for Cortex-A72.
+ * ----------------------------------------------------
+ */
+func cortex_a72_core_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_hw_prefetcher
+
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a72_disable_ext_debug
+endfunc cortex_a72_core_pwr_dwn
+
+ /* -------------------------------------------------------
+ * The CPU Ops cluster power down function for Cortex-A72.
+ * -------------------------------------------------------
+ */
+func cortex_a72_cluster_pwr_dwn
+ push {r12, lr}
+
+ /* Assert if cache is enabled */
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the load-store hardware prefetcher.
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_hw_prefetcher
+
+#if !SKIP_A72_L1_FLUSH_PWR_DWN
+ /* ---------------------------------------------
+ * Flush L1 caches.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+#endif
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* -------------------------------------------------
+ * Flush the L2 caches.
+ * -------------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level2
+
+ /* ---------------------------------------------
+ * Come out of intra cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a72_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ pop {r12, lr}
+ b cortex_a72_disable_ext_debug
+endfunc cortex_a72_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
+ cortex_a72_reset_func, \
+ cortex_a72_core_pwr_dwn, \
+ cortex_a72_cluster_pwr_dwn
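
The A72 sequence adds one step over the A57: the load-store hardware prefetcher is turned off through CPUACTLR, and the cluster-level L1 flush can be compiled out with SKIP_A72_L1_FLUSH_PWR_DWN. A hedged C rendering of the prefetcher step; the accessors are hypothetical stand-ins for ldcopr16/stcopr16, and the bit value should be taken from cortex_a72.h:

    #include <stdint.h>

    extern uint64_t read_cpuactlr(void);        /* hypothetical accessor */
    extern void write_cpuactlr(uint64_t val);   /* hypothetical accessor */

    /* assumed bit position; use CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH from
     * cortex_a72.h in real code */
    #define DIS_L1_DCACHE_HW_PFTCH  (1ULL << 56)

    static void disable_hw_prefetcher_sketch(void)
    {
        write_cpuactlr(read_cpuactlr() | DIS_L1_DCACHE_HW_PFTCH);
        /* the assembly follows this with isb + dsb ish */
    }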
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
index dc1b6e61..7606b8e2 100644
--- a/lib/cpus/aarch32/cpu_helpers.S
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -53,7 +53,7 @@ func reset_handler
/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
bl get_cpu_ops_ptr
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
cmp r0, #0
ASM_ASSERT(ne)
#endif
@@ -92,7 +92,7 @@ func prepare_cpu_pwr_dwn
pop {r2, lr}
ldr r0, [r0, #CPU_DATA_CPU_OPS_PTR]
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
cmp r0, #0
ASM_ASSERT(ne)
#endif
@@ -118,7 +118,7 @@ func init_cpu_ops
cmp r1, #0
bne 1f
bl get_cpu_ops_ptr
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
cmp r0, #0
ASM_ASSERT(ne)
#endif
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 47cb6a2d..6a399167 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -55,7 +55,7 @@ func reset_handler
/* Get the matching cpu_ops pointer */
bl get_cpu_ops_ptr
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
cmp x0, #0
ASM_ASSERT(ne)
#endif
@@ -94,7 +94,7 @@ func prepare_cpu_pwr_dwn
mrs x1, tpidr_el3
ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
cmp x0, #0
ASM_ASSERT(ne)
#endif
@@ -120,7 +120,7 @@ func init_cpu_ops
cbnz x0, 1f
mov x10, x30
bl get_cpu_ops_ptr
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
cmp x0, #0
ASM_ASSERT(ne)
#endif
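
Both cpu_helpers variants apply the same rename around the cpu_ops pointer checks. The C shape of the check is just a null test, compiled in only when assertions are enabled (the C wrapper is illustrative; get_cpu_ops_ptr is an assembly routine):

    struct cpu_ops *ops = get_cpu_ops_ptr();   /* as in the assembly above */
    #if ENABLE_ASSERTIONS
        assert(ops != NULL);   /* some cpu_ops entry matched this core's MIDR */
    #endif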
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
index 675ed668..76e67a36 100644
--- a/lib/psci/psci_on.c
+++ b/lib/psci/psci_on.c
@@ -165,7 +165,7 @@ void psci_cpu_on_finish(unsigned int cpu_idx,
*/
psci_plat_pm_ops->pwr_domain_on_finish(state_info);
-#if !HW_ASSISTED_COHERENCY
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
/*
* Arch. management: Enable data cache and manage stack memory
*/
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index 08c8fd6a..bf95df24 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -302,7 +302,7 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx,
*/
psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
-#if !HW_ASSISTED_COHERENCY
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
/* Arch. management: Enable the data cache, stack memory maintenance. */
psci_do_pwrup_cache_maintenance();
#endif
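
Both warm-boot finish paths (psci_cpu_on_finish and psci_cpu_suspend_finish) now skip the cache maintenance when either HW_ASSISTED_COHERENCY or the new WARMBOOT_ENABLE_DCACHE_EARLY flag is set, since in both cases the data cache is already usable when the core reaches this point. The combined predicate:

    #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
        psci_do_pwrup_cache_maintenance();  /* enable D-cache, stack maintenance */
    #endif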
diff --git a/lib/psci/psci_system_off.c b/lib/psci/psci_system_off.c
index de9ec643..eb3e7fbc 100644
--- a/lib/psci/psci_system_off.c
+++ b/lib/psci/psci_system_off.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,6 +31,7 @@
#include <stddef.h>
#include <arch_helpers.h>
#include <assert.h>
+#include <console.h>
#include <debug.h>
#include <platform.h>
#include "psci_private.h"
@@ -46,6 +47,8 @@ void psci_system_off(void)
psci_spd_pm->svc_system_off();
}
+ console_flush();
+
/* Call the platform specific hook */
psci_plat_pm_ops->system_off();
@@ -63,6 +66,8 @@ void psci_system_reset(void)
psci_spd_pm->svc_system_reset();
}
+ console_flush();
+
/* Call the platform specific hook */
psci_plat_pm_ops->system_reset();
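
console_flush() is called before the platform system_off/system_reset hooks so pending log output is not lost when power disappears. A sketch of why an explicit flush matters, using a hypothetical UART driver:

    /* Hypothetical driver: putc() returns once the byte is queued in the
     * TX FIFO, so the last messages may still be in flight at power-off. */
    extern int uart_tx_fifo_empty(void);   /* hypothetical helper */

    void console_flush_sketch(void)
    {
        while (!uart_tx_fifo_empty())
            ;   /* busy-wait until the FIFO has fully drained */
    }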
diff --git a/lib/stdlib/assert.c b/lib/stdlib/assert.c
index 90a1afe5..3c0bd166 100644
--- a/lib/stdlib/assert.c
+++ b/lib/stdlib/assert.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,14 +28,22 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <console.h>
#include <debug.h>
+#include <platform.h>
-/*
- * This is a basic implementation. This could be improved.
- */
-void __assert (const char *function, const char *file, unsigned int line,
+void __assert(const char *function, const char *file, unsigned int line,
const char *assertion)
{
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+ /*
+	 * Only print the output if LOG_LEVEL is greater than or equal to
+ * LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
+ */
tf_printf("ASSERT: %s <%d> : %s\n", function, line, assertion);
- while(1);
+
+ console_flush();
+#endif
+
+ plat_panic_handler();
}
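
__assert now ends in plat_panic_handler() instead of an inline while(1), letting platforms reset or dump state on a failed assertion. A minimal sketch of a fallback handler, assuming the platform provides none (TF-A supplies a default; this is illustrative only):

    void plat_panic_handler(void)
    {
        /* Nothing better to do generically: park the core. */
        for (;;)
            __asm__ volatile("wfi");
    }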
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
index 316a60e7..4fe5bf91 100644
--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -85,13 +85,13 @@
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
-#if DEBUG
+#if ENABLE_ASSERTIONS
static unsigned long long get_max_supported_pa(void)
{
/* Physical address space size for long descriptor format. */
return (1ULL << 40) - 1ULL;
}
-#endif
+#endif /* ENABLE_ASSERTIONS */
void init_xlat_tables(void)
{
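
In both xlat_tables variants the guard around get_max_supported_pa() changes from DEBUG to ENABLE_ASSERTIONS. The function is referenced only from assert() expressions, so the two switches must agree: with assertions compiled out the function would sit unused, and with assertions force-enabled in a non-DEBUG build its symbol would be missing. A sketch of the intended pairing (call site reconstructed from context):

    #if ENABLE_ASSERTIONS
    static unsigned long long get_max_supported_pa(void)
    {
        /* long-descriptor format: 40-bit physical address space */
        return (1ULL << 40) - 1ULL;
    }
    #endif

    /* ...inside init_xlat_tables(), compiled out together with it: */
    assert(PLAT_PHY_ADDR_SPACE_SIZE - 1 <= get_max_supported_pa());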
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index ecb12022..4f237936 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -127,7 +127,7 @@ static unsigned long long calc_physical_addr_size_bits(
return TCR_PS_BITS_4GB;
}
-#if DEBUG
+#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
@@ -144,7 +144,7 @@ static unsigned long long get_max_supported_pa(void)
return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
-#endif
+#endif /* ENABLE_ASSERTIONS */
void init_xlat_tables(void)
{
diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c
index 81c4dc68..4426ccef 100644
--- a/lib/xlat_tables/xlat_tables_common.c
+++ b/lib/xlat_tables/xlat_tables_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -87,7 +87,7 @@ void print_mmap(void)
}
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
- size_t size, unsigned int attr)
+ size_t size, mmap_attr_t attr)
{
mmap_region_t *mm = mmap;
mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1;
@@ -109,7 +109,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
assert((base_pa + (unsigned long long)size - 1ULL) <=
(PLAT_PHY_ADDR_SPACE_SIZE - 1));
-#if DEBUG
+#if ENABLE_ASSERTIONS
/* Check for PAs and VAs overlaps with all other regions */
for (mm = mmap; mm->size; ++mm) {
@@ -154,7 +154,7 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
mm = mmap; /* Restore pointer to the start of the array */
-#endif /* DEBUG */
+#endif /* ENABLE_ASSERTIONS */
/* Find correct place in mmap to insert new region */
while (mm->base_va < base_va && mm->size)
@@ -199,7 +199,7 @@ void mmap_add(const mmap_region_t *mm)
}
}
-static uint64_t mmap_desc(unsigned attr, unsigned long long addr_pa,
+static uint64_t mmap_desc(mmap_attr_t attr, unsigned long long addr_pa,
int level)
{
uint64_t desc;
@@ -277,11 +277,11 @@ static uint64_t mmap_desc(unsigned attr, unsigned long long addr_pa,
* attributes of the innermost region that contains it. If there are partial
* overlaps, it returns -1, as a smaller size is needed.
*/
-static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
+static mmap_attr_t mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
size_t size)
{
/* Don't assume that the area is contained in the first region */
- int attr = -1;
+ mmap_attr_t attr = -1;
/*
* Get attributes from last (innermost) region that contains the
@@ -360,7 +360,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
* there are partially overlapping regions. On success,
* it will return the innermost region's attributes.
*/
- int attr = mmap_region_attr(mm, base_va, level_size);
+ mmap_attr_t attr = mmap_region_attr(mm, base_va,
+ level_size);
if (attr >= 0) {
desc = mmap_desc(attr,
base_va - mm->base_va + mm->base_pa,
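
The signature changes above retype the attribute parameter from unsigned int (and the overlap-probe return from int) to mmap_attr_t. Since mmap_region_attr() still returns -1 for partial overlaps, whatever the headers define mmap_attr_t as must remain signed; a sketch of the constraint (the typedef is illustrative, the real one lives in the xlat tables headers):

    typedef int mmap_attr_t;   /* illustrative; must be a signed type */

    mmap_attr_t attr = mmap_region_attr(mm, base_va, level_size);
    if (attr >= 0) {
        /* innermost region's attributes: build a block/page descriptor */
    } else {
        /* -1: partial overlap, recurse to a finer translation level */
    }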
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index ba0e53d6..cd7aad8f 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -37,13 +37,13 @@
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
-#if DEBUG
+#if ENABLE_ASSERTIONS
static unsigned long long xlat_arch_get_max_supported_pa(void)
{
/* Physical address space size for long descriptor format. */
return (1ull << 40) - 1ull;
}
-#endif /* DEBUG*/
+#endif /* ENABLE_ASSERTIONS */
int is_mmu_enabled(void)
{
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 575ac71c..24266b2d 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -77,7 +77,7 @@ static unsigned long long calc_physical_addr_size_bits(
return TCR_PS_BITS_4GB;
}
-#if DEBUG
+#if ENABLE_ASSERTIONS
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
@@ -94,7 +94,7 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
return (1ull << pa_range_bits_arr[pa_range]) - 1ull;
}
-#endif /* DEBUG*/
+#endif /* ENABLE_ASSERTIONS */
int is_mmu_enabled(void)
{
diff --git a/lib/xlat_tables_v2/xlat_tables_common.c b/lib/xlat_tables_v2/xlat_tables_common.c
index b4691a2b..7ca81b9c 100644
--- a/lib/xlat_tables_v2/xlat_tables_common.c
+++ b/lib/xlat_tables_v2/xlat_tables_common.c
@@ -92,7 +92,7 @@ xlat_ctx_t tf_xlat_ctx = {
};
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
- size_t size, unsigned int attr)
+ size_t size, mmap_attr_t attr)
{
mmap_region_t mm = {
.base_va = base_va,
@@ -114,7 +114,7 @@ void mmap_add(const mmap_region_t *mm)
#if PLAT_XLAT_TABLES_DYNAMIC
int mmap_add_dynamic_region(unsigned long long base_pa,
- uintptr_t base_va, size_t size, unsigned int attr)
+ uintptr_t base_va, size_t size, mmap_attr_t attr)
{
mmap_region_t mm = {
.base_va = base_va,
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 2f03306e..581f7703 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -115,7 +115,7 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
/* Returns a block/page table descriptor for the given level and attributes. */
-static uint64_t xlat_desc(unsigned int attr, unsigned long long addr_pa,
+static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
int level)
{
uint64_t desc;
@@ -609,7 +609,7 @@ void print_mmap(mmap_region_t *const mmap)
*/
static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
uintptr_t base_va, size_t size,
- unsigned int attr)
+ mmap_attr_t attr)
{
mmap_region_t *mm = ctx->mmap;
unsigned long long end_pa = base_pa + size - 1;
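
A closing usage sketch of the retyped mmap API: the region values below are invented, but MT_DEVICE, MT_RW and MT_SECURE are the standard TF-A attribute flags an mmap_attr_t carries.

    /* hypothetical platform region: a 64 KiB secure device mapping */
    mmap_add_region(0x40000000ULL,   /* base_pa */
                    0x40000000U,     /* base_va */
                    0x10000U,        /* size    */
                    MT_DEVICE | MT_RW | MT_SECURE);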