author     Soby Mathew <soby.mathew@arm.com>  2016-05-05 14:10:46 +0100
committer  Soby Mathew <soby.mathew@arm.com>  2016-08-10 12:35:46 +0100
commit     e33b78a658bd54a815c780e17c2d0073db6f59db (patch)
tree       17ede96d62e85ca511b0291b6a913a2d486dd91c /lib/cpus
parent     66be868e9acc7b34852f755934664b191e9fae13 (diff)
AArch32: Add support in TF libraries
This patch adds AArch32 support to the cpu ops, context management,
per-cpu data and spinlock libraries. The `entrypoint_info` structure is
modified to add support for AArch32 register arguments. The CPU
operations for the AEM generic CPU in AArch32 mode are also added.

Change-Id: I1e52e79f498661d8f31f1e7b3a29e222bc7a4483
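As a rough illustration of the `entrypoint_info` change described above (a hedged
sketch only: the exact type and field names live in the patched headers, which are
not part of this diff, so treat `aapcs32_params_t` and the layout below as
assumptions):

    /* Sketch: AArch32 register arguments carried by the entry point info. */
    typedef struct aapcs32_params {
        u_register_t arg0;      /* r0 */
        u_register_t arg1;      /* r1 */
        u_register_t arg2;      /* r2 */
        u_register_t arg3;      /* r3 */
    } aapcs32_params_t;

    typedef struct entry_point_info {
        param_header_t h;
        uintptr_t pc;           /* entry point address */
        uint32_t spsr;          /* target execution state */
    #ifdef AARCH32
        aapcs32_params_t args;  /* AArch32: arguments passed in r0 - r3 */
    #else
        aapcs64_params_t args;  /* AArch64: arguments passed in x0 - x7 */
    #endif
    } entry_point_info_t;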
Diffstat (limited to 'lib/cpus')
-rw-r--r--  lib/cpus/aarch32/aem_generic.S   68
-rw-r--r--  lib/cpus/aarch32/cpu_helpers.S  177
2 files changed, 245 insertions(+), 0 deletions(-)
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
new file mode 100644
index 00000000..10ea4e47
--- /dev/null
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <aem_generic.h>
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_macros.S>
+
+func aem_generic_core_pwr_dwn
+ /* Assert if cache is enabled */
+#if ASM_ASSERTION
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ /* ---------------------------------------------
+ * Flush L1 cache to PoU.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ b dcsw_op_louis
+endfunc aem_generic_core_pwr_dwn
+
+
+func aem_generic_cluster_pwr_dwn
+ /* Assert if cache is enabled */
+#if ASM_ASSERTION
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ /* ---------------------------------------------
+ * Flush L1 and L2 caches to PoC.
+ * ---------------------------------------------
+ */
+ mov r0, #DC_OP_CISW
+ b dcsw_op_all
+endfunc aem_generic_cluster_pwr_dwn
+
+/* cpu_ops for Base AEM FVP */
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
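The `declare_cpu_ops` line above emits one record into the `cpu_ops` linker
section that `cpu_helpers.S` scans at runtime; the trailing `1` presumably marks
that this CPU needs no reset function, which is consistent with `reset_handler`
below checking for a zero entry. Conceptually the record looks like the following
C sketch; the field names and order are assumptions inferred from the `CPU_MIDR`,
`CPU_RESET_FUNC`, `CPU_PWR_DWN_CORE` and `CPU_PWR_DWN_CLUSTER` offsets used in
`cpu_helpers.S`:

    /* Sketch: one cpu_ops record between __CPU_OPS_START__ and __CPU_OPS_END__. */
    typedef struct cpu_ops {
        unsigned int midr;              /* CPU_MIDR: matched against this core's MIDR */
        void (*reset_func)(void);       /* CPU_RESET_FUNC: 0 when there is no reset handler */
        void (*core_pwr_dwn)(void);     /* CPU_PWR_DWN_CORE */
        void (*cluster_pwr_dwn)(void);  /* CPU_PWR_DWN_CLUSTER */
    } cpu_ops_t;                        /* sizeof(cpu_ops_t) corresponds to CPU_OPS_SIZE */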
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
new file mode 100644
index 00000000..927a6f50
--- /dev/null
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cpu_data.h>
+#include <cpu_macros.S>
+
+ /*
+ * The reset handler common to all platforms. After a matching
+ * cpu_ops structure entry is found, the corresponding reset_handler
+ * in the cpu_ops is invoked. The reset handler is invoked very early
+ * in the boot sequence and it is assumed that we can clobber r0 - r10
+ * without the need to follow AAPCS.
+ * Clobbers: r0 - r10
+ */
+ .globl reset_handler
+func reset_handler
+ mov r10, lr
+
+ /* The plat_reset_handler can clobber r0 - r9 */
+ bl plat_reset_handler
+
+ /* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
+ bl get_cpu_ops_ptr
+
+#if ASM_ASSERTION
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the cpu_ops reset handler */
+ ldr r1, [r0, #CPU_RESET_FUNC]
+ cmp r1, #0
+ mov lr, r10
+ bxne r1
+ bx lr
+endfunc reset_handler
+
+ /*
+ * The prepare core power down function for all platforms. After
+ * the cpu_ops pointer is retrieved from cpu_data, the corresponding
+ * pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS.
+ */
+ .globl prepare_core_pwr_dwn
+func prepare_core_pwr_dwn
+ push {lr}
+ bl _cpu_data
+ pop {lr}
+
+ ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+#if ASM_ASSERTION
+ cmp r1, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the cpu_ops core_pwr_dwn handler */
+ ldr r0, [r1, #CPU_PWR_DWN_CORE]
+ bx r0
+endfunc prepare_core_pwr_dwn
+
+ /*
+ * The prepare cluster power down function for all platforms. After
+ * the cpu_ops pointer is retrieved from cpu_data, the corresponding
+ * pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS.
+ */
+ .globl prepare_cluster_pwr_dwn
+func prepare_cluster_pwr_dwn
+ push {lr}
+ bl _cpu_data
+ pop {lr}
+
+ ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+#if ASM_ASSERTION
+ cmp r1, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the cpu_ops cluster_pwr_dwn handler */
+ ldr r0, [r1, #CPU_PWR_DWN_CLUSTER]
+ bx r0
+endfunc prepare_cluster_pwr_dwn
+
+ /*
+ * Initializes the cpu_ops_ptr if not already initialized
+ * in cpu_data. This must only be called after the data cache
+ * is enabled. AAPCS is followed.
+ */
+ .globl init_cpu_ops
+func init_cpu_ops
+ push {r4 - r6, lr}
+ bl _cpu_data
+ mov r6, r0
+ ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+ cmp r1, #0
+ bne 1f
+ bl get_cpu_ops_ptr
+#if ASM_ASSERTION
+ cmp r0, #0
+ ASM_ASSERT(ne)
+#endif
+ str r0, [r6, #CPU_DATA_CPU_OPS_PTR]
+1:
+ pop {r4 - r6, pc}
+endfunc init_cpu_ops
+
+ /*
+ * The below function returns the cpu_ops structure matching the
+ * MIDR of the core. It reads the MIDR and finds the matching
+ * entry in the cpu_ops entries. Only the implementer and part
+ * number are used to match the entries.
+ * Return:
+ * r0 - The matching cpu_ops pointer on success
+ * r0 - 0 on failure
+ * Clobbers: r0 - r5
+ */
+ .globl get_cpu_ops_ptr
+func get_cpu_ops_ptr
+ /* Get the cpu_ops start and end locations */
+ ldr r4, =(__CPU_OPS_START__ + CPU_MIDR)
+ ldr r5, =(__CPU_OPS_END__ + CPU_MIDR)
+
+ /* Initialize the return parameter */
+ mov r0, #0
+
+ /* Read the MIDR */
+ ldcopr r2, MIDR
+ ldr r3, =CPU_IMPL_PN_MASK
+
+ /* Retain only the implementer and part number using the mask */
+ and r2, r2, r3
+1:
+ /* Check if we have reached end of list */
+ cmp r4, r5
+ bge error_exit
+
+ /* Load the MIDR from the cpu_ops entry */
+ ldr r1, [r4], #CPU_OPS_SIZE
+ and r1, r1, r3
+
+ /* Check if midr matches to midr of this core */
+ cmp r1, r2
+ bne 1b
+
+ /* Subtract the increment and offset to get the cpu_ops pointer */
+ sub r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
+error_exit:
+ bx lr
+endfunc get_cpu_ops_ptr
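For reference, the lookup that `get_cpu_ops_ptr` implements is equivalent to the
following C sketch, reusing the illustrative `cpu_ops_t` above. `read_midr` stands
in for the `ldcopr r2, MIDR` coprocessor read and is an assumed helper, not an API
from this patch:

    extern cpu_ops_t __CPU_OPS_START__[], __CPU_OPS_END__[];

    cpu_ops_t *get_cpu_ops_ptr(void)
    {
        /* Compare only the implementer and part number fields of the MIDR. */
        unsigned int midr = read_midr() & CPU_IMPL_PN_MASK;
        cpu_ops_t *ops;

        for (ops = __CPU_OPS_START__; ops < __CPU_OPS_END__; ops++)
            if ((ops->midr & CPU_IMPL_PN_MASK) == midr)
                return ops;

        return NULL;    /* the assembly version returns 0 in r0 */
    }

`init_cpu_ops` then performs this lookup at most once per core, caching the result
in per-cpu data (again a sketch; the assembly addresses the slot through the
`CPU_DATA_CPU_OPS_PTR` offset rather than a named `cpu_ops_ptr` field):

    void init_cpu_ops(void)
    {
        cpu_data_t *data = _cpu_data();  /* this core's per-cpu data */

        /* Only look up the cpu_ops once; later calls reuse the cached pointer. */
        if (data->cpu_ops_ptr == NULL)
            data->cpu_ops_ptr = get_cpu_ops_ptr();
    }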