Diffstat (limited to 'arch/arm/cpu/armv8')
-rw-r--r--  arch/arm/cpu/armv8/Makefile                 |   2
-rw-r--r--  arch/arm/cpu/armv8/cache_v8.c               | 112
-rw-r--r--  arch/arm/cpu/armv8/cpu-dt.c                 |  31
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/Makefile  |   1
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/cpu.c     | 396
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/fdt.c     |  33
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/ppa.c     |  48
-rw-r--r--  arch/arm/cpu/armv8/s32v234/cpu.c            |  12
-rw-r--r--  arch/arm/cpu/armv8/sec_firmware.c           | 270
-rw-r--r--  arch/arm/cpu/armv8/sec_firmware_asm.S       |  53
-rw-r--r--  arch/arm/cpu/armv8/zynqmp/cpu.c             |  21
11 files changed, 599 insertions(+), 380 deletions(-)
diff --git a/arch/arm/cpu/armv8/Makefile b/arch/arm/cpu/armv8/Makefile
index f6eb9f4a7f..dea14657d9 100644
--- a/arch/arm/cpu/armv8/Makefile
+++ b/arch/arm/cpu/armv8/Makefile
@@ -15,9 +15,11 @@ obj-y += cache.o
obj-y += tlb.o
obj-y += transition.o
obj-y += fwcall.o
+obj-y += cpu-dt.o
ifndef CONFIG_SPL_BUILD
obj-$(CONFIG_ARMV8_SPIN_TABLE) += spin_table.o spin_table_v8.o
endif
+obj-$(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) += sec_firmware.o sec_firmware_asm.o
obj-$(CONFIG_FSL_LAYERSCAPE) += fsl-layerscape/
obj-$(CONFIG_S32V234) += s32v234/
diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index 1615542a99..ac909a15ff 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -35,7 +35,7 @@ DECLARE_GLOBAL_DATA_PTR;
* off: FFF
*/
-static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
+u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
u64 max_addr = 0;
u64 ips, va_bits;
@@ -44,7 +44,7 @@ static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
/* Find the largest address we need to support */
for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
- max_addr = max(max_addr, mem_map[i].base + mem_map[i].size);
+ max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);
/* Calculate the maximum physical (and thus virtual) address */
if (max_addr > (1ULL << 44)) {
@@ -167,49 +167,6 @@ static void set_pte_table(u64 *pte, u64 *table)
*pte = PTE_TYPE_TABLE | (ulong)table;
}
-/* Add one mm_region map entry to the page tables */
-static void add_map(struct mm_region *map)
-{
- u64 *pte;
- u64 addr = map->base;
- u64 size = map->size;
- u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
- u64 blocksize;
- int level;
- u64 *new_table;
-
- while (size) {
- pte = find_pte(addr, 0);
- if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
- debug("Creating table for addr 0x%llx\n", addr);
- new_table = create_table();
- set_pte_table(pte, new_table);
- }
-
- for (level = 1; level < 4; level++) {
- pte = find_pte(addr, level);
- blocksize = 1ULL << level2shift(level);
- debug("Checking if pte fits for addr=%llx size=%llx "
- "blocksize=%llx\n", addr, size, blocksize);
- if (size >= blocksize && !(addr & (blocksize - 1))) {
- /* Page fits, create block PTE */
- debug("Setting PTE %p to block addr=%llx\n",
- pte, addr);
- *pte = addr | attrs;
- addr += blocksize;
- size -= blocksize;
- break;
- } else if ((pte_type(pte) == PTE_TYPE_FAULT)) {
- /* Page doesn't fit, create subpages */
- debug("Creating subtable for addr 0x%llx "
- "blksize=%llx\n", addr, blocksize);
- new_table = create_table();
- set_pte_table(pte, new_table);
- }
- }
- }
-}
-
/* Splits a block PTE into table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
@@ -241,6 +198,58 @@ static void split_block(u64 *pte, int level)
set_pte_table(pte, new_table);
}
+/* Add one mm_region map entry to the page tables */
+static void add_map(struct mm_region *map)
+{
+ u64 *pte;
+ u64 virt = map->virt;
+ u64 phys = map->phys;
+ u64 size = map->size;
+ u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
+ u64 blocksize;
+ int level;
+ u64 *new_table;
+
+ while (size) {
+ pte = find_pte(virt, 0);
+ if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
+ debug("Creating table for virt 0x%llx\n", virt);
+ new_table = create_table();
+ set_pte_table(pte, new_table);
+ }
+
+ for (level = 1; level < 4; level++) {
+ pte = find_pte(virt, level);
+ if (!pte)
+ panic("pte not found\n");
+
+ blocksize = 1ULL << level2shift(level);
+ debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
+ virt, size, blocksize);
+ if (size >= blocksize && !(virt & (blocksize - 1))) {
+ /* Page fits, create block PTE */
+ debug("Setting PTE %p to block virt=%llx\n",
+ pte, virt);
+ *pte = phys | attrs;
+ virt += blocksize;
+ phys += blocksize;
+ size -= blocksize;
+ break;
+ } else if (pte_type(pte) == PTE_TYPE_FAULT) {
+ /* Page doesn't fit, create subpages */
+ debug("Creating subtable for virt 0x%llx blksize=%llx\n",
+ virt, blocksize);
+ new_table = create_table();
+ set_pte_table(pte, new_table);
+ } else if (pte_type(pte) == PTE_TYPE_BLOCK) {
+ debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
+ virt, blocksize);
+ split_block(pte, level);
+ }
+ }
+ }
+}
+
enum pte_type {
PTE_INVAL,
PTE_BLOCK,
@@ -265,7 +274,7 @@ static int count_required_pts(u64 addr, int level, u64 maxaddr)
for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
struct mm_region *map = &mem_map[i];
- u64 start = map->base;
+ u64 start = map->virt;
u64 end = start + map->size;
/* Check if the PTE would overlap with the map */
@@ -349,10 +358,13 @@ __weak u64 get_page_table_size(void)
return size;
}
-static void setup_pgtables(void)
+void setup_pgtables(void)
{
int i;
+ if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
+ panic("Page table pointer not setup.");
+
/*
* Allocate the first level we're on with invalidate entries.
* If the starting level is 0 (va_bits >= 39), then this is our
@@ -363,9 +375,6 @@ static void setup_pgtables(void)
/* Now add all MMU table entries one after another to the table */
for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
add_map(&mem_map[i]);
-
- /* Create the same thing once more for our emergency page table */
- create_table();
}
static void setup_all_pgtables(void)
@@ -527,6 +536,9 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);
+ if (!gd->arch.tlb_emerg)
+ panic("Emergency page table not setup.");
+
/*
* We can not modify page tables that we're currently running on,
* so we first need to switch to the "emergency" page tables where
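The reworked add_map() above is what allows mem_map entries to describe distinct virtual and physical addresses. A minimal sketch (illustrative only, not part of this patch) of how such a region table looks with the mm_region fields used above:

/* Illustrative mm_region table: one identity-mapped DRAM window and one
 * device window remapped to a different virtual address. Addresses and
 * attributes are made up for the example. */
static struct mm_region example_map[] = {
	{
		.virt = 0x80000000UL,
		.phys = 0x80000000UL,		/* virt == phys: identity map */
		.size = 0x40000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
			 PTE_BLOCK_INNER_SHARE
	}, {
		.virt = 0x10000000UL,
		.phys = 0x3000000000UL,		/* virt != phys: remapped device */
		.size = 0x00200000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE
	}, {
		/* list terminator */
		0,
	}
};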
diff --git a/arch/arm/cpu/armv8/cpu-dt.c b/arch/arm/cpu/armv8/cpu-dt.c
new file mode 100644
index 0000000000..9ffb49c37c
--- /dev/null
+++ b/arch/arm/cpu/armv8/cpu-dt.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2016 NXP Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <asm/psci.h>
+#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
+#include <asm/armv8/sec_firmware.h>
+#endif
+
+int psci_update_dt(void *fdt)
+{
+#ifdef CONFIG_MP
+#if defined(CONFIG_ARMV8_PSCI)
+#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
+ /*
+ * If PSCI in the SEC Firmware is not working, do not update the
+ * PSCI device node. Still return 0 instead of an error so that
+ * PSCI can be detected dynamically and the SMP boot method can be
+ * switched between PSCI and spin-table.
+ */
+ if (sec_firmware_support_psci_version() == 0xffffffff)
+ return 0;
+#endif
+ fdt_psci(fdt);
+#endif
+#endif
+ return 0;
+}
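A possible usage sketch (an assumption, not shown in this patch): a platform's FDT fixup can simply chain psci_update_dt() and rely on the unconditional 0 return to keep booting even when PSCI is unavailable.

/* Hypothetical caller: ft_board_setup() is the standard U-Boot board
 * fixup hook of this era; wiring psci_update_dt() into it here is only
 * an illustration. */
int ft_board_setup(void *blob, bd_t *bd)
{
	/* ... board-specific fixups ... */

	return psci_update_dt(blob);	/* 0 even if PSCI is absent */
}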
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/Makefile b/arch/arm/cpu/armv8/fsl-layerscape/Makefile
index eb2cbc3f7e..bcf6b48a51 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/Makefile
+++ b/arch/arm/cpu/armv8/fsl-layerscape/Makefile
@@ -10,6 +10,7 @@ obj-y += soc.o
obj-$(CONFIG_MP) += mp.o
obj-$(CONFIG_OF_LIBFDT) += fdt.o
obj-$(CONFIG_SPL) += spl.o
+obj-$(CONFIG_FSL_LS_PPA) += ppa.o
ifneq ($(CONFIG_FSL_LSCH3),)
obj-y += fsl_lsch3_speed.o
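For context, the new objects are only built when the corresponding options are enabled from the board/SoC configuration. A hedged sketch of such an opt-in written as config-header defines (whether these symbols come from a board header or Kconfig in a given tree is an assumption; the firmware address is illustrative, not taken from this patch):

/* include/configs/<board>.h -- illustrative PPA/SEC firmware opt-in */
#define CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT	/* builds sec_firmware.o */
#define CONFIG_FSL_LS_PPA			/* builds fsl-layerscape/ppa.o */
#define CONFIG_SYS_LS_PPA_FW_IN_NOR		/* PPA FIT image lives in NOR */
#define CONFIG_SYS_LS_PPA_FW_ADDR	0x60500000	/* example address only */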
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/cpu.c b/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
index 8062106e3e..7a2ec6bf59 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
+++ b/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
@@ -23,16 +23,13 @@
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
+#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
+#include <asm/armv8/sec_firmware.h>
+#endif
DECLARE_GLOBAL_DATA_PTR;
-static struct mm_region layerscape_mem_map[] = {
- {
- /* List terminator */
- 0,
- }
-};
-struct mm_region *mem_map = layerscape_mem_map;
+struct mm_region *mem_map = early_map;
void cpu_name(char *name)
{
@@ -56,348 +53,96 @@ void cpu_name(char *name)
}
#ifndef CONFIG_SYS_DCACHE_OFF
-static void set_pgtable_section(u64 *page_table, u64 index, u64 section,
- u64 memory_type, u64 attribute)
-{
- u64 value;
-
- value = section | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
- value |= PMD_ATTRINDX(memory_type);
- value |= attribute;
- page_table[index] = value;
-}
-
-static void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr)
-{
- u64 value;
-
- value = (u64)table_addr | PTE_TYPE_TABLE;
- page_table[index] = value;
-}
-
-/*
- * Set the block entries according to the information of the table.
- */
-static int set_block_entry(const struct sys_mmu_table *list,
- struct table_info *table)
-{
- u64 block_size = 0, block_shift = 0;
- u64 block_addr, index;
- int j;
-
- if (table->entry_size == BLOCK_SIZE_L1) {
- block_size = BLOCK_SIZE_L1;
- block_shift = SECTION_SHIFT_L1;
- } else if (table->entry_size == BLOCK_SIZE_L2) {
- block_size = BLOCK_SIZE_L2;
- block_shift = SECTION_SHIFT_L2;
- } else {
- return -EINVAL;
- }
-
- block_addr = list->phys_addr;
- index = (list->virt_addr - table->table_base) >> block_shift;
-
- for (j = 0; j < (list->size >> block_shift); j++) {
- set_pgtable_section(table->ptr,
- index,
- block_addr,
- list->memory_type,
- list->attribute);
- block_addr += block_size;
- index++;
- }
-
- return 0;
-}
-
-/*
- * Find the corresponding table entry for the list.
- */
-static int find_table(const struct sys_mmu_table *list,
- struct table_info *table, u64 *level0_table)
-{
- u64 index = 0, level = 0;
- u64 *level_table = level0_table;
- u64 temp_base = 0, block_size = 0, block_shift = 0;
-
- while (level < 3) {
- if (level == 0) {
- block_size = BLOCK_SIZE_L0;
- block_shift = SECTION_SHIFT_L0;
- } else if (level == 1) {
- block_size = BLOCK_SIZE_L1;
- block_shift = SECTION_SHIFT_L1;
- } else if (level == 2) {
- block_size = BLOCK_SIZE_L2;
- block_shift = SECTION_SHIFT_L2;
- }
-
- index = 0;
- while (list->virt_addr >= temp_base) {
- index++;
- temp_base += block_size;
- }
-
- temp_base -= block_size;
-
- if ((level_table[index - 1] & PTE_TYPE_MASK) ==
- PTE_TYPE_TABLE) {
- level_table = (u64 *)(level_table[index - 1] &
- ~PTE_TYPE_MASK);
- level++;
- continue;
- } else {
- if (level == 0)
- return -EINVAL;
-
- if ((list->phys_addr + list->size) >
- (temp_base + block_size * NUM_OF_ENTRY))
- return -EINVAL;
-
- /*
- * Check the address and size of the list member is
- * aligned with the block size.
- */
- if (((list->phys_addr & (block_size - 1)) != 0) ||
- ((list->size & (block_size - 1)) != 0))
- return -EINVAL;
-
- table->ptr = level_table;
- table->table_base = temp_base -
- ((index - 1) << block_shift);
- table->entry_size = block_size;
-
- return 0;
- }
- }
- return -EINVAL;
-}
-
/*
* To start MMU before DDR is available, we create MMU table in SRAM.
* The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
* levels of translation tables here to cover 40-bit address space.
* We use 4KB granule size, with 40 bits physical address, T0SZ=24
- * Level 0 IA[39], table address @0
- * Level 1 IA[38:30], table address @0x1000, 0x2000
- * Level 2 IA[29:21], table address @0x3000, 0x4000
- * Address above 0x5000 is free for other purpose.
+ * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other uses.
+ * Note that the debug output in cache_v8.c cannot be used to debug
+ * these early MMU tables because the UART is not yet available.
*/
static inline void early_mmu_setup(void)
{
- unsigned int el, i;
- u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
- u64 *level1_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
- u64 *level1_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
- u64 *level2_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);
- u64 *level2_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000);
-
- struct table_info table = {level0_table, 0, BLOCK_SIZE_L0};
-
- /* Invalidate all table entries */
- memset(level0_table, 0, 0x5000);
-
- /* Fill in the table entries */
- set_pgtable_table(level0_table, 0, level1_table0);
- set_pgtable_table(level0_table, 1, level1_table1);
- set_pgtable_table(level1_table0, 0, level2_table0);
+ unsigned int el = current_el();
-#ifdef CONFIG_FSL_LSCH3
- set_pgtable_table(level1_table0,
- CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1,
- level2_table1);
-#elif defined(CONFIG_FSL_LSCH2)
- set_pgtable_table(level1_table0, 1, level2_table1);
-#endif
- /* Find the table and fill in the block entries */
- for (i = 0; i < ARRAY_SIZE(early_mmu_table); i++) {
- if (find_table(&early_mmu_table[i],
- &table, level0_table) == 0) {
- /*
- * If find_table() returns error, it cannot be dealt
- * with here. Breakpoint can be added for debugging.
- */
- set_block_entry(&early_mmu_table[i], &table);
- /*
- * If set_block_entry() returns error, it cannot be
- * dealt with here too.
- */
- }
- }
+ /* global data is already setup, no allocation yet */
+ gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
+ gd->arch.tlb_fillptr = gd->arch.tlb_addr;
+ gd->arch.tlb_size = EARLY_PGTABLE_SIZE;
- el = current_el();
+ /* Create early page tables */
+ setup_pgtables();
- set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR,
+ /* point TTBR to the new table */
+ set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
+ get_tcr(el, NULL, NULL) &
+ ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
MEMORY_ATTRIBUTES);
- set_sctlr(get_sctlr() | CR_M);
-}
-#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
-/*
- * Called from final mmu setup. The phys_addr is new, non-existing
- * address. A new sub table is created @level2_table_secure to cover
- * size of CONFIG_SYS_MEM_RESERVE_SECURE memory.
- */
-static inline int final_secure_ddr(u64 *level0_table,
- u64 *level2_table_secure,
- phys_addr_t phys_addr)
-{
- int ret = -EINVAL;
- struct table_info table = {};
- struct sys_mmu_table ddr_entry = {
- 0, 0, BLOCK_SIZE_L1, MT_NORMAL,
- PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
- };
- u64 index;
-
- /* Need to create a new table */
- ddr_entry.virt_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1);
- ddr_entry.phys_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1);
- ret = find_table(&ddr_entry, &table, level0_table);
- if (ret)
- return ret;
- index = (ddr_entry.virt_addr - table.table_base) >> SECTION_SHIFT_L1;
- set_pgtable_table(table.ptr, index, level2_table_secure);
- table.ptr = level2_table_secure;
- table.table_base = ddr_entry.virt_addr;
- table.entry_size = BLOCK_SIZE_L2;
- ret = set_block_entry(&ddr_entry, &table);
- if (ret) {
- printf("MMU error: could not fill non-secure ddr block entries\n");
- return ret;
- }
- ddr_entry.virt_addr = phys_addr;
- ddr_entry.phys_addr = phys_addr;
- ddr_entry.size = CONFIG_SYS_MEM_RESERVE_SECURE;
- ddr_entry.attribute = PTE_BLOCK_OUTER_SHARE;
- ret = find_table(&ddr_entry, &table, level0_table);
- if (ret) {
- printf("MMU error: could not find secure ddr table\n");
- return ret;
- }
- ret = set_block_entry(&ddr_entry, &table);
- if (ret)
- printf("MMU error: could not set secure ddr block entry\n");
-
- return ret;
+ set_sctlr(get_sctlr() | CR_M);
}
-#endif
/*
* The final tables look similar to early tables, but different in detail.
* These tables are in DRAM. Sub tables are added to enable cache for
* QBMan and OCRAM.
*
- * Put the MMU table in secure memory if gd->secure_ram is valid.
- * OCRAM will be not used for this purpose so gd->secure_ram can't be 0.
- *
- * Level 1 table 0 contains 512 entries for each 1GB from 0 to 512GB.
- * Level 1 table 1 contains 512 entries for each 1GB from 512GB to 1TB.
- * Level 2 table 0 contains 512 entries for each 2MB from 0 to 1GB.
- *
- * For LSCH3:
- * Level 2 table 1 contains 512 entries for each 2MB from 32GB to 33GB.
- * For LSCH2:
- * Level 2 table 1 contains 512 entries for each 2MB from 1GB to 2GB.
- * Level 2 table 2 contains 512 entries for each 2MB from 20GB to 21GB.
+ * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
+ * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
*/
static inline void final_mmu_setup(void)
{
+ u64 tlb_addr_save = gd->arch.tlb_addr;
unsigned int el = current_el();
- unsigned int i;
- u64 *level0_table = (u64 *)gd->arch.tlb_addr;
- u64 *level1_table0;
- u64 *level1_table1;
- u64 *level2_table0;
- u64 *level2_table1;
-#ifdef CONFIG_FSL_LSCH2
- u64 *level2_table2;
-#endif
- struct table_info table = {NULL, 0, BLOCK_SIZE_L0};
-
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
- u64 *level2_table_secure;
-
- if (el == 3) {
- /*
- * Only use gd->secure_ram if the address is recalculated
- * Align to 4KB for MMU table
- */
- if (gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED)
- level0_table = (u64 *)(gd->secure_ram & ~0xfff);
- else
- printf("MMU warning: gd->secure_ram is not maintained, disabled.\n");
- }
-#endif
- level1_table0 = level0_table + 512;
- level1_table1 = level1_table0 + 512;
- level2_table0 = level1_table1 + 512;
- level2_table1 = level2_table0 + 512;
-#ifdef CONFIG_FSL_LSCH2
- level2_table2 = level2_table1 + 512;
+ int index;
#endif
- table.ptr = level0_table;
- /* Invalidate all table entries */
- memset(level0_table, 0, PGTABLE_SIZE);
+ mem_map = final_map;
- /* Fill in the table entries */
- set_pgtable_table(level0_table, 0, level1_table0);
- set_pgtable_table(level0_table, 1, level1_table1);
- set_pgtable_table(level1_table0, 0, level2_table0);
-#ifdef CONFIG_FSL_LSCH3
- set_pgtable_table(level1_table0,
- CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
- level2_table1);
-#elif defined(CONFIG_FSL_LSCH2)
- set_pgtable_table(level1_table0, 1, level2_table1);
- set_pgtable_table(level1_table0,
- CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
- level2_table2);
-#endif
-
- /* Find the table and fill in the block entries */
- for (i = 0; i < ARRAY_SIZE(final_mmu_table); i++) {
- if (find_table(&final_mmu_table[i],
- &table, level0_table) == 0) {
- if (set_block_entry(&final_mmu_table[i],
- &table) != 0) {
- printf("MMU error: could not set block entry for %p\n",
- &final_mmu_table[i]);
- }
-
- } else {
- printf("MMU error: could not find the table for %p\n",
- &final_mmu_table[i]);
- }
- }
- /* Set the secure memory to secure in MMU */
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
- if (el == 3 && gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
-#ifdef CONFIG_FSL_LSCH3
- level2_table_secure = level2_table1 + 512;
-#elif defined(CONFIG_FSL_LSCH2)
- level2_table_secure = level2_table2 + 512;
-#endif
- if (!final_secure_ddr(level0_table,
- level2_table_secure,
- gd->secure_ram & ~0x3)) {
- gd->secure_ram |= MEM_RESERVE_SECURE_SECURED;
- debug("Now MMU table is in secured memory at 0x%llx\n",
- gd->secure_ram & ~0x3);
+ if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
+ if (el == 3) {
+ /*
+ * Only use gd->arch.secure_ram if the address is
+ * recalculated. Align to 4KB for MMU table.
+ */
+ /* put page tables in secure ram */
+ index = ARRAY_SIZE(final_map) - 2;
+ gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
+ final_map[index].virt = gd->arch.secure_ram & ~0x3;
+ final_map[index].phys = final_map[index].virt;
+ final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
+ final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
+ gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
+ tlb_addr_save = gd->arch.tlb_addr;
} else {
- printf("MMU warning: Failed to secure DDR\n");
+ /* Use allocated (board_f.c) memory for TLB */
+ tlb_addr_save = gd->arch.tlb_allocated;
+ gd->arch.tlb_addr = tlb_addr_save;
}
}
#endif
+ /* Reset the fill ptr */
+ gd->arch.tlb_fillptr = tlb_addr_save;
+
+ /* Create normal system page tables */
+ setup_pgtables();
+
+ /* Create emergency page tables */
+ gd->arch.tlb_addr = gd->arch.tlb_fillptr;
+ gd->arch.tlb_emerg = gd->arch.tlb_addr;
+ setup_pgtables();
+ gd->arch.tlb_addr = tlb_addr_save;
+
/* flush new MMU table */
- flush_dcache_range((ulong)level0_table,
- (ulong)level0_table + gd->arch.tlb_size);
+ flush_dcache_range(gd->arch.tlb_addr,
+ gd->arch.tlb_addr + gd->arch.tlb_size);
/* point TTBR to the new table */
- set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR_FINAL,
+ set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
MEMORY_ATTRIBUTES);
/*
* MMU is already enabled, just need to invalidate TLB to load the
@@ -422,15 +167,21 @@ int arch_cpu_init(void)
return 0;
}
+void mmu_setup(void)
+{
+ final_mmu_setup();
+}
+
/*
- * This function is called from lib/board.c.
- * It recreates MMU table in main memory. MMU and d-cache are enabled earlier.
- * There is no need to disable d-cache for this operation.
+ * This function is called from common/board_r.c.
+ * It recreates MMU table in main memory.
*/
void enable_caches(void)
{
- final_mmu_setup();
+ mmu_setup();
__asm_invalidate_tlb_all();
+ icache_enable();
+ dcache_enable();
}
#endif
@@ -616,6 +367,7 @@ int arch_early_init_r(void)
{
#ifdef CONFIG_MP
int rv = 1;
+ u32 psci_ver = 0xffffffff;
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
@@ -623,9 +375,15 @@ int arch_early_init_r(void)
#endif
#ifdef CONFIG_MP
- rv = fsl_layerscape_wake_seconday_cores();
- if (rv)
- printf("Did not wake secondary cores\n");
+#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && defined(CONFIG_ARMV8_PSCI)
+ /* Check the psci version to determine if the psci is supported */
+ psci_ver = sec_firmware_support_psci_version();
+#endif
+ if (psci_ver == 0xffffffff) {
+ rv = fsl_layerscape_wake_seconday_cores();
+ if (rv)
+ printf("Did not wake secondary cores\n");
+ }
#endif
#ifdef CONFIG_SYS_HAS_SERDES
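One detail of early_mmu_setup() worth spelling out: masking TCR_ORGN_MASK and TCR_IRGN_MASK out of the get_tcr() result makes translation-table walks non-cacheable, which matters because the data cache has not been enabled yet when these OCRAM-resident tables are built. A condensed restatement of that step with explanatory comments added (same calls as in the patch, not new code):

	unsigned int el = current_el();
	u64 tcr = get_tcr(el, NULL, NULL);

	/* Non-cacheable walks: with the D-cache still off, this keeps the
	 * table walker coherent with the CPU stores that built the tables. */
	tcr &= ~(TCR_ORGN_MASK | TCR_IRGN_MASK);

	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, tcr, MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);		/* finally enable the MMU */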
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/fdt.c b/arch/arm/cpu/armv8/fsl-layerscape/fdt.c
index d17227ab2b..40d6a761e8 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/fdt.c
+++ b/arch/arm/cpu/armv8/fsl-layerscape/fdt.c
@@ -22,6 +22,9 @@
#endif
#include <fsl_sec.h>
#include <asm/arch-fsl-layerscape/soc.h>
+#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
+#include <asm/armv8/sec_firmware.h>
+#endif
int fdt_fixup_phy_connection(void *blob, int offset, phy_interface_t phyc)
{
@@ -38,7 +41,37 @@ void ft_fixup_cpu(void *blob)
int addr_cells;
u64 val, core_id;
size_t *boot_code_size = &(__secondary_boot_code_size);
+#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && defined(CONFIG_ARMV8_PSCI)
+ int node;
+ u32 psci_ver;
+
+ /* Check the psci version to determine if the psci is supported */
+ psci_ver = sec_firmware_support_psci_version();
+ if (psci_ver == 0xffffffff) {
+ /* remove psci DT node */
+ node = fdt_path_offset(blob, "/psci");
+ if (node >= 0)
+ goto remove_psci_node;
+
+ node = fdt_node_offset_by_compatible(blob, -1, "arm,psci");
+ if (node >= 0)
+ goto remove_psci_node;
+
+ node = fdt_node_offset_by_compatible(blob, -1, "arm,psci-0.2");
+ if (node >= 0)
+ goto remove_psci_node;
+ node = fdt_node_offset_by_compatible(blob, -1, "arm,psci-1.0");
+ if (node >= 0)
+ goto remove_psci_node;
+
+remove_psci_node:
+ if (node >= 0)
+ fdt_del_node(blob, node);
+ } else {
+ return;
+ }
+#endif
off = fdt_path_offset(blob, "/cpus");
if (off < 0) {
puts("couldn't find /cpus node\n");
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/ppa.c b/arch/arm/cpu/armv8/fsl-layerscape/ppa.c
new file mode 100644
index 0000000000..541b251bf4
--- /dev/null
+++ b/arch/arm/cpu/armv8/fsl-layerscape/ppa.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016 NXP Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+#include <common.h>
+#include <config.h>
+#include <errno.h>
+#include <asm/system.h>
+#include <asm/types.h>
+#include <asm/arch/soc.h>
+#ifdef CONFIG_FSL_LSCH3
+#include <asm/arch/immap_lsch3.h>
+#elif defined(CONFIG_FSL_LSCH2)
+#include <asm/arch/immap_lsch2.h>
+#endif
+#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
+#include <asm/armv8/sec_firmware.h>
+#endif
+
+int ppa_init(void)
+{
+ const void *ppa_fit_addr;
+ u32 *boot_loc_ptr_l, *boot_loc_ptr_h;
+ int ret;
+
+#ifdef CONFIG_SYS_LS_PPA_FW_IN_NOR
+ ppa_fit_addr = (void *)CONFIG_SYS_LS_PPA_FW_ADDR;
+#else
+#error "No CONFIG_SYS_LS_PPA_FW_IN_xxx defined"
+#endif
+
+#ifdef CONFIG_FSL_LSCH3
+ struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+ boot_loc_ptr_l = &gur->bootlocptrl;
+ boot_loc_ptr_h = &gur->bootlocptrh;
+#elif defined(CONFIG_FSL_LSCH2)
+ struct ccsr_scfg __iomem *scfg = (void *)(CONFIG_SYS_FSL_SCFG_ADDR);
+ boot_loc_ptr_l = &scfg->scratchrw[1];
+ boot_loc_ptr_h = &scfg->scratchrw[0];
+#endif
+
+ debug("fsl-ppa: boot_loc_ptr_l = 0x%p, boot_loc_ptr_h =0x%p\n",
+ boot_loc_ptr_l, boot_loc_ptr_h);
+ ret = sec_firmware_init(ppa_fit_addr, boot_loc_ptr_l, boot_loc_ptr_h);
+
+ return ret;
+}
diff --git a/arch/arm/cpu/armv8/s32v234/cpu.c b/arch/arm/cpu/armv8/s32v234/cpu.c
index dac12a2552..5c97e0eee4 100644
--- a/arch/arm/cpu/armv8/s32v234/cpu.c
+++ b/arch/arm/cpu/armv8/s32v234/cpu.c
@@ -32,24 +32,28 @@ u32 cpu_mask(void)
static struct mm_region s32v234_mem_map[] = {
{
- .base = S32V234_IRAM_BASE,
+ .virt = S32V234_IRAM_BASE,
+ .phys = S32V234_IRAM_BASE,
.size = S32V234_IRAM_SIZE,
.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
PTE_BLOCK_OUTER_SHARE
}, {
- .base = S32V234_DRAM_BASE1,
+ .virt = S32V234_DRAM_BASE1,
+ .phys = S32V234_DRAM_BASE1,
.size = S32V234_DRAM_SIZE1,
.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
PTE_BLOCK_OUTER_SHARE
}, {
- .base = S32V234_PERIPH_BASE,
+ .virt = S32V234_PERIPH_BASE,
+ .phys = S32V234_PERIPH_BASE,
.size = S32V234_PERIPH_SIZE,
.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
PTE_BLOCK_NON_SHARE
/* TODO: Do we need these? */
/* | PTE_BLOCK_PXN | PTE_BLOCK_UXN */
}, {
- .base = S32V234_DRAM_BASE2,
+ .virt = S32V234_DRAM_BASE2,
+ .phys = S32V234_DRAM_BASE2,
.size = S32V234_DRAM_SIZE2,
.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL_NC) |
PTE_BLOCK_OUTER_SHARE
diff --git a/arch/arm/cpu/armv8/sec_firmware.c b/arch/arm/cpu/armv8/sec_firmware.c
new file mode 100644
index 0000000000..e21e199381
--- /dev/null
+++ b/arch/arm/cpu/armv8/sec_firmware.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2016 NXP Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <errno.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/types.h>
+#include <asm/macro.h>
+#include <asm/armv8/sec_firmware.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+extern void c_runtime_cpu_setup(void);
+
+#define SEC_FIRMWARE_LOADED 0x1
+#define SEC_FIRMWARE_RUNNING 0x2
+#define SEC_FIRMWARE_ADDR_MASK (~0x3)
+ /*
+ * Secure firmware load addr
+ * Flags used: 0x1 secure firmware has been loaded to secure memory
+ * 0x2 secure firmware is running
+ */
+ phys_addr_t sec_firmware_addr;
+
+static int sec_firmware_get_data(const void *sec_firmware_img,
+ const void **data, size_t *size)
+{
+ int conf_node_off, fw_node_off;
+ char *conf_node_name = NULL;
+ char *desc;
+ int ret;
+
+ conf_node_name = SEC_FIRMEWARE_FIT_CNF_NAME;
+
+ conf_node_off = fit_conf_get_node(sec_firmware_img, conf_node_name);
+ if (conf_node_off < 0) {
+ printf("SEC Firmware: %s: no such config\n", conf_node_name);
+ return -ENOENT;
+ }
+
+ fw_node_off = fit_conf_get_prop_node(sec_firmware_img, conf_node_off,
+ SEC_FIRMWARE_FIT_IMAGE);
+ if (fw_node_off < 0) {
+ printf("SEC Firmware: No '%s' in config\n",
+ SEC_FIRMWARE_FIT_IMAGE);
+ return -ENOLINK;
+ }
+
+ /* Verify secure firmware image */
+ if (!(fit_image_verify(sec_firmware_img, fw_node_off))) {
+ printf("SEC Firmware: Bad firmware image (bad CRC)\n");
+ return -EINVAL;
+ }
+
+ if (fit_image_get_data(sec_firmware_img, fw_node_off, data, size)) {
+ printf("SEC Firmware: Can't get %s subimage data/size",
+ SEC_FIRMWARE_FIT_IMAGE);
+ return -ENOENT;
+ }
+
+ ret = fit_get_desc(sec_firmware_img, fw_node_off, &desc);
+ if (ret)
+ printf("SEC Firmware: Can't get description\n");
+ else
+ printf("%s\n", desc);
+
+ return ret;
+}
+
+/*
+ * SEC Firmware FIT image parser checks if the image is in FIT
+ * format, verifies integrity of the image and calculates raw
+ * image address and size values.
+ *
+ * Returns 0 on success and a negative errno on failure.
+ */
+static int sec_firmware_parse_image(const void *sec_firmware_img,
+ const void **raw_image_addr,
+ size_t *raw_image_size)
+{
+ int ret;
+
+ ret = sec_firmware_get_data(sec_firmware_img, raw_image_addr,
+ raw_image_size);
+ if (ret)
+ return ret;
+
+ debug("SEC Firmware: raw_image_addr = 0x%p, raw_image_size = 0x%lx\n",
+ *raw_image_addr, *raw_image_size);
+
+ return 0;
+}
+
+static int sec_firmware_copy_image(const char *title,
+ u64 image_addr, u32 image_size, u64 sec_firmware)
+{
+ debug("%s copied to address 0x%p\n", title, (void *)sec_firmware);
+ memcpy((void *)sec_firmware, (void *)image_addr, image_size);
+ flush_dcache_range(sec_firmware, sec_firmware + image_size);
+
+ return 0;
+}
+
+/*
+ * This function will parse the SEC Firmware image, and then load it
+ * to secure memory.
+ */
+static int sec_firmware_load_image(const void *sec_firmware_img)
+{
+ const void *raw_image_addr;
+ size_t raw_image_size = 0;
+ int ret;
+
+ /*
+ * The Exception Level must be EL3 to load and initialize
+ * the SEC Firmware.
+ */
+ if (current_el() != 3) {
+ ret = -EACCES;
+ goto out;
+ }
+
+#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
+ /*
+ * The SEC Firmware must be stored in secure memory.
+ * Append SEC Firmware to secure mmu table.
+ */
+ if (!(gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ sec_firmware_addr = (gd->arch.secure_ram & MEM_RESERVE_SECURE_ADDR_MASK) +
+ gd->arch.tlb_size;
+#else
+#error "The CONFIG_SYS_MEM_RESERVE_SECURE must be defined when enabled SEC Firmware support"
+#endif
+
+ /* Align SEC Firmware base address to 4K */
+ sec_firmware_addr = (sec_firmware_addr + 0xfff) & ~0xfff;
+ debug("SEC Firmware: Load address: 0x%llx\n",
+ sec_firmware_addr & SEC_FIRMWARE_ADDR_MASK);
+
+ ret = sec_firmware_parse_image(sec_firmware_img, &raw_image_addr,
+ &raw_image_size);
+ if (ret)
+ goto out;
+
+ /* TODO:
+ * Check that the end address of the SEC Firmware does not extend
+ * beyond the reserved secure memory.
+ */
+
+ /* Copy the secure firmware to secure memory */
+ ret = sec_firmware_copy_image("SEC Firmware", (u64)raw_image_addr,
+ raw_image_size, sec_firmware_addr &
+ SEC_FIRMWARE_ADDR_MASK);
+ if (ret)
+ goto out;
+
+ sec_firmware_addr |= SEC_FIRMWARE_LOADED;
+ debug("SEC Firmware: Entry point: 0x%llx\n",
+ sec_firmware_addr & SEC_FIRMWARE_ADDR_MASK);
+
+ return 0;
+
+out:
+ printf("SEC Firmware: error (%d)\n", ret);
+ sec_firmware_addr = 0;
+
+ return ret;
+}
+
+static int sec_firmware_entry(u32 *eret_hold_l, u32 *eret_hold_h)
+{
+ const void *entry = (void *)(sec_firmware_addr &
+ SEC_FIRMWARE_ADDR_MASK);
+
+ return _sec_firmware_entry(entry, eret_hold_l, eret_hold_h);
+}
+
+/* Check the secure firmware FIT image */
+__weak bool sec_firmware_is_valid(const void *sec_firmware_img)
+{
+ if (fdt_check_header(sec_firmware_img)) {
+ printf("SEC Firmware: Bad firmware image (not a FIT image)\n");
+ return false;
+ }
+
+ if (!fit_check_format(sec_firmware_img)) {
+ printf("SEC Firmware: Bad firmware image (bad FIT header)\n");
+ return false;
+ }
+
+ return true;
+}
+
+#ifdef CONFIG_ARMV8_PSCI
+/*
+ * The PSCI_VERSION function was introduced in PSCI v0.2. If a PSCI
+ * v0.1 implementation receives this function, it returns the
+ * NOT_SUPPORTED (0xffff_ffff) error number according to the SMC
+ * Calling Convention. Since NOT_SUPPORTED cannot be distinguished
+ * from other errors, PSCI v0.1 is not supported by this framework.
+ * If the secure firmware isn't running, NOT_SUPPORTED is returned
+ * as well.
+ *
+ * The return value on success is PSCI version in format
+ * major[31:16]:minor[15:0].
+ */
+unsigned int sec_firmware_support_psci_version(void)
+{
+ if (sec_firmware_addr & SEC_FIRMWARE_RUNNING)
+ return _sec_firmware_support_psci_version();
+
+ return 0xffffffff;
+}
+#endif
+
+/*
+ * sec_firmware_init - Initialize the SEC Firmware
+ * @sec_firmware_img: the SEC Firmware image address
+ * @eret_hold_l: the address to hold exception return address low
+ * @eret_hold_h: the address to hold exception return address high
+ */
+int sec_firmware_init(const void *sec_firmware_img,
+ u32 *eret_hold_l,
+ u32 *eret_hold_h)
+{
+ int ret;
+
+ if (!sec_firmware_is_valid(sec_firmware_img))
+ return -EINVAL;
+
+ ret = sec_firmware_load_image(sec_firmware_img);
+ if (ret) {
+ printf("SEC Firmware: Failed to load image\n");
+ return ret;
+ } else if (sec_firmware_addr & SEC_FIRMWARE_LOADED) {
+ ret = sec_firmware_entry(eret_hold_l, eret_hold_h);
+ if (ret) {
+ printf("SEC Firmware: Failed to initialize\n");
+ return ret;
+ }
+ }
+
+ debug("SEC Firmware: Return from SEC Firmware: current_el = %d\n",
+ current_el());
+
+ /*
+ * The PE will be running at the target EL when it returns from
+ * the SEC Firmware.
+ */
+ if (current_el() != SEC_FIRMWARE_TARGET_EL)
+ return -EACCES;
+
+ sec_firmware_addr |= SEC_FIRMWARE_RUNNING;
+
+ /* Set exception table and enable caches if it isn't EL3 */
+ if (current_el() != 3) {
+ c_runtime_cpu_setup();
+ enable_caches();
+ }
+
+ return 0;
+}
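Note how sec_firmware_addr doubles as a small state word: the load address is aligned to 4 KB, so its low bits are free to carry the LOADED/RUNNING flags. A short illustration of unpacking it with the masks defined above (not part of the patch):

	phys_addr_t base = sec_firmware_addr & SEC_FIRMWARE_ADDR_MASK;
	bool loaded  = sec_firmware_addr & SEC_FIRMWARE_LOADED;	/* bit 0 */
	bool running = sec_firmware_addr & SEC_FIRMWARE_RUNNING;	/* bit 1 */

	if (running)
		debug("SEC Firmware live at 0x%llx\n", (u64)base);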
diff --git a/arch/arm/cpu/armv8/sec_firmware_asm.S b/arch/arm/cpu/armv8/sec_firmware_asm.S
new file mode 100644
index 0000000000..0c6a46249a
--- /dev/null
+++ b/arch/arm/cpu/armv8/sec_firmware_asm.S
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 NXP Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <config.h>
+#include <linux/linkage.h>
+#include <asm/system.h>
+#include <asm/macro.h>
+
+WEAK(_sec_firmware_entry)
+ /*
+ * x0: Secure Firmware entry point
+ * x1: Exception return address Low
+ * x2: Exception return address High
+ */
+
+ /* Save stack pointer for EL2 */
+ mov x3, sp
+ msr sp_el2, x3
+
+ /* Set exception return address hold pointer */
+ adr x4, 1f
+ mov x3, x4
+#ifdef SEC_FIRMWARE_ERET_ADDR_REVERT
+ rev w3, w3
+#endif
+ str w3, [x1]
+ lsr x3, x4, #32
+#ifdef SEC_FIRMWARE_ERET_ADDR_REVERT
+ rev w3, w3
+#endif
+ str w3, [x2]
+
+ /* Call SEC monitor */
+ br x0
+
+1:
+ mov x0, #0
+ ret
+ENDPROC(_sec_firmware_entry)
+
+#ifdef CONFIG_ARMV8_PSCI
+ENTRY(_sec_firmware_support_psci_version)
+ mov x0, 0x84000000
+ mov x1, 0x0
+ mov x2, 0x0
+ mov x3, 0x0
+ smc #0
+ ret
+ENDPROC(_sec_firmware_support_psci_version)
+#endif
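_sec_firmware_support_psci_version issues the standard PSCI_VERSION call (SMC function ID 0x84000000). On success the return value encodes major[31:16]:minor[15:0], as described in sec_firmware.c; a caller could decode it along these lines (illustrative only):

	unsigned int ver = sec_firmware_support_psci_version();

	if (ver == 0xffffffff) {
		/* NOT_SUPPORTED: no usable PSCI, fall back to spin-table */
	} else {
		unsigned int major = ver >> 16;
		unsigned int minor = ver & 0xffff;
		printf("PSCI v%u.%u detected\n", major, minor);
	}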
diff --git a/arch/arm/cpu/armv8/zynqmp/cpu.c b/arch/arm/cpu/armv8/zynqmp/cpu.c
index 509f0aa387..b0f12955a1 100644
--- a/arch/arm/cpu/armv8/zynqmp/cpu.c
+++ b/arch/arm/cpu/armv8/zynqmp/cpu.c
@@ -18,40 +18,47 @@ DECLARE_GLOBAL_DATA_PTR;
static struct mm_region zynqmp_mem_map[] = {
{
- .base = 0x0UL,
+ .virt = 0x0UL,
+ .phys = 0x0UL,
.size = 0x80000000UL,
.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
PTE_BLOCK_INNER_SHARE
}, {
- .base = 0x80000000UL,
+ .virt = 0x80000000UL,
+ .phys = 0x80000000UL,
.size = 0x70000000UL,
.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
PTE_BLOCK_NON_SHARE |
PTE_BLOCK_PXN | PTE_BLOCK_UXN
}, {
- .base = 0xf8000000UL,
+ .virt = 0xf8000000UL,
+ .phys = 0xf8000000UL,
.size = 0x07e00000UL,
.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
PTE_BLOCK_NON_SHARE |
PTE_BLOCK_PXN | PTE_BLOCK_UXN
}, {
- .base = 0xffe00000UL,
+ .virt = 0xffe00000UL,
+ .phys = 0xffe00000UL,
.size = 0x00200000UL,
.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
PTE_BLOCK_INNER_SHARE
}, {
- .base = 0x400000000UL,
+ .virt = 0x400000000UL,
+ .phys = 0x400000000UL,
.size = 0x200000000UL,
.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
PTE_BLOCK_NON_SHARE |
PTE_BLOCK_PXN | PTE_BLOCK_UXN
}, {
- .base = 0x600000000UL,
+ .virt = 0x600000000UL,
+ .phys = 0x600000000UL,
.size = 0x800000000UL,
.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
PTE_BLOCK_INNER_SHARE
}, {
- .base = 0xe00000000UL,
+ .virt = 0xe00000000UL,
+ .phys = 0xe00000000UL,
.size = 0xf200000000UL,
.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
PTE_BLOCK_NON_SHARE |