author     Antonio Nino Diaz <antonio.ninodiaz@arm.com>  2018-07-03 11:58:49 +0100
committer  Antonio Nino Diaz <antonio.ninodiaz@arm.com>  2018-07-03 13:41:07 +0100
commit     fd2299e6b0dfdf1214e09ba742de3d3f9eadbe0c (patch)
tree       0c119d3a290be37aa816b45d47fc00493a86cf16 /lib/xlat_tables_v2
parent     6c5e196be37b4afe3403f349d5da1747e12460ac (diff)
xlat v2: Split code into separate files
Instead of having one big file with all the code, it's better to have a
few smaller files that are more manageable:

- xlat_tables_core.c: Code related to the core functionality of the
  library (map and unmap regions, initialize xlat context).
- xlat_tables_context.c: Instantiation of the active image context as
  well as APIs to manipulate it.
- xlat_tables_utils.c: Helper code that isn't part of the core
  functionality (change attributes, debug print messages).

Change-Id: I3ea956fc1afd7473c0bb5e7c6aab3b2e5d88c711
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
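
For reference, a minimal sketch of how a BL image drives the public API that
now lives in xlat_tables_context.c (the addresses, sizes and attribute
choices below are illustrative assumptions, not part of this patch):

    #include <xlat_tables_v2.h>

    void bl_setup_xlat(void)
    {
            /* Identity-map 64 KiB of image code read-only/executable
             * (illustrative base address). */
            mmap_add_region(0x80000000ULL, 0x80000000UL, 0x10000U,
                            MT_CODE | MT_SECURE);

            /* Map a device page read-write, execute-never. */
            mmap_add_region(0x1c090000ULL, 0x1c090000UL, 0x1000U,
                            MT_DEVICE | MT_RW | MT_SECURE);

            /* Build the tables of the active image context... */
            init_xlat_tables();

            /* ...and enable the MMU (AArch32 images call
             * enable_mmu_secure() instead). */
            enable_mmu_el3(0U);
    }

The context-explicit _ctx variants of the same calls stay in
xlat_tables_core.c, as the diff below shows.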
Diffstat (limited to 'lib/xlat_tables_v2')
-rw-r--r--  lib/xlat_tables_v2/aarch32/xlat_tables_arch.c                2
-rw-r--r--  lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h        2
-rw-r--r--  lib/xlat_tables_v2/aarch64/xlat_tables_arch.c                5
-rw-r--r--  lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h        2
-rw-r--r--  lib/xlat_tables_v2/xlat_tables.mk                            4
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_context.c                   117
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_core.c (renamed from lib/xlat_tables_v2/xlat_tables_internal.c)  681
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_private.h                    10
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_utils.c                     592
9 files changed, 730 insertions, 685 deletions
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index 94dcf578..6e971925 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -61,7 +61,7 @@ void xlat_arch_tlbi_va(uintptr_t va)
tlbimvaais(TLBI_ADDR(va));
}
-void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime __unused)
+void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime __unused)
{
/*
* Ensure the translation table write has drained into memory before
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
index 509395d8..9b41f4df 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
@@ -14,7 +14,7 @@
* Return the execute-never mask that will prevent instruction fetch at the
* given translation regime.
*/
-static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime __unused)
+static inline uint64_t xlat_arch_regime_get_xn_desc(int regime __unused)
{
return UPPER_ATTRS(XN);
}
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 71b9c8fa..4bbbe544 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -7,11 +7,8 @@
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
-#include <bl_common.h>
#include <cassert.h>
-#include <common_def.h>
#include <sys/types.h>
-#include <utils.h>
#include <utils_def.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
@@ -128,7 +125,7 @@ void xlat_arch_tlbi_va(uintptr_t va)
#endif
}
-void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime)
+void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime)
{
/*
* Ensure the translation table write has drained into memory before
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
index d201590a..39b0a653 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
@@ -15,7 +15,7 @@
* Return the execute-never mask that will prevent instruction fetch at all ELs
* that are part of the given translation regime.
*/
-static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime)
+static inline uint64_t xlat_arch_regime_get_xn_desc(int regime)
{
if (regime == EL1_EL0_REGIME) {
return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
index 1e70f37f..b25c805c 100644
--- a/lib/xlat_tables_v2/xlat_tables.mk
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -7,6 +7,8 @@
XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
${ARCH}/enable_mmu.S \
${ARCH}/xlat_tables_arch.c \
- xlat_tables_internal.c)
+ xlat_tables_context.c \
+ xlat_tables_core.c \
+ xlat_tables_utils.c)
INCLUDES += -Ilib/xlat_tables_v2/${ARCH}
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
new file mode 100644
index 00000000..0964b49b
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <platform_def.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+/*
+ * Each platform can define the size of its physical and virtual address spaces.
+ * If the platform hasn't defined one or both of them, default to
+ * ADDR_SPACE_SIZE. The latter is deprecated, though.
+ */
+#if ERROR_DEPRECATED
+# ifdef ADDR_SPACE_SIZE
+# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
+# endif
+#elif defined(ADDR_SPACE_SIZE)
+# ifndef PLAT_PHY_ADDR_SPACE_SIZE
+# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
+# endif
+# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
+# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
+# endif
+#endif
+
+/*
+ * Allocate and initialise the default translation context for the BL image
+ * currently executing.
+ */
+REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
+ PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
+ unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+
+ mmap_add_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+void mmap_add(const mmap_region_t *mm)
+{
+ mmap_add_ctx(&tf_xlat_ctx, mm);
+}
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+ size_t size, unsigned int attr)
+{
+ mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
+
+ return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
+{
+ return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
+ base_va, size);
+}
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+void init_xlat_tables(void)
+{
+ init_xlat_tables_ctx(&tf_xlat_ctx);
+}
+
+/*
+ * If dynamic allocation of new regions is disabled then by the time we call the
+ * function enabling the MMU, we'll have registered all the memory regions to
+ * map for the system's lifetime. Therefore, at this point we know the maximum
+ * physical address that will ever be mapped.
+ *
+ * If dynamic allocation is enabled then we can't make any such assumption
+ * because the maximum physical address could get pushed while adding a new
+ * region. Therefore, in this case we have to assume that the whole address
+ * space size might be mapped.
+ */
+#ifdef PLAT_XLAT_TABLES_DYNAMIC
+#define MAX_PHYS_ADDR tf_xlat_ctx.pa_max_address
+#else
+#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa
+#endif
+
+#ifdef AARCH32
+
+void enable_mmu_secure(unsigned int flags)
+{
+ setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address);
+ enable_mmu_direct(flags);
+}
+
+#else
+
+void enable_mmu_el1(unsigned int flags)
+{
+ setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address);
+ enable_mmu_direct_el1(flags);
+}
+
+void enable_mmu_el3(unsigned int flags)
+{
+ setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address);
+ enable_mmu_direct_el3(flags);
+}
+
+#endif /* AARCH32 */
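
When PLAT_XLAT_TABLES_DYNAMIC is enabled, the thin wrappers above also allow
mapping and unmapping regions after init_xlat_tables(). A hedged usage
sketch, with an illustrative address:

    #include <xlat_tables_v2.h>

    /* Temporarily map one page, use it, then tear the mapping down. */
    static int with_temp_window(void)
    {
            int rc = mmap_add_dynamic_region(0x88000000ULL, 0x88000000UL,
                                             PAGE_SIZE,
                                             MT_MEMORY | MT_RW | MT_SECURE);
            if (rc != 0)
                    return rc; /* e.g. no table space or overlapping region */

            /* ... access the window at 0x88000000 here ... */

            return mmap_remove_dynamic_region(0x88000000UL, PAGE_SIZE);
    }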
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 7f1d3958..f555524a 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -4,47 +4,20 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
-#include <common_def.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <string.h>
#include <types.h>
-#include <utils.h>
+#include <utils_def.h>
#include <xlat_tables_arch_private.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>
#include "xlat_tables_private.h"
-/*
- * Each platform can define the size of its physical and virtual address spaces.
- * If the platform hasn't defined one or both of them, default to
- * ADDR_SPACE_SIZE. The latter is deprecated, though.
- */
-#if ERROR_DEPRECATED
-# ifdef ADDR_SPACE_SIZE
-# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
-# endif
-#elif defined(ADDR_SPACE_SIZE)
-# ifndef PLAT_PHY_ADDR_SPACE_SIZE
-# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
-# endif
-# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
-# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
-# endif
-#endif
-
-/*
- * Allocate and initialise the default translation context for the BL image
- * currently executing.
- */
-REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
- PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
-
#if PLAT_XLAT_TABLES_DYNAMIC
/*
@@ -94,7 +67,7 @@ static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
}
-/* Returns 0 if the speficied table isn't empty, otherwise 1. */
+/* Returns 0 if the specified table isn't empty, otherwise 1. */
static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
{
return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
@@ -115,8 +88,8 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
/*
* Returns a block/page table descriptor for the given level and attributes.
*/
-static uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
- unsigned long long addr_pa, int level)
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+ unsigned long long addr_pa, int level)
{
uint64_t desc;
int mem_type;
@@ -509,7 +482,7 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
/*
* Recursive function that writes to the translation tables and maps the
* specified region. On success, it returns the VA of the last byte that was
- * succesfully mapped. On error, it returns the VA of the next entry that
+ * successfully mapped. On error, it returns the VA of the next entry that
* should have been mapped.
*/
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
@@ -609,23 +582,6 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
return table_idx_va - 1;
}
-void print_mmap(mmap_region_t *const mmap)
-{
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
- tf_printf("mmap:\n");
- mmap_region_t *mm = mmap;
-
- while (mm->size) {
- tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x",
- (void *)mm->base_va, mm->base_pa,
- mm->size, mm->attr);
- tf_printf(" granularity:0x%zx\n", mm->granularity);
- ++mm;
- };
- tf_printf("\n");
-#endif
-}
-
/*
* Function that verifies that a region can be mapped.
* Returns:
@@ -823,14 +779,6 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
ctx->max_va = end_va;
}
-void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
- unsigned int attr)
-{
- mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
- mmap_add_region_ctx(&tf_xlat_ctx, &mm);
-}
-
-
void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
while (mm->size) {
@@ -839,11 +787,6 @@ void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
}
}
-void mmap_add(const mmap_region_t *mm)
-{
- mmap_add_ctx(&tf_xlat_ctx, mm);
-}
-
#if PLAT_XLAT_TABLES_DYNAMIC
int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
@@ -945,13 +888,6 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
return 0;
}
-int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
- size_t size, unsigned int attr)
-{
- mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
- return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
-}
-
/*
* Removes the region with given base Virtual Address and size from the given
* context.
@@ -1027,219 +963,8 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
return 0;
}
-int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
-{
- return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
- base_va, size);
-}
-
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
-
-/* Print the attributes of the specified block descriptor. */
-static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
-{
- int mem_type_index = ATTR_INDEX_GET(desc);
- xlat_regime_t xlat_regime = ctx->xlat_regime;
-
- if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
- tf_printf("MEM");
- } else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
- tf_printf("NC");
- } else {
- assert(mem_type_index == ATTR_DEVICE_INDEX);
- tf_printf("DEV");
- }
-
- const char *priv_str = "(PRIV)";
- const char *user_str = "(USER)";
-
- /*
- * Showing Privileged vs Unprivileged only makes sense for EL1&0
- * mappings
- */
- const char *ro_str = "-RO";
- const char *rw_str = "-RW";
- const char *no_access_str = "-NOACCESS";
-
- if (xlat_regime == EL3_REGIME) {
- /* For EL3, the AP[2] bit is all what matters */
- tf_printf("%s", (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str);
- } else {
- const char *ap_str = (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str;
- tf_printf("%s", ap_str);
- tf_printf("%s", priv_str);
- /*
- * EL0 can only have the same permissions as EL1 or no
- * permissions at all.
- */
- tf_printf("%s",
- (desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
- ? ap_str : no_access_str);
- tf_printf("%s", user_str);
- }
-
- const char *xn_str = "-XN";
- const char *exec_str = "-EXEC";
-
- if (xlat_regime == EL3_REGIME) {
- /* For EL3, the XN bit is all what matters */
- tf_printf("%s", (UPPER_ATTRS(XN) & desc) ? xn_str : exec_str);
- } else {
- /* For EL0 and EL1, we need to know who has which rights */
- tf_printf("%s", (UPPER_ATTRS(PXN) & desc) ? xn_str : exec_str);
- tf_printf("%s", priv_str);
-
- tf_printf("%s", (UPPER_ATTRS(UXN) & desc) ? xn_str : exec_str);
- tf_printf("%s", user_str);
- }
-
- tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
-}
-
-static const char * const level_spacers[] = {
- "[LV0] ",
- " [LV1] ",
- " [LV2] ",
- " [LV3] "
-};
-
-static const char *invalid_descriptors_ommited =
- "%s(%d invalid descriptors omitted)\n";
-
-/*
- * Recursive function that reads the translation tables passed as an argument
- * and prints their status.
- */
-static void xlat_tables_print_internal(xlat_ctx_t *ctx,
- const uintptr_t table_base_va,
- uint64_t *const table_base, const int table_entries,
- const unsigned int level)
-{
- assert(level <= XLAT_TABLE_LEVEL_MAX);
-
- uint64_t desc;
- uintptr_t table_idx_va = table_base_va;
- int table_idx = 0;
-
- size_t level_size = XLAT_BLOCK_SIZE(level);
-
- /*
- * Keep track of how many invalid descriptors are counted in a row.
- * Whenever multiple invalid descriptors are found, only the first one
- * is printed, and a line is added to inform about how many descriptors
- * have been omitted.
- */
- int invalid_row_count = 0;
-
- while (table_idx < table_entries) {
-
- desc = table_base[table_idx];
-
- if ((desc & DESC_MASK) == INVALID_DESC) {
-
- if (invalid_row_count == 0) {
- tf_printf("%sVA:%p size:0x%zx\n",
- level_spacers[level],
- (void *)table_idx_va, level_size);
- }
- invalid_row_count++;
-
- } else {
-
- if (invalid_row_count > 1) {
- tf_printf(invalid_descriptors_ommited,
- level_spacers[level],
- invalid_row_count - 1);
- }
- invalid_row_count = 0;
-
- /*
- * Check if this is a table or a block. Tables are only
- * allowed in levels other than 3, but DESC_PAGE has the
- * same value as DESC_TABLE, so we need to check.
- */
- if (((desc & DESC_MASK) == TABLE_DESC) &&
- (level < XLAT_TABLE_LEVEL_MAX)) {
- /*
- * Do not print any PA for a table descriptor,
- * as it doesn't directly map physical memory
- * but instead points to the next translation
- * table in the translation table walk.
- */
- tf_printf("%sVA:%p size:0x%zx\n",
- level_spacers[level],
- (void *)table_idx_va, level_size);
-
- uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
-
- xlat_tables_print_internal(ctx, table_idx_va,
- (uint64_t *)addr_inner,
- XLAT_TABLE_ENTRIES, level + 1);
- } else {
- tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
- level_spacers[level],
- (void *)table_idx_va,
- (unsigned long long)(desc & TABLE_ADDR_MASK),
- level_size);
- xlat_desc_print(ctx, desc);
- tf_printf("\n");
- }
- }
-
- table_idx++;
- table_idx_va += level_size;
- }
-
- if (invalid_row_count > 1) {
- tf_printf(invalid_descriptors_ommited,
- level_spacers[level], invalid_row_count - 1);
- }
-}
-
-#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
-
-void xlat_tables_print(xlat_ctx_t *ctx)
-{
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
- const char *xlat_regime_str;
- if (ctx->xlat_regime == EL1_EL0_REGIME) {
- xlat_regime_str = "1&0";
- } else {
- assert(ctx->xlat_regime == EL3_REGIME);
- xlat_regime_str = "3";
- }
- VERBOSE("Translation tables state:\n");
- VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str);
- VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
- VERBOSE(" Max allowed VA: %p\n", (void *) ctx->va_max_address);
- VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
- VERBOSE(" Max mapped VA: %p\n", (void *) ctx->max_va);
-
- VERBOSE(" Initial lookup level: %i\n", ctx->base_level);
- VERBOSE(" Entries @initial lookup level: %i\n",
- ctx->base_table_entries);
-
- int used_page_tables;
-#if PLAT_XLAT_TABLES_DYNAMIC
- used_page_tables = 0;
- for (unsigned int i = 0; i < ctx->tables_num; ++i) {
- if (ctx->tables_mapped_regions[i] != 0)
- ++used_page_tables;
- }
-#else
- used_page_tables = ctx->next_table;
-#endif
- VERBOSE(" Used %i sub-tables out of %i (spare: %i)\n",
- used_page_tables, ctx->tables_num,
- ctx->tables_num - used_page_tables);
-
- xlat_tables_print_internal(ctx, 0, ctx->base_table,
- ctx->base_table_entries, ctx->base_level);
-#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
-}
-
void init_xlat_tables_ctx(xlat_ctx_t *ctx)
{
assert(ctx != NULL);
@@ -1249,7 +974,7 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
mmap_region_t *mm = ctx->mmap;
- print_mmap(mm);
+ xlat_mmap_print(mm);
/* All tables must be zeroed before mapping any region. */
@@ -1286,397 +1011,3 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
xlat_tables_print(ctx);
}
-
-void init_xlat_tables(void)
-{
- init_xlat_tables_ctx(&tf_xlat_ctx);
-}
-
-/*
- * If dynamic allocation of new regions is disabled then by the time we call the
- * function enabling the MMU, we'll have registered all the memory regions to
- * map for the system's lifetime. Therefore, at this point we know the maximum
- * physical address that will ever be mapped.
- *
- * If dynamic allocation is enabled then we can't make any such assumption
- * because the maximum physical address could get pushed while adding a new
- * region. Therefore, in this case we have to assume that the whole address
- * space size might be mapped.
- */
-#ifdef PLAT_XLAT_TABLES_DYNAMIC
-#define MAX_PHYS_ADDR tf_xlat_ctx.pa_max_address
-#else
-#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa
-#endif
-
-#ifdef AARCH32
-
-void enable_mmu_secure(unsigned int flags)
-{
- setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
- tf_xlat_ctx.va_max_address);
- enable_mmu_direct(flags);
-}
-
-#else
-
-void enable_mmu_el1(unsigned int flags)
-{
- setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
- tf_xlat_ctx.va_max_address);
- enable_mmu_direct_el1(flags);
-}
-
-void enable_mmu_el3(unsigned int flags)
-{
- setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
- tf_xlat_ctx.va_max_address);
- enable_mmu_direct_el3(flags);
-}
-
-#endif /* AARCH32 */
-
-/*
- * Do a translation table walk to find the block or page descriptor that maps
- * virtual_addr.
- *
- * On success, return the address of the descriptor within the translation
- * table. Its lookup level is stored in '*out_level'.
- * On error, return NULL.
- *
- * xlat_table_base
- * Base address for the initial lookup level.
- * xlat_table_base_entries
- * Number of entries in the translation table for the initial lookup level.
- * virt_addr_space_size
- * Size in bytes of the virtual address space.
- */
-static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
- void *xlat_table_base,
- int xlat_table_base_entries,
- unsigned long long virt_addr_space_size,
- int *out_level)
-{
- unsigned int start_level;
- uint64_t *table;
- int entries;
-
- VERBOSE("%s(%p)\n", __func__, (void *)virtual_addr);
-
- start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
- VERBOSE("Starting translation table walk from level %i\n", start_level);
-
- table = xlat_table_base;
- entries = xlat_table_base_entries;
-
- for (unsigned int level = start_level;
- level <= XLAT_TABLE_LEVEL_MAX;
- ++level) {
- int idx;
- uint64_t desc;
- uint64_t desc_type;
-
- VERBOSE("Table address: %p\n", (void *)table);
-
- idx = XLAT_TABLE_IDX(virtual_addr, level);
- VERBOSE("Index into level %i table: %i\n", level, idx);
- if (idx >= entries) {
- VERBOSE("Invalid address\n");
- return NULL;
- }
-
- desc = table[idx];
- desc_type = desc & DESC_MASK;
- VERBOSE("Descriptor at level %i: 0x%llx\n", level,
- (unsigned long long)desc);
-
- if (desc_type == INVALID_DESC) {
- VERBOSE("Invalid entry (memory not mapped)\n");
- return NULL;
- }
-
- if (level == XLAT_TABLE_LEVEL_MAX) {
- /*
- * There can't be table entries at the final lookup
- * level.
- */
- assert(desc_type == PAGE_DESC);
- VERBOSE("Descriptor mapping a memory page (size: 0x%llx)\n",
- (unsigned long long)XLAT_BLOCK_SIZE(XLAT_TABLE_LEVEL_MAX));
- *out_level = level;
- return &table[idx];
- }
-
- if (desc_type == BLOCK_DESC) {
- VERBOSE("Descriptor mapping a memory block (size: 0x%llx)\n",
- (unsigned long long)XLAT_BLOCK_SIZE(level));
- *out_level = level;
- return &table[idx];
- }
-
- assert(desc_type == TABLE_DESC);
- VERBOSE("Table descriptor, continuing xlat table walk...\n");
- table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
- entries = XLAT_TABLE_ENTRIES;
- }
-
- /*
- * This shouldn't be reached, the translation table walk should end at
- * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
- */
- assert(0);
-
- return NULL;
-}
-
-
-static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
- uint32_t *attributes, uint64_t **table_entry,
- unsigned long long *addr_pa, int *table_level)
-{
- uint64_t *entry;
- uint64_t desc;
- int level;
- unsigned long long virt_addr_space_size;
-
- /*
- * Sanity-check arguments.
- */
- assert(ctx != NULL);
- assert(ctx->initialized);
- assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
-
- virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
- assert(virt_addr_space_size > 0);
-
- entry = find_xlat_table_entry(base_va,
- ctx->base_table,
- ctx->base_table_entries,
- virt_addr_space_size,
- &level);
- if (entry == NULL) {
- WARN("Address %p is not mapped.\n", (void *)base_va);
- return -EINVAL;
- }
-
- if (addr_pa != NULL) {
- *addr_pa = *entry & TABLE_ADDR_MASK;
- }
-
- if (table_entry != NULL) {
- *table_entry = entry;
- }
-
- if (table_level != NULL) {
- *table_level = level;
- }
-
- desc = *entry;
-
-#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
- VERBOSE("Attributes: ");
- xlat_desc_print(ctx, desc);
- tf_printf("\n");
-#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
-
- assert(attributes != NULL);
- *attributes = 0;
-
- int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
-
- if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
- *attributes |= MT_MEMORY;
- } else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
- *attributes |= MT_NON_CACHEABLE;
- } else {
- assert(attr_index == ATTR_DEVICE_INDEX);
- *attributes |= MT_DEVICE;
- }
-
- int ap2_bit = (desc >> AP2_SHIFT) & 1;
-
- if (ap2_bit == AP2_RW)
- *attributes |= MT_RW;
-
- if (ctx->xlat_regime == EL1_EL0_REGIME) {
- int ap1_bit = (desc >> AP1_SHIFT) & 1;
- if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
- *attributes |= MT_USER;
- }
-
- int ns_bit = (desc >> NS_SHIFT) & 1;
-
- if (ns_bit == 1)
- *attributes |= MT_NS;
-
- uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
-
- if ((desc & xn_mask) == xn_mask) {
- *attributes |= MT_EXECUTE_NEVER;
- } else {
- assert((desc & xn_mask) == 0);
- }
-
- return 0;
-}
-
-
-int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
- uint32_t *attributes)
-{
- return get_mem_attributes_internal(ctx, base_va, attributes,
- NULL, NULL, NULL);
-}
-
-
-int change_mem_attributes(xlat_ctx_t *ctx,
- uintptr_t base_va,
- size_t size,
- uint32_t attr)
-{
- /* Note: This implementation isn't optimized. */
-
- assert(ctx != NULL);
- assert(ctx->initialized);
-
- unsigned long long virt_addr_space_size =
- (unsigned long long)ctx->va_max_address + 1;
- assert(virt_addr_space_size > 0);
-
- if (!IS_PAGE_ALIGNED(base_va)) {
- WARN("%s: Address %p is not aligned on a page boundary.\n",
- __func__, (void *)base_va);
- return -EINVAL;
- }
-
- if (size == 0) {
- WARN("%s: Size is 0.\n", __func__);
- return -EINVAL;
- }
-
- if ((size % PAGE_SIZE) != 0) {
- WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
- __func__, size);
- return -EINVAL;
- }
-
- if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
- WARN("%s() doesn't allow to remap memory as read-write and executable.\n",
- __func__);
- return -EINVAL;
- }
-
- int pages_count = size / PAGE_SIZE;
-
- VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
- pages_count, (void *)base_va);
-
- uintptr_t base_va_original = base_va;
-
- /*
- * Sanity checks.
- */
- for (int i = 0; i < pages_count; ++i) {
- uint64_t *entry;
- uint64_t desc;
- int level;
-
- entry = find_xlat_table_entry(base_va,
- ctx->base_table,
- ctx->base_table_entries,
- virt_addr_space_size,
- &level);
- if (entry == NULL) {
- WARN("Address %p is not mapped.\n", (void *)base_va);
- return -EINVAL;
- }
-
- desc = *entry;
-
- /*
- * Check that all the required pages are mapped at page
- * granularity.
- */
- if (((desc & DESC_MASK) != PAGE_DESC) ||
- (level != XLAT_TABLE_LEVEL_MAX)) {
- WARN("Address %p is not mapped at the right granularity.\n",
- (void *)base_va);
- WARN("Granularity is 0x%llx, should be 0x%x.\n",
- (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
- return -EINVAL;
- }
-
- /*
- * If the region type is device, it shouldn't be executable.
- */
- int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
- if (attr_index == ATTR_DEVICE_INDEX) {
- if ((attr & MT_EXECUTE_NEVER) == 0) {
- WARN("Setting device memory as executable at address %p.",
- (void *)base_va);
- return -EINVAL;
- }
- }
-
- base_va += PAGE_SIZE;
- }
-
- /* Restore original value. */
- base_va = base_va_original;
-
- VERBOSE("%s: All pages are mapped, now changing their attributes...\n",
- __func__);
-
- for (int i = 0; i < pages_count; ++i) {
-
- uint32_t old_attr, new_attr;
- uint64_t *entry;
- int level;
- unsigned long long addr_pa;
-
- get_mem_attributes_internal(ctx, base_va, &old_attr,
- &entry, &addr_pa, &level);
-
- VERBOSE("Old attributes: 0x%x\n", old_attr);
-
- /*
- * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
- * MT_USER/MT_PRIVILEGED are taken into account. Any other
- * information is ignored.
- */
-
- /* Clean the old attributes so that they can be rebuilt. */
- new_attr = old_attr & ~(MT_RW|MT_EXECUTE_NEVER|MT_USER);
-
- /*
- * Update attributes, but filter out the ones this function
- * isn't allowed to change.
- */
- new_attr |= attr & (MT_RW|MT_EXECUTE_NEVER|MT_USER);
-
- VERBOSE("New attributes: 0x%x\n", new_attr);
-
- /*
- * The break-before-make sequence requires writing an invalid
- * descriptor and making sure that the system sees the change
- * before writing the new descriptor.
- */
- *entry = INVALID_DESC;
-
- /* Invalidate any cached copy of this mapping in the TLBs. */
- xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
-
- /* Ensure completion of the invalidation. */
- xlat_arch_tlbi_va_sync();
-
- /* Write new descriptor */
- *entry = xlat_desc(ctx, new_attr, addr_pa, level);
-
- base_va += PAGE_SIZE;
- }
-
- /* Ensure that the last descriptor writen is seen by the system. */
- dsbish();
-
- return 0;
-}
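
With the default-context wrappers moved out, xlat_tables_core.c now exposes
only the context-explicit entry points. A sketch of using them with a
private context; the 'ep' prefix and table counts are invented for
illustration (REGISTER_XLAT_CONTEXT(name, ...) instantiates a context called
name_xlat_ctx, as the 'tf' instance in xlat_tables_context.c shows):

    #include <xlat_tables_v2.h>

    /* Instantiates a private context named ep_xlat_ctx ('ep' is made up). */
    REGISTER_XLAT_CONTEXT(ep, 8 /* mmap regions */, 8 /* xlat tables */,
                          PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);

    void ep_setup_tables(void)
    {
            /* Identity-map an illustrative 64 KiB of RAM read-write. */
            mmap_region_t mm = MAP_REGION(0x80000000ULL, 0x80000000UL,
                                          0x10000U,
                                          MT_MEMORY | MT_RW | MT_SECURE);

            mmap_add_region_ctx(&ep_xlat_ctx, &mm);
            init_xlat_tables_ctx(&ep_xlat_ctx);
    }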
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 777189fb..4a54ec5d 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -50,7 +50,7 @@
* S-EL1.
*/
void xlat_arch_tlbi_va(uintptr_t va);
-void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime);
+void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime);
/*
* This function has to be called at the end of any code that uses the function
@@ -59,7 +59,7 @@ void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime);
void xlat_arch_tlbi_va_sync(void);
/* Print VA, PA, size and attributes of all regions in the mmap array. */
-void print_mmap(mmap_region_t *const mmap);
+void xlat_mmap_print(mmap_region_t *const mmap);
/*
* Print the current state of the translation tables by reading them from
@@ -68,6 +68,12 @@ void print_mmap(mmap_region_t *const mmap);
void xlat_tables_print(xlat_ctx_t *ctx);
/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
+ unsigned long long addr_pa, int level);
+
+/*
* Architecture-specific initialization code.
*/
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
new file mode 100644
index 00000000..3df43741
--- /dev/null
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform_def.h>
+#include <types.h>
+#include <utils_def.h>
+#include <xlat_tables_arch_private.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+#include "xlat_tables_private.h"
+
+#if LOG_LEVEL < LOG_LEVEL_VERBOSE
+
+void xlat_mmap_print(__unused mmap_region_t *const mmap)
+{
+ /* Empty */
+}
+
+void xlat_tables_print(__unused xlat_ctx_t *ctx)
+{
+ /* Empty */
+}
+
+#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+void xlat_mmap_print(mmap_region_t *const mmap)
+{
+ tf_printf("mmap:\n");
+ const mmap_region_t *mm = mmap;
+
+ while (mm->size != 0U) {
+ tf_printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x "
+ "granularity:0x%zx\n", mm->base_va, mm->base_pa,
+ mm->size, mm->attr, mm->granularity);
+ ++mm;
+ };
+ tf_printf("\n");
+}
+
+/* Print the attributes of the specified block descriptor. */
+static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
+{
+ int mem_type_index = ATTR_INDEX_GET(desc);
+ int xlat_regime = ctx->xlat_regime;
+
+ if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+ tf_printf("MEM");
+ } else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
+ tf_printf("NC");
+ } else {
+ assert(mem_type_index == ATTR_DEVICE_INDEX);
+ tf_printf("DEV");
+ }
+
+ const char *priv_str = "(PRIV)";
+ const char *user_str = "(USER)";
+
+ /*
+ * Showing Privileged vs Unprivileged only makes sense for EL1&0
+ * mappings
+ */
+ const char *ro_str = "-RO";
+ const char *rw_str = "-RW";
+ const char *no_access_str = "-NOACCESS";
+
+ if (xlat_regime == EL3_REGIME) {
+ /* For EL3, the AP[2] bit is all what matters */
+ tf_printf("%s", (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str);
+ } else {
+ const char *ap_str = (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str;
+ tf_printf("%s", ap_str);
+ tf_printf("%s", priv_str);
+ /*
+ * EL0 can only have the same permissions as EL1 or no
+ * permissions at all.
+ */
+ tf_printf("%s",
+ (desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
+ ? ap_str : no_access_str);
+ tf_printf("%s", user_str);
+ }
+
+ const char *xn_str = "-XN";
+ const char *exec_str = "-EXEC";
+
+ if (xlat_regime == EL3_REGIME) {
+ /* For EL3, the XN bit is all what matters */
+ tf_printf("%s", (UPPER_ATTRS(XN) & desc) ? xn_str : exec_str);
+ } else {
+ /* For EL0 and EL1, we need to know who has which rights */
+ tf_printf("%s", (UPPER_ATTRS(PXN) & desc) ? xn_str : exec_str);
+ tf_printf("%s", priv_str);
+
+ tf_printf("%s", (UPPER_ATTRS(UXN) & desc) ? xn_str : exec_str);
+ tf_printf("%s", user_str);
+ }
+
+ tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
+}
+
+static const char * const level_spacers[] = {
+ "[LV0] ",
+ " [LV1] ",
+ " [LV2] ",
+ " [LV3] "
+};
+
+static const char *invalid_descriptors_ommited =
+ "%s(%d invalid descriptors omitted)\n";
+
+/*
+ * Recursive function that reads the translation tables passed as an argument
+ * and prints their status.
+ */
+static void xlat_tables_print_internal(xlat_ctx_t *ctx,
+ const uintptr_t table_base_va,
+ uint64_t *const table_base, const int table_entries,
+ const unsigned int level)
+{
+ assert(level <= XLAT_TABLE_LEVEL_MAX);
+
+ uint64_t desc;
+ uintptr_t table_idx_va = table_base_va;
+ int table_idx = 0;
+
+ size_t level_size = XLAT_BLOCK_SIZE(level);
+
+ /*
+ * Keep track of how many invalid descriptors are counted in a row.
+ * Whenever multiple invalid descriptors are found, only the first one
+ * is printed, and a line is added to inform about how many descriptors
+ * have been omitted.
+ */
+ int invalid_row_count = 0;
+
+ while (table_idx < table_entries) {
+
+ desc = table_base[table_idx];
+
+ if ((desc & DESC_MASK) == INVALID_DESC) {
+
+ if (invalid_row_count == 0) {
+ tf_printf("%sVA:%p size:0x%zx\n",
+ level_spacers[level],
+ (void *)table_idx_va, level_size);
+ }
+ invalid_row_count++;
+
+ } else {
+
+ if (invalid_row_count > 1) {
+ tf_printf(invalid_descriptors_ommited,
+ level_spacers[level],
+ invalid_row_count - 1);
+ }
+ invalid_row_count = 0;
+
+ /*
+ * Check if this is a table or a block. Tables are only
+ * allowed in levels other than 3, but DESC_PAGE has the
+ * same value as DESC_TABLE, so we need to check.
+ */
+ if (((desc & DESC_MASK) == TABLE_DESC) &&
+ (level < XLAT_TABLE_LEVEL_MAX)) {
+ /*
+ * Do not print any PA for a table descriptor,
+ * as it doesn't directly map physical memory
+ * but instead points to the next translation
+ * table in the translation table walk.
+ */
+ tf_printf("%sVA:%p size:0x%zx\n",
+ level_spacers[level],
+ (void *)table_idx_va, level_size);
+
+ uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
+
+ xlat_tables_print_internal(ctx, table_idx_va,
+ (uint64_t *)addr_inner,
+ XLAT_TABLE_ENTRIES, level + 1);
+ } else {
+ tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
+ level_spacers[level],
+ (void *)table_idx_va,
+ (unsigned long long)(desc & TABLE_ADDR_MASK),
+ level_size);
+ xlat_desc_print(ctx, desc);
+ tf_printf("\n");
+ }
+ }
+
+ table_idx++;
+ table_idx_va += level_size;
+ }
+
+ if (invalid_row_count > 1) {
+ tf_printf(invalid_descriptors_ommited,
+ level_spacers[level], invalid_row_count - 1);
+ }
+}
+
+void xlat_tables_print(xlat_ctx_t *ctx)
+{
+ const char *xlat_regime_str;
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ xlat_regime_str = "1&0";
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ xlat_regime_str = "3";
+ }
+ VERBOSE("Translation tables state:\n");
+ VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str);
+ VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
+ VERBOSE(" Max allowed VA: %p\n", (void *) ctx->va_max_address);
+ VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
+ VERBOSE(" Max mapped VA: %p\n", (void *) ctx->max_va);
+
+ VERBOSE(" Initial lookup level: %i\n", ctx->base_level);
+ VERBOSE(" Entries @initial lookup level: %i\n",
+ ctx->base_table_entries);
+
+ int used_page_tables;
+#if PLAT_XLAT_TABLES_DYNAMIC
+ used_page_tables = 0;
+ for (unsigned int i = 0; i < ctx->tables_num; ++i) {
+ if (ctx->tables_mapped_regions[i] != 0)
+ ++used_page_tables;
+ }
+#else
+ used_page_tables = ctx->next_table;
+#endif
+ VERBOSE(" Used %i sub-tables out of %i (spare: %i)\n",
+ used_page_tables, ctx->tables_num,
+ ctx->tables_num - used_page_tables);
+
+ xlat_tables_print_internal(ctx, 0, ctx->base_table,
+ ctx->base_table_entries, ctx->base_level);
+}
+
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+/*
+ * Do a translation table walk to find the block or page descriptor that maps
+ * virtual_addr.
+ *
+ * On success, return the address of the descriptor within the translation
+ * table. Its lookup level is stored in '*out_level'.
+ * On error, return NULL.
+ *
+ * xlat_table_base
+ * Base address for the initial lookup level.
+ * xlat_table_base_entries
+ * Number of entries in the translation table for the initial lookup level.
+ * virt_addr_space_size
+ * Size in bytes of the virtual address space.
+ */
+static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
+ void *xlat_table_base,
+ int xlat_table_base_entries,
+ unsigned long long virt_addr_space_size,
+ int *out_level)
+{
+ unsigned int start_level;
+ uint64_t *table;
+ int entries;
+
+ VERBOSE("%s(%p)\n", __func__, (void *)virtual_addr);
+
+ start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
+ VERBOSE("Starting translation table walk from level %i\n", start_level);
+
+ table = xlat_table_base;
+ entries = xlat_table_base_entries;
+
+ for (unsigned int level = start_level;
+ level <= XLAT_TABLE_LEVEL_MAX;
+ ++level) {
+ int idx;
+ uint64_t desc;
+ uint64_t desc_type;
+
+ VERBOSE("Table address: %p\n", (void *)table);
+
+ idx = XLAT_TABLE_IDX(virtual_addr, level);
+ VERBOSE("Index into level %i table: %i\n", level, idx);
+ if (idx >= entries) {
+ VERBOSE("Invalid address\n");
+ return NULL;
+ }
+
+ desc = table[idx];
+ desc_type = desc & DESC_MASK;
+ VERBOSE("Descriptor at level %i: 0x%llx\n", level,
+ (unsigned long long)desc);
+
+ if (desc_type == INVALID_DESC) {
+ VERBOSE("Invalid entry (memory not mapped)\n");
+ return NULL;
+ }
+
+ if (level == XLAT_TABLE_LEVEL_MAX) {
+ /*
+ * There can't be table entries at the final lookup
+ * level.
+ */
+ assert(desc_type == PAGE_DESC);
+ VERBOSE("Descriptor mapping a memory page (size: 0x%llx)\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(XLAT_TABLE_LEVEL_MAX));
+ *out_level = level;
+ return &table[idx];
+ }
+
+ if (desc_type == BLOCK_DESC) {
+ VERBOSE("Descriptor mapping a memory block (size: 0x%llx)\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(level));
+ *out_level = level;
+ return &table[idx];
+ }
+
+ assert(desc_type == TABLE_DESC);
+ VERBOSE("Table descriptor, continuing xlat table walk...\n");
+ table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ entries = XLAT_TABLE_ENTRIES;
+ }
+
+ /*
+ * This shouldn't be reached, the translation table walk should end at
+ * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
+ */
+ assert(0);
+
+ return NULL;
+}
+
+
+static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
+ uint32_t *attributes, uint64_t **table_entry,
+ unsigned long long *addr_pa, int *table_level)
+{
+ uint64_t *entry;
+ uint64_t desc;
+ int level;
+ unsigned long long virt_addr_space_size;
+
+ /*
+ * Sanity-check arguments.
+ */
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+ assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
+
+ virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
+ assert(virt_addr_space_size > 0);
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address %p is not mapped.\n", (void *)base_va);
+ return -EINVAL;
+ }
+
+ if (addr_pa != NULL) {
+ *addr_pa = *entry & TABLE_ADDR_MASK;
+ }
+
+ if (table_entry != NULL) {
+ *table_entry = entry;
+ }
+
+ if (table_level != NULL) {
+ *table_level = level;
+ }
+
+ desc = *entry;
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+ VERBOSE("Attributes: ");
+ xlat_desc_print(ctx, desc);
+ tf_printf("\n");
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+ assert(attributes != NULL);
+ *attributes = 0;
+
+ int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+
+ if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+ *attributes |= MT_MEMORY;
+ } else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
+ *attributes |= MT_NON_CACHEABLE;
+ } else {
+ assert(attr_index == ATTR_DEVICE_INDEX);
+ *attributes |= MT_DEVICE;
+ }
+
+ int ap2_bit = (desc >> AP2_SHIFT) & 1;
+
+ if (ap2_bit == AP2_RW)
+ *attributes |= MT_RW;
+
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ int ap1_bit = (desc >> AP1_SHIFT) & 1;
+ if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
+ *attributes |= MT_USER;
+ }
+
+ int ns_bit = (desc >> NS_SHIFT) & 1;
+
+ if (ns_bit == 1)
+ *attributes |= MT_NS;
+
+ uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+ if ((desc & xn_mask) == xn_mask) {
+ *attributes |= MT_EXECUTE_NEVER;
+ } else {
+ assert((desc & xn_mask) == 0);
+ }
+
+ return 0;
+}
+
+
+int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
+ uint32_t *attributes)
+{
+ return get_mem_attributes_internal(ctx, base_va, attributes,
+ NULL, NULL, NULL);
+}
+
+
+int change_mem_attributes(xlat_ctx_t *ctx,
+ uintptr_t base_va,
+ size_t size,
+ uint32_t attr)
+{
+ /* Note: This implementation isn't optimized. */
+
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+
+ unsigned long long virt_addr_space_size =
+ (unsigned long long)ctx->va_max_address + 1;
+ assert(virt_addr_space_size > 0);
+
+ if (!IS_PAGE_ALIGNED(base_va)) {
+ WARN("%s: Address %p is not aligned on a page boundary.\n",
+ __func__, (void *)base_va);
+ return -EINVAL;
+ }
+
+ if (size == 0) {
+ WARN("%s: Size is 0.\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((size % PAGE_SIZE) != 0) {
+ WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
+ __func__, size);
+ return -EINVAL;
+ }
+
+ if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
+ WARN("%s() doesn't allow to remap memory as read-write and executable.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ int pages_count = size / PAGE_SIZE;
+
+ VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
+ pages_count, (void *)base_va);
+
+ uintptr_t base_va_original = base_va;
+
+ /*
+ * Sanity checks.
+ */
+ for (int i = 0; i < pages_count; ++i) {
+ uint64_t *entry;
+ uint64_t desc;
+ int level;
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address %p is not mapped.\n", (void *)base_va);
+ return -EINVAL;
+ }
+
+ desc = *entry;
+
+ /*
+ * Check that all the required pages are mapped at page
+ * granularity.
+ */
+ if (((desc & DESC_MASK) != PAGE_DESC) ||
+ (level != XLAT_TABLE_LEVEL_MAX)) {
+ WARN("Address %p is not mapped at the right granularity.\n",
+ (void *)base_va);
+ WARN("Granularity is 0x%llx, should be 0x%x.\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /*
+ * If the region type is device, it shouldn't be executable.
+ */
+ int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ if (attr_index == ATTR_DEVICE_INDEX) {
+ if ((attr & MT_EXECUTE_NEVER) == 0) {
+ WARN("Setting device memory as executable at address %p.",
+ (void *)base_va);
+ return -EINVAL;
+ }
+ }
+
+ base_va += PAGE_SIZE;
+ }
+
+ /* Restore original value. */
+ base_va = base_va_original;
+
+ VERBOSE("%s: All pages are mapped, now changing their attributes...\n",
+ __func__);
+
+ for (int i = 0; i < pages_count; ++i) {
+
+ uint32_t old_attr, new_attr;
+ uint64_t *entry;
+ int level;
+ unsigned long long addr_pa;
+
+ get_mem_attributes_internal(ctx, base_va, &old_attr,
+ &entry, &addr_pa, &level);
+
+ VERBOSE("Old attributes: 0x%x\n", old_attr);
+
+ /*
+ * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
+ * MT_USER/MT_PRIVILEGED are taken into account. Any other
+ * information is ignored.
+ */
+
+ /* Clean the old attributes so that they can be rebuilt. */
+ new_attr = old_attr & ~(MT_RW|MT_EXECUTE_NEVER|MT_USER);
+
+ /*
+ * Update attributes, but filter out the ones this function
+ * isn't allowed to change.
+ */
+ new_attr |= attr & (MT_RW|MT_EXECUTE_NEVER|MT_USER);
+
+ VERBOSE("New attributes: 0x%x\n", new_attr);
+
+ /*
+ * The break-before-make sequence requires writing an invalid
+ * descriptor and making sure that the system sees the change
+ * before writing the new descriptor.
+ */
+ *entry = INVALID_DESC;
+
+ /* Invalidate any cached copy of this mapping in the TLBs. */
+ xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
+
+ /* Ensure completion of the invalidation. */
+ xlat_arch_tlbi_va_sync();
+
+ /* Write new descriptor */
+ *entry = xlat_desc(ctx, new_attr, addr_pa, level);
+
+ base_va += PAGE_SIZE;
+ }
+
+ /* Ensure that the last descriptor writen is seen by the system. */
+ dsbish();
+
+ return 0;
+}
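
Finally, a hedged sketch of the attribute-change helper that ends the new
xlat_tables_utils.c, e.g. to remap a just-loaded code buffer read-only and
executable. The helper name and context parameter are illustrative; the API
calls are the ones added above:

    #include <xlat_tables_v2.h>

    int seal_loaded_code(xlat_ctx_t *ctx, uintptr_t code_va, size_t code_size)
    {
            uint32_t attr;

            /* Optional: check what the first page is currently mapped as. */
            int rc = get_mem_attributes(ctx, code_va, &attr);
            if (rc != 0)
                    return rc;

            /*
             * Remap read-only and executable. MT_RO and MT_EXECUTE are the
             * zero-valued counterparts of MT_RW and MT_EXECUTE_NEVER;
             * change_mem_attributes() performs the break-before-make
             * sequence (invalidate descriptor, TLBI, rewrite) per page,
             * as the loop above shows.
             */
            return change_mem_attributes(ctx, code_va, code_size,
                                         MT_RO | MT_EXECUTE);
    }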