summaryrefslogtreecommitdiff
path: root/lib/xlat_tables_v2
diff options
context:
space:
mode:
authorDavid Pu <dpu@nvidia.com>2019-02-22 02:31:40 -0800
committerDavid Pu <dpu@nvidia.com>2019-03-05 09:21:36 -0800
commit0ffe269215bdbfc76621f187f0fa4576f37e9791 (patch)
treec6526ac192bee3006d96b4d5ebf00d2caf672b35 /lib/xlat_tables_v2
parentaf3816789db46f69fca900e0fb8d086602219bb9 (diff)
xlat_tables_v2: map region without recursion.
This patch uses an array on the stack to save parent xlat table information when traversing the xlat tables. It keeps exactly the same xlat table traversal order as the recursive version. Fixes arm-software/tf-issues#664 Signed-off-by: David Pu <dpu@nvidia.com>
Diffstat (limited to 'lib/xlat_tables_v2')
-rw-r--r--lib/xlat_tables_v2/xlat_tables_core.c176
1 files changed, 120 insertions, 56 deletions
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 0e6a6fa8..e0b24474 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -537,105 +537,169 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
}
/*
- * Recursive function that writes to the translation tables and maps the
+ * Function that writes to the translation tables and maps the
* specified region. On success, it returns the VA of the last byte that was
* successfully mapped. On error, it returns the VA of the next entry that
* should have been mapped.
*/
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
- uintptr_t table_base_va,
+ const uintptr_t table_base_va,
uint64_t *const table_base,
unsigned int table_entries,
unsigned int level)
{
+
assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
+ /*
+ * Data structure to track a DESC_TABLE entry before iterating into a
+ * subtable of the next translation level. It is used to restore the
+ * previous level after the subtable iteration finishes.
+ */
+ struct desc_table_map {
+ uint64_t *table_base;
+ uintptr_t table_idx_va;
+ unsigned int idx;
+ } desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
+ {NULL, 0U, XLAT_TABLE_ENTRIES}, };
+
+ unsigned int this_level = level;
+ uint64_t *this_base = table_base;
+ unsigned int max_entries = table_entries;
+ size_t level_size = XLAT_BLOCK_SIZE(this_level);
uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
uintptr_t table_idx_va;
- unsigned long long table_idx_pa;
-
- uint64_t *subtable;
- uint64_t desc;
-
unsigned int table_idx;
table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
-#if PLAT_XLAT_TABLES_DYNAMIC
- if (level > ctx->base_level)
- xlat_table_inc_regions_count(ctx, table_base);
+ while (this_base != NULL) {
+
+ uint64_t desc;
+ uint64_t desc_type;
+ unsigned long long table_idx_pa;
+ action_t action;
+
+ /* finish current xlat level iteration. */
+ if (table_idx >= max_entries) {
+ if (this_level <= level) {
+ this_base = NULL;
+ break;
+ } else {
+
+ /* Back from the subtable iteration; restore the
+ * previous DESC_TABLE entry.
+ */
+ this_level--;
+ level_size = XLAT_BLOCK_SIZE(this_level);
+ this_base = desc_tables[this_level].table_base;
+ table_idx = desc_tables[this_level].idx;
+ if (this_level == level) {
+ max_entries = table_entries;
+ } else {
+ max_entries = XLAT_TABLE_ENTRIES;
+ }
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ uintptr_t subtable;
+ desc = this_base[table_idx];
+ subtable = (uintptr_t)(desc & TABLE_ADDR_MASK);
+ xlat_clean_dcache_range(subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
- while (table_idx < table_entries) {
+ table_idx++;
+ table_idx_va =
+ desc_tables[this_level].table_idx_va +
+ level_size;
+ }
+ }
- desc = table_base[table_idx];
+ desc = this_base[table_idx];
+ desc_type = desc & DESC_MASK;
table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
- action_t action = xlat_tables_map_region_action(mm,
- (uint32_t)(desc & DESC_MASK), table_idx_pa,
- table_idx_va, level);
-
- if (action == ACTION_WRITE_BLOCK_ENTRY) {
+ /* If we reached the end of the region, simply exit, since we
+ * have already written all BLOCK entries and created all the
+ * required subtables.
+ */
+ if (mm_end_va <= table_idx_va) {
+ this_base = NULL;
+ break;
+ }
- table_base[table_idx] =
- xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
- level);
+ action = xlat_tables_map_region_action(mm, desc_type,
+ table_idx_pa, table_idx_va, this_level);
+ if (action == ACTION_WRITE_BLOCK_ENTRY) {
+ this_base[table_idx] = xlat_desc(ctx, mm->attr,
+ table_idx_pa, this_level);
+ table_idx++;
+ table_idx_va += level_size;
} else if (action == ACTION_CREATE_NEW_TABLE) {
- uintptr_t end_va;
- subtable = xlat_table_get_empty(ctx);
+ uintptr_t base_va;
+
+ uint64_t *subtable = xlat_table_get_empty(ctx);
if (subtable == NULL) {
- /* Not enough free tables to map this region */
+ /* Not enough free tables to map this region. */
return table_idx_va;
}
/* Point to new subtable from this one. */
- table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
+ this_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
+
+ desc_tables[this_level].table_base = this_base;
+ desc_tables[this_level].table_idx_va = table_idx_va;
+ desc_tables[this_level].idx = table_idx;
+ base_va = table_idx_va;
+
+ this_level++;
+ this_base = subtable;
+ level_size = XLAT_BLOCK_SIZE(this_level);
+ table_idx_va = xlat_tables_find_start_va(mm, base_va,
+ this_level);
+ table_idx = xlat_tables_va_to_index(base_va,
+ table_idx_va, this_level);
+ max_entries = XLAT_TABLE_ENTRIES;
- /* Recurse to write into subtable */
- end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
- subtable, XLAT_TABLE_ENTRIES,
- level + 1U);
-#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
- xlat_clean_dcache_range((uintptr_t)subtable,
- XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#if PLAT_XLAT_TABLES_DYNAMIC
+ if (this_level > ctx->base_level) {
+ xlat_table_inc_regions_count(ctx, subtable);
+ }
#endif
- if (end_va !=
- (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
- return end_va;
} else if (action == ACTION_RECURSE_INTO_TABLE) {
- uintptr_t end_va;
- subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
- /* Recurse to write into subtable */
- end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
- subtable, XLAT_TABLE_ENTRIES,
- level + 1U);
-#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
- xlat_clean_dcache_range((uintptr_t)subtable,
- XLAT_TABLE_ENTRIES * sizeof(uint64_t));
-#endif
- if (end_va !=
- (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
- return end_va;
+ uintptr_t base_va;
+ uint64_t *subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
- } else {
+ desc_tables[this_level].table_base = this_base;
+ desc_tables[this_level].table_idx_va = table_idx_va;
+ desc_tables[this_level].idx = table_idx;
+ base_va = table_idx_va;
- assert(action == ACTION_NONE);
+ this_level++;
+ level_size = XLAT_BLOCK_SIZE(this_level);
+ table_idx_va = xlat_tables_find_start_va(mm, base_va,
+ this_level);
+ table_idx = xlat_tables_va_to_index(base_va,
+ table_idx_va, this_level);
+ this_base = subtable;
+ max_entries = XLAT_TABLE_ENTRIES;
+#if PLAT_XLAT_TABLES_DYNAMIC
+ if (this_level > ctx->base_level) {
+ xlat_table_inc_regions_count(ctx, subtable);
+ }
+#endif
+ } else {
+ assert(action == ACTION_NONE);
+ table_idx++;
+ table_idx_va += level_size;
}
-
- table_idx++;
- table_idx_va += XLAT_BLOCK_SIZE(level);
-
- /* If reached the end of the region, exit */
- if (mm_end_va <= table_idx_va)
- break;
}
return table_idx_va - 1U;