path: root/lib/xlat_tables_v2/xlat_tables_common.c
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <common_def.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <string.h>
#include <types.h>
#include <utils.h>
#include <xlat_tables_v2.h>
#ifdef AARCH32
# include "aarch32/xlat_tables_arch.h"
#else
# include "aarch64/xlat_tables_arch.h"
#endif
#include "xlat_tables_private.h"

/*
 * Private variables used by the TF
 */
static mmap_region_t tf_mmap[MAX_MMAP_REGIONS + 1];

static uint64_t tf_xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
			__aligned(XLAT_TABLE_SIZE) __section("xlat_table");

static uint64_t tf_base_xlat_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

#if PLAT_XLAT_TABLES_DYNAMIC
static int xlat_tables_mapped_regions[MAX_XLAT_TABLES];
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

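/*
 * Translation context used by the Trusted Firmware image itself. The
 * mmap_*() and *_xlat_tables() wrappers below all operate on this context.
 */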
xlat_ctx_t tf_xlat_ctx = {

	.pa_max_address = PLAT_PHY_ADDR_SPACE_SIZE - 1,
	.va_max_address = PLAT_VIRT_ADDR_SPACE_SIZE - 1,

	.mmap = tf_mmap,
	.mmap_num = MAX_MMAP_REGIONS,

	.tables = tf_xlat_tables,
	.tables_num = MAX_XLAT_TABLES,
#if PLAT_XLAT_TABLES_DYNAMIC
	.tables_mapped_regions = xlat_tables_mapped_regions,
#endif /* PLAT_XLAT_TABLES_DYNAMIC */

	.base_table = tf_base_xlat_table,
	.base_table_entries = NUM_BASE_LEVEL_ENTRIES,

	.max_pa = 0,
	.max_va = 0,

	.next_table = 0,

	.base_level = XLAT_TABLE_LEVEL_BASE,

	.initialized = 0
};

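/*
 * Add a static memory region to the memory map of the TF translation
 * context. Must be called before init_xlat_tables().
 */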
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
			size_t size, mmap_attr_t attr)
{
	mmap_region_t mm = {
		.base_va = base_va,
		.base_pa = base_pa,
		.size = size,
		.attr = attr,
	};
	mmap_add_region_ctx(&tf_xlat_ctx, (mmap_region_t *)&mm);
}

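/*
 * Add an array of static memory regions, terminated by an entry whose size
 * is zero. For example (base addresses, sizes and attributes are
 * illustrative):
 *
 *	static const mmap_region_t plat_mmap[] = {
 *		MAP_REGION_FLAT(DEVICE0_BASE, DEVICE0_SIZE,
 *				MT_DEVICE | MT_RW | MT_SECURE),
 *		{0}
 *	};
 *
 *	mmap_add(plat_mmap);
 */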
void mmap_add(const mmap_region_t *mm)
{
	while (mm->size) {
		mmap_add_region_ctx(&tf_xlat_ctx, (mmap_region_t *)mm);
		mm++;
	}
}

#if PLAT_XLAT_TABLES_DYNAMIC

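/*
 * Add a memory region at runtime, even after the MMU has been enabled.
 * Returns 0 on success, or a negative error code otherwise.
 */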
int mmap_add_dynamic_region(unsigned long long base_pa,
			    uintptr_t base_va, size_t size, mmap_attr_t attr)
{
	mmap_region_t mm = {
		.base_va = base_va,
		.base_pa = base_pa,
		.size = size,
		.attr = attr,
	};
	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
}

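/*
 * Remove a region previously added with mmap_add_dynamic_region(). The base
 * VA and size must match the values used when the region was added.
 */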
int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
{
	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx, base_va, size);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

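/*
 * Build the translation tables for all regions added so far. Must be called
 * only once, while the MMU is still disabled.
 */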
void init_xlat_tables(void)
{
	assert(!is_mmu_enabled());
	assert(!tf_xlat_ctx.initialized);
	print_mmap(tf_xlat_ctx.mmap);
	init_xlation_table(&tf_xlat_ctx);
	xlat_tables_print(&tf_xlat_ctx);

	assert(tf_xlat_ctx.max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
	assert(tf_xlat_ctx.max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);

	init_xlat_tables_arch(tf_xlat_ctx.max_pa);
}

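/*
 * Wrappers that enable the MMU using the base translation table built by
 * init_xlat_tables(). On AArch32 the MMU is enabled for the Secure state;
 * on AArch64 separate variants are provided for EL1 and EL3.
 */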
#ifdef AARCH32

void enable_mmu_secure(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
}

#else

void enable_mmu_el1(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
}

void enable_mmu_el3(unsigned int flags)
{
	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
}

#endif /* AARCH32 */