author    Antonio Nino Diaz <antonio.ninodiaz@arm.com>  2018-07-24 10:20:53 +0100
committer Antonio Nino Diaz <antonio.ninodiaz@arm.com>  2018-07-30 09:30:15 +0100
commit    e7b9886c7cbfac488b766b24379249853f78a040
tree      8abc48338cc5bdcacfc19fbf0f0cf9ae0ced7948
parent    60e062fb0e33f7d9825267cbfb397d7c7aca1169
xlat: Fix MISRA defects
Fix defects of MISRA C-2012 rules 8.13, 10.1, 10.3, 10.4, 10.8, 11.6,
14.4, 15.7, 17.8, 20.10, 20.12, 21.1 and Directive 4.9.

Change-Id: I7ff61e71733908596dbafe2e99d99b4fce9765bd
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
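Most of the changes below apply a handful of recurring patterns. A minimal
sketch of those patterns is shown here; it is illustrative only, not code
from the tree, and XLAT_TABLE_NC stands in for any single-bit flag:

    /*
     * Recurring MISRA C-2012 patterns applied throughout this patch:
     * - Rule 8.13: pointer parameters that are only read become
     *   pointers to const.
     * - Rules 10.1/10.3/10.4: unsigned literals (0U, 1U) keep both
     *   operands of an operator in the same essential type.
     * - Rule 14.4: controlling expressions are made essentially
     *   Boolean by comparing a mask against 0U instead of testing
     *   the mask value directly.
     */
    #include <stdint.h>

    #define XLAT_TABLE_NC (1U << 0)

    static int is_table_walk_non_cacheable(const uint32_t *flags)
    {
    	/* Before: return (*flags & XLAT_TABLE_NC); */
    	return ((*flags & XLAT_TABLE_NC) != 0U) ? 1 : 0;
    }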
Diffstat (limited to 'lib/xlat_tables_v2')
-rw-r--r--  lib/xlat_tables_v2/aarch32/xlat_tables_arch.c    19
-rw-r--r--  lib/xlat_tables_v2/aarch64/xlat_tables_arch.c    74
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_context.c          6
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_core.c            325
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_private.h          14
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_utils.c           159
6 files changed, 311 insertions, 286 deletions
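Both setup_mmu_cfg() variants changed below replace an in-place TCR/TTBCR
computation with an intermediate t0sz variable before the cast. A standalone
sketch of that computation, assuming a hosted toolchain for the printf:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * T0SZ is 64 minus log2 of the VA space size; for a power of two,
     * __builtin_ctzll() yields exactly that log2. The AArch32 variant
     * uses 32 instead of 64 since TTBCR addresses a 32-bit VA space.
     */
    static int compute_t0sz(uint64_t virtual_addr_space_size)
    {
    	/* Non-zero power of two, as CHECK_VIRT_ADDR_SPACE_SIZE enforces. */
    	assert(virtual_addr_space_size != 0U);
    	assert((virtual_addr_space_size &
    		(virtual_addr_space_size - 1U)) == 0U);

    	return 64 - __builtin_ctzll(virtual_addr_space_size);
    }

    int main(void)
    {
    	/* A 4 GiB VA space (max_va == UINT32_MAX) gives T0SZ = 32. */
    	printf("T0SZ = %d\n", compute_t0sz((uint64_t)UINT32_MAX + 1U));
    	return 0;
    }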
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index 5e3220c6..21bb22d5 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -14,7 +14,7 @@
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
-#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif
@@ -27,12 +27,12 @@ int xlat_arch_is_granule_size_supported(size_t size)
* The library uses the long descriptor translation table format, which
* supports 4 KiB pages only.
*/
- return (size == (4U * 1024U));
+ return (size == PAGE_SIZE_4KB) ? 1 : 0;
}
size_t xlat_arch_get_max_supported_granule_size(void)
{
- return 4U * 1024U;
+ return PAGE_SIZE_4KB;
}
#if ENABLE_ASSERTIONS
@@ -90,7 +90,7 @@ void xlat_arch_tlbi_va_sync(void)
isb();
}
-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
{
/*
* If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
@@ -100,7 +100,7 @@ int xlat_arch_current_el(void)
* in AArch64 except for the XN bits, but we set and unset them at the
* same time, so there's no difference in practice.
*/
- return 1;
+ return 1U;
}
/*******************************************************************************
@@ -143,20 +143,23 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
* 32 bits.
*/
if (max_va != UINT32_MAX) {
- uintptr_t virtual_addr_space_size = max_va + 1;
+ uintptr_t virtual_addr_space_size = max_va + 1U;
+
assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
/*
* __builtin_ctzll(0) is undefined but here we are guaranteed
* that virtual_addr_space_size is in the range [1, UINT32_MAX].
*/
- ttbcr |= 32 - __builtin_ctzll(virtual_addr_space_size);
+ int t0sz = 32 - __builtin_ctzll(virtual_addr_space_size);
+
+ ttbcr |= (uint32_t) t0sz;
}
/*
* Set the cacheability and shareability attributes for memory
* associated with translation table walks using TTBR0.
*/
- if (flags & XLAT_TABLE_NC) {
+ if ((flags & XLAT_TABLE_NC) != 0U) {
/* Inner & outer non-cacheable non-shareable. */
ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
TTBCR_RGN0_INNER_NC;
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 0f289e28..24948462 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -20,58 +20,58 @@ int xlat_arch_is_granule_size_supported(size_t size)
{
u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();
- if (size == (4U * 1024U)) {
- return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
+ if (size == PAGE_SIZE_4KB) {
+ return (((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
- ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;
- } else if (size == (16U * 1024U)) {
- return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED) ? 1 : 0;
+ } else if (size == PAGE_SIZE_16KB) {
+ return (((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
- ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;
- } else if (size == (64U * 1024U)) {
- return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED) ? 1 : 0;
+ } else if (size == PAGE_SIZE_64KB) {
+ return (((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
- ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;
+ ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED) ? 1 : 0;
+ } else {
+ return 0;
}
-
- return 0;
}
size_t xlat_arch_get_max_supported_granule_size(void)
{
- if (xlat_arch_is_granule_size_supported(64U * 1024U)) {
- return 64U * 1024U;
- } else if (xlat_arch_is_granule_size_supported(16U * 1024U)) {
- return 16U * 1024U;
+ if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB) != 0) {
+ return PAGE_SIZE_64KB;
+ } else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB) != 0) {
+ return PAGE_SIZE_16KB;
} else {
- assert(xlat_arch_is_granule_size_supported(4U * 1024U));
- return 4U * 1024U;
+ assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB) != 0);
+ return PAGE_SIZE_4KB;
}
}
unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
{
/* Physical address can't exceed 48 bits */
- assert((max_addr & ADDR_MASK_48_TO_63) == 0);
+ assert((max_addr & ADDR_MASK_48_TO_63) == 0U);
/* 48 bits address */
- if (max_addr & ADDR_MASK_44_TO_47)
+ if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
return TCR_PS_BITS_256TB;
/* 44 bits address */
- if (max_addr & ADDR_MASK_42_TO_43)
+ if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
return TCR_PS_BITS_16TB;
/* 42 bits address */
- if (max_addr & ADDR_MASK_40_TO_41)
+ if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
return TCR_PS_BITS_4TB;
/* 40 bits address */
- if (max_addr & ADDR_MASK_36_TO_39)
+ if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
return TCR_PS_BITS_1TB;
/* 36 bits address */
- if (max_addr & ADDR_MASK_32_TO_35)
+ if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
return TCR_PS_BITS_64GB;
return TCR_PS_BITS_4GB;
@@ -102,12 +102,12 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
if (ctx->xlat_regime == EL1_EL0_REGIME) {
- assert(xlat_arch_current_el() >= 1);
- return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
+ assert(xlat_arch_current_el() >= 1U);
+ return ((read_sctlr_el1() & SCTLR_M_BIT) != 0U) ? 1 : 0;
} else {
assert(ctx->xlat_regime == EL3_REGIME);
- assert(xlat_arch_current_el() >= 3);
- return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
+ assert(xlat_arch_current_el() >= 3U);
+ return ((read_sctlr_el3() & SCTLR_M_BIT) != 0U) ? 1 : 0;
}
}
@@ -137,11 +137,11 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
* exception level (see section D4.9.2 of the ARM ARM rev B.a).
*/
if (xlat_regime == EL1_EL0_REGIME) {
- assert(xlat_arch_current_el() >= 1);
+ assert(xlat_arch_current_el() >= 1U);
tlbivaae1is(TLBI_ADDR(va));
} else {
assert(xlat_regime == EL3_REGIME);
- assert(xlat_arch_current_el() >= 3);
+ assert(xlat_arch_current_el() >= 3U);
tlbivae3is(TLBI_ADDR(va));
}
}
@@ -169,11 +169,11 @@ void xlat_arch_tlbi_va_sync(void)
isb();
}
-int xlat_arch_current_el(void)
+unsigned int xlat_arch_current_el(void)
{
- int el = GET_EL(read_CurrentEl());
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
- assert(el > 0);
+ assert(el > 0U);
return el;
}
@@ -194,22 +194,24 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
* Limit the input address ranges and memory region sizes translated
* using TTBR0 to the given virtual address space size.
*/
- assert(max_va < ((uint64_t) UINTPTR_MAX));
+ assert(max_va < ((uint64_t)UINTPTR_MAX));
- virtual_addr_space_size = max_va + 1;
+ virtual_addr_space_size = (uintptr_t)max_va + 1U;
assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
/*
* __builtin_ctzll(0) is undefined but here we are guaranteed that
* virtual_addr_space_size is in the range [1,UINTPTR_MAX].
*/
- tcr = (uint64_t) 64 - __builtin_ctzll(virtual_addr_space_size);
+ int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
+
+ tcr = (uint64_t) t0sz;
/*
* Set the cacheability and shareability attributes for memory
* associated with translation table walks.
*/
- if ((flags & XLAT_TABLE_NC) != 0) {
+ if ((flags & XLAT_TABLE_NC) != 0U) {
/* Inner & outer non-cacheable non-shareable. */
tcr |= TCR_SH_NON_SHAREABLE |
TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
index 76c429d7..d7b2ebf8 100644
--- a/lib/xlat_tables_v2/xlat_tables_context.c
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -78,12 +78,12 @@ void init_xlat_tables(void)
{
assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
- int current_el = xlat_arch_current_el();
+ unsigned int current_el = xlat_arch_current_el();
- if (current_el == 1) {
+ if (current_el == 1U) {
tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
} else {
- assert(current_el == 3);
+ assert(current_el == 3U);
tf_xlat_ctx.xlat_regime = EL3_REGIME;
}
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index e3306e6d..80cd0a21 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -29,9 +29,9 @@
* Returns the index of the array corresponding to the specified translation
* table.
*/
-static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
+static int xlat_table_get_index(const xlat_ctx_t *ctx, const uint64_t *table)
{
- for (unsigned int i = 0; i < ctx->tables_num; i++)
+ for (int i = 0; i < ctx->tables_num; i++)
if (ctx->tables[i] == table)
return i;
@@ -45,9 +45,9 @@ static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
}
/* Returns a pointer to an empty translation table. */
-static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
+static uint64_t *xlat_table_get_empty(const xlat_ctx_t *ctx)
{
- for (unsigned int i = 0; i < ctx->tables_num; i++)
+ for (int i = 0; i < ctx->tables_num; i++)
if (ctx->tables_mapped_regions[i] == 0)
return ctx->tables[i];
@@ -55,21 +55,28 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
}
/* Increments region count for a given table. */
-static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
+static void xlat_table_inc_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
{
- ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]++;
}
/* Decrements region count for a given table. */
-static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
+static void xlat_table_dec_regions_count(const xlat_ctx_t *ctx,
+ const uint64_t *table)
{
- ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
+ int idx = xlat_table_get_index(ctx, table);
+
+ ctx->tables_mapped_regions[idx]--;
}
/* Returns 0 if the specified table isn't empty, otherwise 1. */
-static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
+static int xlat_table_is_empty(const xlat_ctx_t *ctx, const uint64_t *table)
{
- return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
+ return (ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)] == 0)
+ ? 1 : 0;
}
#else /* PLAT_XLAT_TABLES_DYNAMIC */
@@ -88,13 +95,13 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
* Returns a block/page table descriptor for the given level and attributes.
*/
uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
- unsigned long long addr_pa, int level)
+ unsigned long long addr_pa, unsigned int level)
{
uint64_t desc;
- int mem_type;
+ uint32_t mem_type;
/* Make sure that the granularity is fine enough to map this address. */
- assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
+ assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);
desc = addr_pa;
/*
@@ -111,8 +118,8 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
* Deduce other fields of the descriptor based on the MT_NS and MT_RW
* memory region attributes.
*/
- desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
- desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
+ desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
+ desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
/*
* Do not allow unprivileged access when the mapping is for a privileged
@@ -120,7 +127,7 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
* lower exception levels, set AP[2] to AP_NO_ACCESS_UNPRIVILEGED.
*/
if (ctx->xlat_regime == EL1_EL0_REGIME) {
- if (attr & MT_USER) {
+ if ((attr & MT_USER) != 0U) {
/* EL0 mapping requested, so we give User access */
desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
} else {
@@ -172,7 +179,7 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
* translation regime and the policy applied in
* xlat_arch_regime_get_xn_desc().
*/
- if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
+ if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
}
@@ -223,10 +230,10 @@ typedef enum {
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
const uintptr_t table_base_va,
uint64_t *const table_base,
- const int table_entries,
+ const unsigned int table_entries,
const unsigned int level)
{
- assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
uint64_t *subtable;
uint64_t desc;
@@ -234,16 +241,16 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
uintptr_t table_idx_va;
uintptr_t table_idx_end_va; /* End VA of this entry */
- uintptr_t region_end_va = mm->base_va + mm->size - 1;
+ uintptr_t region_end_va = mm->base_va + mm->size - 1U;
- int table_idx;
+ unsigned int table_idx;
if (mm->base_va > table_base_va) {
/* Find the first index of the table affected by the region. */
table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
- table_idx = (table_idx_va - table_base_va) >>
- XLAT_ADDR_SHIFT(level);
+ table_idx = (unsigned int)((table_idx_va - table_base_va) >>
+ XLAT_ADDR_SHIFT(level));
assert(table_idx < table_entries);
} else {
@@ -254,19 +261,18 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
while (table_idx < table_entries) {
- table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;
+ table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
desc = table_base[table_idx];
uint64_t desc_type = desc & DESC_MASK;
- action_t action = ACTION_NONE;
+ action_t action;
if ((mm->base_va <= table_idx_va) &&
(region_end_va >= table_idx_end_va)) {
-
/* Region covers all block */
- if (level == 3) {
+ if (level == 3U) {
/*
* Last level, only page descriptors allowed,
* erase it.
@@ -293,7 +299,6 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
} else if ((mm->base_va <= table_idx_end_va) ||
(region_end_va >= table_idx_va)) {
-
/*
* Region partially covers block.
*
@@ -302,12 +307,13 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
* There must be a table descriptor here, if not there
* was a problem when mapping the region.
*/
-
- assert(level < 3);
-
+ assert(level < 3U);
assert(desc_type == TABLE_DESC);
action = ACTION_RECURSE_INTO_TABLE;
+ } else {
+ /* The region doesn't cover the block at all */
+ action = ACTION_NONE;
}
if (action == ACTION_WRITE_BLOCK_ENTRY) {
@@ -322,12 +328,12 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
/* Recurse to write into subtable */
xlat_tables_unmap_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
- level + 1);
+ level + 1U);
/*
* If the subtable is now empty, remove its reference.
*/
- if (xlat_table_is_empty(ctx, subtable)) {
+ if (xlat_table_is_empty(ctx, subtable) != 0) {
table_base[table_idx] = INVALID_DESC;
xlat_arch_tlbi_va(table_idx_va,
ctx->xlat_regime);
@@ -356,12 +362,12 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
* specified region.
*/
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
- const int desc_type, const unsigned long long dest_pa,
- const uintptr_t table_entry_base_va, const unsigned int level)
+ unsigned int desc_type, unsigned long long dest_pa,
+ uintptr_t table_entry_base_va, unsigned int level)
{
- uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
uintptr_t table_entry_end_va =
- table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;
+ table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1U;
/*
* The descriptor types allowed depend on the current table level.
@@ -378,7 +384,7 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
* translation with this granularity in principle.
*/
- if (level == 3) {
+ if (level == 3U) {
/*
* Last level, only page descriptors are allowed.
*/
@@ -416,8 +422,8 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
* Also, check if the current level allows block
* descriptors. If not, create a table instead.
*/
- if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
- (level < MIN_LVL_BLOCK_DESC) ||
+ if (((dest_pa & XLAT_BLOCK_MASK(level)) != 0U)
+ || (level < MIN_LVL_BLOCK_DESC) ||
(mm->granularity < XLAT_BLOCK_SIZE(level)))
return ACTION_CREATE_NEW_TABLE;
else
@@ -449,7 +455,7 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
* mmap region failed to detect that PA and VA must at least be
* aligned to PAGE_SIZE.
*/
- assert(level < 3);
+ assert(level < 3U);
if (desc_type == INVALID_DESC) {
/*
@@ -472,13 +478,14 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
*/
return ACTION_RECURSE_INTO_TABLE;
}
- }
+ } else {
- /*
- * This table entry is outside of the region specified in the arguments,
- * don't write anything to it.
- */
- return ACTION_NONE;
+ /*
+ * This table entry is outside of the region specified in the
+ * arguments, don't write anything to it.
+ */
+ return ACTION_NONE;
+ }
}
/*
@@ -488,14 +495,14 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
* should have been mapped.
*/
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
- const uintptr_t table_base_va,
+ uintptr_t table_base_va,
uint64_t *const table_base,
- const int table_entries,
- const unsigned int level)
+ unsigned int table_entries,
+ unsigned int level)
{
- assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);
+ assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
- uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+ uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
uintptr_t table_idx_va;
unsigned long long table_idx_pa;
@@ -503,20 +510,20 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
uint64_t *subtable;
uint64_t desc;
- int table_idx;
+ unsigned int table_idx;
if (mm->base_va > table_base_va) {
/* Find the first index of the table affected by the region. */
table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);
- table_idx = (table_idx_va - table_base_va) >>
- XLAT_ADDR_SHIFT(level);
+ table_idx = (unsigned int)((table_idx_va - table_base_va) >>
+ XLAT_ADDR_SHIFT(level));
assert(table_idx < table_entries);
} else {
/* Start from the beginning of the table. */
table_idx_va = table_base_va;
- table_idx = 0;
+ table_idx = 0U;
}
#if PLAT_XLAT_TABLES_DYNAMIC
@@ -531,7 +538,8 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
action_t action = xlat_tables_map_region_action(mm,
- desc & DESC_MASK, table_idx_pa, table_idx_va, level);
+ (uint32_t)(desc & DESC_MASK), table_idx_pa,
+ table_idx_va, level);
if (action == ACTION_WRITE_BLOCK_ENTRY) {
@@ -540,6 +548,7 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
level);
} else if (action == ACTION_CREATE_NEW_TABLE) {
+ uintptr_t end_va;
subtable = xlat_table_get_empty(ctx);
if (subtable == NULL) {
@@ -551,20 +560,23 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
/* Recurse to write into subtable */
- uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
- level + 1);
- if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+ level + 1U);
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
return end_va;
} else if (action == ACTION_RECURSE_INTO_TABLE) {
+ uintptr_t end_va;
subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
/* Recurse to write into subtable */
- uintptr_t end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
- level + 1);
- if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
+ level + 1U);
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
return end_va;
} else {
@@ -581,7 +593,7 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
break;
}
- return table_idx_va - 1;
+ return table_idx_va - 1U;
}
/*
@@ -593,23 +605,23 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
* ENOMEM: There is not enough memory in the mmap array.
* EPERM: Region overlaps another one in an invalid way.
*/
-static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
+static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
{
unsigned long long base_pa = mm->base_pa;
uintptr_t base_va = mm->base_va;
size_t size = mm->size;
size_t granularity = mm->granularity;
- unsigned long long end_pa = base_pa + size - 1;
- uintptr_t end_va = base_va + size - 1;
+ unsigned long long end_pa = base_pa + size - 1U;
+ uintptr_t end_va = base_va + size - 1U;
if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
!IS_PAGE_ALIGNED(size))
return -EINVAL;
- if ((granularity != XLAT_BLOCK_SIZE(1)) &&
- (granularity != XLAT_BLOCK_SIZE(2)) &&
- (granularity != XLAT_BLOCK_SIZE(3))) {
+ if ((granularity != XLAT_BLOCK_SIZE(1U)) &&
+ (granularity != XLAT_BLOCK_SIZE(2U)) &&
+ (granularity != XLAT_BLOCK_SIZE(3U))) {
return -EINVAL;
}
@@ -624,26 +636,26 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
return -ERANGE;
/* Check that there is space in the ctx->mmap array */
- if (ctx->mmap[ctx->mmap_num - 1].size != 0)
+ if (ctx->mmap[ctx->mmap_num - 1].size != 0U)
return -ENOMEM;
/* Check for PAs and VAs overlaps with all other regions */
- for (mmap_region_t *mm_cursor = ctx->mmap;
- mm_cursor->size; ++mm_cursor) {
+ for (const mmap_region_t *mm_cursor = ctx->mmap;
+ mm_cursor->size != 0U; ++mm_cursor) {
uintptr_t mm_cursor_end_va = mm_cursor->base_va
- + mm_cursor->size - 1;
+ + mm_cursor->size - 1U;
/*
* Check if one of the regions is completely inside the other
* one.
*/
int fully_overlapped_va =
- ((base_va >= mm_cursor->base_va) &&
+ (((base_va >= mm_cursor->base_va) &&
(end_va <= mm_cursor_end_va)) ||
-
- ((mm_cursor->base_va >= base_va) &&
- (mm_cursor_end_va <= end_va));
+ ((mm_cursor->base_va >= base_va) &&
+ (mm_cursor_end_va <= end_va)))
+ ? 1 : 0;
/*
* Full VA overlaps are only allowed if both regions are
@@ -651,11 +663,11 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
* offset. Also, make sure that it's not the exact same area.
* This can only be done with static regions.
*/
- if (fully_overlapped_va) {
+ if (fully_overlapped_va != 0) {
#if PLAT_XLAT_TABLES_DYNAMIC
- if ((mm->attr & MT_DYNAMIC) ||
- (mm_cursor->attr & MT_DYNAMIC))
+ if (((mm->attr & MT_DYNAMIC) != 0U) ||
+ ((mm_cursor->attr & MT_DYNAMIC) != 0U))
return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
if ((mm_cursor->base_va - mm_cursor->base_pa) !=
@@ -674,16 +686,14 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
*/
unsigned long long mm_cursor_end_pa =
- mm_cursor->base_pa + mm_cursor->size - 1;
+ mm_cursor->base_pa + mm_cursor->size - 1U;
- int separated_pa =
- (end_pa < mm_cursor->base_pa) ||
- (base_pa > mm_cursor_end_pa);
- int separated_va =
- (end_va < mm_cursor->base_va) ||
- (base_va > mm_cursor_end_va);
+ int separated_pa = ((end_pa < mm_cursor->base_pa) ||
+ (base_pa > mm_cursor_end_pa)) ? 1 : 0;
+ int separated_va = ((end_va < mm_cursor->base_va) ||
+ (base_va > mm_cursor_end_va)) ? 1 : 0;
- if (!(separated_va && separated_pa))
+ if ((separated_va == 0) || (separated_pa == 0))
return -EPERM;
}
}
@@ -695,17 +705,17 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
- mmap_region_t *mm_last;
- unsigned long long end_pa = mm->base_pa + mm->size - 1;
- uintptr_t end_va = mm->base_va + mm->size - 1;
+ const mmap_region_t *mm_last;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
int ret;
/* Ignore empty regions */
- if (!mm->size)
+ if (mm->size == 0U)
return;
/* Static regions must be added before initializing the xlat tables. */
- assert(!ctx->initialized);
+ assert(ctx->initialized == 0);
ret = mmap_add_region_check(ctx, mm);
if (ret != 0) {
@@ -738,13 +748,15 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
* Overlapping is only allowed for static regions.
*/
- while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
- && mm_cursor->size)
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
++mm_cursor;
+ }
- while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va) &&
- (mm_cursor->size != 0U) && (mm_cursor->size < mm->size))
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
++mm_cursor;
+ }
/*
* Find the last entry marker in the mmap
@@ -763,7 +775,7 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
/* Make room for new region by moving other regions up by one place */
mm_destination = mm_cursor + 1;
- memmove(mm_destination, mm_cursor,
+ (void)memmove(mm_destination, mm_cursor,
(uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
@@ -783,9 +795,11 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
- while (mm->size) {
- mmap_add_region_ctx(ctx, mm);
- mm++;
+ const mmap_region_t *mm_cursor = mm;
+
+ while (mm_cursor->size != 0U) {
+ mmap_add_region_ctx(ctx, mm_cursor);
+ mm_cursor++;
}
}
@@ -794,13 +808,13 @@ void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
mmap_region_t *mm_cursor = ctx->mmap;
- mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
- unsigned long long end_pa = mm->base_pa + mm->size - 1;
- uintptr_t end_va = mm->base_va + mm->size - 1;
+ const mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+ unsigned long long end_pa = mm->base_pa + mm->size - 1U;
+ uintptr_t end_va = mm->base_va + mm->size - 1U;
int ret;
/* Nothing to do */
- if (!mm->size)
+ if (mm->size == 0U)
return 0;
/* Now this region is a dynamic one */
@@ -815,16 +829,18 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
* static regions in mmap_add_region_ctx().
*/
- while ((mm_cursor->base_va + mm_cursor->size - 1)
- < end_va && mm_cursor->size)
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) < end_va)
+ && (mm_cursor->size != 0U)) {
++mm_cursor;
+ }
- while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
- && (mm_cursor->size < mm->size))
+ while (((mm_cursor->base_va + mm_cursor->size - 1U) == end_va) &&
+ (mm_cursor->size != 0U) && (mm_cursor->size < mm->size)) {
++mm_cursor;
+ }
/* Make room for new region by moving other regions up by one place */
- memmove(mm_cursor + 1, mm_cursor,
+ (void)memmove(mm_cursor + 1U, mm_cursor,
(uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
@@ -832,7 +848,7 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
* This shouldn't happen as we have checked in mmap_add_region_check
* that there is free space.
*/
- assert(mm_last->size == 0);
+ assert(mm_last->size == 0U);
*mm_cursor = *mm;
@@ -840,14 +856,14 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
* Update the translation tables if the xlat tables are initialized. If
* not, this region will be mapped when they are initialized.
*/
- if (ctx->initialized) {
- uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor,
- 0, ctx->base_table, ctx->base_table_entries,
+ if (ctx->initialized != 0) {
+ end_va = xlat_tables_map_region(ctx, mm_cursor,
+ 0U, ctx->base_table, ctx->base_table_entries,
ctx->base_level);
/* Failed to map, remove mmap entry, unmap and return error. */
- if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
- memmove(mm_cursor, mm_cursor + 1,
+ if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
+ (void)memmove(mm_cursor, mm_cursor + 1U,
(uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
@@ -862,13 +878,14 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
* entries, undo every change done up to this point.
*/
mmap_region_t unmap_mm = {
- .base_pa = 0,
+ .base_pa = 0U,
.base_va = mm->base_va,
.size = end_va - mm->base_va,
- .attr = 0
+ .attr = 0U
};
- xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
- ctx->base_table_entries, ctx->base_level);
+ xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
return -ENOMEM;
}
@@ -903,61 +920,61 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
size_t size)
{
mmap_region_t *mm = ctx->mmap;
- mmap_region_t *mm_last = mm + ctx->mmap_num;
+ const mmap_region_t *mm_last = mm + ctx->mmap_num;
int update_max_va_needed = 0;
int update_max_pa_needed = 0;
/* Check sanity of mmap array. */
- assert(mm[ctx->mmap_num].size == 0);
+ assert(mm[ctx->mmap_num].size == 0U);
- while (mm->size) {
+ while (mm->size != 0U) {
if ((mm->base_va == base_va) && (mm->size == size))
break;
++mm;
}
/* Check that the region was found */
- if (mm->size == 0)
+ if (mm->size == 0U)
return -EINVAL;
/* If the region is static it can't be removed */
- if (!(mm->attr & MT_DYNAMIC))
+ if ((mm->attr & MT_DYNAMIC) == 0U)
return -EPERM;
/* Check if this region is using the top VAs or PAs. */
- if ((mm->base_va + mm->size - 1) == ctx->max_va)
+ if ((mm->base_va + mm->size - 1U) == ctx->max_va)
update_max_va_needed = 1;
- if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
+ if ((mm->base_pa + mm->size - 1U) == ctx->max_pa)
update_max_pa_needed = 1;
/* Update the translation tables if needed */
- if (ctx->initialized) {
- xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
+ if (ctx->initialized != 0) {
+ xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
ctx->base_table_entries,
ctx->base_level);
xlat_arch_tlbi_va_sync();
}
/* Remove this region by moving the rest down by one place. */
- memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);
+ (void)memmove(mm, mm + 1U, (uintptr_t)mm_last - (uintptr_t)mm);
/* Check if we need to update the max VAs and PAs */
- if (update_max_va_needed) {
- ctx->max_va = 0;
+ if (update_max_va_needed == 1) {
+ ctx->max_va = 0U;
mm = ctx->mmap;
- while (mm->size) {
- if ((mm->base_va + mm->size - 1) > ctx->max_va)
- ctx->max_va = mm->base_va + mm->size - 1;
+ while (mm->size != 0U) {
+ if ((mm->base_va + mm->size - 1U) > ctx->max_va)
+ ctx->max_va = mm->base_va + mm->size - 1U;
++mm;
}
}
- if (update_max_pa_needed) {
- ctx->max_pa = 0;
+ if (update_max_pa_needed == 1) {
+ ctx->max_pa = 0U;
mm = ctx->mmap;
- while (mm->size) {
- if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
- ctx->max_pa = mm->base_pa + mm->size - 1;
+ while (mm->size != 0U) {
+ if ((mm->base_pa + mm->size - 1U) > ctx->max_pa)
+ ctx->max_pa = mm->base_pa + mm->size - 1U;
++mm;
}
}
@@ -970,9 +987,10 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
void init_xlat_tables_ctx(xlat_ctx_t *ctx)
{
assert(ctx != NULL);
- assert(!ctx->initialized);
- assert(ctx->xlat_regime == EL3_REGIME || ctx->xlat_regime == EL1_EL0_REGIME);
- assert(!is_mmu_enabled_ctx(ctx));
+ assert(ctx->initialized == 0);
+ assert((ctx->xlat_regime == EL3_REGIME) ||
+ (ctx->xlat_regime == EL1_EL0_REGIME));
+ assert(is_mmu_enabled_ctx(ctx) == 0);
mmap_region_t *mm = ctx->mmap;
@@ -980,25 +998,26 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
/* All tables must be zeroed before mapping any region. */
- for (unsigned int i = 0; i < ctx->base_table_entries; i++)
+ for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
ctx->base_table[i] = INVALID_DESC;
- for (unsigned int j = 0; j < ctx->tables_num; j++) {
+ for (int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
ctx->tables_mapped_regions[j] = 0;
#endif
- for (unsigned int i = 0; i < XLAT_TABLE_ENTRIES; i++)
+ for (unsigned int i = 0U; i < XLAT_TABLE_ENTRIES; i++)
ctx->tables[j][i] = INVALID_DESC;
}
- while (mm->size) {
- uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
- ctx->base_table_entries, ctx->base_level);
+ while (mm->size != 0U) {
+ uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
+ ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
- if (end_va != mm->base_va + mm->size - 1) {
+ if (end_va != (mm->base_va + mm->size - 1U)) {
ERROR("Not enough memory to map region:\n"
- " VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
- (void *)mm->base_va, mm->base_pa, mm->size, mm->attr);
+ " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
+ mm->base_va, mm->base_pa, mm->size, mm->attr);
panic();
}
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 93640ddb..47c5ae8d 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef __XLAT_TABLES_PRIVATE_H__
-#define __XLAT_TABLES_PRIVATE_H__
+#ifndef XLAT_TABLES_PRIVATE_H
+#define XLAT_TABLES_PRIVATE_H
#include <platform_def.h>
#include <xlat_tables_defs.h>
@@ -35,6 +35,8 @@
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
/*
* Return the execute-never mask that will prevent instruction fetch at the
* given translation regime.
@@ -61,7 +63,7 @@ void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
void xlat_arch_tlbi_va_sync(void);
/* Print VA, PA, size and attributes of all regions in the mmap array. */
-void xlat_mmap_print(mmap_region_t *const mmap);
+void xlat_mmap_print(const mmap_region_t *mmap);
/*
* Print the current state of the translation tables by reading them from
@@ -73,14 +75,14 @@ void xlat_tables_print(xlat_ctx_t *ctx);
* Returns a block/page table descriptor for the given level and attributes.
*/
uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr,
- unsigned long long addr_pa, int level);
+ unsigned long long addr_pa, unsigned int level);
/*
* Architecture-specific initialization code.
*/
/* Returns the current Exception Level. The returned EL must be 1 or higher. */
-int xlat_arch_current_el(void);
+unsigned int xlat_arch_current_el(void);
/*
* Return the maximum physical address supported by the hardware.
@@ -94,4 +96,4 @@ unsigned long long xlat_arch_get_max_supported_pa(void);
*/
int is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
-#endif /* __XLAT_TABLES_PRIVATE_H__ */
+#endif /* XLAT_TABLES_PRIVATE_H */
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 90a0a862..0026e919 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -18,7 +18,7 @@
#if LOG_LEVEL < LOG_LEVEL_VERBOSE
-void xlat_mmap_print(__unused mmap_region_t *const mmap)
+void xlat_mmap_print(__unused const mmap_region_t *mmap)
{
/* Empty */
}
@@ -30,7 +30,7 @@ void xlat_tables_print(__unused xlat_ctx_t *ctx)
#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
-void xlat_mmap_print(mmap_region_t *const mmap)
+void xlat_mmap_print(const mmap_region_t *mmap)
{
tf_printf("mmap:\n");
const mmap_region_t *mm = mmap;
@@ -47,7 +47,7 @@ void xlat_mmap_print(mmap_region_t *const mmap)
/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
- int mem_type_index = ATTR_INDEX_GET(desc);
+ uint64_t mem_type_index = ATTR_INDEX_GET(desc);
int xlat_regime = ctx->xlat_regime;
if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
@@ -61,8 +61,8 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
if (xlat_regime == EL3_REGIME) {
/* For EL3 only check the AP[2] and XN bits. */
- tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
- tf_printf((desc & UPPER_ATTRS(XN)) ? "-XN" : "-EXEC");
+ tf_printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
+ tf_printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
} else {
assert(xlat_regime == EL1_EL0_REGIME);
/*
@@ -80,18 +80,18 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
#endif
- tf_printf((desc & LOWER_ATTRS(AP_RO)) ? "-RO" : "-RW");
+ tf_printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
/* Only check one of PXN and UXN, the other one is the same. */
- tf_printf((desc & UPPER_ATTRS(PXN)) ? "-XN" : "-EXEC");
+ tf_printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
/*
* Privileged regions can only be accessed from EL1, user
* regions can be accessed from EL1 and EL0.
*/
- tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
+ tf_printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
? "-USER" : "-PRIV");
}
- tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
+ tf_printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
}
static const char * const level_spacers[] = {
@@ -108,17 +108,15 @@ static const char *invalid_descriptors_ommited =
* Recursive function that reads the translation tables passed as an argument
* and prints their status.
*/
-static void xlat_tables_print_internal(xlat_ctx_t *ctx,
- const uintptr_t table_base_va,
- uint64_t *const table_base, const int table_entries,
- const unsigned int level)
+static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
+ const uint64_t *table_base, unsigned int table_entries,
+ unsigned int level)
{
assert(level <= XLAT_TABLE_LEVEL_MAX);
uint64_t desc;
uintptr_t table_idx_va = table_base_va;
- int table_idx = 0;
-
+ unsigned int table_idx = 0U;
size_t level_size = XLAT_BLOCK_SIZE(level);
/*
@@ -136,9 +134,9 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
if ((desc & DESC_MASK) == INVALID_DESC) {
if (invalid_row_count == 0) {
- tf_printf("%sVA:%p size:0x%zx\n",
+ tf_printf("%sVA:0x%lx size:0x%zx\n",
level_spacers[level],
- (void *)table_idx_va, level_size);
+ table_idx_va, level_size);
}
invalid_row_count++;
@@ -164,20 +162,20 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
* but instead points to the next translation
* table in the translation table walk.
*/
- tf_printf("%sVA:%p size:0x%zx\n",
+ tf_printf("%sVA:0x%lx size:0x%zx\n",
level_spacers[level],
- (void *)table_idx_va, level_size);
+ table_idx_va, level_size);
uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
xlat_tables_print_internal(ctx, table_idx_va,
(uint64_t *)addr_inner,
- XLAT_TABLE_ENTRIES, level + 1);
+ XLAT_TABLE_ENTRIES, level + 1U);
} else {
- tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
+ tf_printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
level_spacers[level],
- (void *)table_idx_va,
- (unsigned long long)(desc & TABLE_ADDR_MASK),
+ table_idx_va,
+ (uint64_t)(desc & TABLE_ADDR_MASK),
level_size);
xlat_desc_print(ctx, desc);
tf_printf("\n");
@@ -197,6 +195,8 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx,
void xlat_tables_print(xlat_ctx_t *ctx)
{
const char *xlat_regime_str;
+ int used_page_tables;
+
if (ctx->xlat_regime == EL1_EL0_REGIME) {
xlat_regime_str = "1&0";
} else {
@@ -206,29 +206,28 @@ void xlat_tables_print(xlat_ctx_t *ctx)
VERBOSE("Translation tables state:\n");
VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str);
VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
- VERBOSE(" Max allowed VA: %p\n", (void *) ctx->va_max_address);
+ VERBOSE(" Max allowed VA: 0x%lx\n", ctx->va_max_address);
VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
- VERBOSE(" Max mapped VA: %p\n", (void *) ctx->max_va);
+ VERBOSE(" Max mapped VA: 0x%lx\n", ctx->max_va);
- VERBOSE(" Initial lookup level: %i\n", ctx->base_level);
- VERBOSE(" Entries @initial lookup level: %i\n",
+ VERBOSE(" Initial lookup level: %u\n", ctx->base_level);
+ VERBOSE(" Entries @initial lookup level: %u\n",
ctx->base_table_entries);
- int used_page_tables;
#if PLAT_XLAT_TABLES_DYNAMIC
used_page_tables = 0;
- for (unsigned int i = 0; i < ctx->tables_num; ++i) {
+ for (int i = 0; i < ctx->tables_num; ++i) {
if (ctx->tables_mapped_regions[i] != 0)
++used_page_tables;
}
#else
used_page_tables = ctx->next_table;
#endif
- VERBOSE(" Used %i sub-tables out of %i (spare: %i)\n",
+ VERBOSE(" Used %d sub-tables out of %d (spare: %d)\n",
used_page_tables, ctx->tables_num,
ctx->tables_num - used_page_tables);
- xlat_tables_print_internal(ctx, 0, ctx->base_table,
+ xlat_tables_print_internal(ctx, 0U, ctx->base_table,
ctx->base_table_entries, ctx->base_level);
}
@@ -251,13 +250,13 @@ void xlat_tables_print(xlat_ctx_t *ctx)
*/
static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
void *xlat_table_base,
- int xlat_table_base_entries,
+ unsigned int xlat_table_base_entries,
unsigned long long virt_addr_space_size,
- int *out_level)
+ unsigned int *out_level)
{
unsigned int start_level;
uint64_t *table;
- int entries;
+ unsigned int entries;
start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
@@ -267,9 +266,7 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
for (unsigned int level = start_level;
level <= XLAT_TABLE_LEVEL_MAX;
++level) {
- int idx;
- uint64_t desc;
- uint64_t desc_type;
+ uint64_t idx, desc, desc_type;
idx = XLAT_TABLE_IDX(virtual_addr, level);
if (idx >= entries) {
@@ -318,22 +315,23 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
uint32_t *attributes, uint64_t **table_entry,
- unsigned long long *addr_pa, int *table_level)
+ unsigned long long *addr_pa, unsigned int *table_level)
{
uint64_t *entry;
uint64_t desc;
- int level;
+ unsigned int level;
unsigned long long virt_addr_space_size;
/*
* Sanity-check arguments.
*/
assert(ctx != NULL);
- assert(ctx->initialized);
- assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
+ assert(ctx->initialized != 0);
+ assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
+ (ctx->xlat_regime == EL3_REGIME));
- virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
- assert(virt_addr_space_size > 0);
+ virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
+ assert(virt_addr_space_size > 0U);
entry = find_xlat_table_entry(base_va,
ctx->base_table,
@@ -341,7 +339,7 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
virt_addr_space_size,
&level);
if (entry == NULL) {
- WARN("Address %p is not mapped.\n", (void *)base_va);
+ WARN("Address 0x%lx is not mapped.\n", base_va);
return -EINVAL;
}
@@ -366,9 +364,9 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
assert(attributes != NULL);
- *attributes = 0;
+ *attributes = 0U;
- int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
*attributes |= MT_MEMORY;
@@ -379,20 +377,21 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
*attributes |= MT_DEVICE;
}
- int ap2_bit = (desc >> AP2_SHIFT) & 1;
+ uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;
if (ap2_bit == AP2_RW)
*attributes |= MT_RW;
if (ctx->xlat_regime == EL1_EL0_REGIME) {
- int ap1_bit = (desc >> AP1_SHIFT) & 1;
+ uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;
+
if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
*attributes |= MT_USER;
}
- int ns_bit = (desc >> NS_SHIFT) & 1;
+ uint64_t ns_bit = (desc >> NS_SHIFT) & 1U;
- if (ns_bit == 1)
+ if (ns_bit == 1U)
*attributes |= MT_NS;
uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
@@ -400,7 +399,7 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
if ((desc & xn_mask) == xn_mask) {
*attributes |= MT_EXECUTE_NEVER;
} else {
- assert((desc & xn_mask) == 0);
+ assert((desc & xn_mask) == 0U);
}
return 0;
@@ -415,7 +414,7 @@ int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
}
-int change_mem_attributes(xlat_ctx_t *ctx,
+int change_mem_attributes(const xlat_ctx_t *ctx,
uintptr_t base_va,
size_t size,
uint32_t attr)
@@ -423,49 +422,49 @@ int change_mem_attributes(xlat_ctx_t *ctx,
/* Note: This implementation isn't optimized. */
assert(ctx != NULL);
- assert(ctx->initialized);
+ assert(ctx->initialized != 0);
unsigned long long virt_addr_space_size =
- (unsigned long long)ctx->va_max_address + 1;
- assert(virt_addr_space_size > 0);
+ (unsigned long long)ctx->va_max_address + 1U;
+ assert(virt_addr_space_size > 0U);
if (!IS_PAGE_ALIGNED(base_va)) {
- WARN("%s: Address %p is not aligned on a page boundary.\n",
- __func__, (void *)base_va);
+ WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
+ __func__, base_va);
return -EINVAL;
}
- if (size == 0) {
+ if (size == 0U) {
WARN("%s: Size is 0.\n", __func__);
return -EINVAL;
}
- if ((size % PAGE_SIZE) != 0) {
+ if ((size % PAGE_SIZE) != 0U) {
WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
__func__, size);
return -EINVAL;
}
- if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
+ if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
WARN("%s: Mapping memory as read-write and executable not allowed.\n",
__func__);
return -EINVAL;
}
- int pages_count = size / PAGE_SIZE;
+ size_t pages_count = size / PAGE_SIZE;
- VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
- pages_count, (void *)base_va);
+ VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
+ pages_count, base_va);
uintptr_t base_va_original = base_va;
/*
* Sanity checks.
*/
- for (int i = 0; i < pages_count; ++i) {
- uint64_t *entry;
- uint64_t desc;
- int level;
+ for (size_t i = 0U; i < pages_count; ++i) {
+ const uint64_t *entry;
+ uint64_t desc, attr_index;
+ unsigned int level;
entry = find_xlat_table_entry(base_va,
ctx->base_table,
@@ -473,7 +472,7 @@ int change_mem_attributes(xlat_ctx_t *ctx,
virt_addr_space_size,
&level);
if (entry == NULL) {
- WARN("Address %p is not mapped.\n", (void *)base_va);
+ WARN("Address 0x%lx is not mapped.\n", base_va);
return -EINVAL;
}
@@ -485,8 +484,8 @@ int change_mem_attributes(xlat_ctx_t *ctx,
*/
if (((desc & DESC_MASK) != PAGE_DESC) ||
(level != XLAT_TABLE_LEVEL_MAX)) {
- WARN("Address %p is not mapped at the right granularity.\n",
- (void *)base_va);
+ WARN("Address 0x%lx is not mapped at the right granularity.\n",
+ base_va);
WARN("Granularity is 0x%llx, should be 0x%x.\n",
(unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
return -EINVAL;
@@ -495,11 +494,11 @@ int change_mem_attributes(xlat_ctx_t *ctx,
/*
* If the region type is device, it shouldn't be executable.
*/
- int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
if (attr_index == ATTR_DEVICE_INDEX) {
- if ((attr & MT_EXECUTE_NEVER) == 0) {
- WARN("Setting device memory as executable at address %p.",
- (void *)base_va);
+ if ((attr & MT_EXECUTE_NEVER) == 0U) {
+ WARN("Setting device memory as executable at address 0x%lx.",
+ base_va);
return -EINVAL;
}
}
@@ -510,14 +509,14 @@ int change_mem_attributes(xlat_ctx_t *ctx,
/* Restore original value. */
base_va = base_va_original;
- for (int i = 0; i < pages_count; ++i) {
+ for (unsigned int i = 0U; i < pages_count; ++i) {
- uint32_t old_attr, new_attr;
- uint64_t *entry;
- int level;
- unsigned long long addr_pa;
+ uint32_t old_attr = 0U, new_attr;
+ uint64_t *entry = NULL;
+ unsigned int level = 0U;
+ unsigned long long addr_pa = 0ULL;
- get_mem_attributes_internal(ctx, base_va, &old_attr,
+ (void) get_mem_attributes_internal(ctx, base_va, &old_attr,
&entry, &addr_pa, &level);
/*