author    Deepika Bhavnani <deepika.bhavnani@arm.com>  2019-09-03 21:06:17 +0300
committer Deepika Bhavnani <deepika.bhavnani@arm.com>  2019-11-12 11:14:18 -0600
commit    9afe8cdc06c8e82b83d327978b937e51ade4925a (patch)
tree      607e1be1424ff86e6fdae2015e407bddd78cdd4a /lib/xlat_tables
parent    1d2b41614c5675b144ae1f4517c1f8bf249a12d2 (diff)
download  arm-trusted-firmware-9afe8cdc06c8e82b83d327978b937e51ade4925a.tar.gz
Coding guidelines suggest not to use unsigned long
`unsigned long` should be replaced with:

1. `unsigned int` or `unsigned long long` - if the width is fixed, based on
   the architecture (AArch32 or AArch64).
2. `u_register_t` - if it is supposed to be 32-bit wide on AArch32 and
   64-bit wide on AArch64.

Translation descriptors are always 32-bit wide, so `uint32_t` is used here
to describe the exact size of translation descriptors, instead of
`unsigned int`, which only guarantees a minimum of 32 bits.

Signed-off-by: Deepika Bhavnani <deepika.bhavnani@arm.com>
Change-Id: I6a2af2e8b3c71170e2634044e0b887f07a41677e
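As an illustration of the rule described above, the following sketch (not
taken from the patch; xlat_desc_t and make_section_desc are hypothetical
names) contrasts the exact-width descriptor type with the register-width
type on an AArch32 build:

    /*
     * Minimal sketch, not part of this change, showing the two width
     * rules side by side.
     */
    #include <stdint.h>

    /* Exact-width case: a short-descriptor translation table entry is
     * architecturally 32 bits, so its type spells that out. */
    typedef uint32_t xlat_desc_t;

    /* Register-width case: TF-A uses u_register_t for values that track
     * the native register size; a local stand-in is defined here only so
     * the sketch compiles on its own. */
    #ifdef __aarch64__
    typedef uint64_t u_register_t;	/* 64-bit wide on AArch64 */
    #else
    typedef uint32_t u_register_t;	/* 32-bit wide on AArch32 */
    #endif

    /* Builds a level-1 section descriptor at the exact 32-bit width,
     * mirroring mmap_desc() returning uint32_t after this change:
     * bits [31:20] hold the section base address and bits [1:0] == 0b10
     * mark a section entry. */
    static xlat_desc_t make_section_desc(uint32_t addr_pa)
    {
    	return (addr_pa & 0xFFF00000U) | 0x2U;
    }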
Diffstat (limited to 'lib/xlat_tables')
-rw-r--r--  lib/xlat_tables/aarch32/nonlpae_tables.c | 22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/lib/xlat_tables/aarch32/nonlpae_tables.c b/lib/xlat_tables/aarch32/nonlpae_tables.c
index bd6b152ef..b8c268665 100644
--- a/lib/xlat_tables/aarch32/nonlpae_tables.c
+++ b/lib/xlat_tables/aarch32/nonlpae_tables.c
@@ -284,10 +284,10 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
}
/* map all memory as shared/global/domain0/no-usr access */
-static unsigned long mmap_desc(unsigned attr, unsigned long addr_pa,
- unsigned int level)
+static uint32_t mmap_desc(unsigned attr, unsigned int addr_pa,
+ unsigned int level)
{
- unsigned long desc;
+ uint32_t desc;
switch (level) {
case 1:
@@ -380,14 +380,14 @@ static unsigned int mmap_region_attr(const mmap_region_t *mm, uintptr_t base_va,
}
static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
- unsigned long base_va,
- unsigned long *table,
+ unsigned int base_va,
+ uint32_t *table,
unsigned int level)
{
unsigned int level_size_shift = (level == 1) ?
ONE_MB_SHIFT : FOUR_KB_SHIFT;
unsigned int level_size = 1 << level_size_shift;
- unsigned long level_index_mask = (level == 1) ?
+ unsigned int level_index_mask = (level == 1) ?
(NUM_1MB_IN_4GB - 1) << ONE_MB_SHIFT :
(NUM_4K_IN_1MB - 1) << FOUR_KB_SHIFT;
@@ -396,7 +396,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
VERBOSE("init xlat table at %p (level%1d)\n", (void *)table, level);
do {
- unsigned long desc = MMU32B_UNSET_DESC;
+ uint32_t desc = MMU32B_UNSET_DESC;
if (mm->base_va + mm->size <= base_va) {
/* Area now after the region so skip it */
@@ -427,7 +427,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
}
if (desc == MMU32B_UNSET_DESC) {
- unsigned long xlat_table;
+ uintptr_t xlat_table;
/*
* Area not covered by a region so need finer table
@@ -443,7 +443,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
~(MMU32B_L1_TABLE_ALIGN - 1);
desc = *table;
} else {
- xlat_table = (unsigned long)mmu_l2_base +
+ xlat_table = (uintptr_t)mmu_l2_base +
next_xlat * MMU32B_L2_TABLE_SIZE;
next_xlat++;
assert(next_xlat <= MAX_XLAT_TABLES);
@@ -456,7 +456,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
}
/* Recurse to fill in new table */
mm = init_xlation_table_inner(mm, base_va,
- (unsigned long *)xlat_table,
+ (uint32_t *)xlat_table,
level + 1);
}
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
@@ -480,7 +480,7 @@ void init_xlat_tables(void)
memset(mmu_l1_base, 0, MMU32B_L1_TABLE_SIZE);
- init_xlation_table_inner(mmap, 0, (unsigned long *)mmu_l1_base, 1);
+ init_xlation_table_inner(mmap, 0, (uint32_t *)mmu_l1_base, 1);
VERBOSE("init xlat - max_va=%p, max_pa=%llx\n",
(void *)xlat_max_va, xlat_max_pa);