xlat lib v2: Remove hard-coded virtual address space size

Previous patches have made it possible to specify the physical and
virtual address space sizes for each translation context. However,
some places in the code still assume that the physical and virtual
address space sizes are PLAT_PHY_ADDR_SPACE_SIZE and
PLAT_VIRT_ADDR_SPACE_SIZE respectively.

This patch removes these assumptions and instead reads the relevant
address space size from the translation context itself. This
information is now passed as an argument to the enable_mmu_arch()
function, which needs it to configure the TCR_ELx.T0SZ field (on
AArch64) or the TTBCR.T0SZ field (on AArch32) appropriately.
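
For illustration, the following standalone sketch (not part of this
patch; names are chosen for the example only and it assumes a
GCC/Clang-style __builtin_ctzll) shows the arithmetic performed on
AArch64: for a power-of-two virtual address space size, counting the
trailing zero bits gives log2 of the size, and T0SZ is 64 minus that
value.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Example: a 4GB virtual address space, i.e. max_va = 2^32 - 1. */
        uint64_t max_va = 0xFFFFFFFFULL;
        uint64_t virtual_addr_space_size = max_va + 1ULL;

        /* The size must be a power of two for ctz to equal log2. */
        assert((virtual_addr_space_size &
                (virtual_addr_space_size - 1ULL)) == 0ULL);

        unsigned int t0sz =
            64U - (unsigned int)__builtin_ctzll(virtual_addr_space_size);

        printf("T0SZ = %u\n", t0sz);    /* prints "T0SZ = 32" */
        return 0;
    }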

Change-Id: I20b0e68b03a143e998695d42911d9954328a06aa
Signed-off-by: Sandrine Bailleux <sandrine.bailleux@arm.com>
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index 78aae2b..9c4d68b 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -87,7 +87,8 @@
  ******************************************************************************/
 void enable_mmu_arch(unsigned int flags,
 		uint64_t *base_table,
-		unsigned long long max_pa)
+		unsigned long long max_pa,
+		uintptr_t max_va)
 {
 	u_register_t mair0, ttbcr, sctlr;
 	uint64_t ttbr0;
@@ -123,9 +124,18 @@
 
 	/*
 	 * Limit the input address ranges and memory region sizes translated
-	 * using TTBR0 to the given virtual address space size.
+	 * using TTBR0 to the given virtual address space size, if smaller than
+	 * 32 bits.
 	 */
-	ttbcr |= 32 - __builtin_ctzl((uintptr_t) PLAT_VIRT_ADDR_SPACE_SIZE);
+	if (max_va != UINT32_MAX) {
+		uintptr_t virtual_addr_space_size = max_va + 1;
+		assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+		/*
+		 * __builtin_ctzl(0) is undefined but here we are guaranteed
+		 * that virtual_addr_space_size is in the range [1, UINT32_MAX].
+		 */
+		ttbcr |= 32 - __builtin_ctzl(virtual_addr_space_size);
+	}
 
 	/*
 	 * Set the cacheability and shareability attributes for memory
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 49b0605..5f389f3 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -196,7 +196,8 @@
 
 void enable_mmu_arch(unsigned int flags,
 		uint64_t *base_table,
-		unsigned long long max_pa)
+		unsigned long long max_pa,
+		uintptr_t max_va)
 {
 	uint64_t mair, ttbr, tcr;
 
@@ -215,7 +216,14 @@
 	 * Limit the input address ranges and memory region sizes translated
 	 * using TTBR0 to the given virtual address space size.
 	 */
-	tcr = 64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE);
+	assert(max_va < UINTPTR_MAX);
+	uintptr_t virtual_addr_space_size = max_va + 1;
+	assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+	/*
+	 * __builtin_ctzl(0) is undefined but here we are guaranteed that
+	 * virtual_addr_space_size is in the range [1, UINTPTR_MAX].
+	 */
+	tcr = 64 - __builtin_ctzl(virtual_addr_space_size);
 
 	/*
 	 * Set the cacheability and shareability attributes for memory
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index cdf1669..5efc834 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -1178,8 +1178,7 @@
 		mm++;
 	}
 
-	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
-	       xlat_arch_get_max_supported_pa());
+	assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
 	assert(ctx->max_va <= ctx->va_max_address);
 	assert(ctx->max_pa <= ctx->pa_max_address);
 
@@ -1205,7 +1204,7 @@
  * space size might be mapped.
  */
 #ifdef PLAT_XLAT_TABLES_DYNAMIC
-#define MAX_PHYS_ADDR	PLAT_PHY_ADDR_SPACE_SIZE
+#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
 #else
 #define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
 #endif
@@ -1214,19 +1213,22 @@
 
 void enable_mmu_secure(unsigned int flags)
 {
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR);
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
 }
 
 #else
 
 void enable_mmu_el1(unsigned int flags)
 {
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR);
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
 }
 
 void enable_mmu_el3(unsigned int flags)
 {
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR);
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
 }
 
 #endif /* AARCH32 */
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 3030388..d352583 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -79,9 +79,8 @@
 unsigned long long xlat_arch_get_max_supported_pa(void);
 
 /* Enable MMU and configure it to use the specified translation tables. */
-void enable_mmu_arch(unsigned int flags,
-		uint64_t *base_table,
-		unsigned long long max_pa);
+void enable_mmu_arch(unsigned int flags, uint64_t *base_table,
+		unsigned long long max_pa, uintptr_t max_va);
 
 /* Return 1 if the MMU of this Exception Level is enabled, 0 otherwise. */
 int is_mmu_enabled(void);
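
As a further illustration of the AArch32 path above (again a
standalone sketch, not part of the patch; names are for the example
only and a GCC/Clang-style __builtin_ctz is assumed): when max_va is
UINT32_MAX the full 32-bit input range is used and T0SZ stays 0,
otherwise T0SZ is 32 minus log2 of the address space size.

    #include <stdint.h>
    #include <stdio.h>

    /* TTBCR.T0SZ for a given 32-bit max_va (sketch only). */
    static unsigned int ttbcr_t0sz(uint32_t max_va)
    {
        if (max_va == UINT32_MAX)
            return 0U;              /* Full 4GB input range. */

        /* max_va + 1 is assumed to be a power of two for this sketch. */
        uint32_t size = max_va + 1U;
        return 32U - (unsigned int)__builtin_ctz(size);
    }

    int main(void)
    {
        printf("%u\n", ttbcr_t0sz(UINT32_MAX));   /* 0 -> 4GB range   */
        printf("%u\n", ttbcr_t0sz(0x7FFFFFFFU));  /* 1 -> 2GB range   */
        printf("%u\n", ttbcr_t0sz(0x0FFFFFFFU));  /* 4 -> 256MB range */
        return 0;
    }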