[arch][arm] Add option to enable shareable caches without smp support

Set WITH_SHAREABLE_CACHE to make cached mmu mappings shareable even in
non-smp builds. This allows a non-smp lk kernel to run as a guest OS on
a multi-cluster smp system.

Change-Id: I09f3bf206af4629bfa3d0c0b72d1bfa100fa8bde
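
Note: the new symbol is consumed purely at preprocessor level, next to
WITH_SMP. A minimal sketch of the guard used throughout this change,
assuming the build defines both symbols as plain 0/1 values (for
example via something like GLOBAL_DEFINES += WITH_SHAREABLE_CACHE=1 in
a project rules.mk; the exact hookup is platform-specific and not part
of this patch):

    /*
     * Sketch only: either symbol being non-zero selects the shareable
     * memory attributes and the inner-shareable (*IS) TLB and
     * branch-predictor maintenance ops. The bitwise '|' is legal here
     * because the preprocessor evaluates the operands as integers, and
     * an identifier left undefined in an #if expression reads as 0.
     */
    #if WITH_SMP | WITH_SHAREABLE_CACHE
    /* shareable mappings, broadcast TLB maintenance */
    #else
    /* non-shareable mappings, core-local TLB maintenance */
    #endif
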
diff --git a/arch/arm/arm/mmu.c b/arch/arm/arm/mmu.c
index b502527..2ab9a05 100644
--- a/arch/arm/arm/mmu.c
+++ b/arch/arm/arm/mmu.c
@@ -55,7 +55,7 @@
     switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
         case ARCH_MMU_FLAG_CACHED:
             arch_flags |= MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
-#if WITH_SMP
+#if WITH_SMP | WITH_SHAREABLE_CACHE
             arch_flags |= MMU_MEMORY_L1_SECTION_SHAREABLE;
 #endif
             break;
@@ -103,11 +103,11 @@
     uint32_t arch_flags = 0;
     switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
         case ARCH_MMU_FLAG_CACHED:
-#if WITH_SMP
+#if WITH_SMP | WITH_SHAREABLE_CACHE
             arch_flags |= MMU_MEMORY_L2_SHAREABLE;
 #endif
             arch_flags |= MMU_MEMORY_L2_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
-#if WITH_SMP
+#if WITH_SMP | WITH_SHAREABLE_CACHE
             arch_flags |= MMU_MEMORY_L2_SHAREABLE;
 #endif
             break;
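
Both guarded sites in mmu.c repeat the same compound condition. If more
shareability sites show up later, a follow-up could factor the test into
a single helper macro so the condition cannot drift between sites; a
hypothetical sketch (macro name invented here, not part of this change,
and it assumes both symbols are defined as 0/1):

    #define MMU_SHAREABLE_MAPPINGS (WITH_SMP | WITH_SHAREABLE_CACHE)

    #if MMU_SHAREABLE_MAPPINGS
            arch_flags |= MMU_MEMORY_L2_SHAREABLE;
    #endif
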
diff --git a/arch/arm/include/arch/arm/mmu.h b/arch/arm/include/arch/arm/mmu.h
index 7fdb778..31e15e5 100644
--- a/arch/arm/include/arch/arm/mmu.h
+++ b/arch/arm/include/arch/arm/mmu.h
@@ -163,7 +163,7 @@
  * inner/outer (IRGN/RGN): write-back + write-allocate
  * (select inner sharable on smp)
  */
-#if WITH_SMP
+#if WITH_SMP | WITH_SHAREABLE_CACHE
 #define MMU_TTBRx_SHARABLE_FLAGS (MMU_MEMORY_TTBR_S | MMU_MEMORY_TTBR_NOS)
 #else
 #define MMU_TTBRx_SHARABLE_FLAGS (0)
@@ -174,7 +174,7 @@
      MMU_TTBRx_SHARABLE_FLAGS)
 
 /* Section mapping, TEX[2:0]=001, CB=11, S=1, AP[2:0]=001 */
-#if WITH_SMP
+#if WITH_SMP | WITH_SHAREABLE_CACHE
 #define MMU_KERNEL_L1_PTE_FLAGS \
     (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \
      MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE | \
@@ -215,7 +215,7 @@
 /* tlb routines */
 
 static inline void arm_after_invalidate_tlb_barrier(void) {
-#if WITH_SMP
+#if WITH_SMP | WITH_SHAREABLE_CACHE
     arm_write_bpiallis(0);
 #else
     arm_write_bpiall(0);
@@ -225,7 +225,7 @@
 }
 
 static inline void arm_invalidate_tlb_global_no_barrier(void) {
-#if WITH_SMP
+#if WITH_SMP | WITH_SHAREABLE_CACHE
     arm_write_tlbiallis(0);
 #else
     arm_write_tlbiall(0);
@@ -239,7 +239,7 @@
 }
 
 static inline void arm_invalidate_tlb_mva_no_barrier(vaddr_t va) {
-#if WITH_SMP
+#if WITH_SMP | WITH_SHAREABLE_CACHE
     arm_write_tlbimvaais(va & 0xfffff000);
 #else
     arm_write_tlbimvaa(va & 0xfffff000);
@@ -254,7 +254,7 @@
 
 
 static inline void arm_invalidate_tlb_asid_no_barrier(uint8_t asid) {
-#if WITH_SMP
+#if WITH_SMP | WITH_SHAREABLE_CACHE
     arm_write_tlbiasidis(asid);
 #else
     arm_write_tlbiasid(asid);
@@ -268,7 +268,7 @@
 }
 
 static inline void arm_invalidate_tlb_mva_asid_no_barrier(vaddr_t va, uint8_t asid) {
-#if WITH_SMP
+#if WITH_SMP | WITH_SHAREABLE_CACHE
     arm_write_tlbimvais((va & 0xfffff000) | asid);
 #else
     arm_write_tlbimva((va & 0xfffff000) | asid);
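
The header half of the change follows from the attribute half: once the
mappings are marked shareable, the TLB and branch-predictor maintenance
issued for them switches to the inner-shareable (*IS) forms under the
same condition, so the invalidations are broadcast within the inner
shareable domain instead of staying local to the issuing core. A
hypothetical usage sketch of the helpers shown above (caller name and
body invented for illustration; the invalidate-then-barrier pattern just
follows the helper names, not any specific lk call site):

    /*
     * Hypothetical caller: after rewriting the page-table entry for 'va'
     * in a given address-space id, drop the stale TLB entry and then run
     * the barrier helper. With WITH_SHAREABLE_CACHE (or WITH_SMP) set,
     * the *IS operations inside these helpers make the invalidation
     * visible to the other observers sharing the translation tables.
     */
    static void example_update_mapping(vaddr_t va, uint8_t asid)
    {
        /* ... write the new page-table entry for 'va' here ... */
        arm_invalidate_tlb_mva_asid_no_barrier(va, asid);
        arm_after_invalidate_tlb_barrier();
    }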