Revert recent JIT code cache changes

Reverting due to test flakiness observed on the build bots.

Revert "Jit Code Cache instruction pipeline flushing"
This reverts commit 56fe32eecd4f25237e66811fd766355a07908d22.

Revert "ARM64: More JIT Code Cache maintenace"
This reverts commit 17272ab679c9b5f5dac8754ac070b78b15271c27.

Revert "ARM64: JIT Code Cache maintenance"
This reverts commit 3ecac070ad55d433bbcbe11e21f4b44ab178effe.

Revert "Change flush order in JIT code cache"
This reverts commit 43ce5f82dae4dc5eebcf40e54b81ccd96eb5fba3.

Revert "Separate rw from rx views of jit code cache"
This reverts commit d1dbb74e5946fe6c6098a541012932e1e9dd3115.

Test: art/test.py --target --64
Bug: 64527643
Bug: 62356545
Change-Id: Ifc0ae042fd7950c1644ef439181775c38e41f0a3
(cherry picked from commit dbd05fe1a6ed2c3e23c9f6b372dd439ad59e777b)
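
Note on what this revert restores (a sketch under stated assumptions, not the
actual ART sources): the JIT code cache goes back to a single code mapping that
normally stays R+X and is flipped to R+W+X around JIT writes by
ScopedCodeCacheWrite, instead of the separate writable and executable views
introduced by the reverted changes. The illustrative class below (WriteScope is
a made-up name, not an ART identifier) shows that mprotect-toggling pattern in
isolation:

  // Sketch only; `begin` must be page-aligned, as mprotect requires.
  #include <sys/mman.h>
  #include <cstddef>
  #include <cstdio>
  #include <cstdlib>

  static constexpr int kRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
  static constexpr int kRX  = PROT_READ | PROT_EXEC;

  class WriteScope {
   public:
    WriteScope(void* begin, size_t size) : begin_(begin), size_(size) {
      // Open the code region for writing while JIT code is being emitted.
      if (mprotect(begin_, size_, kRWX) != 0) { perror("mprotect"); abort(); }
    }
    ~WriteScope() {
      // Drop write permission again before the new code can be executed.
      if (mprotect(begin_, size_, kRX) != 0) { perror("mprotect"); abort(); }
    }
   private:
    void* begin_;
    size_t size_;
  };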
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f4b67b2..b76a0df 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1210,14 +1210,14 @@
   uint8_t* stack_map_data = nullptr;
   uint8_t* method_info_data = nullptr;
   uint8_t* roots_data = nullptr;
-  code_cache->ReserveData(self,
-                          stack_map_size,
-                          method_info_size,
-                          number_of_roots,
-                          method,
-                          &stack_map_data,
-                          &method_info_data,
-                          &roots_data);
+  uint32_t data_size = code_cache->ReserveData(self,
+                                               stack_map_size,
+                                               method_info_size,
+                                               number_of_roots,
+                                               method,
+                                               &stack_map_data,
+                                               &method_info_data,
+                                               &roots_data);
   if (stack_map_data == nullptr || roots_data == nullptr) {
     return false;
   }
@@ -1238,6 +1238,7 @@
       codegen->GetFpuSpillMask(),
       code_allocator.GetMemory().data(),
       code_allocator.GetSize(),
+      data_size,
       osr,
       roots,
       codegen->GetGraph()->HasShouldDeoptimizeFlag(),
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 1154620..a186f4c 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -191,14 +191,10 @@
   VLOG(heap) << "Size " << GetMemMap()->Size();
   VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
   VLOG(heap) << "Capacity " << PrettySize(capacity);
-  // Remap the tail. Pass MAP_PRIVATE since we don't want to share the same ashmem as the zygote
-  // space.
+  // Remap the tail.
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(GetMemMap()->RemapAtEnd(End(),
-                                                          alloc_space_name,
-                                                          PROT_READ | PROT_WRITE,
-                                                          MAP_PRIVATE,
-                                                          &error_msg));
+  std::unique_ptr<MemMap> mem_map(GetMemMap()->RemapAtEnd(End(), alloc_space_name,
+                                                          PROT_READ | PROT_WRITE, &error_msg));
   CHECK(mem_map.get() != nullptr) << error_msg;
   void* allocator = CreateAllocator(End(), starting_size_, initial_size_, capacity,
                                     low_memory_mode);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index a030a51..7659a83 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -47,13 +47,9 @@
 static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
 static constexpr int kProtData = PROT_READ | PROT_WRITE;
 static constexpr int kProtCode = PROT_READ | PROT_EXEC;
-static constexpr int kProtReadOnly = PROT_READ;
-static constexpr int kProtNone = PROT_NONE;
 
 static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
 static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
-static constexpr size_t kMinMapSpacingPages = 1;
-static constexpr size_t kMaxMapSpacingPages = 128;
 
 #define CHECKED_MPROTECT(memory, size, prot)                \
   do {                                                      \
@@ -64,39 +60,12 @@
     }                                                       \
   } while (false)                                           \
 
-static MemMap* SplitMemMap(MemMap* existing_map,
-                           const char* name,
-                           size_t split_offset,
-                           int split_prot,
-                           std::string* error_msg,
-                           bool use_ashmem,
-                           unique_fd* shmem_fd = nullptr) {
-  std::string error_str;
-  uint8_t* divider = existing_map->Begin() + split_offset;
-  MemMap* new_map = existing_map->RemapAtEnd(divider,
-                                             name,
-                                             split_prot,
-                                             MAP_SHARED,
-                                             &error_str,
-                                             use_ashmem,
-                                             shmem_fd);
-  if (new_map == nullptr) {
-    std::ostringstream oss;
-    oss << "Failed to create spacing for " << name << ": "
-        << error_str << " offset=" << split_offset;
-    *error_msg = oss.str();
-    return nullptr;
-  }
-  return new_map;
-}
-
 JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
                                    size_t max_capacity,
                                    bool generate_debug_info,
                                    std::string* error_msg) {
   ScopedTrace trace(__PRETTY_FUNCTION__);
-  CHECK_GT(max_capacity, initial_capacity);
-  CHECK_GE(max_capacity - kMaxMapSpacingPages * kPageSize, initial_capacity);
+  CHECK_GE(max_capacity, initial_capacity);
 
   // Generating debug information is for using the Linux perf tool on
   // host which does not work with ashmem.
@@ -106,10 +75,6 @@
   // With 'perf', we want a 1-1 mapping between an address and a method.
   bool garbage_collect_code = !generate_debug_info;
 
-  // We only use two mappings (separating rw from rx) if we are able to use ashmem.
-  // See the above comment for debug information and not using ashmem.
-  bool use_two_mappings = use_ashmem;
-
   // We need to have 32 bit offsets from method headers in code cache which point to things
   // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
   // Ensure we're below 1 GB to be safe.
@@ -121,10 +86,6 @@
     return nullptr;
   }
 
-  // Align both capacities to page size, as that's the unit mspaces use.
-  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
-  max_capacity = RoundDown(max_capacity, 2 * kPageSize);
-
   std::string error_str;
   // Map name specific for android_os_Debug.cpp accounting.
   // Map in low 4gb to simplify accessing root tables for x86_64.
@@ -146,138 +107,35 @@
     return nullptr;
   }
 
-  // Create a region for JIT data and executable code. This will be
-  // laid out as:
-  //
-  //          +----------------+ --------------------
-  //          | code_sync_map_ | ^ code_sync_size   ^
-  //          |                | v                  |
-  //          +----------------+ --                 |
-  //          :                : ^                  |
-  //          :  post_code_map : | post_code_size   |
-  //          :   [padding]    : v                  |
-  //          +----------------+ -                  |
-  //          |                | ^                  |
-  //          |   code_map     | | code_size        | total_mapping_size
-  //          |   [JIT Code]   | v                  |
-  //          +----------------+ -                  |
-  //          :                : ^                  |
-  //          :  pre_code_map  : | pre_code_size    |
-  //          :   [padding]    : v                  |
-  //          +----------------+ -                  |
-  //          |                | ^                  |
-  //          |    data_map    | | data_size        |
-  //          |   [Jit Data]   | v                  v
-  //          +----------------+ --------------------
-  //
-  // The code_sync_map_ contains a page that we use flush CPU instruction
-  // pipelines (see FlushInstructionPipelines()).
-  //
-  // The padding regions - pre_code_map and post_code_map - exist to
-  // put some random distance between the writable JIT code mapping
-  // and the executable mapping. The padding is discarded at the end
-  // of this function.
-  //
-  size_t data_size = (max_capacity - kMaxMapSpacingPages * kPageSize) / 2;
-  size_t pre_code_size =
-      GetRandomNumber(kMinMapSpacingPages, kMaxMapSpacingPages - 1) * kPageSize;
-  size_t code_size = max_capacity - data_size - kMaxMapSpacingPages * kPageSize;
-  size_t code_sync_size = kPageSize;
-  size_t post_code_size = kMaxMapSpacingPages * kPageSize - pre_code_size - code_sync_size;
-  DCHECK_EQ(data_size, code_size);
-  DCHECK_EQ(pre_code_size + post_code_size + code_sync_size, kMaxMapSpacingPages * kPageSize);
-  DCHECK_EQ(data_size + pre_code_size + code_size + post_code_size + code_sync_size, max_capacity);
+  // Align both capacities to page size, as that's the unit mspaces use.
+  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
+  max_capacity = RoundDown(max_capacity, 2 * kPageSize);
 
-  // Create pre-code padding region after data region, discarded after
-  // code and data regions are set-up.
-  std::unique_ptr<MemMap> pre_code_map(SplitMemMap(data_map.get(),
-                                                   "jit-code-cache-padding",
-                                                   data_size,
-                                                   kProtNone,
-                                                   error_msg,
-                                                   use_ashmem));
-  if (pre_code_map == nullptr) {
-    return nullptr;
-  }
-  DCHECK_EQ(data_map->Size(), data_size);
-  DCHECK_EQ(pre_code_map->Size(), pre_code_size + code_size + post_code_size + code_sync_size);
+  // Data cache is 1 / 2 of the map.
+  // TODO: Make this variable?
+  size_t data_size = max_capacity / 2;
+  size_t code_size = max_capacity - data_size;
+  DCHECK_EQ(code_size + data_size, max_capacity);
+  uint8_t* divider = data_map->Begin() + data_size;
 
-  // Create code region.
-  unique_fd writable_code_fd;
-  std::unique_ptr<MemMap> code_map(SplitMemMap(pre_code_map.get(),
-                                               "jit-code-cache",
-                                               pre_code_size,
-                                               use_two_mappings ? kProtCode : kProtAll,
-                                               error_msg,
-                                               use_ashmem,
-                                               &writable_code_fd));
+  MemMap* code_map =
+      data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str, use_ashmem);
   if (code_map == nullptr) {
+    std::ostringstream oss;
+    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
+    *error_msg = oss.str();
     return nullptr;
   }
-  DCHECK_EQ(pre_code_map->Size(), pre_code_size);
-  DCHECK_EQ(code_map->Size(), code_size + post_code_size + code_sync_size);
-
-  // Padding after code region, discarded after code and data regions
-  // are set-up.
-  std::unique_ptr<MemMap> post_code_map(SplitMemMap(code_map.get(),
-                                                    "jit-code-cache-padding",
-                                                    code_size,
-                                                    kProtNone,
-                                                    error_msg,
-                                                    use_ashmem));
-  if (post_code_map == nullptr) {
-    return nullptr;
-  }
-  DCHECK_EQ(code_map->Size(), code_size);
-  DCHECK_EQ(post_code_map->Size(), post_code_size + code_sync_size);
-
-  std::unique_ptr<MemMap> code_sync_map(SplitMemMap(post_code_map.get(),
-                                                    "jit-code-sync",
-                                                    post_code_size,
-                                                    kProtCode,
-                                                    error_msg,
-                                                    use_ashmem));
-  if (code_sync_map == nullptr) {
-    return nullptr;
-  }
-  DCHECK_EQ(post_code_map->Size(), post_code_size);
-  DCHECK_EQ(code_sync_map->Size(), code_sync_size);
-
-  std::unique_ptr<MemMap> writable_code_map;
-  if (use_two_mappings) {
-    // Allocate the R/W view.
-    writable_code_map.reset(MemMap::MapFile(code_size,
-                                            kProtData,
-                                            MAP_SHARED,
-                                            writable_code_fd.get(),
-                                            /* start */ 0,
-                                            /* low_4gb */ true,
-                                            "jit-writable-code",
-                                            &error_str));
-    if (writable_code_map == nullptr) {
-      std::ostringstream oss;
-      oss << "Failed to create writable code cache: " << error_str << " size=" << code_size;
-      *error_msg = oss.str();
-      return nullptr;
-    }
-  }
+  DCHECK_EQ(code_map->Begin(), divider);
   data_size = initial_capacity / 2;
   code_size = initial_capacity - data_size;
   DCHECK_EQ(code_size + data_size, initial_capacity);
-  return new JitCodeCache(writable_code_map.release(),
-                          code_map.release(),
-                          data_map.release(),
-                          code_sync_map.release(),
-                          code_size,
-                          data_size,
-                          max_capacity,
-                          garbage_collect_code);
+  return new JitCodeCache(
+      code_map, data_map.release(), code_size, data_size, max_capacity, garbage_collect_code);
 }
 
-JitCodeCache::JitCodeCache(MemMap* writable_code_map,
-                           MemMap* executable_code_map,
+JitCodeCache::JitCodeCache(MemMap* code_map,
                            MemMap* data_map,
-                           MemMap* code_sync_map,
                            size_t initial_code_capacity,
                            size_t initial_data_capacity,
                            size_t max_capacity,
@@ -285,10 +143,8 @@
     : lock_("Jit code cache", kJitCodeCacheLock),
       lock_cond_("Jit code cache condition variable", lock_),
       collection_in_progress_(false),
+      code_map_(code_map),
       data_map_(data_map),
-      executable_code_map_(executable_code_map),
-      writable_code_map_(writable_code_map),
-      code_sync_map_(code_sync_map),
       max_capacity_(max_capacity),
       current_capacity_(initial_code_capacity + initial_data_capacity),
       code_end_(initial_code_capacity),
@@ -308,8 +164,7 @@
       inline_cache_cond_("Jit inline cache condition variable", lock_) {
 
   DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
-  MemMap* writable_map = GetWritableMemMap();
-  code_mspace_ = create_mspace_with_base(writable_map->Begin(), code_end_, false /*locked*/);
+  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
   data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);
 
   if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
@@ -318,10 +173,7 @@
 
   SetFootprintLimit(current_capacity_);
 
-  if (writable_code_map_ != nullptr) {
-    CHECKED_MPROTECT(writable_code_map_->Begin(), writable_code_map_->Size(), kProtReadOnly);
-  }
-  CHECKED_MPROTECT(executable_code_map_->Begin(), executable_code_map_->Size(), kProtCode);
+  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
   CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
 
   VLOG(jit) << "Created jit code cache: initial data size="
@@ -331,7 +183,7 @@
 }
 
 bool JitCodeCache::ContainsPc(const void* ptr) const {
-  return executable_code_map_->Begin() <= ptr && ptr < executable_code_map_->End();
+  return code_map_->Begin() <= ptr && ptr < code_map_->End();
 }
 
 bool JitCodeCache::ContainsMethod(ArtMethod* method) {
@@ -344,96 +196,27 @@
   return false;
 }
 
-/* This method is only for CHECK/DCHECK that pointers are within to a region. */
-static bool IsAddressInMap(const void* addr,
-                           const MemMap* mem_map,
-                           const char* check_name) {
-  if (addr == nullptr || mem_map->HasAddress(addr)) {
-    return true;
-  }
-  LOG(ERROR) << "Is" << check_name << "Address " << addr
-             << " not in [" << reinterpret_cast<void*>(mem_map->Begin())
-             << ", " << reinterpret_cast<void*>(mem_map->Begin() + mem_map->Size()) << ")";
-  return false;
-}
-
-bool JitCodeCache::IsDataAddress(const void* raw_addr) const {
-  return IsAddressInMap(raw_addr, data_map_.get(), "Data");
-}
-
-bool JitCodeCache::IsExecutableAddress(const void* raw_addr) const {
-  return IsAddressInMap(raw_addr, executable_code_map_.get(), "Executable");
-}
-
-bool JitCodeCache::IsWritableAddress(const void* raw_addr) const {
-  return IsAddressInMap(raw_addr, GetWritableMemMap(), "Writable");
-}
-
-// Convert one address within the source map to the same offset within the destination map.
-static void* ConvertAddress(const void* source_address,
-                            const MemMap* source_map,
-                            const MemMap* destination_map) {
-  DCHECK(source_map->HasAddress(source_address)) << source_address;
-  ptrdiff_t offset = reinterpret_cast<const uint8_t*>(source_address) - source_map->Begin();
-  uintptr_t address = reinterpret_cast<uintptr_t>(destination_map->Begin()) + offset;
-  return reinterpret_cast<void*>(address);
-}
-
-template <typename T>
-T* JitCodeCache::ToExecutableAddress(T* writable_address) const {
-  CHECK(IsWritableAddress(writable_address));
-  if (writable_address == nullptr) {
-    return nullptr;
-  }
-  void* executable_address = ConvertAddress(writable_address,
-                                            GetWritableMemMap(),
-                                            executable_code_map_.get());
-  CHECK(IsExecutableAddress(executable_address));
-  return reinterpret_cast<T*>(executable_address);
-}
-
-void* JitCodeCache::ToWritableAddress(const void* executable_address) const {
-  CHECK(IsExecutableAddress(executable_address));
-  if (executable_address == nullptr) {
-    return nullptr;
-  }
-  void* writable_address = ConvertAddress(executable_address,
-                                          executable_code_map_.get(),
-                                          GetWritableMemMap());
-  CHECK(IsWritableAddress(writable_address));
-  return writable_address;
-}
-
 class ScopedCodeCacheWrite : ScopedTrace {
  public:
-  explicit ScopedCodeCacheWrite(JitCodeCache* code_cache)
-      : ScopedTrace("ScopedCodeCacheWrite") {
+  explicit ScopedCodeCacheWrite(MemMap* code_map, bool only_for_tlb_shootdown = false)
+      : ScopedTrace("ScopedCodeCacheWrite"),
+        code_map_(code_map),
+        only_for_tlb_shootdown_(only_for_tlb_shootdown) {
     ScopedTrace trace("mprotect all");
-    int prot_to_start_writing = kProtAll;
-    if (code_cache->writable_code_map_ == nullptr) {
-      // If there is only one mapping, use the executable mapping and toggle between rwx and rx.
-      prot_to_start_writing = kProtAll;
-      prot_to_stop_writing_ = kProtCode;
-    } else {
-      // If there are two mappings, use the writable mapping and toggle between rw and r.
-      prot_to_start_writing = kProtData;
-      prot_to_stop_writing_ = kProtReadOnly;
-    }
-    writable_map_ = code_cache->GetWritableMemMap();
-    // If we're using ScopedCacheWrite only for TLB shootdown, we limit the scope of mprotect to
-    // one page.
-    size_ = writable_map_->Size();
-    CHECKED_MPROTECT(writable_map_->Begin(), size_, prot_to_start_writing);
+    CHECKED_MPROTECT(
+        code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtAll);
   }
   ~ScopedCodeCacheWrite() {
     ScopedTrace trace("mprotect code");
-    CHECKED_MPROTECT(writable_map_->Begin(), size_, prot_to_stop_writing_);
+    CHECKED_MPROTECT(
+        code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtCode);
   }
-
  private:
-  int prot_to_stop_writing_;
-  MemMap* writable_map_;
-  size_t size_;
+  MemMap* const code_map_;
+
+  // If we're using ScopedCacheWrite only for TLB shootdown, we limit the scope of mprotect to
+  // one page.
+  const bool only_for_tlb_shootdown_;
 
   DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
 };
@@ -448,6 +231,7 @@
                                   size_t fp_spill_mask,
                                   const uint8_t* code,
                                   size_t code_size,
+                                  size_t data_size,
                                   bool osr,
                                   Handle<mirror::ObjectArray<mirror::Object>> roots,
                                   bool has_should_deoptimize_flag,
@@ -462,6 +246,7 @@
                                        fp_spill_mask,
                                        code,
                                        code_size,
+                                       data_size,
                                        osr,
                                        roots,
                                        has_should_deoptimize_flag,
@@ -479,6 +264,7 @@
                                 fp_spill_mask,
                                 code,
                                 code_size,
+                                data_size,
                                 osr,
                                 roots,
                                 has_should_deoptimize_flag,
@@ -540,10 +326,8 @@
   }
 }
 
-uint8_t* JitCodeCache::GetRootTable(const void* code_ptr, uint32_t* number_of_roots) {
-  CHECK(IsExecutableAddress(code_ptr));
+static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-  // GetOptimizedCodeInfoPtr uses offsets relative to the EXECUTABLE address.
   uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
   uint32_t roots = GetNumberOfRoots(data);
   if (number_of_roots != nullptr) {
@@ -588,8 +372,6 @@
 void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
   MutexLock mu(Thread::Current(), lock_);
   for (const auto& entry : method_code_map_) {
-    // GetRootTable takes an EXECUTABLE address.
-    CHECK(IsExecutableAddress(entry.first));
     uint32_t number_of_roots = 0;
     uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
     GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
@@ -627,19 +409,17 @@
   }
 }
 
-void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
-  CHECK(IsExecutableAddress(code_ptr));
+void JitCodeCache::FreeCode(const void* code_ptr) {
+  uintptr_t allocation = FromCodeToAllocation(code_ptr);
   // Notify native debugger that we are about to remove the code.
   // It does nothing if we are not using native debugger.
   DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
-  // GetRootTable takes an EXECUTABLE address.
   FreeData(GetRootTable(code_ptr));
-  FreeRawCode(reinterpret_cast<uint8_t*>(FromCodeToAllocation(code_ptr)));
+  FreeCode(reinterpret_cast<uint8_t*>(allocation));
 }
 
 void JitCodeCache::FreeAllMethodHeaders(
     const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
-  // method_headers are expected to be in the executable region.
   {
     MutexLock mu(Thread::Current(), *Locks::cha_lock_);
     Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
@@ -651,9 +431,9 @@
   // so it's possible for the same method_header to start representing
   // different compile code.
   MutexLock mu(Thread::Current(), lock_);
-  ScopedCodeCacheWrite scc(this);
+  ScopedCodeCacheWrite scc(code_map_.get());
   for (const OatQuickMethodHeader* method_header : method_headers) {
-    FreeCodeAndData(method_header->GetCode());
+    FreeCode(method_header->GetCode());
   }
 }
 
@@ -670,10 +450,9 @@
     // with the classlinker_classes_lock_ held, and suspending ourselves could
     // lead to a deadlock.
     {
-      ScopedCodeCacheWrite scc(this);
+      ScopedCodeCacheWrite scc(code_map_.get());
       for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
         if (alloc.ContainsUnsafe(it->second)) {
-          CHECK(IsExecutableAddress(OatQuickMethodHeader::FromCodePointer(it->first)));
           method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
           it = method_code_map_.erase(it);
         } else {
@@ -765,129 +544,6 @@
   method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
 }
 
-static void FlushInstructionPiplines(uint8_t* sync_page) {
-  // After updating the JIT code cache we need to force all CPUs to
-  // flush their instruction pipelines. In the absence of system call
-  // to do this explicitly, we can achieve this indirectly by toggling
-  // permissions on an executable page. This should send an IPI to
-  // each core to update the TLB entry with the interrupt raised on
-  // each core causing the instruction pipeline to be flushed.
-  CHECKED_MPROTECT(sync_page, kPageSize, kProtAll);
-  // Ensure the sync_page is present otherwise a TLB update may not be
-  // necessary.
-  sync_page[0] = 0;
-  CHECKED_MPROTECT(sync_page, kPageSize, kProtCode);
-}
-
-#ifdef __aarch64__
-
-static void FlushJitCodeCacheRange(uint8_t* code_ptr,
-                                   uint8_t* writable_ptr,
-                                   size_t code_size) {
-  // Cache maintenance instructions can cause permission faults when a
-  // page is not present (e.g. swapped out or not backed). These
-  // faults should be handled by the kernel, but a bug in some Linux
-  // kernels may surface these permission faults to user-land which
-  // does not currently deal with them (b/63885946). To work around
-  // this, we read a value from each page to fault it in before
-  // attempting to perform cache maintenance operations.
-  //
-  // For reference, this behavior is caused by this commit:
-  // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
-
-  // The cache-line size could be probed for from the CPU, but
-  // assuming a safe lower bound is safe for CPUs that have different
-  // cache-line sizes for big and little cores.
-  static const uintptr_t kSafeCacheLineSize = 32;
-
-  // Ensure stores are present in L1 data cache.
-  __asm __volatile("dsb ish" ::: "memory");
-
-  volatile uint8_t mutant;
-
-  // Push dirty cache-lines out to the point of unification (PoU). The
-  // point of unification is the first point in the cache/memory
-  // hierarchy where the instruction cache and data cache have the
-  // same view of memory. The PoU is where an instruction fetch will
-  // fetch the new code generated by the JIT.
-  //
-  // See: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/ch11s04.html
-  uintptr_t writable_addr = RoundDown(reinterpret_cast<uintptr_t>(writable_ptr),
-                                      kSafeCacheLineSize);
-  uintptr_t writable_end  = RoundUp(reinterpret_cast<uintptr_t>(writable_ptr) + code_size,
-                                    kSafeCacheLineSize);
-  while (writable_addr < writable_end) {
-    // Read from the cache-line to minimize the chance that a cache
-    // maintenance instruction causes a fault (see kernel bug comment
-    // above).
-    mutant = *reinterpret_cast<const uint8_t*>(writable_addr);
-
-    // Flush cache-line
-    __asm volatile("dc cvau, %0" :: "r"(writable_addr) : "memory");
-    writable_addr += kSafeCacheLineSize;
-  }
-
-  __asm __volatile("dsb ish" ::: "memory");
-
-  uintptr_t code_addr = RoundDown(reinterpret_cast<uintptr_t>(code_ptr), kSafeCacheLineSize);
-  const uintptr_t code_end = RoundUp(reinterpret_cast<uintptr_t>(code_ptr) + code_size,
-                                     kSafeCacheLineSize);
-  while (code_addr < code_end) {
-    // Read from the cache-line to minimize the chance that a cache
-    // maintenance instruction causes a fault (see kernel bug comment
-    // above).
-    mutant = *reinterpret_cast<const uint8_t*>(code_addr);
-
-    // Invalidating the data cache line is only strictly necessary
-    // when the JIT code cache has two mappings (the default). We know
-    // this cache line is clean so this is just invalidating it (using
-    // "dc ivac" would be preferable, but counts as a write and this
-    // memory may not be mapped write permission).
-    __asm volatile("dc cvau, %0" :: "r"(code_addr) : "memory");
-
-    // Invalidate the instruction cache line to force instructions in
-    // range to be re-fetched following update.
-    __asm volatile("ic ivau, %0" :: "r"(code_addr) : "memory");
-
-    code_addr += kSafeCacheLineSize;
-  }
-
-  // Wait for code cache invalidations to complete.
-  __asm __volatile("dsb ish" ::: "memory");
-
-  // Reset fetched instruction stream.
-  __asm __volatile("isb");
-}
-
-#else  // __aarch64
-
-static void FlushJitCodeCacheRange(uint8_t* code_ptr,
-                                   uint8_t* writable_ptr,
-                                   size_t code_size) {
-  if (writable_ptr != code_ptr) {
-    // When there are two mappings of the JIT code cache, RX and
-    // RW, flush the RW version first as we've just dirtied the
-    // cache lines with new code. Flushing the RX version first
-    // can cause a permission fault as the those addresses are not
-    // writable, but can appear dirty in the cache. There is a lot
-    // of potential subtlety here depending on how the cache is
-    // indexed and tagged.
-    //
-    // Flushing the RX version after the RW version is just
-    // invalidating cachelines in the instruction cache. This is
-    // necessary as the instruction cache will often have a
-    // different set of cache lines present and because the JIT
-    // code cache can start a new function at any boundary within
-    // a cache-line.
-    FlushDataCache(reinterpret_cast<char*>(writable_ptr),
-                   reinterpret_cast<char*>(writable_ptr + code_size));
-  }
-  FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
-                        reinterpret_cast<char*>(code_ptr + code_size));
-}
-
-#endif  // __aarch64
-
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           ArtMethod* method,
                                           uint8_t* stack_map,
@@ -898,6 +554,7 @@
                                           size_t fp_spill_mask,
                                           const uint8_t* code,
                                           size_t code_size,
+                                          size_t data_size,
                                           bool osr,
                                           Handle<mirror::ObjectArray<mirror::Object>> roots,
                                           bool has_should_deoptimize_flag,
@@ -917,37 +574,35 @@
     MutexLock mu(self, lock_);
     WaitForPotentialCollectionToComplete(self);
     {
-      ScopedCodeCacheWrite scc(this);
+      ScopedCodeCacheWrite scc(code_map_.get());
       memory = AllocateCode(total_size);
       if (memory == nullptr) {
         return nullptr;
       }
-      uint8_t* writable_ptr = memory + header_size;
-      code_ptr = ToExecutableAddress(writable_ptr);
+      code_ptr = memory + header_size;
 
-      std::copy(code, code + code_size, writable_ptr);
-      OatQuickMethodHeader* writable_method_header =
-          OatQuickMethodHeader::FromCodePointer(writable_ptr);
-      // We need to be able to write the OatQuickMethodHeader, so we use writable_method_header.
-      // Otherwise, the offsets encoded in OatQuickMethodHeader are used relative to an executable
-      // address, so we use code_ptr.
-      new (writable_method_header) OatQuickMethodHeader(
+      std::copy(code, code + code_size, code_ptr);
+      method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+      new (method_header) OatQuickMethodHeader(
           code_ptr - stack_map,
           code_ptr - method_info,
           frame_size_in_bytes,
           core_spill_mask,
           fp_spill_mask,
           code_size);
-
-      FlushJitCodeCacheRange(code_ptr, writable_ptr, code_size);
-      FlushInstructionPiplines(code_sync_map_->Begin());
-
+      // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
+      // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
+      // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
+      // 6P) stop being supported or their kernels are fixed.
+      //
+      // For reference, this behavior is caused by this commit:
+      // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
+      FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
+                            reinterpret_cast<char*>(code_ptr + code_size));
       DCHECK(!Runtime::Current()->IsAotCompiler());
       if (has_should_deoptimize_flag) {
-        writable_method_header->SetHasShouldDeoptimizeFlag();
+        method_header->SetHasShouldDeoptimizeFlag();
       }
-      // All the pointers exported from the cache are executable addresses.
-      method_header = ToExecutableAddress(writable_method_header);
     }
 
     number_of_compilations_++;
@@ -986,14 +641,16 @@
     // but below we still make the compiled code valid for the method.
     MutexLock mu(self, lock_);
     // Fill the root table before updating the entry point.
-    CHECK(IsDataAddress(roots_data));
     DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
     DCHECK_LE(roots_data, stack_map);
     FillRootTable(roots_data, roots);
-
-    // Ensure the updates to the root table are visible with a store fence.
-    QuasiAtomic::ThreadFenceSequentiallyConsistent();
-
+    {
+      // Flush data cache, as compiled code references literals in it.
+      // We also need a TLB shootdown to act as memory barrier across cores.
+      ScopedCodeCacheWrite ccw(code_map_.get(), /* only_for_tlb_shootdown */ true);
+      FlushDataCache(reinterpret_cast<char*>(roots_data),
+                     reinterpret_cast<char*>(roots_data + data_size));
+    }
     method_code_map_.Put(code_ptr, method);
     if (osr) {
       number_of_osr_compilations_++;
@@ -1041,11 +698,11 @@
 
   bool in_cache = false;
   {
-    ScopedCodeCacheWrite ccw(this);
+    ScopedCodeCacheWrite ccw(code_map_.get());
     for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
       if (code_iter->second == method) {
         if (release_memory) {
-          FreeCodeAndData(code_iter->first);
+          FreeCode(code_iter->first);
         }
         code_iter = method_code_map_.erase(code_iter);
         in_cache = true;
@@ -1099,10 +756,10 @@
     profiling_infos_.erase(profile);
   }
   method->SetProfilingInfo(nullptr);
-  ScopedCodeCacheWrite ccw(this);
+  ScopedCodeCacheWrite ccw(code_map_.get());
   for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
     if (code_iter->second == method) {
-      FreeCodeAndData(code_iter->first);
+      FreeCode(code_iter->first);
       code_iter = method_code_map_.erase(code_iter);
       continue;
     }
@@ -1168,7 +825,6 @@
                              uint8_t* stack_map_data,
                              uint8_t* roots_data) {
   DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
-  CHECK(IsDataAddress(roots_data));
   MutexLock mu(self, lock_);
   FreeData(reinterpret_cast<uint8_t*>(roots_data));
 }
@@ -1290,11 +946,11 @@
 
 void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
   size_t per_space_footprint = new_footprint / 2;
-  CHECK(IsAlignedParam(per_space_footprint, kPageSize));
+  DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
   DCHECK_EQ(per_space_footprint * 2, new_footprint);
   mspace_set_footprint_limit(data_mspace_, per_space_footprint);
   {
-    ScopedCodeCacheWrite scc(this);
+    ScopedCodeCacheWrite scc(code_map_.get());
     mspace_set_footprint_limit(code_mspace_, per_space_footprint);
   }
 }
@@ -1372,8 +1028,8 @@
       number_of_collections_++;
       live_bitmap_.reset(CodeCacheBitmap::Create(
           "code-cache-bitmap",
-          reinterpret_cast<uintptr_t>(executable_code_map_->Begin()),
-          reinterpret_cast<uintptr_t>(executable_code_map_->Begin() + current_capacity_ / 2)));
+          reinterpret_cast<uintptr_t>(code_map_->Begin()),
+          reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
       collection_in_progress_ = true;
     }
   }
@@ -1449,16 +1105,14 @@
   std::unordered_set<OatQuickMethodHeader*> method_headers;
   {
     MutexLock mu(self, lock_);
-    ScopedCodeCacheWrite scc(this);
+    ScopedCodeCacheWrite scc(code_map_.get());
     // Iterate over all compiled code and remove entries that are not marked.
     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
       const void* code_ptr = it->first;
-      CHECK(IsExecutableAddress(code_ptr));
       uintptr_t allocation = FromCodeToAllocation(code_ptr);
       if (GetLiveBitmap()->Test(allocation)) {
         ++it;
       } else {
-        CHECK(IsExecutableAddress(it->first));
         method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
         it = method_code_map_.erase(it);
       }
@@ -1501,7 +1155,6 @@
     for (const auto& it : method_code_map_) {
       ArtMethod* method = it.second;
       const void* code_ptr = it.first;
-      CHECK(IsExecutableAddress(code_ptr));
       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
         GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
@@ -1527,7 +1180,6 @@
     // Free all profiling infos of methods not compiled nor being compiled.
     auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
       [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
-        CHECK(IsDataAddress(info));
         const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
         // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
         // that the compiled code would not get revived. As mutator threads run concurrently,
@@ -1588,7 +1240,6 @@
   --it;
 
   const void* code_ptr = it->first;
-  CHECK(IsExecutableAddress(code_ptr));
   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
   if (!method_header->Contains(pc)) {
     return nullptr;
@@ -1671,7 +1322,6 @@
   // store in the ArtMethod's ProfilingInfo pointer.
   QuasiAtomic::ThreadFenceRelease();
 
-  CHECK(IsDataAddress(info));
   method->SetProfilingInfo(info);
   profiling_infos_.push_back(info);
   histogram_profiling_info_memory_use_.AddValue(profile_info_size);
@@ -1684,8 +1334,7 @@
   if (code_mspace_ == mspace) {
     size_t result = code_end_;
     code_end_ += increment;
-    MemMap* writable_map = GetWritableMemMap();
-    return reinterpret_cast<void*>(result + writable_map->Begin());
+    return reinterpret_cast<void*>(result + code_map_->Begin());
   } else {
     DCHECK_EQ(data_mspace_, mspace);
     size_t result = data_end_;
@@ -1837,7 +1486,6 @@
 
 size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
   MutexLock mu(Thread::Current(), lock_);
-  CHECK(IsExecutableAddress(ptr));
   return mspace_usable_size(reinterpret_cast<const void*>(FromCodeToAllocation(ptr)));
 }
 
@@ -1873,27 +1521,22 @@
   size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
   // Ensure the header ends up at expected instruction alignment.
   DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
-  CHECK(IsWritableAddress(result));
   used_memory_for_code_ += mspace_usable_size(result);
   return result;
 }
 
-void JitCodeCache::FreeRawCode(void* code) {
-  CHECK(IsExecutableAddress(code));
-  void* writable_code = ToWritableAddress(code);
-  used_memory_for_code_ -= mspace_usable_size(writable_code);
-  mspace_free(code_mspace_, writable_code);
+void JitCodeCache::FreeCode(uint8_t* code) {
+  used_memory_for_code_ -= mspace_usable_size(code);
+  mspace_free(code_mspace_, code);
 }
 
 uint8_t* JitCodeCache::AllocateData(size_t data_size) {
   void* result = mspace_malloc(data_mspace_, data_size);
-  CHECK(IsDataAddress(reinterpret_cast<uint8_t*>(result)));
   used_memory_for_data_ += mspace_usable_size(result);
   return reinterpret_cast<uint8_t*>(result);
 }
 
 void JitCodeCache::FreeData(uint8_t* data) {
-  CHECK(IsDataAddress(data));
   used_memory_for_data_ -= mspace_usable_size(data);
   mspace_free(data_mspace_, data);
 }
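
Note: the CommitCodeInternal hunk above restores the "flush while still
writable, then drop write permission" ordering that works around the Qualcomm
kernel cache-maintenance bug referenced in the comment. A minimal sketch of
that ordering, with __builtin___clear_cache standing in for ART's
FlushInstructionCache helper and emit_code as a hypothetical callback:

  #include <sys/mman.h>
  #include <cstddef>
  #include <cstdint>
  #include <cstdio>
  #include <cstdlib>

  void CommitJitCode(uint8_t* region, size_t region_size,
                     uint8_t* code_ptr, size_t code_size,
                     void (*emit_code)(uint8_t* dst)) {
    // Make the code region writable for the duration of the copy.
    if (mprotect(region, region_size, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
      perror("mprotect"); abort();
    }
    emit_code(code_ptr);  // Copy/patch the freshly compiled code into place.
    // Flush while the pages are still writable, then drop write permission.
    __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                            reinterpret_cast<char*>(code_ptr + code_size));
    if (mprotect(region, region_size, PROT_READ | PROT_EXEC) != 0) {
      perror("mprotect"); abort();
    }
  }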
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 175501f..daa1d61 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -113,6 +113,7 @@
                       size_t fp_spill_mask,
                       const uint8_t* code,
                       size_t code_size,
+                      size_t data_size,
                       bool osr,
                       Handle<mirror::ObjectArray<mirror::Object>> roots,
                       bool has_should_deoptimize_flag,
@@ -228,8 +229,6 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr);
-
   // The GC needs to disallow the reading of inline caches when it processes them,
   // to avoid having a class being used while it is being deleted.
   void AllowInlineCacheAccess() REQUIRES(!lock_);
@@ -248,13 +247,9 @@
   }
 
  private:
-  friend class ScopedCodeCacheWrite;
-
   // Take ownership of maps.
   JitCodeCache(MemMap* code_map,
                MemMap* data_map,
-               MemMap* writable_code_map,
-               MemMap* code_sync_map,
                size_t initial_code_capacity,
                size_t initial_data_capacity,
                size_t max_capacity,
@@ -272,6 +267,7 @@
                               size_t fp_spill_mask,
                               const uint8_t* code,
                               size_t code_size,
+                              size_t data_size,
                               bool osr,
                               Handle<mirror::ObjectArray<mirror::Object>> roots,
                               bool has_should_deoptimize_flag,
@@ -296,7 +292,7 @@
       REQUIRES(!Locks::cha_lock_);
 
   // Free in the mspace allocations for `code_ptr`.
-  void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);
+  void FreeCode(const void* code_ptr) REQUIRES(lock_);
 
   // Number of bytes allocated in the code cache.
   size_t CodeCacheSizeLocked() REQUIRES(lock_);
@@ -329,7 +325,7 @@
   bool CheckLiveCompiledCodeHasProfilingInfo()
       REQUIRES(lock_);
 
-  void FreeRawCode(void* code) REQUIRES(lock_);
+  void FreeCode(uint8_t* code) REQUIRES(lock_);
   uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
   void FreeData(uint8_t* data) REQUIRES(lock_);
   uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
@@ -339,61 +335,25 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  MemMap* GetWritableMemMap() const {
-    if (writable_code_map_ == nullptr) {
-      // The system required us to map the JIT Code Cache RWX (see
-      // JitCodeCache::Create()).
-      return executable_code_map_.get();
-    } else {
-      // Executable code is mapped RX, and writable code is mapped RW
-      // to the underlying same memory, but at a different address.
-      return writable_code_map_.get();
-    }
-  }
-
-  bool IsDataAddress(const void* raw_addr) const;
-
-  bool IsExecutableAddress(const void* raw_addr) const;
-
-  bool IsWritableAddress(const void* raw_addr) const;
-
-  template <typename T>
-  T* ToExecutableAddress(T* writable_address) const;
-
-  void* ToWritableAddress(const void* executable_address) const;
-
   // Lock for guarding allocations, collections, and the method_code_map_.
   Mutex lock_;
   // Condition to wait on during collection.
   ConditionVariable lock_cond_ GUARDED_BY(lock_);
   // Whether there is a code cache collection in progress.
   bool collection_in_progress_ GUARDED_BY(lock_);
-  // JITting methods obviously requires both write and execute permissions on a region of memory.
-  // In tye typical (non-debugging) case, we separate the memory mapped view that can write the code
-  // from a view that the runtime uses to execute the code. Having these two views eliminates any
-  // single address region having rwx permissions.  An attacker could still write the writable
-  // address and then execute the executable address. We allocate the mappings with a random
-  // address relationship to each other which makes the attacker need two addresses rather than
-  // just one.  In the debugging case there is no file descriptor to back the
-  // shared memory, and hence we have to use a single mapping.
+  // Mem map which holds code.
+  std::unique_ptr<MemMap> code_map_;
   // Mem map which holds data (stack maps and profiling info).
   std::unique_ptr<MemMap> data_map_;
-  // Mem map which holds a non-writable view of code for JIT.
-  std::unique_ptr<MemMap> executable_code_map_;
-  // Mem map which holds a non-executable view of code for JIT.
-  std::unique_ptr<MemMap> writable_code_map_;
-  // Mem map which holds one executable page that we use for flushing instruction
-  // fetch buffers. The code on this page is never executed.
-  std::unique_ptr<MemMap> code_sync_map_;
   // The opaque mspace for allocating code.
   void* code_mspace_ GUARDED_BY(lock_);
   // The opaque mspace for allocating data.
   void* data_mspace_ GUARDED_BY(lock_);
   // Bitmap for collecting code and data.
   std::unique_ptr<CodeCacheBitmap> live_bitmap_;
-  // Holds non-writable compiled code associated to the ArtMethod.
+  // Holds compiled code associated to the ArtMethod.
   SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
-  // Holds non-writable osr compiled code associated to the ArtMethod.
+  // Holds osr compiled code associated to the ArtMethod.
   SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
   // ProfilingInfo objects we have allocated.
   std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 1ef72ba..7b41608 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -497,7 +497,7 @@
     MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
     int result = munmap(base_begin_, base_size_);
     if (result == -1) {
-      PLOG(FATAL) << "munmap failed: " << BaseBegin() << "..." << BaseEnd();
+      PLOG(FATAL) << "munmap failed";
     }
   }
 
@@ -536,13 +536,8 @@
   }
 }
 
-MemMap* MemMap::RemapAtEnd(uint8_t* new_end,
-                           const char* tail_name,
-                           int tail_prot,
-                           int sharing_flags,
-                           std::string* error_msg,
-                           bool use_ashmem,
-                           unique_fd* shmem_fd) {
+MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
+                           std::string* error_msg, bool use_ashmem) {
   use_ashmem = use_ashmem && !kIsTargetLinux;
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
@@ -561,12 +556,6 @@
   size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
   base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
   DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
-  if (base_size_ == 0u) {
-    // All pages in this MemMap have been handed out. Invalidate base
-    // pointer to prevent the destructor calling munmap() on
-    // zero-length region (which can't succeed).
-    base_begin_ = nullptr;
-  }
   size_t tail_size = old_end - new_end;
   uint8_t* tail_base_begin = new_base_end;
   size_t tail_base_size = old_base_end - new_base_end;
@@ -574,14 +563,14 @@
   DCHECK_ALIGNED(tail_base_size, kPageSize);
 
   unique_fd fd;
-  int flags = MAP_ANONYMOUS | sharing_flags;
+  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
   if (use_ashmem) {
     // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
     // prefixed "dalvik-".
     std::string debug_friendly_name("dalvik-");
     debug_friendly_name += tail_name;
     fd.reset(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
-    flags = MAP_FIXED | sharing_flags;
+    flags = MAP_PRIVATE | MAP_FIXED;
     if (fd.get() == -1) {
       *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                 tail_name, strerror(errno));
@@ -615,9 +604,6 @@
                               fd.get());
     return nullptr;
   }
-  if (shmem_fd != nullptr) {
-    shmem_fd->reset(fd.release());
-  }
   return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
 }
 
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index d8908ad..5603963 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -25,7 +25,6 @@
 #include <string>
 
 #include "android-base/thread_annotations.h"
-#include "android-base/unique_fd.h"
 
 namespace art {
 
@@ -38,8 +37,6 @@
 #define USE_ART_LOW_4G_ALLOCATOR 0
 #endif
 
-using android::base::unique_fd;
-
 #ifdef __linux__
 static constexpr bool kMadviseZeroes = true;
 #else
@@ -171,14 +168,11 @@
   }
 
   // Unmap the pages at end and remap them to create another memory map.
-  // sharing_flags should be either MAP_PRIVATE or MAP_SHARED.
   MemMap* RemapAtEnd(uint8_t* new_end,
                      const char* tail_name,
                      int tail_prot,
-                     int sharing_flags,
                      std::string* error_msg,
-                     bool use_ashmem = true,
-                     unique_fd* shmem_fd = nullptr);
+                     bool use_ashmem = true);
 
   static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
       REQUIRES(!MemMap::mem_maps_lock_);
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 8d6bb38..5f027b1 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -74,7 +74,6 @@
     MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
                                 "MemMapTest_RemapAtEndTest_map1",
                                 PROT_READ | PROT_WRITE,
-                                MAP_PRIVATE,
                                 &error_msg);
     // Check the states of the two maps.
     EXPECT_EQ(m0->Begin(), base0) << error_msg;
@@ -457,7 +456,6 @@
   std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
                                             "MemMapTest_AlignByTest_map1",
                                             PROT_READ | PROT_WRITE,
-                                            MAP_PRIVATE,
                                             &error_msg));
   uint8_t* base1 = m1->Begin();
   ASSERT_TRUE(base1 != nullptr) << error_msg;
@@ -467,7 +465,6 @@
   std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
                                             "MemMapTest_AlignByTest_map2",
                                             PROT_READ | PROT_WRITE,
-                                            MAP_PRIVATE,
                                             &error_msg));
   uint8_t* base2 = m2->Begin();
   ASSERT_TRUE(base2 != nullptr) << error_msg;
@@ -477,7 +474,6 @@
   std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
                                             "MemMapTest_AlignByTest_map1",
                                             PROT_READ | PROT_WRITE,
-                                            MAP_PRIVATE,
                                             &error_msg));
   uint8_t* base3 = m3->Begin();
   ASSERT_TRUE(base3 != nullptr) << error_msg;
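
Usage note for the restored MemMap::RemapAtEnd signature (sharing_flags and
shmem_fd are gone; the tail is always mapped MAP_PRIVATE internally), mirroring
the tests above. In this sketch, `map` is assumed to be a valid MemMap* covering
at least one page beyond Begin(); error handling beyond the CHECK is elided:

  std::string error_msg;
  std::unique_ptr<MemMap> tail(map->RemapAtEnd(map->Begin() + kPageSize,
                                               "example-tail",
                                               PROT_READ | PROT_WRITE,
                                               &error_msg));
  CHECK(tail.get() != nullptr) << error_msg;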