Revert^2 "JIT: Separate code allocation and initialization."

This reverts commit 63b0c26aae3e7237166dd781eb7a15fbc7c091c2.
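
The relanded change splits the JIT code cache interface into three
explicit steps: Reserve() allocates uninitialized code and data regions
up front (retrying once after a cache GC on failure), Commit() copies
the compiled code, root table and stack map into the reserved memory
and publishes the entry point, and Free() releases the reservation when
compilation is abandoned. CommitCode()/CommitCodeInternal() and
ReserveData()/ClearData() are removed. JIT debug info is now generated
by the compiler before Commit() rather than via a callback inside the
code cache, so GenerateJitDebugInfo() takes jit_lock_ itself instead of
asserting that it is held. On Commit() failure the caller frees the
reservation; Commit() no longer frees the code internally.

A minimal sketch of the new calling sequence (simplified from the
optimizing compiler changes below; `code`, `stack_map`, `roots`,
`number_of_roots`, `osr`, `has_should_deoptimize_flag` and `cha_list`
stand in for the compiler outputs and flags):

  ArrayRef<const uint8_t> reserved_code;
  ArrayRef<const uint8_t> reserved_data;
  if (!code_cache->Reserve(self, region, code.size(), stack_map.size(),
                           number_of_roots, method,
                           /*out*/ &reserved_code, /*out*/ &reserved_data)) {
    return false;  // Out of memory, even after the GC retry inside Reserve().
  }
  // The OatQuickMethodHeader occupies the start of the reserved code region.
  const uint8_t* code_ptr =
      reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
  // Generate debug info for `code_ptr` here, before Commit() publishes
  // the entry point.
  if (!code_cache->Commit(self, region, method, reserved_code, code,
                          reserved_data, roots, stack_map, osr,
                          has_should_deoptimize_flag, cha_list)) {
    code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
    return false;
  }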

Reason for revert: Reland after bug fix.
Test: ./art/test.py -b -r --host --all-gc -t 708
Change-Id: Ic13e2799bf4bdd8ca468f72cc0f3b72f224f2b08

diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d30d681..a45f502 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1288,17 +1288,24 @@
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
     ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
                                                              jni_compiled_method);
-    const uint8_t* roots_data = code_cache->ReserveData(
-        self, region, stack_map.size(), /* number_of_roots= */ 0, method);
-    if (roots_data == nullptr) {
+
+    ArrayRef<const uint8_t> reserved_code;
+    ArrayRef<const uint8_t> reserved_data;
+    if (!code_cache->Reserve(self,
+                             region,
+                             jni_compiled_method.GetCode().size(),
+                             stack_map.size(),
+                             /* number_of_roots= */ 0,
+                             method,
+                             /*out*/ &reserved_code,
+                             /*out*/ &reserved_data)) {
       MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
       return false;
     }
+    const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
 
     // Add debug info after we know the code location but before we update entry-point.
-    const std::function<void(const void*)> generate_debug_info = [&](const void* code) {
-      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code);
-      const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
+    if (compiler_options.GenerateAnyDebugInfo()) {
       debug::MethodDebugInfo info = {};
       info.custom_name = "art_jni_trampoline";
       info.dex_file = dex_file;
@@ -1311,30 +1318,26 @@
       info.is_native_debuggable = compiler_options.GetNativeDebuggable();
       info.is_optimized = true;
       info.is_code_address_text_relative = false;
-      info.code_address = code_address;
+      info.code_address = reinterpret_cast<uintptr_t>(code);
       info.code_size = jni_compiled_method.GetCode().size();
-      info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
+      info.frame_size_in_bytes = jni_compiled_method.GetFrameSize();
       info.code_info = nullptr;
       info.cfi = jni_compiled_method.GetCfi();
       GenerateJitDebugInfo(info);
-    };
+    }
 
-    const void* code = code_cache->CommitCode(
-        self,
-        region,
-        method,
-        jni_compiled_method.GetCode().data(),
-        jni_compiled_method.GetCode().size(),
-        stack_map.data(),
-        stack_map.size(),
-        roots_data,
-        roots,
-        osr,
-        /* has_should_deoptimize_flag= */ false,
-        cha_single_implementation_list,
-        generate_debug_info);
-    if (code == nullptr) {
-      code_cache->ClearData(self, region, roots_data);
+    if (!code_cache->Commit(self,
+                            region,
+                            method,
+                            reserved_code,
+                            jni_compiled_method.GetCode(),
+                            reserved_data,
+                            roots,
+                            ArrayRef<const uint8_t>(stack_map),
+                            osr,
+                            /* has_should_deoptimize_flag= */ false,
+                            cha_single_implementation_list)) {
+      code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
       return false;
     }
 
@@ -1382,13 +1385,23 @@
   }
 
   ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
-  size_t number_of_roots = codegen->GetNumberOfJitRoots();
-  const uint8_t* roots_data = code_cache->ReserveData(
-      self, region, stack_map.size(), number_of_roots, method);
-  if (roots_data == nullptr) {
+
+  ArrayRef<const uint8_t> reserved_code;
+  ArrayRef<const uint8_t> reserved_data;
+  if (!code_cache->Reserve(self,
+                           region,
+                           code_allocator.GetMemory().size(),
+                           stack_map.size(),
+                           /*number_of_roots=*/codegen->GetNumberOfJitRoots(),
+                           method,
+                           /*out*/ &reserved_code,
+                           /*out*/ &reserved_data)) {
     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
     return false;
   }
+  const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
+  const uint8_t* roots_data = reserved_data.data();
+
   std::vector<Handle<mirror::Object>> roots;
   codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
   // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
@@ -1399,10 +1412,8 @@
                      }));
 
   // Add debug info after we know the code location but before we update entry-point.
-  const std::function<void(const void*)> generate_debug_info = [&](const void* code) {
-    const CompilerOptions& compiler_options = GetCompilerOptions();
-    const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code);
-    const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
+  const CompilerOptions& compiler_options = GetCompilerOptions();
+  if (compiler_options.GenerateAnyDebugInfo()) {
     debug::MethodDebugInfo info = {};
     DCHECK(info.custom_name.empty());
     info.dex_file = dex_file;
@@ -1415,32 +1426,26 @@
     info.is_native_debuggable = compiler_options.GetNativeDebuggable();
     info.is_optimized = true;
     info.is_code_address_text_relative = false;
-    info.code_address = code_address;
+    info.code_address = reinterpret_cast<uintptr_t>(code);
     info.code_size = code_allocator.GetMemory().size();
-    info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
-    info.code_info = stack_map.size() == 0 ? nullptr : method_header->GetOptimizedCodeInfoPtr();
+    info.frame_size_in_bytes = codegen->GetFrameSize();
+    info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
     info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
     GenerateJitDebugInfo(info);
-  };
+  }
 
-  const void* code = code_cache->CommitCode(
-      self,
-      region,
-      method,
-      code_allocator.GetMemory().data(),
-      code_allocator.GetMemory().size(),
-      stack_map.data(),
-      stack_map.size(),
-      roots_data,
-      roots,
-      osr,
-      codegen->GetGraph()->HasShouldDeoptimizeFlag(),
-      codegen->GetGraph()->GetCHASingleImplementationList(),
-      generate_debug_info);
-
-  if (code == nullptr) {
-    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
-    code_cache->ClearData(self, region, roots_data);
+  if (!code_cache->Commit(self,
+                          region,
+                          method,
+                          reserved_code,
+                          code_allocator.GetMemory(),
+                          reserved_data,
+                          roots,
+                          ArrayRef<const uint8_t>(stack_map),
+                          osr,
+                          codegen->GetGraph()->HasShouldDeoptimizeFlag(),
+                          codegen->GetGraph()->GetCHASingleImplementationList())) {
+    code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
     return false;
   }
 
@@ -1477,7 +1482,7 @@
     std::vector<uint8_t> elf = debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
 
     // NB: Don't allow packing of full info since it would remove non-backtrace data.
-    Locks::jit_lock_->AssertHeld(Thread::Current());
+    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
     const void* code_ptr = reinterpret_cast<const void*>(info.code_address);
     AddNativeDebugInfoForJit(code_ptr, elf, /*allow_packing=*/ mini_debug_info);
   }
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 6ab811b..ff23385 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -323,53 +323,6 @@
   return nullptr;
 }
 
-uint8_t* JitCodeCache::CommitCode(Thread* self,
-                                  JitMemoryRegion* region,
-                                  ArtMethod* method,
-                                  const uint8_t* code,
-                                  size_t code_size,
-                                  const uint8_t* stack_map,
-                                  size_t stack_map_size,
-                                  const uint8_t* roots_data,
-                                  const std::vector<Handle<mirror::Object>>& roots,
-                                  bool osr,
-                                  bool has_should_deoptimize_flag,
-                                  const ArenaSet<ArtMethod*>& cha_single_implementation_list,
-                                  const std::function<void(const uint8_t* code)>&
-                                      generate_debug_info) {
-  uint8_t* result = CommitCodeInternal(self,
-                                       region,
-                                       method,
-                                       code,
-                                       code_size,
-                                       stack_map,
-                                       stack_map_size,
-                                       roots_data,
-                                       roots,
-                                       osr,
-                                       has_should_deoptimize_flag,
-                                       cha_single_implementation_list,
-                                       generate_debug_info);
-  if (result == nullptr) {
-    // Retry.
-    GarbageCollectCache(self);
-    result = CommitCodeInternal(self,
-                                region,
-                                method,
-                                code,
-                                code_size,
-                                stack_map,
-                                stack_map_size,
-                                roots_data,
-                                roots,
-                                osr,
-                                has_should_deoptimize_flag,
-                                cha_single_implementation_list,
-                                generate_debug_info);
-  }
-  return result;
-}
-
 bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
   bool in_collection = false;
   while (collection_in_progress_) {
@@ -672,21 +625,17 @@
   }
 }
 
-uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
-                                          JitMemoryRegion* region,
-                                          ArtMethod* method,
-                                          const uint8_t* code,
-                                          size_t code_size,
-                                          const uint8_t* stack_map,
-                                          size_t stack_map_size,
-                                          const uint8_t* roots_data,
-                                          const std::vector<Handle<mirror::Object>>& roots,
-                                          bool osr,
-                                          bool has_should_deoptimize_flag,
-                                          const ArenaSet<ArtMethod*>&
-                                              cha_single_implementation_list,
-                                          const std::function<void(const uint8_t* code)>&
-                                              generate_debug_info) {
+bool JitCodeCache::Commit(Thread* self,
+                          JitMemoryRegion* region,
+                          ArtMethod* method,
+                          ArrayRef<const uint8_t> reserved_code,
+                          ArrayRef<const uint8_t> code,
+                          ArrayRef<const uint8_t> reserved_data,
+                          const std::vector<Handle<mirror::Object>>& roots,
+                          ArrayRef<const uint8_t> stack_map,
+                          bool osr,
+                          bool has_should_deoptimize_flag,
+                          const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
   DCHECK(!method->IsNative() || !osr);
 
   if (!method->IsNative()) {
@@ -695,6 +644,7 @@
     DCheckRootsAreValid(roots, IsSharedRegion(*region));
   }
 
+  const uint8_t* roots_data = reserved_data.data();
   size_t root_table_size = ComputeRootTableSize(roots.size());
   const uint8_t* stack_map_data = roots_data + root_table_size;
 
@@ -702,26 +652,20 @@
   // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
   // finish.
   WaitForPotentialCollectionToCompleteRunnable(self);
-  const uint8_t* code_ptr = region->AllocateCode(
-      code, code_size, stack_map_data, has_should_deoptimize_flag);
+  const uint8_t* code_ptr = region->CommitCode(
+      reserved_code, code, stack_map_data, has_should_deoptimize_flag);
   if (code_ptr == nullptr) {
-    return nullptr;
+    return false;
   }
   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
 
   // Commit roots and stack maps before updating the entry point.
-  if (!region->CommitData(roots_data, roots, stack_map, stack_map_size)) {
-    ScopedCodeCacheWrite ccw(*region);
-    uintptr_t allocation = FromCodeToAllocation(code_ptr);
-    region->FreeCode(reinterpret_cast<uint8_t*>(allocation));
-    return nullptr;
+  if (!region->CommitData(reserved_data, roots, stack_map)) {
+    return false;
   }
 
   number_of_compilations_++;
 
-  // Add debug info after we know the code location but before we update entry-point.
-  generate_debug_info(code_ptr);
-
   // We need to update the entry point in the runnable state for the instrumentation.
   {
     // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
@@ -743,10 +687,7 @@
     // Discard the code if any single-implementation assumptions are now invalid.
     if (UNLIKELY(!single_impl_still_valid)) {
       VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
-      ScopedCodeCacheWrite ccw(*region);
-      uintptr_t allocation = FromCodeToAllocation(code_ptr);
-      region->FreeCode(reinterpret_cast<uint8_t*>(allocation));
-      return nullptr;
+      return false;
     }
     DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
         << "Should not be using cha on debuggable apps/runs!";
@@ -805,16 +746,9 @@
         << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
         << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
                                          method_header->GetCodeSize());
-    histogram_code_memory_use_.AddValue(code_size);
-    if (code_size > kCodeSizeLogThreshold) {
-      LOG(INFO) << "JIT allocated "
-                << PrettySize(code_size)
-                << " for compiled code of "
-                << ArtMethod::PrettyMethod(method);
-    }
   }
 
-  return reinterpret_cast<uint8_t*>(method_header);
+  return true;
 }
 
 size_t JitCodeCache::CodeCacheSize() {
@@ -966,38 +900,73 @@
   return GetCurrentRegion()->GetUsedMemoryForData();
 }
 
-void JitCodeCache::ClearData(Thread* self,
-                             JitMemoryRegion* region,
-                             const uint8_t* roots_data) {
-  MutexLock mu(self, *Locks::jit_lock_);
-  region->FreeData(roots_data);
-}
+bool JitCodeCache::Reserve(Thread* self,
+                           JitMemoryRegion* region,
+                           size_t code_size,
+                           size_t stack_map_size,
+                           size_t number_of_roots,
+                           ArtMethod* method,
+                           /*out*/ArrayRef<const uint8_t>* reserved_code,
+                           /*out*/ArrayRef<const uint8_t>* reserved_data) {
+  code_size = OatQuickMethodHeader::InstructionAlignedSize() + code_size;
+  size_t data_size = RoundUp(ComputeRootTableSize(number_of_roots) + stack_map_size, sizeof(void*));
 
-const uint8_t* JitCodeCache::ReserveData(Thread* self,
-                                         JitMemoryRegion* region,
-                                         size_t stack_map_size,
-                                         size_t number_of_roots,
-                                         ArtMethod* method) {
-  size_t table_size = ComputeRootTableSize(number_of_roots);
-  size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
-  const uint8_t* result = nullptr;
-
-  {
-    ScopedThreadSuspension sts(self, kSuspended);
-    MutexLock mu(self, *Locks::jit_lock_);
-    WaitForPotentialCollectionToComplete(self);
-    result = region->AllocateData(size);
+  const uint8_t* code;
+  const uint8_t* data;
+  // We might need to try the allocation twice (with GC in between to free up memory).
+  for (int i = 0; i < 2; i++) {
+    {
+      ScopedThreadSuspension sts(self, kSuspended);
+      MutexLock mu(self, *Locks::jit_lock_);
+      WaitForPotentialCollectionToComplete(self);
+      ScopedCodeCacheWrite ccw(*region);
+      code = region->AllocateCode(code_size);
+      data = region->AllocateData(data_size);
+    }
+    if (code == nullptr || data == nullptr) {
+      Free(self, region, code, data);
+      if (i == 0) {
+        GarbageCollectCache(self);
+        continue;  // Retry after GC.
+      } else {
+        return false;  // Fail.
+      }
+    }
+    break;  // Success.
   }
+  *reserved_code = ArrayRef<const uint8_t>(code, code_size);
+  *reserved_data = ArrayRef<const uint8_t>(data, data_size);
 
   MutexLock mu(self, *Locks::jit_lock_);
-  histogram_stack_map_memory_use_.AddValue(size);
-  if (size > kStackMapSizeLogThreshold) {
+  histogram_code_memory_use_.AddValue(code_size);
+  if (code_size > kCodeSizeLogThreshold) {
     LOG(INFO) << "JIT allocated "
-              << PrettySize(size)
+              << PrettySize(code_size)
+              << " for compiled code of "
+              << ArtMethod::PrettyMethod(method);
+  }
+  histogram_stack_map_memory_use_.AddValue(data_size);
+  if (data_size > kStackMapSizeLogThreshold) {
+    LOG(INFO) << "JIT allocated "
+              << PrettySize(data_size)
               << " for stack maps of "
               << ArtMethod::PrettyMethod(method);
   }
-  return result;
+  return true;
+}
+
+void JitCodeCache::Free(Thread* self,
+                        JitMemoryRegion* region,
+                        const uint8_t* code,
+                        const uint8_t* data) {
+  MutexLock mu(self, *Locks::jit_lock_);
+  ScopedCodeCacheWrite ccw(*region);
+  if (code != nullptr) {
+    region->FreeCode(code);
+  }
+  if (data != nullptr) {
+    region->FreeData(data);
+  }
 }
 
 class MarkCodeClosure final : public Closure {
@@ -1685,7 +1654,7 @@
     if (UNLIKELY(!data->IsCompiled())) {
       // Failed to compile; the JNI compiler never fails, but the cache may be full.
       jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
-    }  // else CommitCodeInternal() updated entrypoints of all methods in the JniStubData.
+    }  // else Commit() updated entrypoints of all methods in the JniStubData.
   } else {
     ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
     if (info != nullptr) {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index ace851f..12425cf 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -180,28 +180,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
-  // Allocate and write code and its metadata to the code cache.
-  // `cha_single_implementation_list` needs to be registered via CHA (if it's
-  // still valid), since the compiled code still needs to be invalidated if the
-  // single-implementation assumptions are violated later. This needs to be done
-  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
-  // guard elimination.
-  uint8_t* CommitCode(Thread* self,
-                      JitMemoryRegion* region,
-                      ArtMethod* method,
-                      const uint8_t* code,
-                      size_t code_size,
-                      const uint8_t* stack_map,
-                      size_t stack_map_size,
-                      const uint8_t* roots_data,
-                      const std::vector<Handle<mirror::Object>>& roots,
-                      bool osr,
-                      bool has_should_deoptimize_flag,
-                      const ArenaSet<ArtMethod*>& cha_single_implementation_list,
-                      const std::function<void(const uint8_t* code)>& generate_debug_info)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Locks::jit_lock_);
-
   // Return true if the code cache contains this pc.
   bool ContainsPc(const void* pc) const;
 
@@ -215,20 +193,42 @@
   // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
   const void* GetJniStubCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
-  // Allocate a region of data that will contain a stack map of size `stack_map_size` and
-  // `number_of_roots` roots accessed by the JIT code.
-  // Return a pointer to where roots will be stored.
-  const uint8_t* ReserveData(Thread* self,
-                             JitMemoryRegion* region,
-                             size_t stack_map_size,
-                             size_t number_of_roots,
-                             ArtMethod* method)
+  // Allocate a region for both code and data in the JIT code cache.
+  // The reserved memory is left completely uninitialized.
+  bool Reserve(Thread* self,
+               JitMemoryRegion* region,
+               size_t code_size,
+               size_t stack_map_size,
+               size_t number_of_roots,
+               ArtMethod* method,
+               /*out*/ArrayRef<const uint8_t>* reserved_code,
+               /*out*/ArrayRef<const uint8_t>* reserved_data)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
-  // Clear data from the data portion of the code cache.
-  void ClearData(
-      Thread* self, JitMemoryRegion* region, const uint8_t* roots_data)
+  // Initialize code and data in memory previously reserved with Reserve().
+  //
+  // `cha_single_implementation_list` needs to be registered via CHA (if it's
+  // still valid), since the compiled code still needs to be invalidated if the
+  // single-implementation assumptions are violated later. This needs to be done
+  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
+  // guard elimination.
+  bool Commit(Thread* self,
+              JitMemoryRegion* region,
+              ArtMethod* method,
+              ArrayRef<const uint8_t> reserved_code,  // Uninitialized destination.
+              ArrayRef<const uint8_t> code,           // Compiler output (source).
+              ArrayRef<const uint8_t> reserved_data,  // Uninitialized destination.
+              const std::vector<Handle<mirror::Object>>& roots,
+              ArrayRef<const uint8_t> stack_map,      // Compiler output (source).
+              bool osr,
+              bool has_should_deoptimize_flag,
+              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Locks::jit_lock_);
+
+  // Free the previously allocated memory regions.
+  void Free(Thread* self, JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
@@ -357,24 +357,6 @@
  private:
   JitCodeCache();
 
-  // Internal version of 'CommitCode' that will not retry if the
-  // allocation fails. Return null if the allocation fails.
-  uint8_t* CommitCodeInternal(Thread* self,
-                              JitMemoryRegion* region,
-                              ArtMethod* method,
-                              const uint8_t* code,
-                              size_t code_size,
-                              const uint8_t* stack_map,
-                              size_t stack_map_size,
-                              const uint8_t* roots_data,
-                              const std::vector<Handle<mirror::Object>>& roots,
-                              bool osr,
-                              bool has_should_deoptimize_flag,
-                              const ArenaSet<ArtMethod*>& cha_single_implementation_list,
-                              const std::function<void(const uint8_t* code)>& generate_debug_info)
-      REQUIRES(!Locks::jit_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                           ArtMethod* method,
                                           const std::vector<uint32_t>& entries)
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index 43ef08e..09980c8 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -350,41 +350,37 @@
   }
 }
 
-const uint8_t* JitMemoryRegion::AllocateCode(const uint8_t* code,
-                                             size_t code_size,
-                                             const uint8_t* stack_map,
-                                             bool has_should_deoptimize_flag) {
+const uint8_t* JitMemoryRegion::CommitCode(ArrayRef<const uint8_t> reserved_code,
+                                           ArrayRef<const uint8_t> code,
+                                           const uint8_t* stack_map,
+                                           bool has_should_deoptimize_flag) {
+  DCHECK(IsInExecSpace(reserved_code.data()));
   ScopedCodeCacheWrite scc(*this);
 
   size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
-  // Ensure the header ends up at expected instruction alignment.
-  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
-  size_t total_size = header_size + code_size;
+  size_t header_size = OatQuickMethodHeader::InstructionAlignedSize();
+  size_t total_size = header_size + code.size();
 
   // Each allocation should be on its own set of cache lines.
   // `total_size` covers the OatQuickMethodHeader, the JIT generated machine code,
   // and any alignment padding.
   DCHECK_GT(total_size, header_size);
-  uint8_t* w_memory = reinterpret_cast<uint8_t*>(
-      mspace_memalign(exec_mspace_, alignment, total_size));
-  if (UNLIKELY(w_memory == nullptr)) {
-    return nullptr;
-  }
-  uint8_t* x_memory = GetExecutableAddress(w_memory);
+  DCHECK_LE(total_size, reserved_code.size());
+  uint8_t* x_memory = const_cast<uint8_t*>(reserved_code.data());
+  uint8_t* w_memory = const_cast<uint8_t*>(GetNonExecutableAddress(x_memory));
   // Ensure the header ends up at expected instruction alignment.
   DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(w_memory + header_size), alignment);
-  used_memory_for_code_ += mspace_usable_size(w_memory);
   const uint8_t* result = x_memory + header_size;
 
   // Write the code.
-  std::copy(code, code + code_size, w_memory + header_size);
+  std::copy(code.begin(), code.end(), w_memory + header_size);
 
   // Write the header.
   OatQuickMethodHeader* method_header =
       OatQuickMethodHeader::FromCodePointer(w_memory + header_size);
   new (method_header) OatQuickMethodHeader(
       (stack_map != nullptr) ? result - stack_map : 0u,
-      code_size);
+      code.size());
   if (has_should_deoptimize_flag) {
     method_header->SetHasShouldDeoptimizeFlag();
   }
@@ -419,7 +415,6 @@
   // correctness of the instructions present in the processor caches.
   if (!cache_flush_success) {
     PLOG(ERROR) << "Cache flush failed triggering code allocation failure";
-    FreeCode(x_memory);
     return nullptr;
   }
 
@@ -452,24 +447,35 @@
   reinterpret_cast<uint32_t*>(roots_data)[length] = length;
 }
 
-bool JitMemoryRegion::CommitData(const uint8_t* readonly_roots_data,
+bool JitMemoryRegion::CommitData(ArrayRef<const uint8_t> reserved_data,
                                  const std::vector<Handle<mirror::Object>>& roots,
-                                 const uint8_t* stack_map,
-                                 size_t stack_map_size) {
-  uint8_t* roots_data = GetWritableDataAddress(readonly_roots_data);
+                                 ArrayRef<const uint8_t> stack_map) {
+  DCHECK(IsInDataSpace(reserved_data.data()));
+  uint8_t* roots_data = GetWritableDataAddress(reserved_data.data());
   size_t root_table_size = ComputeRootTableSize(roots.size());
   uint8_t* stack_map_data = roots_data + root_table_size;
+  DCHECK_LE(root_table_size + stack_map.size(), reserved_data.size());
   FillRootTable(roots_data, roots);
-  memcpy(stack_map_data, stack_map, stack_map_size);
+  memcpy(stack_map_data, stack_map.data(), stack_map.size());
   // Flush data cache, as compiled code references literals in it.
   // TODO(oth): establish whether this is necessary.
-  if (UNLIKELY(!FlushCpuCaches(roots_data, roots_data + root_table_size + stack_map_size))) {
+  if (UNLIKELY(!FlushCpuCaches(roots_data, roots_data + root_table_size + stack_map.size()))) {
     VLOG(jit) << "Failed to flush data in CommitData";
     return false;
   }
   return true;
 }
 
+const uint8_t* JitMemoryRegion::AllocateCode(size_t size) {
+  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+  void* result = mspace_memalign(exec_mspace_, alignment, size);
+  if (UNLIKELY(result == nullptr)) {
+    return nullptr;
+  }
+  used_memory_for_code_ += mspace_usable_size(result);
+  return reinterpret_cast<uint8_t*>(GetExecutableAddress(result));
+}
+
 void JitMemoryRegion::FreeCode(const uint8_t* code) {
   code = GetNonExecutableAddress(code);
   used_memory_for_code_ -= mspace_usable_size(code);
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
index 2bb69a7..6db931d 100644
--- a/runtime/jit/jit_memory_region.h
+++ b/runtime/jit/jit_memory_region.h
@@ -79,24 +79,25 @@
   // Set the footprint limit of the code cache.
   void SetFootprintLimit(size_t new_footprint) REQUIRES(Locks::jit_lock_);
 
-  // Copy the code into the region, and allocate an OatQuickMethodHeader.
-  // Callers should not write into the returned memory, as it may be read-only.
-  const uint8_t* AllocateCode(const uint8_t* code,
-                              size_t code_size,
-                              const uint8_t* stack_map,
-                              bool has_should_deoptimize_flag)
-      REQUIRES(Locks::jit_lock_);
+  const uint8_t* AllocateCode(size_t code_size) REQUIRES(Locks::jit_lock_);
   void FreeCode(const uint8_t* code) REQUIRES(Locks::jit_lock_);
   const uint8_t* AllocateData(size_t data_size) REQUIRES(Locks::jit_lock_);
   void FreeData(const uint8_t* data) REQUIRES(Locks::jit_lock_);
   void FreeData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_) = delete;
   void FreeWritableData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_);
 
-  // Emit roots and stack map into the memory pointed by `roots_data`.
-  bool CommitData(const uint8_t* roots_data,
+  // Emit header and code into the memory pointed to by `reserved_code` (despite it being const).
+  // Returns a pointer to the copied code, inside `reserved_code` just after the OatQuickMethodHeader.
+  const uint8_t* CommitCode(ArrayRef<const uint8_t> reserved_code,
+                            ArrayRef<const uint8_t> code,
+                            const uint8_t* stack_map,
+                            bool has_should_deoptimize_flag)
+      REQUIRES(Locks::jit_lock_);
+
+  // Emit roots and stack map into the memory pointed to by `reserved_data` (despite it being const).
+  bool CommitData(ArrayRef<const uint8_t> reserved_data,
                   const std::vector<Handle<mirror::Object>>& roots,
-                  const uint8_t* stack_map,
-                  size_t stack_map_size)
+                  ArrayRef<const uint8_t> stack_map)
       REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index e41c7ee..9d0883b 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -50,6 +50,10 @@
     return FromCodePointer(EntryPointToCodePointer(entry_point));
   }
 
+  static size_t InstructionAlignedSize() {
+    return RoundUp(sizeof(OatQuickMethodHeader), GetInstructionSetAlignment(kRuntimeISA));
+  }
+
   OatQuickMethodHeader(const OatQuickMethodHeader&) = default;
   OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;