Merge "Add buildbot target using linux_bionic zipapex"
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index b9fd868..9b8bb3e 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -142,10 +142,11 @@
     const ArrayRef<mirror::Class*> types_array(types, count);
     std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
         kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
-    MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
     // We never free debug info for types, so we don't need to provide a code pointer
     // (which would otherwise be used as an identifier to remove the entry later).
-    AddNativeDebugInfoForJit(nullptr /* handle */, elf_file);
+    AddNativeDebugInfoForJit(Thread::Current(),
+                             /*code_ptr=*/ nullptr,
+                             elf_file);
   }
 }
 
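Reviewer note: the MutexLock disappears from this call site because
AddNativeDebugInfoForJit() now acquires Locks::native_debug_interface_lock_
itself (see the REQUIRES(!...) annotations in debugger_interface.h further
down). A minimal sketch of this self-locking pattern, using std::mutex and
hypothetical names instead of ART's lock types:

    #include <cstdint>
    #include <map>
    #include <mutex>
    #include <vector>

    static std::mutex g_registry_mutex;
    static std::map<const void*, std::vector<uint8_t>> g_registry;

    // The callee takes the lock itself, so callers must NOT already hold
    // g_registry_mutex (the analog of REQUIRES(!Locks::native_debug_interface_lock_)).
    void AddDebugInfo(const void* code_ptr, const std::vector<uint8_t>& symfile) {
      std::lock_guard<std::mutex> guard(g_registry_mutex);
      if (code_ptr != nullptr) {  // A null key means the entry is never freed.
        g_registry.emplace(code_ptr, symfile);
      }
    }
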
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 92aaa19..c9b4d36 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1470,13 +1470,14 @@
       compiler_options.GetInstructionSetFeatures(),
       mini_debug_info,
       info);
-  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
-  AddNativeDebugInfoForJit(reinterpret_cast<const void*>(info.code_address), elf_file);
+  AddNativeDebugInfoForJit(Thread::Current(),
+                           reinterpret_cast<const void*>(info.code_address),
+                           elf_file);
 
   VLOG(jit)
       << "JIT mini-debug-info added for " << ArtMethod::PrettyMethod(method)
       << " size=" << PrettySize(elf_file.size())
-      << " total_size=" << PrettySize(GetJitNativeDebugInfoMemUsage());
+      << " total_size=" << PrettySize(GetJitMiniDebugInfoMemUsage());
 }
 
 }  // namespace art
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
index 9e91c65..678574be2 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
@@ -575,11 +575,6 @@
   Arm64RelativePatcherTestDefault() : Arm64RelativePatcherTest("default") { }
 };
 
-class Arm64RelativePatcherTestDenver64 : public Arm64RelativePatcherTest {
- public:
-  Arm64RelativePatcherTestDenver64() : Arm64RelativePatcherTest("denver64") { }
-};
-
 TEST_F(Arm64RelativePatcherTestDefault, CallSelf) {
   const LinkerPatch patches[] = {
       LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
@@ -834,18 +829,6 @@
       { 0x1234u, 0x1238u });
 }
 
-TEST_F(Arm64RelativePatcherTestDenver64, StringBssEntryLdur) {
-  TestForAdrpOffsets(
-      [&](uint32_t adrp_offset, uint32_t string_entry_offset) {
-        Reset();
-        TestAdrpLdurLdr(adrp_offset,
-                        /*has_thunk=*/ false,
-                        /*bss_begin=*/ 0x12345678u,
-                        string_entry_offset);
-      },
-      { 0x1234u, 0x1238u });
-}
-
 // LDR <Wt>, <label> is always aligned. We should never have to use a fixup.
 TEST_F(Arm64RelativePatcherTestDefault, StringBssEntryWPcRel) {
   TestForAdrpOffsets(
@@ -917,15 +900,6 @@
       { 0x12345678u, 0xffffc840u });
 }
 
-TEST_F(Arm64RelativePatcherTestDenver64, StringReferenceLdur) {
-  TestForAdrpOffsets(
-      [&](uint32_t adrp_offset, uint32_t string_offset) {
-        Reset();
-        TestAdrpLdurAdd(adrp_offset, /*has_thunk=*/ false, string_offset);
-      },
-      { 0x12345678u, 0xffffc840U });
-}
-
 TEST_F(Arm64RelativePatcherTestDefault, StringReferenceSubX3X2) {
   TestForAdrpOffsets(
       [&](uint32_t adrp_offset, uint32_t string_offset) {
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index 58ae394..fcf3c75 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -53,7 +53,6 @@
       "cortex-a75",
       "cortex-a76",
       "exynos-m1",
-      "denver",
       "kryo",
       "kryo385",
   };
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
index d9651f9..36e31bd 100644
--- a/runtime/arch/arm/instruction_set_features_arm_test.cc
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -46,20 +46,6 @@
   EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", kryo_features->GetFeatureString().c_str());
   EXPECT_EQ(kryo_features->AsBitmap(), 7U);
 
-  // Build features for a 32-bit ARM denver processor.
-  std::unique_ptr<const InstructionSetFeatures> denver_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kArm, "denver", &error_msg));
-  ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
-
-  EXPECT_TRUE(denver_features->Equals(denver_features.get()));
-  EXPECT_TRUE(denver_features->HasAtLeast(krait_features.get()));
-  EXPECT_FALSE(krait_features->Equals(denver_features.get()));
-  EXPECT_FALSE(krait_features->HasAtLeast(denver_features.get()));
-  EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
-  EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
-  EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", denver_features->GetFeatureString().c_str());
-  EXPECT_EQ(denver_features->AsBitmap(), 7U);
-
   // Build features for a 32-bit ARMv7 processor.
   std::unique_ptr<const InstructionSetFeatures> generic_features(
       InstructionSetFeatures::FromVariant(InstructionSet::kArm, "generic", &error_msg));
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 963c207..4a2b9d5 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -126,7 +126,6 @@
         "exynos-m1",
         "exynos-m2",
         "exynos-m3",
-        "denver64",
         "kryo",
         "kryo385",
     };
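Reviewer note: with "denver"/"denver64" gone from the variant allow-lists,
lookups for those names now take the unknown-variant path. A caller-side
fragment mirroring the usage in instruction_set_features_arm_test.cc
(requires ART's runtime headers; "kryo" is just an example from the
remaining list):

    std::string error_msg;
    std::unique_ptr<const InstructionSetFeatures> features(
        InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "kryo", &error_msg));
    ASSERT_TRUE(features.get() != nullptr) << error_msg;
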
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index e31fe63..d33541c 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3736,8 +3736,7 @@
   data.weak_root = dex_cache_jweak;
   data.dex_file = dex_cache->GetDexFile();
   data.class_table = ClassTableForClassLoader(class_loader);
-  AddNativeDebugInfoForDex(self, ArrayRef<const uint8_t>(data.dex_file->Begin(),
-                                                         data.dex_file->Size()));
+  AddNativeDebugInfoForDex(self, data.dex_file);
   DCHECK(data.class_table != nullptr);
   // Make sure to hold the dex cache live in the class table. This case happens for the boot class
   // path dex caches without an image.
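Reviewer note: the call site shrinks because the registry in
debugger_interface.cc (below) now keys on the DexFile* itself and builds the
ArrayRef over dexfile->Begin()/Size() internally. A stand-in sketch of the
keying change (hypothetical opaque types, not ART's):

    #include <map>

    class DexFile;        // stand-in forward declaration
    struct JITCodeEntry;  // stand-in forward declaration

    static std::map<const DexFile*, JITCodeEntry*> g_dex_debug_entries;

    // Defensive: ignore duplicate registration, as the real code does.
    void RegisterDex(const DexFile* dexfile, JITCodeEntry* entry) {
      if (g_dex_debug_entries.count(dexfile) == 0) {
        g_dex_debug_entries.emplace(dexfile, entry);
      }
    }
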
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index f45de27..4c2074d 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -379,6 +379,281 @@
             << reinterpret_cast<const void*>(reloc.Dest() + reloc.Length()) << ")";
 }
 
+template <PointerSize kPointerSize, typename ReferenceVisitor>
+class ImageSpace::PatchObjectVisitor final {
+ public:
+  explicit PatchObjectVisitor(ReferenceVisitor reference_visitor)
+      : reference_visitor_(reference_visitor) {}
+
+  void VisitClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+    // A mirror::Class object consists of
+    //  - instance fields inherited from j.l.Object,
+    //  - instance fields inherited from j.l.Class,
+    //  - embedded tables (vtable, interface method table),
+    //  - static fields of the class itself.
+    // The reference fields are at the start of each field section (this is how the
+    // ClassLinker orders fields; except when that would create a gap between superclass
+    // fields and the first reference of the subclass due to alignment, it can be filled
+    // with smaller fields - but that's not the case for j.l.Object and j.l.Class).
+
+    DCHECK_ALIGNED(klass, kObjectAlignment);
+    static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
+    // First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
+    // This should be the only reference field in j.l.Object and we assert that below.
+    PatchReferenceField</*kMayBeNull=*/ false>(klass, mirror::Object::ClassOffset());
+    // Then patch the reference instance fields described by j.l.Class.class.
+    // Use the sizeof(Object) to determine where these reference fields start;
+    // this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
+    // after patching but the j.l.Class may not have been patched yet.
+    mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
+    size_t num_reference_instance_fields = class_class->NumReferenceInstanceFields<kVerifyNone>();
+    DCHECK_NE(num_reference_instance_fields, 0u);
+    static_assert(IsAligned<kHeapReferenceSize>(sizeof(mirror::Object)), "Size alignment check.");
+    MemberOffset instance_field_offset(sizeof(mirror::Object));
+    for (size_t i = 0; i != num_reference_instance_fields; ++i) {
+      PatchReferenceField(klass, instance_field_offset);
+      static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+                    "Heap reference sizes equality check.");
+      instance_field_offset =
+          MemberOffset(instance_field_offset.Uint32Value() + kHeapReferenceSize);
+    }
+    // Now that we have patched the `super_class_`, if this is the j.l.Class.class,
+    // we can get a reference to j.l.Object.class and assert that it has only one
+    // reference instance field (the `klass_` patched above).
+    if (kIsDebugBuild && klass == class_class) {
+      ObjPtr<mirror::Class> object_class =
+          klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
+      CHECK_EQ(object_class->NumReferenceInstanceFields<kVerifyNone>(), 1u);
+    }
+    // Then patch static fields.
+    size_t num_reference_static_fields = klass->NumReferenceStaticFields<kVerifyNone>();
+    if (num_reference_static_fields != 0u) {
+      MemberOffset static_field_offset =
+          klass->GetFirstReferenceStaticFieldOffset<kVerifyNone>(kPointerSize);
+      for (size_t i = 0; i != num_reference_static_fields; ++i) {
+        PatchReferenceField(klass, static_field_offset);
+        static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+                      "Heap reference sizes equality check.");
+        static_field_offset =
+            MemberOffset(static_field_offset.Uint32Value() + kHeapReferenceSize);
+      }
+    }
+    // Then patch native pointers.
+    klass->FixupNativePointers<kVerifyNone>(klass, kPointerSize, *this);
+  }
+
+  template <typename T>
+  T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (ptr != nullptr) {
+      ptr = reference_visitor_(ptr);
+    }
+    return ptr;
+  }
+
+  void VisitPointerArray(mirror::PointerArray* pointer_array)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Fully patch the pointer array, including the `klass_` field.
+    PatchReferenceField</*kMayBeNull=*/ false>(pointer_array, mirror::Object::ClassOffset());
+
+    int32_t length = pointer_array->GetLength<kVerifyNone>();
+    for (int32_t i = 0; i != length; ++i) {
+      ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(
+          pointer_array->ElementAddress<kVerifyNone>(i, kPointerSize));
+      PatchNativePointer</*kMayBeNull=*/ false>(method_entry);
+    }
+  }
+
+  void VisitObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Visit all reference fields.
+    object->VisitReferences</*kVisitNativeRoots=*/ false,
+                            kVerifyNone,
+                            kWithoutReadBarrier>(*this, *this);
+    // This function should not be called for classes.
+    DCHECK(!object->IsClass<kVerifyNone>());
+  }
+
+  // Visitor for VisitReferences().
+  ALWAYS_INLINE void operator()(mirror::Object* object, MemberOffset field_offset, bool is_static)
+      const REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(!is_static);
+    PatchReferenceField(object, field_offset);
+  }
+  // Visitor for VisitReferences(), java.lang.ref.Reference case.
+  ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, mirror::Reference* ref) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(klass->IsTypeOfReferenceClass());
+    this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
+  }
+  // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+      const {}
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
+  void VisitDexCacheArrays(mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_) {
+    FixupDexCacheArray<mirror::StringDexCacheType>(dex_cache,
+                                                   mirror::DexCache::StringsOffset(),
+                                                   dex_cache->NumStrings<kVerifyNone>());
+    FixupDexCacheArray<mirror::TypeDexCacheType>(dex_cache,
+                                                 mirror::DexCache::ResolvedTypesOffset(),
+                                                 dex_cache->NumResolvedTypes<kVerifyNone>());
+    FixupDexCacheArray<mirror::MethodDexCacheType>(dex_cache,
+                                                   mirror::DexCache::ResolvedMethodsOffset(),
+                                                   dex_cache->NumResolvedMethods<kVerifyNone>());
+    FixupDexCacheArray<mirror::FieldDexCacheType>(dex_cache,
+                                                  mirror::DexCache::ResolvedFieldsOffset(),
+                                                  dex_cache->NumResolvedFields<kVerifyNone>());
+    FixupDexCacheArray<mirror::MethodTypeDexCacheType>(
+        dex_cache,
+        mirror::DexCache::ResolvedMethodTypesOffset(),
+        dex_cache->NumResolvedMethodTypes<kVerifyNone>());
+    FixupDexCacheArray<GcRoot<mirror::CallSite>>(
+        dex_cache,
+        mirror::DexCache::ResolvedCallSitesOffset(),
+        dex_cache->NumResolvedCallSites<kVerifyNone>());
+    FixupDexCacheArray<GcRoot<mirror::String>>(
+        dex_cache,
+        mirror::DexCache::PreResolvedStringsOffset(),
+        dex_cache->NumPreResolvedStrings<kVerifyNone>());
+  }
+
+  template <bool kMayBeNull = true, typename T>
+  ALWAYS_INLINE void PatchGcRoot(/*inout*/GcRoot<T>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    static_assert(sizeof(GcRoot<mirror::Class*>) == sizeof(uint32_t), "GcRoot size check");
+    T* old_value = root->template Read<kWithoutReadBarrier>();
+    DCHECK(kMayBeNull || old_value != nullptr);
+    if (!kMayBeNull || old_value != nullptr) {
+      *root = GcRoot<T>(reference_visitor_(old_value));
+    }
+  }
+
+  template <bool kMayBeNull = true, typename T>
+  ALWAYS_INLINE void PatchNativePointer(/*inout*/T** entry) const {
+    if (kPointerSize == PointerSize::k64) {
+      uint64_t* raw_entry = reinterpret_cast<uint64_t*>(entry);
+      T* old_value = reinterpret_cast64<T*>(*raw_entry);
+      DCHECK(kMayBeNull || old_value != nullptr);
+      if (!kMayBeNull || old_value != nullptr) {
+        T* new_value = reference_visitor_(old_value);
+        *raw_entry = reinterpret_cast64<uint64_t>(new_value);
+      }
+    } else {
+      uint32_t* raw_entry = reinterpret_cast<uint32_t*>(entry);
+      T* old_value = reinterpret_cast32<T*>(*raw_entry);
+      DCHECK(kMayBeNull || old_value != nullptr);
+      if (!kMayBeNull || old_value != nullptr) {
+        T* new_value = reference_visitor_(old_value);
+        *raw_entry = reinterpret_cast32<uint32_t>(new_value);
+      }
+    }
+  }
+
+  template <bool kMayBeNull = true>
+  ALWAYS_INLINE void PatchReferenceField(mirror::Object* object, MemberOffset offset) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    mirror::Object* old_value =
+        object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
+    DCHECK(kMayBeNull || old_value != nullptr);
+    if (!kMayBeNull || old_value != nullptr) {
+      mirror::Object* new_value = reference_visitor_(old_value);
+      object->SetFieldObjectWithoutWriteBarrier</*kTransactionActive=*/ false,
+                                                /*kCheckTransaction=*/ true,
+                                                kVerifyNone>(offset, new_value);
+    }
+  }
+
+  template <typename T>
+  void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
+                  "Size check for removing std::atomic<>.");
+    PatchGcRoot(&(reinterpret_cast<mirror::DexCachePair<T>*>(array)[index].object));
+  }
+
+  template <typename T>
+  void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    static_assert(sizeof(std::atomic<mirror::NativeDexCachePair<T>>) ==
+                      sizeof(mirror::NativeDexCachePair<T>),
+                  "Size check for removing std::atomic<>.");
+    mirror::NativeDexCachePair<T> pair =
+        mirror::DexCache::GetNativePairPtrSize(array, index, kPointerSize);
+    if (pair.object != nullptr) {
+      pair.object = reference_visitor_(pair.object);
+      mirror::DexCache::SetNativePairPtrSize(array, index, pair, kPointerSize);
+    }
+  }
+
+  void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    PatchGcRoot(&array[index]);
+  }
+
+  void FixupDexCacheArrayEntry(GcRoot<mirror::String>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    PatchGcRoot(&array[index]);
+  }
+
+  template <typename EntryType>
+  void FixupDexCacheArray(mirror::DexCache* dex_cache,
+                          MemberOffset array_offset,
+                          uint32_t size) REQUIRES_SHARED(Locks::mutator_lock_) {
+    EntryType* old_array =
+        reinterpret_cast64<EntryType*>(dex_cache->GetField64<kVerifyNone>(array_offset));
+    DCHECK_EQ(old_array != nullptr, size != 0u);
+    if (old_array != nullptr) {
+      EntryType* new_array = reference_visitor_(old_array);
+      dex_cache->SetField64<kVerifyNone>(array_offset, reinterpret_cast64<uint64_t>(new_array));
+      for (uint32_t i = 0; i != size; ++i) {
+        FixupDexCacheArrayEntry(new_array, i);
+      }
+    }
+  }
+
+ private:
+  ReferenceVisitor reference_visitor_;
+};
+
+template <typename ObjectVisitor>
+class ImageSpace::PatchArtFieldVisitor final : public ArtFieldVisitor {
+ public:
+  explicit PatchArtFieldVisitor(const ObjectVisitor& visitor) : visitor_(visitor) {}
+
+  void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    visitor_.template PatchGcRoot</*kMayBeNull=*/ false>(&field->DeclaringClassRoot());
+  }
+
+ private:
+  const ObjectVisitor visitor_;
+};
+
+template <PointerSize kPointerSize, typename ObjectVisitor, typename CodeVisitor>
+class ImageSpace::PatchArtMethodVisitor final : public ArtMethodVisitor {
+ public:
+  explicit PatchArtMethodVisitor(const ObjectVisitor& object_visitor,
+                                 const CodeVisitor& code_visitor)
+      : object_visitor_(object_visitor),
+        code_visitor_(code_visitor) {}
+
+  void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    object_visitor_.PatchGcRoot(&method->DeclaringClassRoot());
+    void** data_address = PointerAddress(method, ArtMethod::DataOffset(kPointerSize));
+    object_visitor_.PatchNativePointer(data_address);
+    void** entrypoint_address =
+        PointerAddress(method, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kPointerSize));
+    code_visitor_.PatchNativePointer(entrypoint_address);
+  }
+
+ private:
+  void** PointerAddress(ArtMethod* method, MemberOffset offset) {
+    return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(method) + offset.Uint32Value());
+  }
+
+  const ObjectVisitor object_visitor_;
+  const CodeVisitor code_visitor_;
+};
+
 // Helper class encapsulating loading, so we can access private ImageSpace members (this is a
 // nested class), but not declare functions in the header.
 class ImageSpace::Loader {
@@ -1441,43 +1716,20 @@
     return true;
   }
 
-  template <typename T>
-  ALWAYS_INLINE static T* RelocatedAddress(T* src, uint32_t diff) {
-    DCHECK(src != nullptr);
-    return reinterpret_cast32<T*>(reinterpret_cast32<uint32_t>(src) + diff);
-  }
+ private:
+  class RelocateVisitor {
+   public:
+    explicit RelocateVisitor(uint32_t diff) : diff_(diff) {}
 
-  template <bool kMayBeNull = true, typename T>
-  ALWAYS_INLINE static void PatchGcRoot(uint32_t diff, /*inout*/GcRoot<T>* root)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    static_assert(sizeof(GcRoot<mirror::Class*>) == sizeof(uint32_t), "GcRoot size check");
-    T* old_value = root->template Read<kWithoutReadBarrier>();
-    DCHECK(kMayBeNull || old_value != nullptr);
-    if (!kMayBeNull || old_value != nullptr) {
-      *root = GcRoot<T>(RelocatedAddress(old_value, diff));
+    template <typename T>
+    ALWAYS_INLINE T* operator()(T* src) const {
+      DCHECK(src != nullptr);
+      return reinterpret_cast32<T*>(reinterpret_cast32<uint32_t>(src) + diff_);
     }
-  }
 
-  template <PointerSize kPointerSize, bool kMayBeNull = true, typename T>
-  ALWAYS_INLINE static void PatchNativePointer(uint32_t diff, /*inout*/T** entry) {
-    if (kPointerSize == PointerSize::k64) {
-      uint64_t* raw_entry = reinterpret_cast<uint64_t*>(entry);
-      T* old_value = reinterpret_cast64<T*>(*raw_entry);
-      DCHECK(kMayBeNull || old_value != nullptr);
-      if (!kMayBeNull || old_value != nullptr) {
-        T* new_value = RelocatedAddress(old_value, diff);
-        *raw_entry = reinterpret_cast64<uint64_t>(new_value);
-      }
-    } else {
-      uint32_t* raw_entry = reinterpret_cast<uint32_t*>(entry);
-      T* old_value = reinterpret_cast32<T*>(*raw_entry);
-      DCHECK(kMayBeNull || old_value != nullptr);
-      if (!kMayBeNull || old_value != nullptr) {
-        T* new_value = RelocatedAddress(old_value, diff);
-        *raw_entry = reinterpret_cast32<uint32_t>(new_value);
-      }
-    }
-  }
+   private:
+    const uint32_t diff_;
+  };
 
   class PatchedObjectsMap {
    public:
@@ -1514,258 +1766,20 @@
     BitMemoryRegion visited_objects_;
   };
 
-  class PatchArtFieldVisitor final : public ArtFieldVisitor {
-   public:
-    explicit PatchArtFieldVisitor(uint32_t diff)
-        : diff_(diff) {}
-
-    void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
-      PatchGcRoot</*kMayBeNull=*/ false>(diff_, &field->DeclaringClassRoot());
-    }
-
-   private:
-    const uint32_t diff_;
-  };
-
-  template <PointerSize kPointerSize>
-  class PatchArtMethodVisitor final : public ArtMethodVisitor {
-   public:
-    explicit PatchArtMethodVisitor(uint32_t diff)
-        : diff_(diff) {}
-
-    void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
-      PatchGcRoot(diff_, &method->DeclaringClassRoot());
-      void** data_address = PointerAddress(method, ArtMethod::DataOffset(kPointerSize));
-      PatchNativePointer<kPointerSize>(diff_, data_address);
-      void** entrypoint_address =
-          PointerAddress(method, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kPointerSize));
-      PatchNativePointer<kPointerSize>(diff_, entrypoint_address);
-    }
-
-   private:
-    void** PointerAddress(ArtMethod* method, MemberOffset offset) {
-      return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(method) + offset.Uint32Value());
-    }
-
-    const uint32_t diff_;
-  };
-
+  template <typename ReferenceVisitor>
   class ClassTableVisitor final {
    public:
-    explicit ClassTableVisitor(uint32_t diff) : diff_(diff) {}
+    explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor)
+        : reference_visitor_(reference_visitor) {}
 
     void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
         REQUIRES_SHARED(Locks::mutator_lock_) {
       DCHECK(root->AsMirrorPtr() != nullptr);
-      root->Assign(RelocatedAddress(root->AsMirrorPtr(), diff_));
+      root->Assign(reference_visitor_(root->AsMirrorPtr()));
     }
 
    private:
-    const uint32_t diff_;
-  };
-
-  template <PointerSize kPointerSize>
-  class PatchObjectVisitor final {
-   public:
-    explicit PatchObjectVisitor(uint32_t diff)
-        : diff_(diff) {}
-
-    void VisitClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
-      // A mirror::Class object consists of
-      //  - instance fields inherited from j.l.Object,
-      //  - instance fields inherited from j.l.Class,
-      //  - embedded tables (vtable, interface method table),
-      //  - static fields of the class itself.
-      // The reference fields are at the start of each field section (this is how the
-      // ClassLinker orders fields; except when that would create a gap between superclass
-      // fields and the first reference of the subclass due to alignment, it can be filled
-      // with smaller fields - but that's not the case for j.l.Object and j.l.Class).
-
-      DCHECK_ALIGNED(klass, kObjectAlignment);
-      static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
-      // First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
-      // This should be the only reference field in j.l.Object and we assert that below.
-      PatchReferenceField</*kMayBeNull=*/ false>(klass, mirror::Object::ClassOffset());
-      // Then patch the reference instance fields described by j.l.Class.class.
-      // Use the sizeof(Object) to determine where these reference fields start;
-      // this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
-      // after patching but the j.l.Class may not have been patched yet.
-      mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
-      size_t num_reference_instance_fields = class_class->NumReferenceInstanceFields<kVerifyNone>();
-      DCHECK_NE(num_reference_instance_fields, 0u);
-      static_assert(IsAligned<kHeapReferenceSize>(sizeof(mirror::Object)), "Size alignment check.");
-      MemberOffset instance_field_offset(sizeof(mirror::Object));
-      for (size_t i = 0; i != num_reference_instance_fields; ++i) {
-        PatchReferenceField(klass, instance_field_offset);
-        static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
-                      "Heap reference sizes equality check.");
-        instance_field_offset =
-            MemberOffset(instance_field_offset.Uint32Value() + kHeapReferenceSize);
-      }
-      // Now that we have patched the `super_class_`, if this is the j.l.Class.class,
-      // we can get a reference to j.l.Object.class and assert that it has only one
-      // reference instance field (the `klass_` patched above).
-      if (kIsDebugBuild && klass == class_class) {
-        ObjPtr<mirror::Class> object_class =
-            klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
-        CHECK_EQ(object_class->NumReferenceInstanceFields<kVerifyNone>(), 1u);
-      }
-      // Then patch static fields.
-      size_t num_reference_static_fields = klass->NumReferenceStaticFields<kVerifyNone>();
-      if (num_reference_static_fields != 0u) {
-        MemberOffset static_field_offset =
-            klass->GetFirstReferenceStaticFieldOffset<kVerifyNone>(kPointerSize);
-        for (size_t i = 0; i != num_reference_static_fields; ++i) {
-          PatchReferenceField(klass, static_field_offset);
-          static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
-                        "Heap reference sizes equality check.");
-          static_field_offset =
-              MemberOffset(static_field_offset.Uint32Value() + kHeapReferenceSize);
-        }
-      }
-      // Then patch native pointers.
-      klass->FixupNativePointers<kVerifyNone>(klass, kPointerSize, *this);
-    }
-
-    template <typename T>
-    T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (ptr != nullptr) {
-        ptr = RelocatedAddress(ptr, diff_);
-      }
-      return ptr;
-    }
-
-    void VisitPointerArray(mirror::PointerArray* pointer_array)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      // Fully patch the pointer array, including the `klass_` field.
-      PatchReferenceField</*kMayBeNull=*/ false>(pointer_array, mirror::Object::ClassOffset());
-
-      int32_t length = pointer_array->GetLength<kVerifyNone>();
-      for (int32_t i = 0; i != length; ++i) {
-        ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(
-            pointer_array->ElementAddress<kVerifyNone>(i, kPointerSize));
-        PatchNativePointer<kPointerSize, /*kMayBeNull=*/ false>(diff_, method_entry);
-      }
-    }
-
-    void VisitObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
-      // Visit all reference fields.
-      object->VisitReferences</*kVisitNativeRoots=*/ false,
-                              kVerifyNone,
-                              kWithoutReadBarrier>(*this, *this);
-      // This function should not be called for classes.
-      DCHECK(!object->IsClass<kVerifyNone>());
-    }
-
-    // Visitor for VisitReferences().
-    ALWAYS_INLINE void operator()(mirror::Object* object, MemberOffset field_offset, bool is_static)
-        const REQUIRES_SHARED(Locks::mutator_lock_) {
-      DCHECK(!is_static);
-      PatchReferenceField(object, field_offset);
-    }
-    // Visitor for VisitReferences(), java.lang.ref.Reference case.
-    ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, mirror::Reference* ref) const
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      DCHECK(klass->IsTypeOfReferenceClass());
-      this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
-    }
-    // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
-    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
-        const {}
-    void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
-
-    void VisitDexCacheArrays(mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_) {
-      FixupDexCacheArray<mirror::StringDexCacheType>(dex_cache,
-                                                     mirror::DexCache::StringsOffset(),
-                                                     dex_cache->NumStrings<kVerifyNone>());
-      FixupDexCacheArray<mirror::TypeDexCacheType>(dex_cache,
-                                                   mirror::DexCache::ResolvedTypesOffset(),
-                                                   dex_cache->NumResolvedTypes<kVerifyNone>());
-      FixupDexCacheArray<mirror::MethodDexCacheType>(dex_cache,
-                                                     mirror::DexCache::ResolvedMethodsOffset(),
-                                                     dex_cache->NumResolvedMethods<kVerifyNone>());
-      FixupDexCacheArray<mirror::FieldDexCacheType>(dex_cache,
-                                                    mirror::DexCache::ResolvedFieldsOffset(),
-                                                    dex_cache->NumResolvedFields<kVerifyNone>());
-      FixupDexCacheArray<mirror::MethodTypeDexCacheType>(
-          dex_cache,
-          mirror::DexCache::ResolvedMethodTypesOffset(),
-          dex_cache->NumResolvedMethodTypes<kVerifyNone>());
-      FixupDexCacheArray<GcRoot<mirror::CallSite>>(
-          dex_cache,
-          mirror::DexCache::ResolvedCallSitesOffset(),
-          dex_cache->NumResolvedCallSites<kVerifyNone>());
-      FixupDexCacheArray<GcRoot<mirror::String>>(
-          dex_cache,
-          mirror::DexCache::PreResolvedStringsOffset(),
-          dex_cache->NumPreResolvedStrings<kVerifyNone>());
-    }
-
-   private:
-    template <bool kMayBeNull = true>
-    ALWAYS_INLINE void PatchReferenceField(mirror::Object* object, MemberOffset offset) const
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      mirror::Object* old_value =
-          object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
-      DCHECK(kMayBeNull || old_value != nullptr);
-      if (!kMayBeNull || old_value != nullptr) {
-        mirror::Object* new_value = RelocatedAddress(old_value, diff_);
-        object->SetFieldObjectWithoutWriteBarrier</*kTransactionActive=*/ false,
-                                                  /*kCheckTransaction=*/ true,
-                                                  kVerifyNone>(offset, new_value);
-      }
-    }
-
-    template <typename T>
-    void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* array, uint32_t index)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
-                    "Size check for removing std::atomic<>.");
-      PatchGcRoot(diff_, &(reinterpret_cast<mirror::DexCachePair<T>*>(array)[index].object));
-    }
-
-    template <typename T>
-    void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* array, uint32_t index)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      static_assert(sizeof(std::atomic<mirror::NativeDexCachePair<T>>) ==
-                        sizeof(mirror::NativeDexCachePair<T>),
-                    "Size check for removing std::atomic<>.");
-      mirror::NativeDexCachePair<T> pair =
-          mirror::DexCache::GetNativePairPtrSize(array, index, kPointerSize);
-      if (pair.object != nullptr) {
-        pair.object = RelocatedAddress(pair.object, diff_);
-        mirror::DexCache::SetNativePairPtrSize(array, index, pair, kPointerSize);
-      }
-    }
-
-    void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* array, uint32_t index)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      PatchGcRoot(diff_, &array[index]);
-    }
-
-    void FixupDexCacheArrayEntry(GcRoot<mirror::String>* array, uint32_t index)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      PatchGcRoot(diff_, &array[index]);
-    }
-
-    template <typename EntryType>
-    void FixupDexCacheArray(mirror::DexCache* dex_cache,
-                            MemberOffset array_offset,
-                            uint32_t size) REQUIRES_SHARED(Locks::mutator_lock_) {
-      EntryType* old_array =
-          reinterpret_cast64<EntryType*>(dex_cache->GetField64<kVerifyNone>(array_offset));
-      DCHECK_EQ(old_array != nullptr, size != 0u);
-      if (old_array != nullptr) {
-        EntryType* new_array = RelocatedAddress(old_array, diff_);
-        dex_cache->SetField64<kVerifyNone>(array_offset, reinterpret_cast64<uint64_t>(new_array));
-        for (uint32_t i = 0; i != size; ++i) {
-          FixupDexCacheArrayEntry(new_array, i);
-        }
-      }
-    }
-
-    const uint32_t diff_;
+    ReferenceVisitor reference_visitor_;
   };
 
   template <PointerSize kPointerSize>
@@ -1773,7 +1787,9 @@
                                uint32_t diff) REQUIRES_SHARED(Locks::mutator_lock_) {
     PatchedObjectsMap patched_objects(spaces.front()->Begin(),
                                       spaces.back()->End() - spaces.front()->Begin());
-    PatchObjectVisitor<kPointerSize> patch_object_visitor(diff);
+    using PatchRelocateVisitor = PatchObjectVisitor<kPointerSize, RelocateVisitor>;
+    RelocateVisitor relocate_visitor(diff);
+    PatchRelocateVisitor patch_object_visitor(relocate_visitor);
 
     mirror::Class* dcheck_class_class = nullptr;  // Used only for a DCHECK().
     for (size_t s = 0u, size = spaces.size(); s != size; ++s) {
@@ -1787,13 +1803,14 @@
 
       // Patch fields and methods.
       const ImageHeader& image_header = space->GetImageHeader();
-      PatchArtFieldVisitor field_visitor(diff);
+      PatchArtFieldVisitor<PatchRelocateVisitor> field_visitor(patch_object_visitor);
       image_header.VisitPackedArtFields(&field_visitor, space->Begin());
-      PatchArtMethodVisitor<kPointerSize> method_visitor(diff);
+      PatchArtMethodVisitor<kPointerSize, PatchRelocateVisitor, PatchRelocateVisitor>
+          method_visitor(patch_object_visitor, patch_object_visitor);
       image_header.VisitPackedArtMethods(&method_visitor, space->Begin(), kPointerSize);
-      auto method_table_visitor = [diff](ArtMethod* method) {
+      auto method_table_visitor = [&](ArtMethod* method) {
         DCHECK(method != nullptr);
-        return RelocatedAddress(method, diff);
+        return relocate_visitor(method);
       };
       image_header.VisitPackedImTables(method_table_visitor, space->Begin(), kPointerSize);
       image_header.VisitPackedImtConflictTables(method_table_visitor, space->Begin(), kPointerSize);
@@ -1804,7 +1821,7 @@
         size_t read_count;
         InternTable::UnorderedSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
         for (GcRoot<mirror::String>& slot : temp_set) {
-          PatchGcRoot</*kMayBeNull=*/ false>(diff, &slot);
+          patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(&slot);
         }
       }
 
@@ -1815,7 +1832,7 @@
         size_t read_count;
         ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
         DCHECK(!temp_set.empty());
-        ClassTableVisitor class_table_visitor(diff);
+        ClassTableVisitor class_table_visitor(relocate_visitor);
         for (ClassTable::TableSlot& slot : temp_set) {
           slot.VisitRoot(class_table_visitor);
           mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
@@ -1844,7 +1861,7 @@
                   iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
               if (unpatched_ifarray != nullptr) {
                 // The iftable has not been patched, so we need to explicitly adjust the pointer.
-                mirror::PointerArray* ifarray = RelocatedAddress(unpatched_ifarray, diff);
+                mirror::PointerArray* ifarray = relocate_visitor(unpatched_ifarray);
                 if (!patched_objects.IsVisited(ifarray)) {
                   patched_objects.MarkVisited(ifarray);
                   patch_object_visitor.VisitPointerArray(ifarray);
@@ -1900,7 +1917,7 @@
             ObjPtr<mirror::Executable> as_executable =
                 ObjPtr<mirror::Executable>::DownCast(MakeObjPtr(object));
             ArtMethod* unpatched_method = as_executable->GetArtMethod<kVerifyNone>();
-            ArtMethod* patched_method = RelocatedAddress(unpatched_method, diff);
+            ArtMethod* patched_method = relocate_visitor(unpatched_method);
             as_executable->SetArtMethod</*kTransactionActive=*/ false,
                                         /*kCheckTransaction=*/ true,
                                         kVerifyNone>(patched_method);
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 05e7fa5..dbc12d1 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -188,8 +188,14 @@
   friend class Space;
 
  private:
-  class Loader;
   class BootImageLoader;
+  class Loader;
+  template <typename PatchObjectVisitor>
+  class PatchArtFieldVisitor;
+  template <PointerSize kPointerSize, typename PatchObjectVisitor, typename PatchCodeVisitor>
+  class PatchArtMethodVisitor;
+  template <PointerSize kPointerSize, typename ReferenceVisitor>
+  class PatchObjectVisitor;
 
   DISALLOW_COPY_AND_ASSIGN(ImageSpace);
 };
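Reviewer note: the nested visitors are only forward-declared here and
defined entirely in image_space.cc, which keeps the templates out of the
header. Minimal shape of that pattern (hypothetical names):

    // In the header: declare the member template only.
    class Outer {
     private:
      template <typename V> class Helper;  // defined in the .cc
    };

    // In the .cc: the full definition, visible to that translation unit only.
    template <typename V>
    class Outer::Helper final {
     public:
      explicit Helper(const V& v) : v_(v) {}
     private:
      V v_;
    };
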
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 6cd719a..853c0ca 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -21,12 +21,13 @@
 #include "base/array_ref.h"
 #include "base/mutex.h"
 #include "base/time_utils.h"
+#include "dex/dex_file.h"
 #include "thread-current-inl.h"
 #include "thread.h"
 
 #include <atomic>
-#include <unordered_map>
 #include <cstddef>
+#include <map>
 
 //
 // Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
@@ -126,14 +127,14 @@
   void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;
 
  // The root data structure describing all JITed methods.
-  JITDescriptor __jit_debug_descriptor {};
+  JITDescriptor __jit_debug_descriptor GUARDED_BY(*Locks::native_debug_interface_lock_) {};
 
   // The following globals mirror the ones above, but are used to register dex files.
   void __attribute__((noinline)) __dex_debug_register_code() {
     __asm__("");
   }
   void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
-  JITDescriptor __dex_debug_descriptor {};
+  JITDescriptor __dex_debug_descriptor GUARDED_BY(*Locks::native_debug_interface_lock_) {};
 }
 
 // Mark the descriptor as "locked", so native tools know the data is being modified.
@@ -155,8 +156,17 @@
 static JITCodeEntry* CreateJITCodeEntryInternal(
     JITDescriptor& descriptor,
     void (*register_code_ptr)(),
-    const ArrayRef<const uint8_t>& symfile)
+    ArrayRef<const uint8_t> symfile,
+    bool copy_symfile)
     REQUIRES(Locks::native_debug_interface_lock_) {
+  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
+  if (copy_symfile) {
+    uint8_t* copy = new uint8_t[symfile.size()];
+    CHECK(copy != nullptr);
+    memcpy(copy, symfile.data(), symfile.size());
+    symfile = ArrayRef<const uint8_t>(copy, symfile.size());
+  }
+
   // Ensure the timestamp is monotonically increasing even in the presence of a
   // low-granularity system timer. This ensures each entry has a unique timestamp.
   uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());
@@ -188,9 +198,11 @@
 static void DeleteJITCodeEntryInternal(
     JITDescriptor& descriptor,
     void (*register_code_ptr)(),
-    JITCodeEntry* entry)
+    JITCodeEntry* entry,
+    bool free_symfile)
     REQUIRES(Locks::native_debug_interface_lock_) {
   CHECK(entry != nullptr);
+  const uint8_t* symfile = entry->symfile_addr_;
 
   // Ensure the timestamp is monotonically increasing even in the presence of a
   // low-granularity system timer. This ensures each entry has a unique timestamp.
@@ -221,83 +233,88 @@
   memset(entry, 0, sizeof(*entry));
 
   delete entry;
+  if (free_symfile) {
+    delete[] symfile;
+  }
 }
 
-static std::unordered_map<const void*, JITCodeEntry*> __dex_debug_entries
-    GUARDED_BY(Locks::native_debug_interface_lock_);
+static std::map<const DexFile*, JITCodeEntry*> g_dex_debug_entries
+    GUARDED_BY(*Locks::native_debug_interface_lock_);
 
-void AddNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
-  MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
-  DCHECK(dexfile.data() != nullptr);
+void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
+  MutexLock mu(self, *Locks::native_debug_interface_lock_);
+  DCHECK(dexfile != nullptr);
   // This is just a defensive check. The class linker should not register the dex file twice.
-  if (__dex_debug_entries.count(dexfile.data()) == 0) {
+  if (g_dex_debug_entries.count(dexfile) == 0) {
+    const ArrayRef<const uint8_t> symfile(dexfile->Begin(), dexfile->Size());
     JITCodeEntry* entry = CreateJITCodeEntryInternal(__dex_debug_descriptor,
                                                      __dex_debug_register_code_ptr,
-                                                     dexfile);
-    __dex_debug_entries.emplace(dexfile.data(), entry);
+                                                     symfile,
+                                                     /*copy_symfile=*/ false);
+    g_dex_debug_entries.emplace(dexfile, entry);
   }
 }
 
-void RemoveNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
-  MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
-  auto it = __dex_debug_entries.find(dexfile.data());
+void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
+  MutexLock mu(self, *Locks::native_debug_interface_lock_);
+  auto it = g_dex_debug_entries.find(dexfile);
   // We register dex files in the class linker and free them in DexFile_closeDexFile, but
   // there might be cases where we load the dex file without using it in the class linker.
-  if (it != __dex_debug_entries.end()) {
+  if (it != g_dex_debug_entries.end()) {
     DeleteJITCodeEntryInternal(__dex_debug_descriptor,
                                __dex_debug_register_code_ptr,
-                               it->second);
-    __dex_debug_entries.erase(it);
+                               /*entry=*/ it->second,
+                               /*free_symfile=*/ false);
+    g_dex_debug_entries.erase(it);
   }
 }
 
-static size_t __jit_debug_mem_usage
-    GUARDED_BY(Locks::native_debug_interface_lock_) = 0;
-
 // Mapping from code pointer to entry. Used to manage the lifetime of the entries.
-static std::unordered_map<const void*, JITCodeEntry*> __jit_debug_entries
-    GUARDED_BY(Locks::native_debug_interface_lock_);
+static std::map<const void*, JITCodeEntry*> g_jit_debug_entries
+    GUARDED_BY(*Locks::native_debug_interface_lock_);
 
-void AddNativeDebugInfoForJit(const void* handle, const std::vector<uint8_t>& symfile) {
+void AddNativeDebugInfoForJit(Thread* self,
+                              const void* code_ptr,
+                              const std::vector<uint8_t>& symfile) {
+  MutexLock mu(self, *Locks::native_debug_interface_lock_);
   DCHECK_NE(symfile.size(), 0u);
 
-  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
-  uint8_t* copy = new uint8_t[symfile.size()];
-  CHECK(copy != nullptr);
-  memcpy(copy, symfile.data(), symfile.size());
-
   JITCodeEntry* entry = CreateJITCodeEntryInternal(
       __jit_debug_descriptor,
       __jit_debug_register_code_ptr,
-      ArrayRef<const uint8_t>(copy, symfile.size()));
-  __jit_debug_mem_usage += sizeof(JITCodeEntry) + entry->symfile_size_;
+      ArrayRef<const uint8_t>(symfile),
+      /*copy_symfile=*/ true);
 
-  // We don't provide handle for type debug info, which means we cannot free it later.
+  // We don't provide code_ptr for type debug info, which means we cannot free it later.
   // (this only happens when the --generate-debug-info flag is enabled for the purpose
   // of being debugged with gdb; it does not happen for debuggable apps by default).
-  bool ok = handle == nullptr || __jit_debug_entries.emplace(handle, entry).second;
-  DCHECK(ok) << "Native debug entry already exists for " << std::hex << handle;
-}
-
-void RemoveNativeDebugInfoForJit(const void* handle) {
-  auto it = __jit_debug_entries.find(handle);
-  // We generate JIT native debug info only if the right runtime flags are enabled,
-  // but we try to remove it unconditionally whenever code is freed from JIT cache.
-  if (it != __jit_debug_entries.end()) {
-    JITCodeEntry* entry = it->second;
-    const uint8_t* symfile_addr = entry->symfile_addr_;
-    uint64_t symfile_size = entry->symfile_size_;
-    DeleteJITCodeEntryInternal(__jit_debug_descriptor,
-                               __jit_debug_register_code_ptr,
-                               entry);
-    __jit_debug_entries.erase(it);
-    __jit_debug_mem_usage -= sizeof(JITCodeEntry) + symfile_size;
-    delete[] symfile_addr;
+  if (code_ptr != nullptr) {
+    bool ok = g_jit_debug_entries.emplace(code_ptr, entry).second;
+    DCHECK(ok) << "Native debug entry already exists for " << std::hex << code_ptr;
   }
 }
 
-size_t GetJitNativeDebugInfoMemUsage() {
-  return __jit_debug_mem_usage + __jit_debug_entries.size() * 2 * sizeof(void*);
+void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr) {
+  MutexLock mu(self, *Locks::native_debug_interface_lock_);
+  auto it = g_jit_debug_entries.find(code_ptr);
+  // We generate JIT native debug info only if the right runtime flags are enabled,
+  // but we try to remove it unconditionally whenever code is freed from the JIT cache.
+  if (it != g_jit_debug_entries.end()) {
+    DeleteJITCodeEntryInternal(__jit_debug_descriptor,
+                               __jit_debug_register_code_ptr,
+                               it->second,
+                               /*free_symfile=*/ true);
+    g_jit_debug_entries.erase(it);
+  }
+}
+
+size_t GetJitMiniDebugInfoMemUsage() {
+  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
+  size_t size = 0;
+  for (auto entry : g_jit_debug_entries) {
+    size += sizeof(JITCodeEntry) + entry.second->symfile_size_ + /*map entry*/ 4 * sizeof(void*);
+  }
+  return size;
 }
 
 }  // namespace art
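Reviewer note: the incrementally maintained __jit_debug_mem_usage counter is
replaced by an on-demand walk over the map, trading O(n) queries for having
no counter to keep in sync on the insert/erase paths. A self-contained
sketch of the same computation (hypothetical types; the real code sums
JITCodeEntry sizes under Locks::native_debug_interface_lock_):

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <vector>

    struct Entry { std::vector<uint8_t> symfile; };
    static std::map<const void*, Entry*> g_entries;

    size_t ComputeMemUsage() {
      size_t size = 0;
      for (const auto& kv : g_entries) {
        // The 4 * sizeof(void*) term approximates per-node std::map overhead.
        size += sizeof(Entry) + kv.second->symfile.size() + 4 * sizeof(void*);
      }
      return size;
    }
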
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index fb5e81b..4b0d011 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -18,35 +18,37 @@
 #define ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
 
 #include <inttypes.h>
-#include <memory>
 #include <vector>
 
-#include "base/array_ref.h"
 #include "base/locks.h"
 
 namespace art {
 
+class DexFile;
+class Thread;
+
 // Notify native tools (e.g. libunwind) that a DEX file has been opened.
-// It takes the lock itself. The parameter must point to dex data (not the DexFile* object).
-void AddNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile);
+void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile)
+    REQUIRES(!Locks::native_debug_interface_lock_);
 
 // Notify native tools (e.g. libunwind) that a DEX file has been closed.
-// It takes the lock itself. The parameter must point to dex data (not the DexFile* object).
-void RemoveNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile);
+void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile)
+    REQUIRES(!Locks::native_debug_interface_lock_);
 
-// Notify native tools about new JITed code by passing in-memory ELF.
-// The handle is the object that is being described (needed to be able to remove the entry).
+// Notify native tools (e.g. libunwind) that the JIT has compiled a new method.
 // The method will make a copy of the passed ELF file (to shrink it to the minimum size).
-void AddNativeDebugInfoForJit(const void* handle, const std::vector<uint8_t>& symfile)
-    REQUIRES(Locks::native_debug_interface_lock_);
+void AddNativeDebugInfoForJit(Thread* self,
+                              const void* code_ptr,
+                              const std::vector<uint8_t>& symfile)
+    REQUIRES(!Locks::native_debug_interface_lock_);
 
-// Notify native debugger that JITed code has been removed and free the debug info.
-void RemoveNativeDebugInfoForJit(const void* handle)
-    REQUIRES(Locks::native_debug_interface_lock_);
+// Notify native tools (e.g. libunwind) that JIT code has been garbage collected.
+void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr)
+    REQUIRES(!Locks::native_debug_interface_lock_);
 
-// Returns approximate memory used by all JITCodeEntries.
-size_t GetJitNativeDebugInfoMemUsage()
-    REQUIRES(Locks::native_debug_interface_lock_);
+// Returns approximate memory used by debug info for JIT code.
+size_t GetJitMiniDebugInfoMemUsage()
+    REQUIRES(!Locks::native_debug_interface_lock_);
 
 }  // namespace art
 
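Reviewer note: the annotations flip from REQUIRES(lock) to REQUIRES(!lock)
because these functions now take the lock themselves; with -Wthread-safety
(and -Wthread-safety-negative for the negated form), Clang rejects callers
that already hold it. A minimal illustration using Clang's raw attributes
instead of ART's macros:

    // Compile with: clang++ -c -Wthread-safety -Wthread-safety-negative example.cc
    struct __attribute__((capability("mutex"))) Mutex {
      void Lock() __attribute__((acquire_capability()));
      void Unlock() __attribute__((release_capability()));
    };

    Mutex g_lock;

    // REQUIRES(!g_lock): the caller must NOT hold the lock; we acquire it here.
    void AddInfo() __attribute__((requires_capability(!g_lock)));

    void AddInfo() {
      g_lock.Lock();
      // ... mutate state guarded by g_lock ...
      g_lock.Unlock();
    }
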
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 749758a..d976fec 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -765,8 +765,7 @@
   uintptr_t allocation = FromCodeToAllocation(code_ptr);
   // Notify the native debugger that we are about to remove the code.
   // It does nothing if we are not using a native debugger.
-  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
-  RemoveNativeDebugInfoForJit(code_ptr);
+  RemoveNativeDebugInfoForJit(Thread::Current(), code_ptr);
   if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
     FreeData(GetRootTable(code_ptr));
   }  // else this is a JNI stub without any data.
@@ -2101,10 +2100,9 @@
 
 void JitCodeCache::Dump(std::ostream& os) {
   MutexLock mu(Thread::Current(), lock_);
-  MutexLock mu2(Thread::Current(), *Locks::native_debug_interface_lock_);
   os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
      << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
-     << "Current JIT mini-debug-info size: " << PrettySize(GetJitNativeDebugInfoMemUsage()) << "\n"
+     << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
      << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
      << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
      << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index 085dcab..7c25529 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -1947,7 +1947,7 @@
 
   // Determine offset and limit for accesses.
   int32_t byte_buffer_offset;
-  if (native_address == 0l) {
+  if (native_address == 0L) {
     // Accessing a heap allocated byte buffer.
     byte_buffer_offset = byte_buffer->GetField32(
         GetMemberOffset(WellKnownClasses::java_nio_ByteBuffer_offset));
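Reviewer note: cert-dcl16-c flags lowercase literal suffixes because 'l' is
easily misread as the digit '1'; 0L is the compliant spelling. For example:

    long a = 0l;  // Flagged by cert-dcl16-c: 'l' reads like '1'.
    long b = 0L;  // Compliant; this is the change applied above.
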
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 203d200..1da91b0 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -337,8 +337,7 @@
     int32_t i = kDexFileIndexStart;  // Oat file is at index 0.
     for (const DexFile* dex_file : dex_files) {
       if (dex_file != nullptr) {
-        RemoveNativeDebugInfoForDex(soa.Self(), ArrayRef<const uint8_t>(dex_file->Begin(),
-                                                                        dex_file->Size()));
+        RemoveNativeDebugInfoForDex(soa.Self(), dex_file);
         // Only delete the dex file if the dex cache is not found, to prevent runtime crashes
         // when there are calls to DexFile.close while the ART DexFile is still in use.
         if (!class_linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index cbc3ff8..08ee690 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -100,7 +100,7 @@
 
 template<typename T>
 static void lookup_next_symbol(T* output, T wrapper, const char* name) {
-  void* sym = dlsym(RTLD_NEXT, name);
+  void* sym = dlsym(RTLD_NEXT, name);  // NOLINT glibc triggers cert-dcl16-c with RTLD_NEXT.
   if (sym == nullptr) {
     sym = dlsym(RTLD_DEFAULT, name);
     if (sym == wrapper || sym == sigaction) {