Revert "Write conflict tables in image"

Reverting because the original change appears to cause some strange (not yet diagnosed) issues on angler devices.

This reverts commit cda9386add68d94697449c6cb08b356747e55c21.

(cherry picked from commit 8e2478d23e89a7022c93ddc608dcbba7b29b91e6)

Change-Id: Iffd25c5fb732ff72b58c787c107dc33c56f8c8d4
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e84faff..be82956 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2440,7 +2440,7 @@
   context.ForAll(0, dex_file.NumClassDefs(), &visitor, init_thread_count);
 }
 
-class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor {
+class InitializeArrayClassVisitor : public ClassVisitor {
  public:
   virtual bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     if (klass->IsArrayClass()) {
@@ -2450,10 +2450,6 @@
                                                               true,
                                                               true);
     }
-    // Create the conflict tables.
-    if (klass->ShouldHaveEmbeddedImtAndVTable()) {
-      Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass);
-    }
     return true;
   }
 };
@@ -2466,15 +2462,13 @@
     CHECK(dex_file != nullptr);
     InitializeClasses(class_loader, *dex_file, dex_files, timings);
   }
-  if (image_classes_ != nullptr) {
+  {
     // Make sure that we call EnsureIntiailized on all the array classes to call
     // SetVerificationAttempted so that the access flags are set. If we do not do this they get
     // changed at runtime resulting in more dirty image pages.
-    // Also create conflict tables.
-    // Only useful if we are compiling an image (image_classes_ is not null).
     ScopedObjectAccess soa(Thread::Current());
-    InitializeArrayClassesAndCreateConflictTablesVisitor visitor;
-    Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&visitor);
+    InitializeArrayClassVisitor visitor;
+    Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
   }
   if (IsBootImage()) {
     // Prune garbage objects created during aborted transactions.
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 00ff522..8bb462c 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -653,7 +653,8 @@
   for (ImageInfo& image_info : image_infos_) {
     ImageSection unused_sections[ImageHeader::kSectionCount];
     const size_t length = RoundUp(
-        image_info.CreateImageSections(unused_sections), kPageSize);
+        image_info.CreateImageSections(target_ptr_size_, unused_sections),
+        kPageSize);
 
     std::string error_msg;
     image_info.image_.reset(MemMap::MapAnonymous("image writer image",
@@ -1213,20 +1214,6 @@
           AssignMethodOffset(&m, type, oat_index);
         }
         (any_dirty ? dirty_methods_ : clean_methods_) += num_methods;
-
-        // Assign offsets for all runtime methods in the IMT since these may hold conflict tables
-        // live.
-        if (as_klass->ShouldHaveEmbeddedImtAndVTable()) {
-          for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
-            ArtMethod* imt_method = as_klass->GetEmbeddedImTableEntry(i, target_ptr_size_);
-            DCHECK(imt_method != nullptr);
-            if (imt_method->IsRuntimeMethod() &&
-                !IsInBootImage(imt_method) &&
-                !NativeRelocationAssigned(imt_method)) {
-              AssignMethodOffset(imt_method, kNativeObjectRelocationTypeRuntimeMethod, oat_index);
-            }
-          }
-        }
       }
     } else if (h_obj->IsObjectArray()) {
       // Walk elements of an object array.
@@ -1250,37 +1237,13 @@
   }
 }
 
-bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
-  return native_object_relocations_.find(ptr) != native_object_relocations_.end();
-}
-
-void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
-  // No offset, or already assigned.
-  if (table == nullptr || NativeRelocationAssigned(table)) {
-    return;
-  }
-  CHECK(!IsInBootImage(table));
-  // If the method is a conflict method we also want to assign the conflict table offset.
-  ImageInfo& image_info = GetImageInfo(oat_index);
-  const size_t size = table->ComputeSize(target_ptr_size_);
-  native_object_relocations_.emplace(
-      table,
-      NativeObjectRelocation {
-          oat_index,
-          image_info.bin_slot_sizes_[kBinIMTConflictTable],
-          kNativeObjectRelocationTypeIMTConflictTable});
-  image_info.bin_slot_sizes_[kBinIMTConflictTable] += size;
-}
-
 void ImageWriter::AssignMethodOffset(ArtMethod* method,
                                      NativeObjectRelocationType type,
                                      size_t oat_index) {
   DCHECK(!IsInBootImage(method));
-  CHECK(!NativeRelocationAssigned(method)) << "Method " << method << " already assigned "
+  auto it = native_object_relocations_.find(method);
+  CHECK(it == native_object_relocations_.end()) << "Method " << method << " already assigned "
       << PrettyMethod(method);
-  if (method->IsRuntimeMethod()) {
-    TryAssignConflictTableOffset(method->GetImtConflictTable(target_ptr_size_), oat_index);
-  }
   ImageInfo& image_info = GetImageInfo(oat_index);
   size_t& offset = image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(type)];
   native_object_relocations_.emplace(method, NativeObjectRelocation { oat_index, offset, type });
@@ -1329,7 +1292,8 @@
   // know where image_roots is going to end up
   image_objects_offset_begin_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);  // 64-bit-alignment
 
-  const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
+  // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
+  heap->VisitObjects(WalkFieldsCallback, this);
   // Write the image runtime methods.
   image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod();
   image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod();
@@ -1339,19 +1303,31 @@
       runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
   image_methods_[ImageHeader::kRefsAndArgsSaveMethod] =
       runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
-  // Visit image methods first to have the main runtime methods in the first image.
+
+  // Add room for fake length prefixed array for holding the image methods.
+  const auto image_method_type = kNativeObjectRelocationTypeArtMethodArrayClean;
+  auto it = native_object_relocations_.find(&image_method_array_);
+  CHECK(it == native_object_relocations_.end());
+  ImageInfo& default_image_info = GetImageInfo(GetDefaultOatIndex());
+  size_t& offset =
+      default_image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
+  if (!compile_app_image_) {
+    native_object_relocations_.emplace(&image_method_array_,
+        NativeObjectRelocation { GetDefaultOatIndex(), offset, image_method_type });
+  }
+  size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
+  const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize(
+      0, ArtMethod::Size(target_ptr_size_), method_alignment);
+  CHECK_ALIGNED_PARAM(array_size, method_alignment);
+  offset += array_size;
   for (auto* m : image_methods_) {
     CHECK(m != nullptr);
     CHECK(m->IsRuntimeMethod());
     DCHECK_EQ(compile_app_image_, IsInBootImage(m)) << "Trampolines should be in boot image";
     if (!IsInBootImage(m)) {
-      AssignMethodOffset(m, kNativeObjectRelocationTypeRuntimeMethod, GetDefaultOatIndex());
+      AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean, GetDefaultOatIndex());
     }
   }
-
-  // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
-  heap->VisitObjects(WalkFieldsCallback, this);
-
   // Calculate size of the dex cache arrays slot and prepare offsets.
   PrepareDexCacheArraySlots();
 
@@ -1370,22 +1346,15 @@
   for (ImageInfo& image_info : image_infos_) {
     size_t bin_offset = image_objects_offset_begin_;
     for (size_t i = 0; i != kBinSize; ++i) {
-      switch (i) {
-        case kBinArtMethodClean:
-        case kBinArtMethodDirty: {
-          bin_offset = RoundUp(bin_offset, method_alignment);
-          break;
-        }
-        case kBinIMTConflictTable: {
-          bin_offset = RoundUp(bin_offset, target_ptr_size_);
-          break;
-        }
-        default: {
-          // Normal alignment.
-        }
-      }
       image_info.bin_slot_offsets_[i] = bin_offset;
       bin_offset += image_info.bin_slot_sizes_[i];
+      if (i == kBinArtField) {
+        static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields.");
+        static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4.");
+        DCHECK_ALIGNED(bin_offset, 4u);
+        DCHECK(method_alignment == 4u || method_alignment == 8u);
+        bin_offset = RoundUp(bin_offset, method_alignment);
+      }
     }
     // NOTE: There may be additional padding between the bin slots and the intern table.
     DCHECK_EQ(image_info.image_end_,
@@ -1398,7 +1367,9 @@
     image_info.image_begin_ = global_image_begin_ + image_offset;
     image_info.image_offset_ = image_offset;
     ImageSection unused_sections[ImageHeader::kSectionCount];
-    image_info.image_size_ = RoundUp(image_info.CreateImageSections(unused_sections), kPageSize);
+    image_info.image_size_ = RoundUp(
+        image_info.CreateImageSections(target_ptr_size_, unused_sections),
+        kPageSize);
     // There should be no gaps until the next image.
     image_offset += image_info.image_size_;
   }
@@ -1425,52 +1396,42 @@
   // Note that image_info.image_end_ is left at end of used mirror object section.
 }
 
-size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const {
+size_t ImageWriter::ImageInfo::CreateImageSections(size_t target_ptr_size,
+                                                   ImageSection* out_sections) const {
   DCHECK(out_sections != nullptr);
-
-  // Do not round up any sections here that are represented by the bins since it will break
-  // offsets.
-
   // Objects section
-  ImageSection* objects_section = &out_sections[ImageHeader::kSectionObjects];
+  auto* objects_section = &out_sections[ImageHeader::kSectionObjects];
   *objects_section = ImageSection(0u, image_end_);
-
+  size_t cur_pos = objects_section->End();
   // Add field section.
-  ImageSection* field_section = &out_sections[ImageHeader::kSectionArtFields];
-  *field_section = ImageSection(bin_slot_offsets_[kBinArtField], bin_slot_sizes_[kBinArtField]);
+  auto* field_section = &out_sections[ImageHeader::kSectionArtFields];
+  *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]);
   CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset());
-
+  cur_pos = field_section->End();
+  // Round up to the alignment the required by the method section.
+  cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size));
   // Add method section.
-  ImageSection* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
-  *methods_section = ImageSection(
-      bin_slot_offsets_[kBinArtMethodClean],
-      bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]);
-
-  // Conflict tables section.
-  ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
-  *imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable],
-                                              bin_slot_sizes_[kBinIMTConflictTable]);
-
-  // Runtime methods section.
-  ImageSection* runtime_methods_section = &out_sections[ImageHeader::kSectionRuntimeMethods];
-  *runtime_methods_section = ImageSection(bin_slot_offsets_[kBinRuntimeMethod],
-                                          bin_slot_sizes_[kBinRuntimeMethod]);
-
+  auto* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
+  *methods_section = ImageSection(cur_pos,
+                                  bin_slot_sizes_[kBinArtMethodClean] +
+                                      bin_slot_sizes_[kBinArtMethodDirty]);
+  CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
+  cur_pos = methods_section->End();
   // Add dex cache arrays section.
-  ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
-  *dex_cache_arrays_section = ImageSection(bin_slot_offsets_[kBinDexCacheArray],
-                                           bin_slot_sizes_[kBinDexCacheArray]);
-
+  auto* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
+  *dex_cache_arrays_section = ImageSection(cur_pos, bin_slot_sizes_[kBinDexCacheArray]);
+  CHECK_EQ(bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset());
+  cur_pos = dex_cache_arrays_section->End();
   // Round up to the alignment the string table expects. See HashSet::WriteToMemory.
-  size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t));
+  cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
   // Calculate the size of the interned strings.
-  ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
+  auto* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
   *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
   cur_pos = interned_strings_section->End();
   // Round up to the alignment the class table expects. See HashSet::WriteToMemory.
   cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
   // Calculate the size of the class table section.
-  ImageSection* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
+  auto* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
   *class_table_section = ImageSection(cur_pos, class_table_bytes_);
   cur_pos = class_table_section->End();
   // Image end goes right before the start of the image bitmap.
@@ -1485,7 +1446,7 @@
 
   // Create the image sections.
   ImageSection sections[ImageHeader::kSectionCount];
-  const size_t image_end = image_info.CreateImageSections(sections);
+  const size_t image_end = image_info.CreateImageSections(target_ptr_size_, sections);
 
   // Finally bitmap section.
   const size_t bitmap_bytes = image_info.image_bitmap_->Size();
@@ -1570,20 +1531,8 @@
   ImageWriter* const image_writer_;
 };
 
-void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
-  const size_t count = orig->NumEntries(target_ptr_size_);
-  for (size_t i = 0; i < count; ++i) {
-    ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_);
-    ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_);
-    copy->SetInterfaceMethod(i, target_ptr_size_, NativeLocationInImage(interface_method));
-    copy->SetImplementationMethod(i,
-                                  target_ptr_size_,
-                                  NativeLocationInImage(implementation_method));
-  }
-}
-
 void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
-  const ImageInfo& image_info = GetImageInfo(oat_index);
+  ImageInfo& image_info = GetImageInfo(oat_index);
   // Copy ArtFields and methods to their locations and update the array for convenience.
   for (auto& pair : native_object_relocations_) {
     NativeObjectRelocation& relocation = pair.second;
@@ -1601,7 +1550,6 @@
             GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass()));
         break;
       }
-      case kNativeObjectRelocationTypeRuntimeMethod:
       case kNativeObjectRelocationTypeArtMethodClean:
       case kNativeObjectRelocationTypeArtMethodDirty: {
         CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first),
@@ -1627,22 +1575,26 @@
       case kNativeObjectRelocationTypeDexCacheArray:
         // Nothing to copy here, everything is done in FixupDexCache().
         break;
-      case kNativeObjectRelocationTypeIMTConflictTable: {
-        auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
-        CopyAndFixupImtConflictTable(
-            orig_table,
-            new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_));
-        break;
-      }
     }
   }
   // Fixup the image method roots.
   auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
+  const ImageSection& methods_section = image_header->GetMethodsSection();
   for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
     ArtMethod* method = image_methods_[i];
     CHECK(method != nullptr);
+    // Only place runtime methods in the image of the default oat file.
+    if (method->IsRuntimeMethod() && oat_index != GetDefaultOatIndex()) {
+      continue;
+    }
     if (!IsInBootImage(method)) {
-      method = NativeLocationInImage(method);
+      auto it = native_object_relocations_.find(method);
+      CHECK(it != native_object_relocations_.end()) << "No forwarding for " << PrettyMethod(method);
+      NativeObjectRelocation& relocation = it->second;
+      CHECK(methods_section.Contains(relocation.offset)) << relocation.offset << " not in "
+          << methods_section;
+      CHECK(relocation.IsArtMethodRelocation()) << relocation.type;
+      method = reinterpret_cast<ArtMethod*>(global_image_begin_ + it->second.offset);
     }
     image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), method);
   }
@@ -2105,28 +2057,24 @@
 
   // The resolution method has a special trampoline to call.
   Runtime* runtime = Runtime::Current();
-  if (orig->IsRuntimeMethod()) {
-    ImtConflictTable* orig_table = orig->GetImtConflictTable(target_ptr_size_);
-    if (orig_table != nullptr) {
-      // Special IMT conflict method, normal IMT conflict method or unimplemented IMT method.
-      copy->SetEntryPointFromQuickCompiledCodePtrSize(
-          GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_);
-      copy->SetImtConflictTable(NativeLocationInImage(orig_table), target_ptr_size_);
-    } else if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
-      copy->SetEntryPointFromQuickCompiledCodePtrSize(
-          GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_);
-    } else {
-      bool found_one = false;
-      for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) {
-        auto idx = static_cast<Runtime::CalleeSaveType>(i);
-        if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
-          found_one = true;
-          break;
-        }
+  if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
+    copy->SetEntryPointFromQuickCompiledCodePtrSize(
+        GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_);
+  } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
+                      orig == runtime->GetImtUnimplementedMethod())) {
+    copy->SetEntryPointFromQuickCompiledCodePtrSize(
+        GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_);
+  } else if (UNLIKELY(orig->IsRuntimeMethod())) {
+    bool found_one = false;
+    for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) {
+      auto idx = static_cast<Runtime::CalleeSaveType>(i);
+      if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
+        found_one = true;
+        break;
       }
-      CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig);
-      CHECK(copy->IsRuntimeMethod());
     }
+    CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig);
+    CHECK(copy->IsRuntimeMethod());
   } else {
     // We assume all methods have code. If they don't currently then we set them to the use the
     // resolution trampoline. Abstract methods never have code and so we need to make sure their
@@ -2193,10 +2141,6 @@
       return kBinArtMethodDirty;
     case kNativeObjectRelocationTypeDexCacheArray:
       return kBinDexCacheArray;
-    case kNativeObjectRelocationTypeRuntimeMethod:
-      return kBinRuntimeMethod;
-    case kNativeObjectRelocationTypeIMTConflictTable:
-      return kBinIMTConflictTable;
   }
   UNREACHABLE();
 }
@@ -2298,6 +2242,7 @@
       compile_app_image_(compile_app_image),
       target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
       image_infos_(oat_filenames.size()),
+      image_method_array_(ImageHeader::kImageMethodsCount),
       dirty_methods_(0u),
       clean_methods_(0u),
       image_storage_mode_(image_storage_mode),
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 51976c5..0cb6aea 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -169,10 +169,6 @@
     // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
     // initialized.
     kBinArtMethodDirty,
-    // Conflict tables (clean).
-    kBinIMTConflictTable,
-    // Runtime methods (always clean, do not have a length prefix array).
-    kBinRuntimeMethod,
     // Dex cache arrays have a special slot for PC-relative addressing. Since they are
     // huge, and as such their dirtiness is not important for the clean/dirty separation,
     // we arbitrarily keep them at the end of the native data.
@@ -190,8 +186,6 @@
     kNativeObjectRelocationTypeArtMethodArrayClean,
     kNativeObjectRelocationTypeArtMethodDirty,
     kNativeObjectRelocationTypeArtMethodArrayDirty,
-    kNativeObjectRelocationTypeRuntimeMethod,
-    kNativeObjectRelocationTypeIMTConflictTable,
     kNativeObjectRelocationTypeDexCacheArray,
   };
   friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
@@ -246,7 +240,7 @@
 
     // Create the image sections into the out sections variable, returns the size of the image
     // excluding the bitmap.
-    size_t CreateImageSections(ImageSection* out_sections) const;
+    size_t CreateImageSections(size_t target_ptr_size, ImageSection* out_sections) const;
 
     std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.
 
@@ -401,8 +395,6 @@
   void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
       SHARED_REQUIRES(Locks::mutator_lock_);
-  void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
-      SHARED_REQUIRES(Locks::mutator_lock_);
   void FixupClass(mirror::Class* orig, mirror::Class* copy)
       SHARED_REQUIRES(Locks::mutator_lock_);
   void FixupObject(mirror::Object* orig, mirror::Object* copy)
@@ -433,11 +425,6 @@
                           size_t oat_index)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
-  // relocation.
-  void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
   // Return true if klass is loaded by the boot class loader but not in the boot image.
   bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -494,9 +481,6 @@
   // remove duplicates in the multi image and app image case.
   mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Return true if there already exists a native allocation for an object.
-  bool NativeRelocationAssigned(void* ptr) const;
-
   const CompilerDriver& compiler_driver_;
 
   // Beginning target image address for the first image.
@@ -533,14 +517,16 @@
 
     bool IsArtMethodRelocation() const {
       return type == kNativeObjectRelocationTypeArtMethodClean ||
-          type == kNativeObjectRelocationTypeArtMethodDirty ||
-          type == kNativeObjectRelocationTypeRuntimeMethod;
+          type == kNativeObjectRelocationTypeArtMethodDirty;
     }
   };
   std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
 
   // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
   ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
+  // Fake length prefixed array for image methods. This array does not contain the actual
+  // ArtMethods. We only use it for the header and relocation addresses.
+  LengthPrefixedArray<ArtMethod> image_method_array_;
 
   // Counters for measurements, used for logging only.
   uint64_t dirty_methods_;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 6ecfc74..3c6a05d 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1416,10 +1416,11 @@
       indent_os << "\n";
       // TODO: Dump fields.
       // Dump methods after.
+      const auto& methods_section = image_header_.GetMethodsSection();
       DumpArtMethodVisitor visitor(this);
-      image_header_.VisitPackedArtMethods(&visitor,
-                                          image_space_.Begin(),
-                                          image_header_.GetPointerSize());
+      methods_section.VisitPackedArtMethods(&visitor,
+                                            image_space_.Begin(),
+                                            image_header_.GetPointerSize());
       // Dump the large objects separately.
       heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
       indent_os << "\n";
@@ -1778,7 +1779,6 @@
     DCHECK(method != nullptr);
     const void* quick_oat_code_begin = GetQuickOatCodeBegin(method);
     const void* quick_oat_code_end = GetQuickOatCodeEnd(method);
-    const size_t pointer_size = image_header_.GetPointerSize();
     OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
         reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader));
     if (method->IsNative()) {
@@ -1792,16 +1792,13 @@
           image_header_.GetPointerSize())) {
         indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code_begin);
       }
-    } else if (method->IsAbstract() || method->IsClassInitializer()) {
+    } else if (method->IsAbstract() ||
+               method->IsCalleeSaveMethod() ||
+               method->IsResolutionMethod() ||
+               (method == Runtime::Current()->GetImtConflictMethod()) ||
+               method->IsImtUnimplementedMethod() ||
+               method->IsClassInitializer()) {
       // Don't print information for these.
-    } else if (method->IsRuntimeMethod()) {
-      ImtConflictTable* table = method->GetImtConflictTable(image_header_.GetPointerSize());
-      if (table != nullptr) {
-        indent_os << "IMT conflict table " << table << " method: ";
-        for (size_t i = 0, count = table->NumEntries(pointer_size); i < count; ++i) {
-          indent_os << PrettyMethod(table->GetImplementationMethod(i, pointer_size)) << " ";
-        }
-      }
     } else {
       const DexFile::CodeItem* code_item = method->GetCodeItem();
       size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 0a7ffda..93e40af 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -472,7 +472,8 @@
 
 void PatchOat::PatchArtFields(const ImageHeader* image_header) {
   PatchOatArtFieldVisitor visitor(this);
-  image_header->VisitPackedArtFields(&visitor, heap_->Begin());
+  const auto& section = image_header->GetImageSection(ImageHeader::kSectionArtFields);
+  section.VisitPackedArtFields(&visitor, heap_->Begin());
 }
 
 class PatchOatArtMethodVisitor : public ArtMethodVisitor {
@@ -489,20 +490,10 @@
 };
 
 void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
+  const auto& section = image_header->GetMethodsSection();
   const size_t pointer_size = InstructionSetPointerSize(isa_);
   PatchOatArtMethodVisitor visitor(this);
-  image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
-}
-
-void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) {
-  const size_t pointer_size = InstructionSetPointerSize(isa_);
-  // We can safely walk target image since the conflict tables are independent.
-  image_header->VisitPackedImtConflictTables(
-      [this](ArtMethod* method) {
-        return RelocatedAddressOfPointer(method);
-      },
-      image_->Begin(),
-      pointer_size);
+  section.VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
 }
 
 class FixupRootVisitor : public RootVisitor {
@@ -636,7 +627,6 @@
 
   PatchArtFields(image_header);
   PatchArtMethods(image_header);
-  PatchImtConflictTables(image_header);
   PatchInternedStrings(image_header);
   PatchClassTable(image_header);
   // Patch dex file int/long arrays which point to ArtFields.
@@ -735,7 +725,6 @@
       RelocatedAddressOfPointer(object->GetDexCacheResolvedTypes(pointer_size)), pointer_size);
   copy->SetEntryPointFromQuickCompiledCodePtrSize(RelocatedAddressOfPointer(
       object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)), pointer_size);
-  // No special handling for IMT conflict table since all pointers are moved by the same offset.
   copy->SetEntryPointFromJniPtrSize(RelocatedAddressOfPointer(
       object->GetEntryPointFromJniPtrSize(pointer_size)), pointer_size);
 }
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 3ef837f..510ff1e 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -117,8 +117,6 @@
   bool PatchImage(bool primary_image) SHARED_REQUIRES(Locks::mutator_lock_);
   void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
   void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
-  void PatchImtConflictTables(const ImageHeader* image_header)
-      SHARED_REQUIRES(Locks::mutator_lock_);
   void PatchInternedStrings(const ImageHeader* image_header)
       SHARED_REQUIRES(Locks::mutator_lock_);
   void PatchClassTable(const ImageHeader* image_header)
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 3cdff55..75d9073 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -2010,14 +2010,14 @@
   // that will create it: the runtime stub expects to be called by compiled code.
   LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
   ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
-  ImtConflictTable* empty_conflict_table =
-      Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
+  static ImtConflictTable::Entry empty_entry = { nullptr, nullptr };
+  ImtConflictTable* empty_conflict_table = reinterpret_cast<ImtConflictTable*>(&empty_entry);
   void* data = linear_alloc->Alloc(
       self,
-      ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, sizeof(void*)));
+      ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table));
   ImtConflictTable* new_table = new (data) ImtConflictTable(
-      empty_conflict_table, inf_contains, contains_amethod, sizeof(void*));
-  conflict_method->SetImtConflictTable(new_table, sizeof(void*));
+      empty_conflict_table, inf_contains, contains_amethod);
+  conflict_method->SetImtConflictTable(new_table);
 
   size_t result =
       Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
diff --git a/runtime/art_method.h b/runtime/art_method.h
index d239b42..ae60447 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -41,7 +41,6 @@
 namespace mirror {
 class Array;
 class Class;
-class IfTable;
 class PointerArray;
 }  // namespace mirror
 
@@ -51,151 +50,97 @@
 // with the last entry being null to make an assembly implementation of a lookup
 // faster.
 class ImtConflictTable {
-  enum MethodIndex {
-    kMethodInterface,
-    kMethodImplementation,
-    kMethodCount,  // Number of elements in enum.
-  };
-
  public:
   // Build a new table copying `other` and adding the new entry formed of
   // the pair { `interface_method`, `implementation_method` }
   ImtConflictTable(ImtConflictTable* other,
                    ArtMethod* interface_method,
-                   ArtMethod* implementation_method,
-                   size_t pointer_size) {
-    const size_t count = other->NumEntries(pointer_size);
-    for (size_t i = 0; i < count; ++i) {
-      SetInterfaceMethod(i, pointer_size, other->GetInterfaceMethod(i, pointer_size));
-      SetImplementationMethod(i, pointer_size, other->GetImplementationMethod(i, pointer_size));
+                   ArtMethod* implementation_method) {
+    size_t index = 0;
+    while (other->entries_[index].interface_method != nullptr) {
+      entries_[index] = other->entries_[index];
+      index++;
     }
-    SetInterfaceMethod(count, pointer_size, interface_method);
-    SetImplementationMethod(count, pointer_size, implementation_method);
+    entries_[index].interface_method = interface_method;
+    entries_[index].implementation_method = implementation_method;
     // Add the null marker.
-    SetInterfaceMethod(count + 1, pointer_size, nullptr);
-    SetImplementationMethod(count + 1, pointer_size, nullptr);
+    entries_[index + 1].interface_method = nullptr;
+    entries_[index + 1].implementation_method = nullptr;
   }
 
   // num_entries excludes the header.
-  ImtConflictTable(size_t num_entries, size_t pointer_size) {
-    SetInterfaceMethod(num_entries, pointer_size, nullptr);
-    SetImplementationMethod(num_entries, pointer_size, nullptr);
+  explicit ImtConflictTable(size_t num_entries) {
+    entries_[num_entries].interface_method = nullptr;
+    entries_[num_entries].implementation_method = nullptr;
   }
 
   // Set an entry at an index.
-  void SetInterfaceMethod(size_t index, size_t pointer_size, ArtMethod* method) {
-    SetMethod(index * kMethodCount + kMethodInterface, pointer_size, method);
+  void SetInterfaceMethod(size_t index, ArtMethod* method) {
+    entries_[index].interface_method = method;
   }
 
-  void SetImplementationMethod(size_t index, size_t pointer_size, ArtMethod* method) {
-    SetMethod(index * kMethodCount + kMethodImplementation, pointer_size, method);
+  void SetImplementationMethod(size_t index, ArtMethod* method) {
+    entries_[index].implementation_method = method;
   }
 
-  ArtMethod* GetInterfaceMethod(size_t index, size_t pointer_size) const {
-    return GetMethod(index * kMethodCount + kMethodInterface, pointer_size);
+  ArtMethod* GetInterfaceMethod(size_t index) const {
+    return entries_[index].interface_method;
   }
 
-  ArtMethod* GetImplementationMethod(size_t index, size_t pointer_size) const {
-    return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
-  }
-
-  // Visit all of the entries.
-  // NO_THREAD_SAFETY_ANALYSIS for calling with held locks. Visitor is passed a pair of ArtMethod*
-  // and also returns one. The order is <interface, implementation>.
-  template<typename Visitor>
-  void Visit(const Visitor& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS {
-    uint32_t table_index = 0;
-    for (;;) {
-      ArtMethod* interface_method = GetInterfaceMethod(table_index, pointer_size);
-      if (interface_method == nullptr) {
-        break;
-      }
-      ArtMethod* implementation_method = GetImplementationMethod(table_index, pointer_size);
-      auto input = std::make_pair(interface_method, implementation_method);
-      std::pair<ArtMethod*, ArtMethod*> updated = visitor(input);
-      if (input.first != updated.first) {
-        SetInterfaceMethod(table_index, pointer_size, updated.first);
-      }
-      if (input.second != updated.second) {
-        SetImplementationMethod(table_index, pointer_size, updated.second);
-      }
-      ++table_index;
-    }
+  ArtMethod* GetImplementationMethod(size_t index) const {
+    return entries_[index].implementation_method;
   }
 
   // Lookup the implementation ArtMethod associated to `interface_method`. Return null
   // if not found.
-  ArtMethod* Lookup(ArtMethod* interface_method, size_t pointer_size) const {
+  ArtMethod* Lookup(ArtMethod* interface_method) const {
     uint32_t table_index = 0;
-    for (;;) {
-      ArtMethod* current_interface_method = GetInterfaceMethod(table_index, pointer_size);
-      if (current_interface_method == nullptr) {
-        break;
-      }
+    ArtMethod* current_interface_method;
+    while ((current_interface_method = entries_[table_index].interface_method) != nullptr) {
       if (current_interface_method == interface_method) {
-        return GetImplementationMethod(table_index, pointer_size);
+        return entries_[table_index].implementation_method;
       }
-      ++table_index;
+      table_index++;
     }
     return nullptr;
   }
 
   // Compute the number of entries in this table.
-  size_t NumEntries(size_t pointer_size) const {
+  size_t NumEntries() const {
     uint32_t table_index = 0;
-    while (GetInterfaceMethod(table_index, pointer_size) != nullptr) {
-      ++table_index;
+    while (entries_[table_index].interface_method != nullptr) {
+      table_index++;
     }
     return table_index;
   }
 
   // Compute the size in bytes taken by this table.
-  size_t ComputeSize(size_t pointer_size) const {
+  size_t ComputeSize() const {
     // Add the end marker.
-    return ComputeSize(NumEntries(pointer_size), pointer_size);
+    return ComputeSize(NumEntries());
   }
 
   // Compute the size in bytes needed for copying the given `table` and add
   // one more entry.
-  static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table, size_t pointer_size) {
-    return table->ComputeSize(pointer_size) + EntrySize(pointer_size);
+  static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table) {
+    return table->ComputeSize() + sizeof(Entry);
   }
 
   // Compute size with a fixed number of entries.
-  static size_t ComputeSize(size_t num_entries, size_t pointer_size) {
-    return (num_entries + 1) * EntrySize(pointer_size);  // Add one for null terminator.
+  static size_t ComputeSize(size_t num_entries) {
+    return (num_entries + 1) * sizeof(Entry);  // Add one for null terminator.
   }
 
-  static size_t EntrySize(size_t pointer_size) {
-    return pointer_size * static_cast<size_t>(kMethodCount);
-  }
+  struct Entry {
+    ArtMethod* interface_method;
+    ArtMethod* implementation_method;
+  };
 
  private:
-  ArtMethod* GetMethod(size_t index, size_t pointer_size) const {
-    if (pointer_size == 8) {
-      return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
-    } else {
-      DCHECK_EQ(pointer_size, 4u);
-      return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data32_[index]));
-    }
-  }
-
-  void SetMethod(size_t index, size_t pointer_size, ArtMethod* method) {
-    if (pointer_size == 8) {
-      data64_[index] = dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
-    } else {
-      DCHECK_EQ(pointer_size, 4u);
-      data32_[index] = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
-    }
-  }
-
   // Array of entries that the assembly stubs will iterate over. Note that this is
   // not fixed size, and we allocate data prior to calling the constructor
   // of ImtConflictTable.
-  union {
-    uint32_t data32_[0];
-    uint64_t data64_[0];
-  };
+  Entry entries_[0];
 
   DISALLOW_COPY_AND_ASSIGN(ImtConflictTable);
 };
@@ -437,6 +382,7 @@
 
   // Find the method that this method overrides.
   ArtMethod* FindOverriddenMethod(size_t pointer_size)
+      REQUIRES(Roles::uninterruptible_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Find the method index for this method within other_dexfile. If this method isn't present then
@@ -502,8 +448,8 @@
     return reinterpret_cast<ImtConflictTable*>(GetEntryPointFromJniPtrSize(pointer_size));
   }
 
-  ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, size_t pointer_size) {
-    SetEntryPointFromJniPtrSize(table, pointer_size);
+  ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table) {
+    SetEntryPointFromJniPtrSize(table, sizeof(void*));
   }
 
   ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 774c543..ddb254d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -687,9 +687,6 @@
     self->AssertNoPendingException();
   }
 
-  // Create conflict tables that depend on the class linker.
-  runtime->FixupConflictTables();
-
   FinishInit(self);
 
   VLOG(startup) << "ClassLinker::InitFromCompiler exiting";
@@ -776,13 +773,9 @@
     bool contains = false;
     for (gc::space::ImageSpace* space : spaces) {
       auto& header = space->GetImageHeader();
-      size_t offset = reinterpret_cast<uint8_t*>(m) - space->Begin();
-
-      const ImageSection& methods = header.GetMethodsSection();
-      contains = contains || methods.Contains(offset);
-
-      const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
-      contains = contains || runtime_methods.Contains(offset);
+      auto& methods = header.GetMethodsSection();
+      auto offset = reinterpret_cast<uint8_t*>(m) - space->Begin();
+      contains |= methods.Contains(offset);
     }
     CHECK(contains) << m << " not found";
   }
@@ -1445,14 +1438,20 @@
   if (*out_forward_dex_cache_array) {
     ScopedTrace timing("Fixup ArtMethod dex cache arrays");
     FixupArtMethodArrayVisitor visitor(header);
-    header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*));
+    header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
+        &visitor,
+        space->Begin(),
+        sizeof(void*));
     Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
   }
   if (kVerifyArtMethodDeclaringClasses) {
     ScopedTrace timing("Verify declaring classes");
     ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_);
     VerifyDeclaringClassVisitor visitor;
-    header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*));
+    header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
+        &visitor,
+        space->Begin(),
+        sizeof(void*));
   }
   return true;
 }
@@ -1730,8 +1738,9 @@
 
   // Set entry point to interpreter if in InterpretOnly mode.
   if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
+    const ImageSection& methods = header.GetMethodsSection();
     SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_);
-    header.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
+    methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
   }
 
   ClassTable* class_table = nullptr;
@@ -1800,7 +1809,10 @@
     // This verification needs to happen after the classes have been added to the class loader.
     // Since it ensures classes are in the class table.
     VerifyClassInTableArtMethodVisitor visitor2(class_table);
-    header.VisitPackedArtMethods(&visitor2, space->Begin(), sizeof(void*));
+    header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
+        &visitor2,
+        space->Begin(),
+        sizeof(void*));
   }
   VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time);
   return true;
@@ -5976,16 +5988,14 @@
   // Allocate a new table. Note that we will leak this table at the next conflict,
   // but that's a tradeoff compared to making the table fixed size.
   void* data = linear_alloc->Alloc(
-      Thread::Current(), ImtConflictTable::ComputeSizeWithOneMoreEntry(current_table,
-                                                                       image_pointer_size_));
+      Thread::Current(), ImtConflictTable::ComputeSizeWithOneMoreEntry(current_table));
   if (data == nullptr) {
     LOG(ERROR) << "Failed to allocate conflict table";
     return conflict_method;
   }
   ImtConflictTable* new_table = new (data) ImtConflictTable(current_table,
                                                             interface_method,
-                                                            method,
-                                                            image_pointer_size_);
+                                                            method);
 
   // Do a fence to ensure threads see the data in the table before it is assigned
   // to the conflict method.
@@ -5993,7 +6003,7 @@
   // memory from the LinearAlloc, but that's a tradeoff compared to using
   // atomic operations.
   QuasiAtomic::ThreadFenceRelease();
-  new_conflict_method->SetImtConflictTable(new_table, image_pointer_size_);
+  new_conflict_method->SetImtConflictTable(new_table);
   return new_conflict_method;
 }
 
@@ -6025,52 +6035,18 @@
   }
 }
 
-void ClassLinker::FillIMTAndConflictTables(mirror::Class* klass) {
-  DCHECK(klass->ShouldHaveEmbeddedImtAndVTable());
-  ArtMethod* imt[mirror::Class::kImtSize];
-  Runtime* const runtime = Runtime::Current();
-  ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod();
-  ArtMethod* const conflict_method = runtime->GetImtConflictMethod();
-  std::fill_n(imt, arraysize(imt), unimplemented_method);
-  if (klass->GetIfTable() != nullptr) {
-    FillIMTFromIfTable(klass->GetIfTable(),
-                       unimplemented_method,
-                       conflict_method,
-                       klass,
-                       true,
-                       false,
-                       &imt[0]);
-  }
-  for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
-    klass->SetEmbeddedImTableEntry(i, imt[i], image_pointer_size_);
-  }
-}
-
 static inline uint32_t GetIMTIndex(ArtMethod* interface_method)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   return interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
 }
 
-ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count,
-                                                      LinearAlloc* linear_alloc,
-                                                      size_t image_pointer_size) {
-  void* data = linear_alloc->Alloc(Thread::Current(),
-                                   ImtConflictTable::ComputeSize(count,
-                                                                 image_pointer_size));
-  return (data != nullptr) ? new (data) ImtConflictTable(count, image_pointer_size) : nullptr;
-}
-
-ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc) {
-  return CreateImtConflictTable(count, linear_alloc, image_pointer_size_);
-}
-
-void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
-                                     ArtMethod* unimplemented_method,
-                                     ArtMethod* imt_conflict_method,
-                                     mirror::Class* klass,
-                                     bool create_conflict_tables,
-                                     bool ignore_copied_methods,
-                                     ArtMethod** imt) {
+void ClassLinker::ConstructIMTFromIfTable(mirror::IfTable* if_table,
+                                          ArtMethod* unimplemented_method,
+                                          ArtMethod* imt_conflict_method,
+                                          mirror::Class* klass,
+                                          bool create_conflict_tables,
+                                          bool ignore_copied_methods,
+                                          ArtMethod** out_imt) {
   uint32_t conflict_counts[mirror::Class::kImtSize] = {};
   for (size_t i = 0, length = if_table->Count(); i < length; ++i) {
     mirror::Class* interface = if_table->GetInterface(i);
@@ -6113,7 +6089,7 @@
       SetIMTRef(unimplemented_method,
                 imt_conflict_method,
                 implementation_method,
-                /*out*/&imt[imt_index]);
+                /*out*/&out_imt[imt_index]);
     }
   }
 
@@ -6122,22 +6098,24 @@
     LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader());
     for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
       size_t conflicts = conflict_counts[i];
-      if (imt[i] == imt_conflict_method) {
-        ImtConflictTable* new_table = CreateImtConflictTable(conflicts, linear_alloc);
-        if (new_table != nullptr) {
-          ArtMethod* new_conflict_method =
-              Runtime::Current()->CreateImtConflictMethod(linear_alloc);
-          new_conflict_method->SetImtConflictTable(new_table, image_pointer_size_);
-          imt[i] = new_conflict_method;
+      if (conflicts > 1) {
+        void* data = linear_alloc->Alloc(Thread::Current(),
+                                         ImtConflictTable::ComputeSize(conflicts));
+        if (data != nullptr) {
+          ImtConflictTable* new_table = new (data) ImtConflictTable(conflicts);
+          ArtMethod* new_conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
+          new_conflict_method->SetImtConflictTable(new_table);
+          out_imt[i] = new_conflict_method;
         } else {
           LOG(ERROR) << "Failed to allocate conflict table";
-          imt[i] = imt_conflict_method;
+          out_imt[i] = imt_conflict_method;
         }
       } else {
-        DCHECK_NE(imt[i], imt_conflict_method);
+        DCHECK_NE(out_imt[i], imt_conflict_method);
       }
     }
 
+    // Fill in the entries of the conflict tables allocated above.
     for (size_t i = 0, length = if_table->Count(); i < length; ++i) {
       mirror::Class* interface = if_table->GetInterface(i);
       const size_t method_array_count = if_table->GetMethodArrayCount(i);
@@ -6155,15 +6133,18 @@
         DCHECK(implementation_method != nullptr);
         ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
         const uint32_t imt_index = GetIMTIndex(interface_method);
-        if (!imt[imt_index]->IsRuntimeMethod() ||
-            imt[imt_index] == unimplemented_method ||
-            imt[imt_index] == imt_conflict_method) {
+        if (conflict_counts[imt_index] <= 1) {
           continue;  // Only care about the conflicts.
         }
-        ImtConflictTable* table = imt[imt_index]->GetImtConflictTable(image_pointer_size_);
-        const size_t num_entries = table->NumEntries(image_pointer_size_);
-        table->SetInterfaceMethod(num_entries, image_pointer_size_, interface_method);
-        table->SetImplementationMethod(num_entries, image_pointer_size_, implementation_method);
+        DCHECK_NE(out_imt[imt_index], unimplemented_method) << PrettyMethod(out_imt[imt_index]);
+        DCHECK_NE(out_imt[imt_index], imt_conflict_method) << PrettyMethod(out_imt[imt_index]);
+        DCHECK(out_imt[imt_index]->IsRuntimeMethod()) << PrettyMethod(out_imt[imt_index]);
+        ImtConflictTable* table = out_imt[imt_index]->GetImtConflictTable(image_pointer_size_);
+        // Add to the end of the conflict table.
+        const size_t current_count = table->NumEntries();
+        CHECK_LT(current_count, conflict_counts[imt_index]);
+        table->SetInterfaceMethod(current_count, interface_method);
+        table->SetImplementationMethod(current_count, implementation_method);
       }
     }
   }
@@ -6407,25 +6388,25 @@
 void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass,
                                         ArtMethod* unimplemented_method,
                                         ArtMethod* imt_conflict_method,
-                                        ArtMethod** imt) {
+                                        ArtMethod** out_imt) {
   DCHECK(klass->HasSuperClass());
   mirror::Class* super_class = klass->GetSuperClass();
   if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
     for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
-      imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_);
+      out_imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_);
     }
   } else {
     // No imt in the super class, need to reconstruct from the iftable.
     mirror::IfTable* if_table = super_class->GetIfTable();
     if (if_table != nullptr) {
       // Ignore copied methods since we will handle these in LinkInterfaceMethods.
-      FillIMTFromIfTable(if_table,
-                         unimplemented_method,
-                         imt_conflict_method,
-                         klass.Get(),
-                         /*create_conflict_table*/false,
-                         /*ignore_copied_methods*/true,
-                         /*out*/imt);
+      ConstructIMTFromIfTable(if_table,
+                              unimplemented_method,
+                              imt_conflict_method,
+                              klass.Get(),
+                              /*create_conflict_table*/false,
+                              /*ignore_copied_methods*/true,
+                              out_imt);
     }
   }
 }
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index ece171c..2743921 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -53,7 +53,6 @@
   class StackTraceElement;
 }  // namespace mirror
 
-class ImtConflictTable;
 template<class T> class Handle;
 template<class T> class MutableHandle;
 class InternTable;
@@ -618,19 +617,6 @@
                                       bool force_new_conflict_method)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Create a conflict table with a specified capacity.
-  ImtConflictTable* CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc);
-
-  // Static version for when the class linker is not yet created.
-  static ImtConflictTable* CreateImtConflictTable(size_t count,
-                                                  LinearAlloc* linear_alloc,
-                                                  size_t pointer_size);
-
-
-  // Create the IMT and conflict tables for a class.
-  void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
-
-
   struct DexCacheData {
     // Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
     // not work properly.
@@ -1087,18 +1073,18 @@
                  ArtMethod* current_method,
                  /*out*/ArtMethod** imt_ref) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void FillIMTFromIfTable(mirror::IfTable* if_table,
-                          ArtMethod* unimplemented_method,
-                          ArtMethod* imt_conflict_method,
-                          mirror::Class* klass,
-                          bool create_conflict_tables,
-                          bool ignore_copied_methods,
-                          ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
+  void ConstructIMTFromIfTable(mirror::IfTable* if_table,
+                               ArtMethod* unimplemented_method,
+                               ArtMethod* imt_conflict_method,
+                               mirror::Class* klass,
+                               bool create_conflict_tables,
+                               bool ignore_copied_methods,
+                               ArtMethod** out_imt) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void FillImtFromSuperClass(Handle<mirror::Class> klass,
                              ArtMethod* unimplemented_method,
                              ArtMethod* imt_conflict_method,
-                             ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
+                             ArtMethod** out_imt) SHARED_REQUIRES(Locks::mutator_lock_);
 
   std::vector<const DexFile*> boot_class_path_;
   std::vector<std::unique_ptr<const DexFile>> boot_dex_files_;
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index e9cdbb7..278c4a3 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2174,8 +2174,7 @@
         imt_index % mirror::Class::kImtSize, sizeof(void*));
     if (LIKELY(conflict_method->IsRuntimeMethod())) {
       ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
-      DCHECK(current_table != nullptr);
-      method = current_table->Lookup(interface_method, sizeof(void*));
+      method = current_table->Lookup(interface_method);
     } else {
       // It seems we aren't really a conflict method!
       method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*));
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 1a33d1f..d386c74 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -914,26 +914,10 @@
         pointer_size_(pointer_size) {}
 
   virtual void Visit(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS {
-    // TODO: Separate visitor for runtime vs normal methods.
-    if (UNLIKELY(method->IsRuntimeMethod())) {
-      ImtConflictTable* table = method->GetImtConflictTable(pointer_size_);
-      if (table != nullptr) {
-        ImtConflictTable* new_table = ForwardObject(table);
-        if (table != new_table) {
-          method->SetImtConflictTable(new_table, pointer_size_);
-        }
-      }
-      const void* old_code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
-      const void* new_code = ForwardCode(old_code);
-      if (old_code != new_code) {
-        method->SetEntryPointFromQuickCompiledCodePtrSize(new_code, pointer_size_);
-      }
-    } else {
-      if (fixup_heap_objects_) {
-        method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this), pointer_size_);
-      }
-      method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this), pointer_size_);
+    if (fixup_heap_objects_) {
+      method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this), pointer_size_);
     }
+    method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this), pointer_size_);
   }
 
  private:
@@ -1034,7 +1018,6 @@
   const ImageSection& objects_section = image_header.GetImageSection(ImageHeader::kSectionObjects);
   uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
   uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
-  FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat);
   if (fixup_image) {
     // Two pass approach, fix up all classes first, then fix up non class-objects.
     // The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
@@ -1054,6 +1037,7 @@
     ScopedObjectAccess soa(Thread::Current());
     timing.NewTiming("Fixup objects");
     bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
+    FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat);
     // Fixup image roots.
     CHECK(app_image.InSource(reinterpret_cast<uintptr_t>(
         image_header.GetImageRoots<kWithoutReadBarrier>())));
@@ -1120,18 +1104,19 @@
                                          boot_oat,
                                          app_image,
                                          app_oat);
-    image_header.VisitPackedArtMethods(&method_visitor, target_base, pointer_size);
+    image_header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
+        &method_visitor,
+        target_base,
+        pointer_size);
   }
   if (fixup_image) {
     {
       // Only touches objects in the app image, no need for mutator lock.
       TimingLogger::ScopedTiming timing("Fixup fields", &logger);
       FixupArtFieldVisitor field_visitor(boot_image, boot_oat, app_image, app_oat);
-      image_header.VisitPackedArtFields(&field_visitor, target_base);
-    }
-    {
-      TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
-      image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size);
+      image_header.GetImageSection(ImageHeader::kSectionArtFields).VisitPackedArtFields(
+          &field_visitor,
+          target_base);
     }
     // In the app image case, the image methods are actually in the boot image.
     image_header.RelocateImageMethods(boot_image.Delta());
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index ea75a62..e3307d8 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -19,8 +19,6 @@
 
 #include "image.h"
 
-#include "art_method.h"
-
 namespace art {
 
 template <ReadBarrierOption kReadBarrierOption>
@@ -44,20 +42,6 @@
   return image_roots;
 }
 
-template <typename Visitor>
-inline void ImageHeader::VisitPackedImtConflictTables(const Visitor& visitor,
-                                                      uint8_t* base,
-                                                      size_t pointer_size) const {
-  const ImageSection& section = GetImageSection(kSectionIMTConflictTables);
-  for (size_t pos = 0; pos < section.Size(); ) {
-    auto* table = reinterpret_cast<ImtConflictTable*>(base + section.Offset() + pos);
-    table->Visit([&visitor](const std::pair<ArtMethod*, ArtMethod*>& methods) {
-      return std::make_pair(visitor(methods.first), visitor(methods.second));
-    }, pointer_size);
-    pos += table->ComputeSize(pointer_size);
-  }
-}
-
 }  // namespace art
 
 #endif  // ART_RUNTIME_IMAGE_INL_H_
diff --git a/runtime/image.cc b/runtime/image.cc
index a9552c2..1f54e3e 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '9', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '7', '\0' };
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
@@ -147,10 +147,9 @@
   return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End();
 }
 
-void ImageHeader::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
-  const ImageSection& fields = GetFieldsSection();
-  for (size_t pos = 0; pos < fields.Size(); ) {
-    auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + fields.Offset() + pos);
+void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
+  for (size_t pos = 0; pos < Size(); ) {
+    auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + Offset() + pos);
     for (size_t i = 0; i < array->size(); ++i) {
       visitor->Visit(&array->At(i, sizeof(ArtField)));
     }
@@ -158,25 +157,18 @@
   }
 }
 
-void ImageHeader::VisitPackedArtMethods(ArtMethodVisitor* visitor,
-                                        uint8_t* base,
-                                        size_t pointer_size) const {
+void ImageSection::VisitPackedArtMethods(ArtMethodVisitor* visitor,
+                                         uint8_t* base,
+                                         size_t pointer_size) const {
   const size_t method_alignment = ArtMethod::Alignment(pointer_size);
   const size_t method_size = ArtMethod::Size(pointer_size);
-  const ImageSection& methods = GetMethodsSection();
-  for (size_t pos = 0; pos < methods.Size(); ) {
-    auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + methods.Offset() + pos);
+  for (size_t pos = 0; pos < Size(); ) {
+    auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + Offset() + pos);
     for (size_t i = 0; i < array->size(); ++i) {
       visitor->Visit(&array->At(i, method_size, method_alignment));
     }
     pos += array->ComputeSize(array->size(), method_size, method_alignment);
   }
-  const ImageSection& runtime_methods = GetRuntimeMethodsSection();
-  for (size_t pos = 0; pos < runtime_methods.Size(); ) {
-    auto* method = reinterpret_cast<ArtMethod*>(base + runtime_methods.Offset() + pos);
-    visitor->Visit(method);
-    pos += method_size;
-  }
 }
 
 }  // namespace art
diff --git a/runtime/image.h b/runtime/image.h
index 2ea9af7..8e5dbad 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -64,6 +64,12 @@
     return offset - offset_ < size_;
   }
 
+  // Visit ArtMethods in the section starting at base.
+  void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const;
+
+  // Visit ArtMethods in the section starting at base.
+  void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;
+
  private:
   uint32_t offset_;
   uint32_t size_;
@@ -194,8 +200,6 @@
     kSectionObjects,
     kSectionArtFields,
     kSectionArtMethods,
-    kSectionRuntimeMethods,
-    kSectionIMTConflictTables,
     kSectionDexCacheArrays,
     kSectionInternedStrings,
     kSectionClassTable,
@@ -207,19 +211,10 @@
   void SetImageMethod(ImageMethod index, ArtMethod* method);
 
   const ImageSection& GetImageSection(ImageSections index) const;
-
   const ImageSection& GetMethodsSection() const {
     return GetImageSection(kSectionArtMethods);
   }
 
-  const ImageSection& GetRuntimeMethodsSection() const {
-    return GetImageSection(kSectionRuntimeMethods);
-  }
-
-  const ImageSection& GetFieldsSection() const {
-    return GetImageSection(ImageHeader::kSectionArtFields);
-  }
-
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   mirror::Object* GetImageRoot(ImageRoot image_root) const
       SHARED_REQUIRES(Locks::mutator_lock_);
@@ -270,19 +265,6 @@
     return boot_image_size_ != 0u;
   }
 
-  // Visit ArtMethods in the section starting at base. Includes runtime methods.
-  // TODO: Delete base parameter if it is always equal to GetImageBegin.
-  void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const;
-
-  // Visit ArtMethods in the section starting at base.
-  // TODO: Delete base parameter if it is always equal to GetImageBegin.
-  void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;
-
-  template <typename Visitor>
-  void VisitPackedImtConflictTables(const Visitor& visitor,
-                                    uint8_t* base,
-                                    size_t pointer_size) const;
-
  private:
   static const uint8_t kImageMagic[4];
   static const uint8_t kImageVersion[4];
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 95995fb..a4d31ef 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1613,19 +1613,18 @@
   }
 }
 
+static ImtConflictTable::Entry empty_entry = { nullptr, nullptr };
+
 ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) {
-  ClassLinker* const class_linker = GetClassLinker();
-  ArtMethod* method = class_linker->CreateRuntimeMethod(linear_alloc);
+  auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod(linear_alloc);
   // When compiling, the code pointer will get set later when the image is loaded.
-  const size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
   if (IsAotCompiler()) {
+    size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
     method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
   } else {
     method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
+    method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry));
   }
-  // Create empty conflict table.
-  method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc),
-                              pointer_size);
   return method;
 }
 
@@ -1633,6 +1632,9 @@
   CHECK(method != nullptr);
   CHECK(method->IsRuntimeMethod());
   imt_conflict_method_ = method;
+  if (!IsAotCompiler()) {
+    method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry));
+  }
 }
 
 ArtMethod* Runtime::CreateResolutionMethod() {
@@ -1942,21 +1944,8 @@
   CHECK(method != nullptr);
   CHECK(method->IsRuntimeMethod());
   imt_unimplemented_method_ = method;
-}
-
-void Runtime::FixupConflictTables() {
-  // We can only do this after the class linker is created.
-  const size_t pointer_size = GetClassLinker()->GetImagePointerSize();
-  // Ones in image wont have correct tables. TODO: Fix.
-  if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr || (true)) {
-    imt_unimplemented_method_->SetImtConflictTable(
-        ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
-        pointer_size);
-  }
-  if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr || (true)) {
-    imt_conflict_method_->SetImtConflictTable(
-          ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
-          pointer_size);
+  if (!IsAotCompiler()) {
+    method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry));
   }
 }
 
diff --git a/runtime/runtime.h b/runtime/runtime.h
index b6a9125..ae25dd1 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -383,7 +383,6 @@
     return imt_conflict_method_ != nullptr;
   }
 
-  void FixupConflictTables();
   void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
   void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
 
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 45aeb92..56ef5aa 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -678,10 +678,8 @@
           if (space->IsImageSpace()) {
             auto* image_space = space->AsImageSpace();
             const auto& header = image_space->GetImageHeader();
-            const ImageSection& methods = header.GetMethodsSection();
-            const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
-            const size_t offset =  reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
-            if (methods.Contains(offset) || runtime_methods.Contains(offset)) {
+            const auto* methods = &header.GetMethodsSection();
+            if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) {
               in_image = true;
               break;
             }
diff --git a/runtime/thread.cc b/runtime/thread.cc
index cdbf995..7922b60 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3010,6 +3010,7 @@
   return count;
 }
 
+
 void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
   DCHECK_EQ(GetException(), Thread::GetDeoptimizationException());
   ClearException();
@@ -3030,11 +3031,4 @@
   interpreter::EnterInterpreterFromDeoptimize(this, shadow_frame, from_code, result);
 }
 
-void Thread::SetException(mirror::Throwable* new_exception) {
-  CHECK(new_exception != nullptr);
-  // TODO: DCHECK(!IsExceptionPending());
-  tlsPtr_.exception = new_exception;
-  // LOG(ERROR) << new_exception->Dump();
-}
-
 }  // namespace art
diff --git a/runtime/thread.h b/runtime/thread.h
index 2092feb..ed42e46 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -363,7 +363,12 @@
   void AssertNoPendingException() const;
   void AssertNoPendingExceptionForNewException(const char* msg) const;
 
-  void SetException(mirror::Throwable* new_exception) SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetException(mirror::Throwable* new_exception)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    CHECK(new_exception != nullptr);
+    // TODO: DCHECK(!IsExceptionPending());
+    tlsPtr_.exception = new_exception;
+  }
 
   void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) {
     tlsPtr_.exception = nullptr;