Move MethodInfo to CodeInfo.

There is no need to treat it specially any more,
because of the de-duplication at the BitTable level.

This saves 0.6% of oat file size.
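
With this change, MethodInfo is encoded as just another BitTable inside
CodeInfo, and inline callees' method indices are read through CodeInfo.
Roughly, a sketch based on the updated call sites (current_code and
native_pc_offset as in entrypoint_utils.cc):

  CodeInfo code_info(current_code, CodeInfo::DecodeFlags::InlineInfoOnly);
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
  BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
  if (!inline_infos.empty()) {
    // The method index now comes from the MethodInfo BitTable in CodeInfo.
    uint32_t method_index = code_info.GetMethodIndexOf(inline_infos.back());
  }

This replaces the previous inline_info.GetMethodIndex(method_info) lookup,
which required a separately stored MethodInfo region next to the vmap table.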

Test: test-art-host-gtest
Change-Id: Ife7927d736243879a41d6f325d49ebf6930a63f6
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 4824763..87197be 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -65,23 +65,17 @@
     ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable();
     const uint32_t vmap_table_offset = vmap_table.empty() ? 0u
         : sizeof(OatQuickMethodHeader) + vmap_table.size();
-    // The method info is directly before the vmap table.
-    ArrayRef<const uint8_t> method_info = compiled_method->GetMethodInfo();
-    const uint32_t method_info_offset = method_info.empty() ? 0u
-        : vmap_table_offset + method_info.size();
-
-    OatQuickMethodHeader method_header(vmap_table_offset, method_info_offset, code_size);
+    OatQuickMethodHeader method_header(vmap_table_offset, code_size);
 
     header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
     std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
     const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet());
-    const size_t size = method_info.size() + vmap_table.size() + sizeof(method_header) + code_size;
+    const size_t size = vmap_table.size() + sizeof(method_header) + code_size;
     chunk->reserve(size + max_padding);
     chunk->resize(sizeof(method_header));
     static_assert(std::is_trivially_copyable<OatQuickMethodHeader>::value, "Cannot use memcpy");
     memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
     chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
-    chunk->insert(chunk->begin(), method_info.begin(), method_info.end());
     chunk->insert(chunk->end(), code.begin(), code.end());
     CHECK_EQ(chunk->size(), size);
     const void* unaligned_code_ptr = chunk->data() + (size - code_size);
diff --git a/compiler/compiled_method-inl.h b/compiler/compiled_method-inl.h
index c432747..e60b30f 100644
--- a/compiler/compiled_method-inl.h
+++ b/compiler/compiled_method-inl.h
@@ -38,10 +38,6 @@
   return ArrayRef<const T>(&array->At(0), array->size());
 }
 
-inline ArrayRef<const uint8_t> CompiledMethod::GetMethodInfo() const {
-  return GetArray(method_info_);
-}
-
 inline ArrayRef<const uint8_t> CompiledMethod::GetVmapTable() const {
   return GetArray(vmap_table_);
 }
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 5b93316..29f004c 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -102,12 +102,10 @@
 CompiledMethod::CompiledMethod(CompilerDriver* driver,
                                InstructionSet instruction_set,
                                const ArrayRef<const uint8_t>& quick_code,
-                               const ArrayRef<const uint8_t>& method_info,
                                const ArrayRef<const uint8_t>& vmap_table,
                                const ArrayRef<const uint8_t>& cfi_info,
                                const ArrayRef<const linker::LinkerPatch>& patches)
     : CompiledCode(driver, instruction_set, quick_code),
-      method_info_(driver->GetCompiledMethodStorage()->DeduplicateMethodInfo(method_info)),
       vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)),
       cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)),
       patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) {
@@ -117,7 +115,6 @@
     CompilerDriver* driver,
     InstructionSet instruction_set,
     const ArrayRef<const uint8_t>& quick_code,
-    const ArrayRef<const uint8_t>& method_info,
     const ArrayRef<const uint8_t>& vmap_table,
     const ArrayRef<const uint8_t>& cfi_info,
     const ArrayRef<const linker::LinkerPatch>& patches) {
@@ -127,7 +124,6 @@
                   driver,
                   instruction_set,
                   quick_code,
-                  method_info,
                   vmap_table,
                   cfi_info, patches);
   return ret;
@@ -144,7 +140,6 @@
   storage->ReleaseLinkerPatches(patches_);
   storage->ReleaseCFIInfo(cfi_info_);
   storage->ReleaseVMapTable(vmap_table_);
-  storage->ReleaseMethodInfo(method_info_);
 }
 
 }  // namespace art
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index aa6fd3e..f880280 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -112,7 +112,6 @@
   CompiledMethod(CompilerDriver* driver,
                  InstructionSet instruction_set,
                  const ArrayRef<const uint8_t>& quick_code,
-                 const ArrayRef<const uint8_t>& method_info,
                  const ArrayRef<const uint8_t>& vmap_table,
                  const ArrayRef<const uint8_t>& cfi_info,
                  const ArrayRef<const linker::LinkerPatch>& patches);
@@ -123,7 +122,6 @@
       CompilerDriver* driver,
       InstructionSet instruction_set,
       const ArrayRef<const uint8_t>& quick_code,
-      const ArrayRef<const uint8_t>& method_info,
       const ArrayRef<const uint8_t>& vmap_table,
       const ArrayRef<const uint8_t>& cfi_info,
       const ArrayRef<const linker::LinkerPatch>& patches);
@@ -142,8 +140,6 @@
     SetPackedField<IsIntrinsicField>(/* value */ true);
   }
 
-  ArrayRef<const uint8_t> GetMethodInfo() const;
-
   ArrayRef<const uint8_t> GetVmapTable() const;
 
   ArrayRef<const uint8_t> GetCFIInfo() const;
@@ -159,8 +155,6 @@
 
   using IsIntrinsicField = BitField<bool, kIsIntrinsicLsb, kIsIntrinsicSize>;
 
-  // For quick code, method specific information that is not very dedupe friendly (method indices).
-  const LengthPrefixedArray<uint8_t>* const method_info_;
   // For quick code, holds code infos which contain stack maps, inline information, and etc.
   const LengthPrefixedArray<uint8_t>* const vmap_table_;
   // For quick code, a FDE entry for the debug_frame section.
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 0800ab3..ad9a30f 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -619,7 +619,6 @@
       driver_,
       instruction_set,
       ArrayRef<const uint8_t>(),                   // no code
-      ArrayRef<const uint8_t>(),                   // method_info
       ArrayRef<const uint8_t>(quicken_data),       // vmap_table
       ArrayRef<const uint8_t>(),                   // cfi data
       ArrayRef<const linker::LinkerPatch>());
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index d56b135..31062fb 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -148,8 +148,6 @@
     : swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)),
       dedupe_enabled_(true),
       dedupe_code_("dedupe code", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
-      dedupe_method_info_("dedupe method info",
-                          LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
       dedupe_vmap_table_("dedupe vmap table",
                          LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
       dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
@@ -185,15 +183,6 @@
   ReleaseArrayIfNotDeduplicated(code);
 }
 
-const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateMethodInfo(
-    const ArrayRef<const uint8_t>& src_map) {
-  return AllocateOrDeduplicateArray(src_map, &dedupe_method_info_);
-}
-
-void CompiledMethodStorage::ReleaseMethodInfo(const LengthPrefixedArray<uint8_t>* method_info) {
-  ReleaseArrayIfNotDeduplicated(method_info);
-}
-
 const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateVMapTable(
     const ArrayRef<const uint8_t>& table) {
   return AllocateOrDeduplicateArray(table, &dedupe_vmap_table_);
diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h
index 1634fac..a5a7691 100644
--- a/compiler/driver/compiled_method_storage.h
+++ b/compiler/driver/compiled_method_storage.h
@@ -54,10 +54,6 @@
   const LengthPrefixedArray<uint8_t>* DeduplicateCode(const ArrayRef<const uint8_t>& code);
   void ReleaseCode(const LengthPrefixedArray<uint8_t>* code);
 
-  const LengthPrefixedArray<uint8_t>* DeduplicateMethodInfo(
-      const ArrayRef<const uint8_t>& method_info);
-  void ReleaseMethodInfo(const LengthPrefixedArray<uint8_t>* method_info);
-
   const LengthPrefixedArray<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& table);
   void ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table);
 
@@ -120,7 +116,6 @@
   bool dedupe_enabled_;
 
   ArrayDedupeSet<uint8_t> dedupe_code_;
-  ArrayDedupeSet<uint8_t> dedupe_method_info_;
   ArrayDedupeSet<uint8_t> dedupe_vmap_table_;
   ArrayDedupeSet<uint8_t> dedupe_cfi_info_;
   ArrayDedupeSet<linker::LinkerPatch> dedupe_linker_patches_;
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 14d1e19..5e2f444 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -45,12 +45,6 @@
       ArrayRef<const uint8_t>(raw_code1),
       ArrayRef<const uint8_t>(raw_code2),
   };
-  const uint8_t raw_method_info_map1[] = { 1u, 2u, 3u, 4u, 5u, 6u };
-  const uint8_t raw_method_info_map2[] = { 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u };
-  ArrayRef<const uint8_t> method_info[] = {
-      ArrayRef<const uint8_t>(raw_method_info_map1),
-      ArrayRef<const uint8_t>(raw_method_info_map2),
-  };
   const uint8_t raw_vmap_table1[] = { 2, 4, 6 };
   const uint8_t raw_vmap_table2[] = { 7, 5, 3, 1 };
   ArrayRef<const uint8_t> vmap_table[] = {
@@ -77,38 +71,32 @@
   };
 
   std::vector<CompiledMethod*> compiled_methods;
-  compiled_methods.reserve(1u << 7);
+  compiled_methods.reserve(1u << 4);
   for (auto&& c : code) {
-    for (auto&& s : method_info) {
-      for (auto&& v : vmap_table) {
-        for (auto&& f : cfi_info) {
-          for (auto&& p : patches) {
-            compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
-                &driver, InstructionSet::kNone, c, s, v, f, p));
-          }
+    for (auto&& v : vmap_table) {
+      for (auto&& f : cfi_info) {
+        for (auto&& p : patches) {
+          compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
+              &driver, InstructionSet::kNone, c, v, f, p));
         }
       }
     }
   }
-  constexpr size_t code_bit = 1u << 4;
-  constexpr size_t src_map_bit = 1u << 3;
+  constexpr size_t code_bit = 1u << 3;
   constexpr size_t vmap_table_bit = 1u << 2;
   constexpr size_t cfi_info_bit = 1u << 1;
   constexpr size_t patches_bit = 1u << 0;
-  CHECK_EQ(compiled_methods.size(), 1u << 5);
+  CHECK_EQ(compiled_methods.size(), 1u << 4);
   for (size_t i = 0; i != compiled_methods.size(); ++i) {
     for (size_t j = 0; j != compiled_methods.size(); ++j) {
       CompiledMethod* lhs = compiled_methods[i];
       CompiledMethod* rhs = compiled_methods[j];
       bool same_code = ((i ^ j) & code_bit) == 0u;
-      bool same_src_map = ((i ^ j) & src_map_bit) == 0u;
       bool same_vmap_table = ((i ^ j) & vmap_table_bit) == 0u;
       bool same_cfi_info = ((i ^ j) & cfi_info_bit) == 0u;
       bool same_patches = ((i ^ j) & patches_bit) == 0u;
       ASSERT_EQ(same_code, lhs->GetQuickCode().data() == rhs->GetQuickCode().data())
           << i << " " << j;
-      ASSERT_EQ(same_src_map, lhs->GetMethodInfo().data() == rhs->GetMethodInfo().data())
-          << i << " " << j;
       ASSERT_EQ(same_vmap_table, lhs->GetVmapTable().data() == rhs->GetVmapTable().data())
           << i << " " << j;
       ASSERT_EQ(same_cfi_info, lhs->GetCFIInfo().data() == rhs->GetCFIInfo().data())
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 6d95203..b0e0337 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -92,7 +92,7 @@
 
     MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size);
     stack_maps.FillInCodeInfo(stack_maps_region);
-    OatQuickMethodHeader method_header(code_ptr - stack_maps_region.begin(), 0u, code_size);
+    OatQuickMethodHeader method_header(code_ptr - stack_maps_region.begin(), code_size);
     static_assert(std::is_trivially_copyable<OatQuickMethodHeader>::value, "Cannot use memcpy");
     memcpy(code_ptr - header_size, &method_header, header_size);
     memcpy(code_ptr, fake_code_.data(), fake_code_.size());
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0ebf4be..b0a05da 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -963,13 +963,10 @@
 
 CodeGenerator::~CodeGenerator() {}
 
-void CodeGenerator::ComputeStackMapAndMethodInfoSize(size_t* stack_map_size,
-                                                     size_t* method_info_size) {
+void CodeGenerator::ComputeStackMapSize(size_t* stack_map_size) {
   DCHECK(stack_map_size != nullptr);
-  DCHECK(method_info_size != nullptr);
   StackMapStream* stack_map_stream = GetStackMapStream();
   *stack_map_size = stack_map_stream->PrepareForFillIn();
-  *method_info_size = stack_map_stream->ComputeMethodInfoSize();
 }
 
 size_t CodeGenerator::GetNumberOfJitRoots() const {
@@ -1039,11 +1036,9 @@
 }
 
 void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
-                                   MemoryRegion method_info_region,
                                    const DexFile::CodeItem* code_item_for_osr_check) {
   StackMapStream* stack_map_stream = GetStackMapStream();
   stack_map_stream->FillInCodeInfo(stack_map_region);
-  stack_map_stream->FillInMethodInfo(method_info_region);
   if (kIsDebugBuild && code_item_for_osr_check != nullptr) {
     CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), *code_item_for_osr_check);
   }
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 59f858e..3d58d29 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -351,9 +351,8 @@
   void AddSlowPath(SlowPathCode* slow_path);
 
   void BuildStackMaps(MemoryRegion stack_map_region,
-                      MemoryRegion method_info_region,
                       const DexFile::CodeItem* code_item_for_osr_check);
-  void ComputeStackMapAndMethodInfoSize(size_t* stack_map_size, size_t* method_info_size);
+  void ComputeStackMapSize(size_t* stack_map_size);
   size_t GetNumberOfJitRoots() const;
 
   // Fills the `literals` array with literals collected during code generation.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 9398026..d96746f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -715,18 +715,16 @@
   ArenaVector<uint8_t> method_info(allocator->Adapter(kArenaAllocStackMaps));
   size_t stack_map_size = 0;
   size_t method_info_size = 0;
-  codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size);
+  codegen->ComputeStackMapSize(&stack_map_size);
   stack_map.resize(stack_map_size);
   method_info.resize(method_info_size);
   codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()),
-                          MemoryRegion(method_info.data(), method_info.size()),
                           code_item_for_osr_check);
 
   CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
       GetCompilerDriver(),
       codegen->GetInstructionSet(),
       code_allocator->GetMemory(),
-      ArrayRef<const uint8_t>(method_info),
       ArrayRef<const uint8_t>(stack_map),
       ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
       ArrayRef<const linker::LinkerPatch>(linker_patches));
@@ -1101,8 +1099,7 @@
 
 static void CreateJniStackMap(ArenaStack* arena_stack,
                               const JniCompiledMethod& jni_compiled_method,
-                              /* out */ ArenaVector<uint8_t>* stack_map,
-                              /* out */ ArenaVector<uint8_t>* method_info) {
+                              /* out */ ArenaVector<uint8_t>* stack_map) {
   ScopedArenaAllocator allocator(arena_stack);
   StackMapStream stack_map_stream(&allocator, jni_compiled_method.GetInstructionSet());
   stack_map_stream.BeginMethod(
@@ -1112,9 +1109,7 @@
       /* num_dex_registers */ 0);
   stack_map_stream.EndMethod();
   stack_map->resize(stack_map_stream.PrepareForFillIn());
-  method_info->resize(stack_map_stream.ComputeMethodInfoSize());
   stack_map_stream.FillInCodeInfo(MemoryRegion(stack_map->data(), stack_map->size()));
-  stack_map_stream.FillInMethodInfo(MemoryRegion(method_info->data(), method_info->size()));
 }
 
 CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
@@ -1169,13 +1164,11 @@
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
 
   ArenaVector<uint8_t> stack_map(allocator.Adapter(kArenaAllocStackMaps));
-  ArenaVector<uint8_t> method_info(allocator.Adapter(kArenaAllocStackMaps));
-  CreateJniStackMap(&arena_stack, jni_compiled_method, &stack_map, &method_info);
+  CreateJniStackMap(&arena_stack, jni_compiled_method, &stack_map);
   return CompiledMethod::SwapAllocCompiledMethod(
       GetCompilerDriver(),
       jni_compiled_method.GetInstructionSet(),
       jni_compiled_method.GetCode(),
-      ArrayRef<const uint8_t>(method_info),
       ArrayRef<const uint8_t>(stack_map),
       jni_compiled_method.GetCfi(),
       /* patches */ ArrayRef<const linker::LinkerPatch>());
@@ -1237,34 +1230,28 @@
     ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
         allocator.Adapter(kArenaAllocCHA));
     ArenaVector<uint8_t> stack_map(allocator.Adapter(kArenaAllocStackMaps));
-    ArenaVector<uint8_t> method_info(allocator.Adapter(kArenaAllocStackMaps));
     ArenaStack arena_stack(runtime->GetJitArenaPool());
     // StackMapStream is large and it does not fit into this frame, so we need helper method.
     // TODO: Try to avoid the extra memory copy that results from this.
-    CreateJniStackMap(&arena_stack, jni_compiled_method, &stack_map, &method_info);
+    CreateJniStackMap(&arena_stack, jni_compiled_method, &stack_map);
     uint8_t* stack_map_data = nullptr;
-    uint8_t* method_info_data = nullptr;
     uint8_t* roots_data = nullptr;
     uint32_t data_size = code_cache->ReserveData(self,
                                                  stack_map.size(),
-                                                 method_info.size(),
                                                  /* number_of_roots */ 0,
                                                  method,
                                                  &stack_map_data,
-                                                 &method_info_data,
                                                  &roots_data);
     if (stack_map_data == nullptr || roots_data == nullptr) {
       MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
       return false;
     }
     memcpy(stack_map_data, stack_map.data(), stack_map.size());
-    memcpy(method_info_data, method_info.data(), method_info.size());
 
     const void* code = code_cache->CommitCode(
         self,
         method,
         stack_map_data,
-        method_info_data,
         roots_data,
         jni_compiled_method.GetCode().data(),
         jni_compiled_method.GetCode().size(),
@@ -1340,8 +1327,7 @@
   }
 
   size_t stack_map_size = 0;
-  size_t method_info_size = 0;
-  codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size);
+  codegen->ComputeStackMapSize(&stack_map_size);
   size_t number_of_roots = codegen->GetNumberOfJitRoots();
   // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
   // will be visible by the GC between EmitLiterals and CommitCode. Once CommitCode is
@@ -1357,30 +1343,24 @@
     return false;
   }
   uint8_t* stack_map_data = nullptr;
-  uint8_t* method_info_data = nullptr;
   uint8_t* roots_data = nullptr;
   uint32_t data_size = code_cache->ReserveData(self,
                                                stack_map_size,
-                                               method_info_size,
                                                number_of_roots,
                                                method,
                                                &stack_map_data,
-                                               &method_info_data,
                                                &roots_data);
   if (stack_map_data == nullptr || roots_data == nullptr) {
     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
     return false;
   }
-  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size),
-                          MemoryRegion(method_info_data, method_info_size),
-                          code_item);
+  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), code_item);
   codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
 
   const void* code = code_cache->CommitCode(
       self,
       method,
       stack_map_data,
-      method_info_data,
       roots_data,
       code_allocator.GetMemory().data(),
       code_allocator.GetMemory().size(),
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index e1b6575..429054c 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -196,7 +196,7 @@
       if (encode_art_method) {
         CHECK_EQ(inline_info.GetArtMethod(), method);
       } else {
-        CHECK_EQ(method_infos_[inline_info.GetMethodInfoIndex()][0], method->GetDexMethodIndex());
+        CHECK_EQ(code_info.GetMethodIndexOf(inline_info), method->GetDexMethodIndex());
       }
     });
   }
@@ -274,24 +274,6 @@
   }
 }
 
-void StackMapStream::FillInMethodInfo(MemoryRegion region) {
-  {
-    MethodInfo info(region.begin(), method_infos_.size());
-    for (size_t i = 0; i < method_infos_.size(); ++i) {
-      info.SetMethodIndex(i, method_infos_[i][0]);
-    }
-  }
-  if (kVerifyStackMaps) {
-    // Check the data matches.
-    MethodInfo info(region.begin());
-    const size_t count = info.NumMethodIndices();
-    DCHECK_EQ(count, method_infos_.size());
-    for (size_t i = 0; i < count; ++i) {
-      DCHECK_EQ(info.GetMethodIndex(i), method_infos_[i][0]);
-    }
-  }
-}
-
 template<typename Writer, typename Builder>
 ALWAYS_INLINE static void EncodeTable(Writer& out, const Builder& bit_table) {
   out.WriteBit(false);  // Is not deduped.
@@ -317,6 +299,7 @@
   BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&out_, out_.size() * kBitsPerByte);
   EncodeTable(out, stack_maps_);
   EncodeTable(out, inline_infos_);
+  EncodeTable(out, method_infos_);
   EncodeTable(out, register_masks_);
   EncodeTable(out, stack_masks_);
   EncodeTable(out, dex_register_masks_);
@@ -347,9 +330,4 @@
   }
 }
 
-size_t StackMapStream::ComputeMethodInfoSize() const {
-  DCHECK_NE(0u, out_.size()) << "PrepareForFillIn not called before " << __FUNCTION__;
-  return MethodInfo::ComputeSize(method_infos_.size());
-}
-
 }  // namespace art
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 203c2cd..de79f49 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -25,7 +25,6 @@
 #include "base/scoped_arena_containers.h"
 #include "base/value_object.h"
 #include "dex_register_location.h"
-#include "method_info.h"
 #include "nodes.h"
 #include "stack_map.h"
 
@@ -40,14 +39,14 @@
   explicit StackMapStream(ScopedArenaAllocator* allocator, InstructionSet instruction_set)
       : instruction_set_(instruction_set),
         stack_maps_(allocator),
+        inline_infos_(allocator),
+        method_infos_(allocator),
         register_masks_(allocator),
         stack_masks_(allocator),
-        inline_infos_(allocator),
         dex_register_masks_(allocator),
         dex_register_maps_(allocator),
         dex_register_catalog_(allocator),
         out_(allocator->Adapter(kArenaAllocStackMapStream)),
-        method_infos_(allocator),
         lazy_stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
         current_stack_map_(),
         current_inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
@@ -92,9 +91,6 @@
   // Returns the size (in bytes) needed to store this stream.
   size_t PrepareForFillIn();
   void FillInCodeInfo(MemoryRegion region);
-  void FillInMethodInfo(MemoryRegion region);
-
-  size_t ComputeMethodInfoSize() const;
 
  private:
   static constexpr uint32_t kNoValue = -1;
@@ -107,16 +103,15 @@
   uint32_t fp_spill_mask_ = 0;
   uint32_t num_dex_registers_ = 0;
   BitTableBuilder<StackMap> stack_maps_;
+  BitTableBuilder<InlineInfo> inline_infos_;
+  BitTableBuilder<MethodInfo> method_infos_;
   BitTableBuilder<RegisterMask> register_masks_;
   BitmapTableBuilder stack_masks_;
-  BitTableBuilder<InlineInfo> inline_infos_;
   BitmapTableBuilder dex_register_masks_;
   BitTableBuilder<MaskInfo> dex_register_maps_;
   BitTableBuilder<DexRegisterInfo> dex_register_catalog_;
   ScopedArenaVector<uint8_t> out_;
 
-  BitTableBuilderBase<1> method_infos_;
-
   ScopedArenaVector<BitVector*> lazy_stack_masks_;
 
   // Variables which track the current state between Begin/End calls;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index d34f7b5..a1a547c 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -1316,7 +1316,6 @@
     DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
     OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
     uint32_t vmap_table_offset = method_header->GetVmapTableOffset();
-    uint32_t method_info_offset = method_header->GetMethodInfoOffset();
     // The code offset was 0 when the mapping/vmap table offset was set, so it's set
     // to 0-offset and we need to adjust it by code_offset.
     uint32_t code_offset = quick_code_offset - thumb_offset;
@@ -1327,11 +1326,7 @@
       vmap_table_offset += code_offset;
       DCHECK_LT(vmap_table_offset, code_offset);
     }
-    if (method_info_offset != 0u) {
-      method_info_offset += code_offset;
-      DCHECK_LT(method_info_offset, code_offset);
-    }
-    *method_header = OatQuickMethodHeader(vmap_table_offset, method_info_offset, code_size);
+    *method_header = OatQuickMethodHeader(vmap_table_offset, code_size);
 
     if (!deduped) {
       // Update offsets. (Checksum is updated when writing.)
@@ -1403,9 +1398,6 @@
       if (UNLIKELY(lhs->GetVmapTable().data() != rhs->GetVmapTable().data())) {
         return lhs->GetVmapTable().data() < rhs->GetVmapTable().data();
       }
-      if (UNLIKELY(lhs->GetMethodInfo().data() != rhs->GetMethodInfo().data())) {
-        return lhs->GetMethodInfo().data() < rhs->GetMethodInfo().data();
-      }
       if (UNLIKELY(lhs->GetPatches().data() != rhs->GetPatches().data())) {
         return lhs->GetPatches().data() < rhs->GetPatches().data();
       }
@@ -1467,13 +1459,12 @@
           size_t deduped_offset = CodeInfo::Dedupe(data, map.data(), &dedupe_bit_table_);
           if (kDebugVerifyDedupedCodeInfo) {
             InstructionSet isa = writer_->GetCompilerOptions().GetInstructionSet();
-            MethodInfo method_info(compiled_method->GetMethodInfo().data());
             std::stringstream old_code_info;
             VariableIndentationOutputStream old_vios(&old_code_info);
             std::stringstream new_code_info;
             VariableIndentationOutputStream new_vios(&new_code_info);
-            CodeInfo(map.data()).Dump(&old_vios, 0, true, isa, method_info);
-            CodeInfo(data->data() + deduped_offset).Dump(&new_vios, 0, true, isa, method_info);
+            CodeInfo(map.data()).Dump(&old_vios, 0, true, isa);
+            CodeInfo(data->data() + deduped_offset).Dump(&new_vios, 0, true, isa);
             DCHECK_EQ(old_code_info.str(), new_code_info.str());
           }
           return offset_ + deduped_offset;
@@ -1498,44 +1489,6 @@
   std::map<BitMemoryRegion, uint32_t, BitMemoryRegion::Less> dedupe_bit_table_;
 };
 
-class OatWriter::InitMethodInfoVisitor : public OatDexMethodVisitor {
- public:
-  InitMethodInfoVisitor(OatWriter* writer, size_t offset) : OatDexMethodVisitor(writer, offset) {}
-
-  bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
-    OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
-    CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
-
-    if (HasCompiledCode(compiled_method)) {
-      DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
-      DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].GetMethodInfoOffset(), 0u);
-      ArrayRef<const uint8_t> map = compiled_method->GetMethodInfo();
-      const uint32_t map_size = map.size() * sizeof(map[0]);
-      if (map_size != 0u) {
-        size_t offset = dedupe_map_.GetOrCreate(
-            map.data(),
-            [this, map_size]() {
-              uint32_t new_offset = offset_;
-              offset_ += map_size;
-              return new_offset;
-            });
-        // Code offset is not initialized yet, so set the map offset to 0u-offset.
-        DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
-        oat_class->method_headers_[method_offsets_index_].SetMethodInfoOffset(0u - offset);
-      }
-      ++method_offsets_index_;
-    }
-
-    return true;
-  }
-
- private:
-  // Deduplication is already done on a pointer basis by the compiler driver,
-  // so we can simply compare the pointers to find out if things are duplicated.
-  SafeMap<const uint8_t*, uint32_t> dedupe_map_;
-};
-
 class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
  public:
   InitImageMethodVisitor(OatWriter* writer,
@@ -2045,63 +1998,6 @@
   }
 };
 
-class OatWriter::WriteMethodInfoVisitor : public OatDexMethodVisitor {
- public:
-  WriteMethodInfoVisitor(OatWriter* writer,
-                         OutputStream* out,
-                         const size_t file_offset,
-                         size_t relative_offset)
-      : OatDexMethodVisitor(writer, relative_offset),
-        out_(out),
-        file_offset_(file_offset) {}
-
-  bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) OVERRIDE {
-    OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
-    const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
-
-    if (HasCompiledCode(compiled_method)) {
-      size_t file_offset = file_offset_;
-      OutputStream* out = out_;
-      uint32_t map_offset = oat_class->method_headers_[method_offsets_index_].GetMethodInfoOffset();
-      uint32_t code_offset = oat_class->method_offsets_[method_offsets_index_].code_offset_;
-      ++method_offsets_index_;
-      DCHECK((compiled_method->GetMethodInfo().size() == 0u && map_offset == 0u) ||
-             (compiled_method->GetMethodInfo().size() != 0u && map_offset != 0u))
-          << compiled_method->GetMethodInfo().size() << " " << map_offset << " "
-          << dex_file_->PrettyMethod(it.GetMemberIndex());
-      if (map_offset != 0u) {
-        // Transform map_offset to actual oat data offset.
-        map_offset = (code_offset - compiled_method->CodeDelta()) - map_offset;
-        DCHECK_NE(map_offset, 0u);
-        DCHECK_LE(map_offset, offset_) << dex_file_->PrettyMethod(it.GetMemberIndex());
-
-        ArrayRef<const uint8_t> map = compiled_method->GetMethodInfo();
-        size_t map_size = map.size() * sizeof(map[0]);
-        if (map_offset == offset_) {
-          // Write deduplicated map (code info for Optimizing or transformation info for dex2dex).
-          if (UNLIKELY(!out->WriteFully(map.data(), map_size))) {
-            ReportWriteFailure(it);
-            return false;
-          }
-          offset_ += map_size;
-        }
-      }
-      DCHECK_OFFSET_();
-    }
-
-    return true;
-  }
-
- private:
-  OutputStream* const out_;
-  size_t const file_offset_;
-
-  void ReportWriteFailure(const ClassDataItemIterator& it) {
-    PLOG(ERROR) << "Failed to write map for "
-        << dex_file_->PrettyMethod(it.GetMemberIndex()) << " to " << out_->GetLocation();
-  }
-};
-
 // Visit all methods from all classes in all dex files with the specified visitor.
 bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
   for (const DexFile* dex_file : *dex_files_) {
@@ -2193,12 +2089,6 @@
     DCHECK(success);
     offset += code_info_data_.size();
   }
-  {
-    InitMethodInfoVisitor visitor(this, offset);
-    bool success = VisitDexMethods(&visitor);
-    DCHECK(success);
-    offset = visitor.GetOffset();
-  }
   return offset;
 }
 
@@ -3050,15 +2940,7 @@
     }
     relative_offset += code_info_data_.size();
     size_vmap_table_ = code_info_data_.size();
-  }
-  {
-    size_t method_infos_offset = relative_offset;
-    WriteMethodInfoVisitor visitor(this, out, file_offset, relative_offset);
-    if (UNLIKELY(!VisitDexMethods(&visitor))) {
-      return 0;
-    }
-    relative_offset = visitor.GetOffset();
-    size_method_info_ = relative_offset - method_infos_offset;
+    DCHECK_OFFSET();
   }
 
   return relative_offset;
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index d14ddab..9470f8c 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -96,11 +96,6 @@
 // ...
 // VmapTable
 //
-// MethodInfo        one variable sized blob with MethodInfo.
-// MethodInfo        MethodInfos are deduplicated.
-// ...
-// MethodInfo
-//
 // OatDexFile[0]     one variable sized OatDexFile with offsets to Dex and OatClasses
 // OatDexFile[1]
 // ...
@@ -284,11 +279,9 @@
   class OrderedMethodVisitor;
   class InitCodeMethodVisitor;
   class InitMapMethodVisitor;
-  class InitMethodInfoVisitor;
   class InitImageMethodVisitor;
   class WriteCodeMethodVisitor;
   class WriteMapMethodVisitor;
-  class WriteMethodInfoVisitor;
   class WriteQuickeningInfoMethodVisitor;
   class WriteQuickeningInfoOffsetsMethodVisitor;
 
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 37d0a3f..bb27e8c 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -472,7 +472,7 @@
   // it is time to update OatHeader::kOatVersion
   EXPECT_EQ(76U, sizeof(OatHeader));
   EXPECT_EQ(4U, sizeof(OatMethodOffsets));
-  EXPECT_EQ(12U, sizeof(OatQuickMethodHeader));
+  EXPECT_EQ(8U, sizeof(OatQuickMethodHeader));
   EXPECT_EQ(166 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
             sizeof(QuickEntryPoints));
 }
diff --git a/dex2oat/linker/relative_patcher_test.h b/dex2oat/linker/relative_patcher_test.h
index 075771d..9556c5f 100644
--- a/dex2oat/linker/relative_patcher_test.h
+++ b/dex2oat/linker/relative_patcher_test.h
@@ -87,7 +87,6 @@
         compiler_driver_.get(),
         instruction_set_,
         code,
-        /* method_info */ ArrayRef<const uint8_t>(),
         /* vmap_table */ ArrayRef<const uint8_t>(),
         /* cfi_info */ ArrayRef<const uint8_t>(),
         patches));
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 271d37d..9d73879 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1315,8 +1315,7 @@
         CodeInfo code_info(raw_code_info);
         DCHECK(code_item_accessor.HasCodeItem());
         ScopedIndentation indent1(vios);
-        MethodInfo method_info = oat_method.GetOatQuickMethodHeader()->GetOptimizedMethodInfo();
-        DumpCodeInfo(vios, code_info, oat_method, method_info);
+        DumpCodeInfo(vios, code_info, oat_method);
       }
     } else if (IsMethodGeneratedByDexToDexCompiler(oat_method, code_item_accessor)) {
       // We don't encode the size in the table, so just emit that we have quickened
@@ -1331,13 +1330,11 @@
   // Display a CodeInfo object emitted by the optimizing compiler.
   void DumpCodeInfo(VariableIndentationOutputStream* vios,
                     const CodeInfo& code_info,
-                    const OatFile::OatMethod& oat_method,
-                    const MethodInfo& method_info) {
+                    const OatFile::OatMethod& oat_method) {
     code_info.Dump(vios,
                    oat_method.GetCodeOffset(),
                    options_.dump_code_info_stack_maps_,
-                   instruction_set_,
-                   method_info);
+                   instruction_set_);
   }
 
   static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
@@ -1579,16 +1576,10 @@
     } else if (!bad_input && IsMethodGeneratedByOptimizingCompiler(oat_method,
                                                                    code_item_accessor)) {
       // The optimizing compiler outputs its CodeInfo data in the vmap table.
-      const OatQuickMethodHeader* method_header = oat_method.GetOatQuickMethodHeader();
       StackMapsHelper helper(oat_method.GetVmapTable(), instruction_set_);
       if (AddStatsObject(oat_method.GetVmapTable())) {
         helper.GetCodeInfo().AddSizeStats(&stats_);
       }
-      MethodInfo method_info(method_header->GetOptimizedMethodInfo());
-      if (AddStatsObject(method_header->GetOptimizedMethodInfoPtr())) {
-        size_t method_info_size = MethodInfo::ComputeSize(method_info.NumMethodIndices());
-        stats_.Child("MethodInfo")->AddBytes(method_info_size);
-      }
       const uint8_t* quick_native_pc = reinterpret_cast<const uint8_t*>(quick_code);
       size_t offset = 0;
       while (offset < code_size) {
@@ -1599,7 +1590,6 @@
           DCHECK(stack_map.IsValid());
           stack_map.Dump(vios,
                          helper.GetCodeInfo(),
-                         method_info,
                          oat_method.GetCodeOffset(),
                          instruction_set_);
           do {
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 0ed26d3..e6f3d0b 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -45,7 +45,7 @@
 namespace art {
 
 inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
-                                    const MethodInfo& method_info,
+                                    const CodeInfo& code_info,
                                     const BitTableRange<InlineInfo>& inline_infos)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(!outer_method->IsObsolete());
@@ -62,7 +62,7 @@
       return inline_info.GetArtMethod();
     }
 
-    uint32_t method_index = inline_info.GetMethodIndex(method_info);
+    uint32_t method_index = code_info.GetMethodIndexOf(inline_info);
     if (inline_info.GetDexPc() == static_cast<uint32_t>(-1)) {
       // "charAt" special case. It is the only non-leaf method we inline across dex files.
       ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
@@ -77,7 +77,7 @@
   for (InlineInfo inline_info : inline_infos) {
     DCHECK(!inline_info.EncodesArtMethod());
     DCHECK_NE(inline_info.GetDexPc(), static_cast<uint32_t>(-1));
-    uint32_t method_index = inline_info.GetMethodIndex(method_info);
+    uint32_t method_index = code_info.GetMethodIndexOf(inline_info);
     ArtMethod* inlined_method = class_linker->LookupResolvedMethod(method_index,
                                                                    method->GetDexCache(),
                                                                    method->GetClassLoader());
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index a5ebce5..5421f69 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -202,12 +202,11 @@
       DCHECK(current_code->IsOptimized());
       uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
       CodeInfo code_info(current_code, CodeInfo::DecodeFlags::InlineInfoOnly);
-      MethodInfo method_info = current_code->GetOptimizedMethodInfo();
       StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
       DCHECK(stack_map.IsValid());
       BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
       if (!inline_infos.empty()) {
-        caller = GetResolvedMethod(outer_method, method_info, inline_infos);
+        caller = GetResolvedMethod(outer_method, code_info, inline_infos);
       }
     }
     if (kIsDebugBuild && do_caller_check) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index be4e4e6..aca169b9 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1236,7 +1236,6 @@
   CHECK(current_code->IsOptimized());
   uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
   CodeInfo code_info(current_code);
-  MethodInfo method_info = current_code->GetOptimizedMethodInfo();
   StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
   CHECK(stack_map.IsValid());
   uint32_t dex_pc = stack_map.GetDexPc();
@@ -1261,7 +1260,7 @@
       tag = "encoded ";
       caller = inline_info.GetArtMethod();
     } else {
-      uint32_t method_index = inline_info.GetMethodIndex(method_info);
+      uint32_t method_index = code_info.GetMethodIndexOf(inline_info);
       if (dex_pc == static_cast<uint32_t>(-1)) {
         tag = "special ";
         CHECK(inline_info.Equals(inline_infos.back()));
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 9aa05561..b92affa 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -411,7 +411,6 @@
 uint8_t* JitCodeCache::CommitCode(Thread* self,
                                   ArtMethod* method,
                                   uint8_t* stack_map,
-                                  uint8_t* method_info,
                                   uint8_t* roots_data,
                                   const uint8_t* code,
                                   size_t code_size,
@@ -423,7 +422,6 @@
   uint8_t* result = CommitCodeInternal(self,
                                        method,
                                        stack_map,
-                                       method_info,
                                        roots_data,
                                        code,
                                        code_size,
@@ -438,7 +436,6 @@
     result = CommitCodeInternal(self,
                                 method,
                                 stack_map,
-                                method_info,
                                 roots_data,
                                 code,
                                 code_size,
@@ -748,7 +745,6 @@
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           ArtMethod* method,
                                           uint8_t* stack_map,
-                                          uint8_t* method_info,
                                           uint8_t* roots_data,
                                           const uint8_t* code,
                                           size_t code_size,
@@ -783,7 +779,6 @@
       method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       new (method_header) OatQuickMethodHeader(
           (stack_map != nullptr) ? code_ptr - stack_map : 0u,
-          (method_info != nullptr) ? code_ptr - method_info : 0u,
           code_size);
       // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
       // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
@@ -1046,14 +1041,12 @@
 
 size_t JitCodeCache::ReserveData(Thread* self,
                                  size_t stack_map_size,
-                                 size_t method_info_size,
                                  size_t number_of_roots,
                                  ArtMethod* method,
                                  uint8_t** stack_map_data,
-                                 uint8_t** method_info_data,
                                  uint8_t** roots_data) {
   size_t table_size = ComputeRootTableSize(number_of_roots);
-  size_t size = RoundUp(stack_map_size + method_info_size + table_size, sizeof(void*));
+  size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
   uint8_t* result = nullptr;
 
   {
@@ -1083,13 +1076,11 @@
   if (result != nullptr) {
     *roots_data = result;
     *stack_map_data = result + table_size;
-    *method_info_data = *stack_map_data + stack_map_size;
     FillRootTableLength(*roots_data, number_of_roots);
     return size;
   } else {
     *roots_data = nullptr;
     *stack_map_data = nullptr;
-    *method_info_data = nullptr;
     return 0;
   }
 }
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index d17fb26..29f9c9c 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -136,7 +136,6 @@
   uint8_t* CommitCode(Thread* self,
                       ArtMethod* method,
                       uint8_t* stack_map,
-                      uint8_t* method_info,
                       uint8_t* roots_data,
                       const uint8_t* code,
                       size_t code_size,
@@ -166,11 +165,9 @@
   // Return the number of bytes allocated.
   size_t ReserveData(Thread* self,
                      size_t stack_map_size,
-                     size_t method_info_size,
                      size_t number_of_roots,
                      ArtMethod* method,
                      uint8_t** stack_map_data,
-                     uint8_t** method_info_data,
                      uint8_t** roots_data)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
@@ -295,7 +292,6 @@
   uint8_t* CommitCodeInternal(Thread* self,
                               ArtMethod* method,
                               uint8_t* stack_map,
-                              uint8_t* method_info,
                               uint8_t* roots_data,
                               const uint8_t* code,
                               size_t code_size,
diff --git a/runtime/method_info.h b/runtime/method_info.h
deleted file mode 100644
index 6f74678..0000000
--- a/runtime/method_info.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_METHOD_INFO_H_
-#define ART_RUNTIME_METHOD_INFO_H_
-
-#include <android-base/logging.h>
-
-#include "base/leb128.h"
-#include "base/macros.h"
-#include "base/bit_memory_region.h"
-
-namespace art {
-
-// Method info is for not dedupe friendly data of a method. Currently it only holds methods indices.
-// Putting this data in MethodInfo instead of code infos saves ~5% oat size.
-class MethodInfo {
-  using MethodIndexType = uint16_t;
-
- public:
-  // Reading mode
-  explicit MethodInfo(const uint8_t* ptr) {
-    if (ptr != nullptr) {
-      num_method_indices_ = DecodeUnsignedLeb128(&ptr);
-      region_ = BitMemoryRegion(
-          MemoryRegion(const_cast<uint8_t*>(ptr), num_method_indices_ * sizeof(MethodIndexType)));
-    }
-  }
-
-  // Writing mode
-  MethodInfo(uint8_t* ptr, size_t num_method_indices) : num_method_indices_(num_method_indices) {
-    DCHECK(ptr != nullptr);
-    ptr = EncodeUnsignedLeb128(ptr, num_method_indices_);
-    region_ = BitMemoryRegion(MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType)));
-  }
-
-  static size_t ComputeSize(size_t num_method_indices) {
-    uint8_t temp[8];
-    uint8_t* ptr = temp;
-    ptr = EncodeUnsignedLeb128(ptr, num_method_indices);
-    return (ptr - temp) + num_method_indices * sizeof(MethodIndexType);
-  }
-
-  ALWAYS_INLINE MethodIndexType GetMethodIndex(size_t index) const {
-    // Use bit functions to avoid pesky alignment requirements.
-    return region_.LoadBits(index * BitSizeOf<MethodIndexType>(), BitSizeOf<MethodIndexType>());
-  }
-
-  void SetMethodIndex(size_t index, MethodIndexType method_index) {
-    region_.StoreBits(index * BitSizeOf<MethodIndexType>(),
-                      method_index,
-                      BitSizeOf<MethodIndexType>());
-  }
-
-  size_t NumMethodIndices() const {
-    return num_method_indices_;
-  }
-
- private:
-  size_t num_method_indices_ = 0u;
-  BitMemoryRegion region_;
-};
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_METHOD_INFO_H_
diff --git a/runtime/oat.h b/runtime/oat.h
index c286f46..69aaceb 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  // Last oat version changed reason: Deduplicate stackmaps at BitTable level.
-  static constexpr uint8_t kOatVersion[] = { '1', '5', '7', '\0' };
+  // Last oat version changed reason: Move MethodInfo into CodeInfo.
+  static constexpr uint8_t kOatVersion[] = { '1', '5', '8', '\0' };
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 3b9f466..8798c69 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -20,7 +20,6 @@
 #include "arch/instruction_set.h"
 #include "base/macros.h"
 #include "base/utils.h"
-#include "method_info.h"
 #include "quick/quick_method_frame_info.h"
 #include "stack_map.h"
 
@@ -33,10 +32,8 @@
  public:
   OatQuickMethodHeader() = default;
   OatQuickMethodHeader(uint32_t vmap_table_offset,
-                       uint32_t method_info_offset,
                        uint32_t code_size)
       : vmap_table_offset_(vmap_table_offset),
-        method_info_offset_(method_info_offset),
         code_size_(code_size) {
   }
 
@@ -74,20 +71,6 @@
     return code_ - vmap_table_offset_;
   }
 
-  const void* GetOptimizedMethodInfoPtr() const {
-    DCHECK(IsOptimized());
-    return reinterpret_cast<const void*>(code_ - method_info_offset_);
-  }
-
-  uint8_t* GetOptimizedMethodInfoPtr() {
-    DCHECK(IsOptimized());
-    return code_ - method_info_offset_;
-  }
-
-  MethodInfo GetOptimizedMethodInfo() const {
-    return MethodInfo(reinterpret_cast<const uint8_t*>(GetOptimizedMethodInfoPtr()));
-  }
-
   const uint8_t* GetCode() const {
     return code_;
   }
@@ -112,18 +95,6 @@
     return &vmap_table_offset_;
   }
 
-  uint32_t GetMethodInfoOffset() const {
-    return method_info_offset_;
-  }
-
-  void SetMethodInfoOffset(uint32_t offset) {
-    method_info_offset_ = offset;
-  }
-
-  const uint32_t* GetMethodInfoOffsetAddr() const {
-    return &method_info_offset_;
-  }
-
   const uint8_t* GetVmapTable() const {
     CHECK(!IsOptimized()) << "Unimplemented vmap table for optimizing compiler";
     return (vmap_table_offset_ == 0) ? nullptr : code_ - vmap_table_offset_;
@@ -186,11 +157,6 @@
 
   // The offset in bytes from the start of the vmap table to the end of the header.
   uint32_t vmap_table_offset_ = 0u;
-  // The offset in bytes from the start of the method info to the end of the header.
-  // The method info offset is not in the CodeInfo since CodeInfo has good dedupe properties that
-  // would be lost from doing so. The method info memory region contains method indices since they
-  // are hard to dedupe.
-  uint32_t method_info_offset_ = 0u;
   // The code size in bytes. The highest bit is used to signify if the compiled
   // code with the method header has should_deoptimize flag.
   uint32_t code_size_ = 0u;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 85b1ea0..ce99fb9 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -81,9 +81,9 @@
   } else if (cur_quick_frame_ != nullptr) {
     if (IsInInlinedFrame()) {
       const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-      MethodInfo method_info = method_header->GetOptimizedMethodInfo();
+      CodeInfo code_info(method_header);
       DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
-      return GetResolvedMethod(*GetCurrentQuickFrame(), method_info, current_inline_frames_);
+      return GetResolvedMethod(*GetCurrentQuickFrame(), code_info, current_inline_frames_);
     } else {
       return *cur_quick_frame_;
     }
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index e8746bc..cd82284 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -58,6 +58,7 @@
   BitMemoryReader reader(data, /* bit_offset */ 0);
   DecodeTable(stack_maps_, reader, data);
   DecodeTable(inline_infos_, reader, data);
+  DecodeTable(method_infos_, reader, data);
   if (flags & DecodeFlags::InlineInfoOnly) {
     return;
   }
@@ -99,6 +100,7 @@
   BitMemoryWriter<std::vector<uint8_t>> writer(out, /* bit_offset */ out->size() * kBitsPerByte);
   DedupeTable<StackMap>(writer, reader, dedupe_map);
   DedupeTable<InlineInfo>(writer, reader, dedupe_map);
+  DedupeTable<MethodInfo>(writer, reader, dedupe_map);
   DedupeTable<RegisterMask>(writer, reader, dedupe_map);
   DedupeTable<MaskInfo>(writer, reader, dedupe_map);
   DedupeTable<MaskInfo>(writer, reader, dedupe_map);
@@ -211,9 +213,10 @@
   Stats* stats = parent->Child("CodeInfo");
   stats->AddBytes(Size());
   AddTableSizeStats<StackMap>("StackMaps", stack_maps_, stats);
+  AddTableSizeStats<InlineInfo>("InlineInfos", inline_infos_, stats);
+  AddTableSizeStats<MethodInfo>("MethodInfo", method_infos_, stats);
   AddTableSizeStats<RegisterMask>("RegisterMasks", register_masks_, stats);
   AddTableSizeStats<MaskInfo>("StackMasks", stack_masks_, stats);
-  AddTableSizeStats<InlineInfo>("InlineInfos", inline_infos_, stats);
   AddTableSizeStats<MaskInfo>("DexRegisterMasks", dex_register_masks_, stats);
   AddTableSizeStats<DexRegisterMapInfo>("DexRegisterMaps", dex_register_maps_, stats);
   AddTableSizeStats<DexRegisterInfo>("DexRegisterCatalog", dex_register_catalog_, stats);
@@ -271,14 +274,14 @@
 void CodeInfo::Dump(VariableIndentationOutputStream* vios,
                     uint32_t code_offset,
                     bool verbose,
-                    InstructionSet instruction_set,
-                    const MethodInfo& method_info) const {
+                    InstructionSet instruction_set) const {
   vios->Stream() << "CodeInfo\n";
   ScopedIndentation indent1(vios);
   DumpTable<StackMap>(vios, "StackMaps", stack_maps_, verbose);
+  DumpTable<InlineInfo>(vios, "InlineInfos", inline_infos_, verbose);
+  DumpTable<MethodInfo>(vios, "MethodInfo", method_infos_, verbose);
   DumpTable<RegisterMask>(vios, "RegisterMasks", register_masks_, verbose);
   DumpTable<MaskInfo>(vios, "StackMasks", stack_masks_, verbose, true /* is_mask */);
-  DumpTable<InlineInfo>(vios, "InlineInfos", inline_infos_, verbose);
   DumpTable<MaskInfo>(vios, "DexRegisterMasks", dex_register_masks_, verbose, true /* is_mask */);
   DumpTable<DexRegisterMapInfo>(vios, "DexRegisterMaps", dex_register_maps_, verbose);
   DumpTable<DexRegisterInfo>(vios, "DexRegisterCatalog", dex_register_catalog_, verbose);
@@ -286,14 +289,13 @@
   // Display stack maps along with (live) Dex register maps.
   if (verbose) {
     for (StackMap stack_map : stack_maps_) {
-      stack_map.Dump(vios, *this, method_info, code_offset, instruction_set);
+      stack_map.Dump(vios, *this, code_offset, instruction_set);
     }
   }
 }
 
 void StackMap::Dump(VariableIndentationOutputStream* vios,
                     const CodeInfo& code_info,
-                    const MethodInfo& method_info,
                     uint32_t code_offset,
                     InstructionSet instruction_set) const {
   const uint32_t pc_offset = GetNativePcOffset(instruction_set);
@@ -312,14 +314,13 @@
   vios->Stream() << ")\n";
   code_info.GetDexRegisterMapOf(*this).Dump(vios);
   for (InlineInfo inline_info : code_info.GetInlineInfosOf(*this)) {
-    inline_info.Dump(vios, code_info, *this, method_info);
+    inline_info.Dump(vios, code_info, *this);
   }
 }
 
 void InlineInfo::Dump(VariableIndentationOutputStream* vios,
                       const CodeInfo& code_info,
-                      const StackMap& stack_map,
-                      const MethodInfo& method_info) const {
+                      const StackMap& stack_map) const {
   uint32_t depth = Row() - stack_map.GetInlineInfoIndex();
   vios->Stream()
       << "InlineInfo[" << Row() << "]"
@@ -332,7 +333,7 @@
   } else {
     vios->Stream()
         << std::dec
-        << ", method_index=" << GetMethodIndex(method_info);
+        << ", method_index=" << code_info.GetMethodIndexOf(*this);
   }
   vios->Stream() << ")\n";
   code_info.GetInlineDexRegisterMapOf(stack_map, *this).Dump(vios);
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 909aaa5..8bfae7c 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -28,7 +28,6 @@
 #include "base/memory_region.h"
 #include "dex/dex_file_types.h"
 #include "dex_register_location.h"
-#include "method_info.h"
 #include "quick/quick_method_frame_info.h"
 
 namespace art {
@@ -164,7 +163,6 @@
 
   void Dump(VariableIndentationOutputStream* vios,
             const CodeInfo& code_info,
-            const MethodInfo& method_info,
             uint32_t code_offset,
             InstructionSet instruction_set) const;
 };
@@ -188,10 +186,6 @@
   static constexpr uint32_t kLast = -1;
   static constexpr uint32_t kMore = 0;
 
-  uint32_t GetMethodIndex(const MethodInfo& method_info) const {
-    return method_info.GetMethodIndex(GetMethodInfoIndex());
-  }
-
   bool EncodesArtMethod() const {
     return HasArtMethodLo();
   }
@@ -204,8 +198,7 @@
 
   void Dump(VariableIndentationOutputStream* vios,
             const CodeInfo& info,
-            const StackMap& stack_map,
-            const MethodInfo& method_info) const;
+            const StackMap& stack_map) const;
 };
 
 class MaskInfo : public BitTableAccessor<1> {
@@ -262,6 +255,14 @@
   }
 };
 
+// Method indices are not very dedup friendly.
+// Separating them greatly improves dedup efficiency of the other tables.
+class MethodInfo : public BitTableAccessor<1> {
+ public:
+  BIT_TABLE_HEADER()
+  BIT_TABLE_COLUMN(0, MethodIndex)
+};
+
 /**
  * Wrapper around all compiler information collected for a method.
  * See the Decode method at the end for the precise binary format.
@@ -329,6 +330,10 @@
     return stack_maps_.NumRows();
   }
 
+  uint32_t GetMethodIndexOf(InlineInfo inline_info) const {
+    return method_infos_.GetRow(inline_info.GetMethodInfoIndex()).GetMethodIndex();
+  }
+
   ALWAYS_INLINE DexRegisterMap GetDexRegisterMapOf(StackMap stack_map) const {
     if (stack_map.HasDexRegisterMap()) {
       DexRegisterMap map(number_of_dex_registers_, DexRegisterLocation::Invalid());
@@ -405,8 +410,7 @@
   void Dump(VariableIndentationOutputStream* vios,
             uint32_t code_offset,
             bool verbose,
-            InstructionSet instruction_set,
-            const MethodInfo& method_info) const;
+            InstructionSet instruction_set) const;
 
   // Accumulate code info size statistics into the given Stats tree.
   void AddSizeStats(/*out*/ Stats* parent) const;
@@ -446,6 +450,7 @@
   uint32_t number_of_dex_registers_;
   BitTable<StackMap> stack_maps_;
   BitTable<InlineInfo> inline_infos_;
+  BitTable<MethodInfo> method_infos_;
   BitTable<RegisterMask> register_masks_;
   BitTable<MaskInfo> stack_masks_;
   BitTable<MaskInfo> dex_register_masks_;