Merge "Disable test 964 with gcstress since it often times out."
diff --git a/compiler/Android.mk b/compiler/Android.mk
index e74a68f..42ddfd8 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -67,7 +67,6 @@
 	optimizing/builder.cc \
 	optimizing/code_generator.cc \
 	optimizing/code_generator_utils.cc \
-	optimizing/constant_area_fixups_x86.cc \
 	optimizing/constant_folding.cc \
 	optimizing/dead_code_elimination.cc \
 	optimizing/graph_checker.cc \
@@ -85,6 +84,7 @@
 	optimizing/optimization.cc \
 	optimizing/optimizing_compiler.cc \
 	optimizing/parallel_move_resolver.cc \
+	optimizing/pc_relative_fixups_x86.cc \
 	optimizing/prepare_for_register_allocation.cc \
 	optimizing/primitive_type_propagation.cc \
 	optimizing/reference_type_propagation.cc \
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index ecbc3a2..bf3a865 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1552,7 +1552,7 @@
       *type = sharp_type;
     }
   } else {
-    auto* image_space = heap->GetImageSpace();
+    auto* image_space = heap->GetBootImageSpace();
     bool method_in_image = false;
     if (image_space != nullptr) {
       const auto& method_section = image_space->GetImageHeader().GetMethodsSection();
diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/dwarf/dwarf_test.cc
index a412a99..6bb22ed 100644
--- a/compiler/dwarf/dwarf_test.cc
+++ b/compiler/dwarf/dwarf_test.cc
@@ -237,7 +237,7 @@
   std::vector<uintptr_t> debug_line_patches;
   std::vector<uintptr_t> expected_patches { 87 };  // NOLINT
   WriteDebugLineTable(include_directories, files, opcodes,
-                      &debug_line_data_, &debug_line_patches);
+                      0, &debug_line_data_, &debug_line_patches);
 
   EXPECT_EQ(expected_patches, debug_line_patches);
   CheckObjdumpOutput(is64bit, "-W");
@@ -276,7 +276,7 @@
   std::vector<FileEntry> files { { "file.c", 0, 1000, 2000 } };  // NOLINT
   std::vector<uintptr_t> debug_line_patches;
   WriteDebugLineTable(directories, files, opcodes,
-                      &debug_line_data_, &debug_line_patches);
+                      0, &debug_line_data_, &debug_line_patches);
 
   CheckObjdumpOutput(is64bit, "-W -WL");
 }
@@ -332,7 +332,7 @@
   std::vector<uintptr_t> debug_info_patches;
   std::vector<uintptr_t> expected_patches { 16, 20, 29, 33, 42, 46 };  // NOLINT
   dwarf::WriteDebugInfoCU(0 /* debug_abbrev_offset */, info,
-                          &debug_info_data_, &debug_info_patches);
+                          0, &debug_info_data_, &debug_info_patches);
 
   EXPECT_EQ(expected_patches, debug_info_patches);
   CheckObjdumpOutput(is64bit, "-W");
diff --git a/compiler/dwarf/headers.h b/compiler/dwarf/headers.h
index 883d756..633e2f7 100644
--- a/compiler/dwarf/headers.h
+++ b/compiler/dwarf/headers.h
@@ -126,6 +126,7 @@
 template<typename Vector>
 void WriteDebugInfoCU(uint32_t debug_abbrev_offset,
                       const DebugInfoEntryWriter<Vector>& entries,
+                      size_t debug_info_offset,  // offset from start of .debug_info.
                       std::vector<uint8_t>* debug_info,
                       std::vector<uintptr_t>* debug_info_patches) {
   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
@@ -141,7 +142,7 @@
   writer.UpdateUint32(start, writer.data()->size() - start - 4);
   // Copy patch locations and make them relative to .debug_info section.
   for (uintptr_t patch_location : entries.GetPatchLocations()) {
-    debug_info_patches->push_back(entries_offset + patch_location);
+    debug_info_patches->push_back(debug_info_offset + entries_offset + patch_location);
   }
 }
 
@@ -157,6 +158,7 @@
 void WriteDebugLineTable(const std::vector<std::string>& include_directories,
                          const std::vector<FileEntry>& files,
                          const DebugLineOpCodeWriter<Vector>& opcodes,
+                         size_t debug_line_offset,  // offset from start of .debug_line.
                          std::vector<uint8_t>* debug_line,
                          std::vector<uintptr_t>* debug_line_patches) {
   static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
@@ -197,7 +199,7 @@
   writer.UpdateUint32(header_start, writer.data()->size() - header_start - 4);
   // Copy patch locations and make them relative to .debug_line section.
   for (uintptr_t patch_location : opcodes.GetPatchLocations()) {
-    debug_line_patches->push_back(opcodes_offset + patch_location);
+    debug_line_patches->push_back(debug_line_offset + opcodes_offset + patch_location);
   }
 }
 
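
The new offset parameters exist because compilation units are now streamed into
their sections one at a time: patch locations recorded relative to the current
unit's buffer must be rebased to the start of the whole section. A minimal
sketch of the arithmetic, with hypothetical values not taken from this change:

  // Suppose this CU starts 0x40 bytes into .debug_line, its opcode stream
  // starts 0x08 bytes into the CU, and one patch sits 0x10 bytes into that stream.
  size_t debug_line_offset = 0x40;   // passed in by the caller
  size_t opcodes_offset = 0x08;      // computed inside WriteDebugLineTable()
  uintptr_t patch_location = 0x10;   // from opcodes.GetPatchLocations()
  // Section-relative location handed to the linker:
  uintptr_t rebased = debug_line_offset + opcodes_offset + patch_location;  // 0x58
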
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 895dfcc..6e8dfd6 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -156,8 +156,13 @@
 
     // Returns the size of the content of this section.
     Elf_Word GetSize() const {
-      CHECK(finished_);
-      return header_.sh_size;
+      if (finished_) {
+        return header_.sh_size;
+      } else {
+        CHECK(started_);
+        CHECK_NE(header_.sh_type, (Elf_Word)SHT_NOBITS);
+        return owner_->Seek(0, kSeekCurrent) - header_.sh_offset;
+      }
     }
 
     // Set desired allocation size for .bss section.
@@ -281,6 +286,8 @@
       strtab_(this, ".strtab", 0, kPageSize),
       symtab_(this, ".symtab", SHT_SYMTAB, 0, &strtab_),
       debug_frame_(this, ".debug_frame", SHT_PROGBITS, 0, nullptr, 0, sizeof(Elf_Addr), 0),
+      debug_info_(this, ".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0),
+      debug_line_(this, ".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0),
       shstrtab_(this, ".shstrtab", 0, 1),
       virtual_address_(0) {
     text_.phdr_flags_ = PF_R | PF_X;
@@ -300,6 +307,8 @@
   Section* GetEhFrame() { return &eh_frame_; }
   Section* GetEhFrameHdr() { return &eh_frame_hdr_; }
   Section* GetDebugFrame() { return &debug_frame_; }
+  Section* GetDebugInfo() { return &debug_info_; }
+  Section* GetDebugLine() { return &debug_line_; }
 
   // Encode patch locations as LEB128 list of deltas between consecutive addresses.
   // (exposed publicly for tests)
@@ -667,6 +676,8 @@
   StringSection strtab_;
   SymbolSection symtab_;
   Section debug_frame_;
+  Section debug_info_;
+  Section debug_line_;
   StringSection shstrtab_;
   std::vector<std::unique_ptr<Section>> other_sections_;
 
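
WritePatches(), referenced in the comment above, encodes patch locations as a
LEB128 list of deltas between consecutive (sorted) addresses, which keeps the
.oat_patches sections compact. A stand-alone sketch of that scheme, assuming a
generic ULEB128 encoder rather than ART's actual writer:

  #include <cstdint>
  #include <vector>

  // Append one ULEB128-encoded value to `out`.
  static void EncodeUnsignedLeb128(std::vector<uint8_t>* out, uint64_t value) {
    do {
      uint8_t byte = value & 0x7f;
      value >>= 7;
      if (value != 0) {
        byte |= 0x80;  // more bytes follow
      }
      out->push_back(byte);
    } while (value != 0);
  }

  // Encode ascending patch addresses as deltas between consecutive entries.
  static std::vector<uint8_t> EncodePatchLocations(const std::vector<uintptr_t>& sorted) {
    std::vector<uint8_t> buffer;
    uintptr_t previous = 0;
    for (uintptr_t address : sorted) {
      EncodeUnsignedLeb128(&buffer, address - previous);
      previous = address;
    }
    return buffer;
  }
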
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index 7326233..e1ab340 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -250,84 +250,98 @@
 }
 
 template<typename ElfTypes>
-void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
-                        const std::vector<OatWriter::DebugInfo>& method_infos) {
+class DebugInfoWriter {
   typedef typename ElfTypes::Addr Elf_Addr;
-  const bool is64bit = Is64BitInstructionSet(builder->GetIsa());
-  Elf_Addr text_address = builder->GetText()->GetAddress();
 
-  // Find all addresses (low_pc) which contain deduped methods.
-  // The first instance of method is not marked deduped_, but the rest is.
-  std::unordered_set<uint32_t> deduped_addresses;
-  for (const OatWriter::DebugInfo& mi : method_infos) {
-    if (mi.deduped_) {
-      deduped_addresses.insert(mi.low_pc_);
-    }
+ public:
+  explicit DebugInfoWriter(ElfBuilder<ElfTypes>* builder) : builder_(builder) {
   }
 
-  // Group the methods into compilation units based on source file.
-  std::vector<std::vector<const OatWriter::DebugInfo*>> compilation_units;
-  const char* last_source_file = nullptr;
-  for (const OatWriter::DebugInfo& mi : method_infos) {
-    // Attribute given instruction range only to single method.
-    // Otherwise the debugger might get really confused.
-    if (!mi.deduped_) {
-      auto& dex_class_def = mi.dex_file_->GetClassDef(mi.class_def_index_);
-      const char* source_file = mi.dex_file_->GetSourceFile(dex_class_def);
-      if (compilation_units.empty() || source_file != last_source_file) {
-        compilation_units.push_back(std::vector<const OatWriter::DebugInfo*>());
-      }
-      compilation_units.back().push_back(&mi);
-      last_source_file = source_file;
-    }
+  void Start() {
+    builder_->GetDebugInfo()->Start();
   }
 
-  // Write .debug_info section.
-  std::vector<uint8_t> debug_info;
-  std::vector<uintptr_t> debug_info_patches;
-  std::vector<uint8_t> debug_abbrev;
-  std::vector<uint8_t> debug_str;
-  std::vector<uint8_t> debug_line;
-  std::vector<uintptr_t> debug_line_patches;
-  for (const auto& compilation_unit : compilation_units) {
+  void Write(const std::vector<const OatWriter::DebugInfo*>& method_infos,
+             size_t debug_line_offset) {
+    const bool is64bit = Is64BitInstructionSet(builder_->GetIsa());
+    const Elf_Addr text_address = builder_->GetText()->GetAddress();
     uint32_t cunit_low_pc = 0xFFFFFFFFU;
     uint32_t cunit_high_pc = 0;
-    for (auto method_info : compilation_unit) {
+    for (auto method_info : method_infos) {
       cunit_low_pc = std::min(cunit_low_pc, method_info->low_pc_);
       cunit_high_pc = std::max(cunit_high_pc, method_info->high_pc_);
     }
 
-    size_t debug_abbrev_offset = debug_abbrev.size();
-    DebugInfoEntryWriter<> info(is64bit, &debug_abbrev);
+    size_t debug_abbrev_offset = debug_abbrev_.size();
+    DebugInfoEntryWriter<> info(is64bit, &debug_abbrev_);
     info.StartTag(DW_TAG_compile_unit, DW_CHILDREN_yes);
-    info.WriteStrp(DW_AT_producer, "Android dex2oat", &debug_str);
+    info.WriteStrp(DW_AT_producer, "Android dex2oat", &debug_str_);
     info.WriteData1(DW_AT_language, DW_LANG_Java);
     info.WriteAddr(DW_AT_low_pc, text_address + cunit_low_pc);
     info.WriteAddr(DW_AT_high_pc, text_address + cunit_high_pc);
-    info.WriteData4(DW_AT_stmt_list, debug_line.size());
-    for (auto method_info : compilation_unit) {
+    info.WriteData4(DW_AT_stmt_list, debug_line_offset);
+    for (auto method_info : method_infos) {
       std::string method_name = PrettyMethod(method_info->dex_method_index_,
                                              *method_info->dex_file_, true);
-      if (deduped_addresses.find(method_info->low_pc_) != deduped_addresses.end()) {
-        method_name += " [DEDUPED]";
-      }
       info.StartTag(DW_TAG_subprogram, DW_CHILDREN_no);
-      info.WriteStrp(DW_AT_name, method_name.data(), &debug_str);
+      info.WriteStrp(DW_AT_name, method_name.data(), &debug_str_);
       info.WriteAddr(DW_AT_low_pc, text_address + method_info->low_pc_);
       info.WriteAddr(DW_AT_high_pc, text_address + method_info->high_pc_);
       info.EndTag();  // DW_TAG_subprogram
     }
     info.EndTag();  // DW_TAG_compile_unit
-    WriteDebugInfoCU(debug_abbrev_offset, info, &debug_info, &debug_info_patches);
+    std::vector<uint8_t> buffer;
+    buffer.reserve(info.data()->size() + KB);
+    size_t offset = builder_->GetDebugInfo()->GetSize();
+    WriteDebugInfoCU(debug_abbrev_offset, info, offset, &buffer, &debug_info_patches_);
+    builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
+  }
 
-    // Write .debug_line section.
+  void End() {
+    builder_->GetDebugInfo()->End();
+    builder_->WritePatches(".debug_info.oat_patches", &debug_info_patches_);
+    builder_->WriteSection(".debug_abbrev", &debug_abbrev_);
+    builder_->WriteSection(".debug_str", &debug_str_);
+  }
+
+ private:
+  ElfBuilder<ElfTypes>* builder_;
+  std::vector<uintptr_t> debug_info_patches_;
+  std::vector<uint8_t> debug_abbrev_;
+  std::vector<uint8_t> debug_str_;
+};
+
+template<typename ElfTypes>
+class DebugLineWriter {
+  typedef typename ElfTypes::Addr Elf_Addr;
+
+ public:
+  explicit DebugLineWriter(ElfBuilder<ElfTypes>* builder) : builder_(builder) {
+  }
+
+  void Start() {
+    builder_->GetDebugLine()->Start();
+  }
+
+  // Write the line table for the given set of methods.
+  // Returns the number of bytes written.
+  size_t Write(const std::vector<const OatWriter::DebugInfo*>& method_infos) {
+    const bool is64bit = Is64BitInstructionSet(builder_->GetIsa());
+    const Elf_Addr text_address = builder_->GetText()->GetAddress();
+    uint32_t cunit_low_pc = 0xFFFFFFFFU;
+    uint32_t cunit_high_pc = 0;
+    for (auto method_info : method_infos) {
+      cunit_low_pc = std::min(cunit_low_pc, method_info->low_pc_);
+      cunit_high_pc = std::max(cunit_high_pc, method_info->high_pc_);
+    }
+
     std::vector<FileEntry> files;
     std::unordered_map<std::string, size_t> files_map;
     std::vector<std::string> directories;
     std::unordered_map<std::string, size_t> directories_map;
     int code_factor_bits_ = 0;
     int dwarf_isa = -1;
-    switch (builder->GetIsa()) {
+    switch (builder_->GetIsa()) {
       case kArm:  // arm actually means thumb2.
       case kThumb2:
        code_factor_bits_ = 1;  // 16-bit instructions
@@ -348,7 +362,7 @@
     if (dwarf_isa != -1) {
       opcodes.SetISA(dwarf_isa);
     }
-    for (const OatWriter::DebugInfo* mi : compilation_unit) {
+    for (const OatWriter::DebugInfo* mi : method_infos) {
       struct DebugInfoCallbacks {
         static bool NewPosition(void* ctx, uint32_t address, uint32_t line) {
           auto* context = reinterpret_cast<DebugInfoCallbacks*>(ctx);
@@ -449,14 +463,70 @@
     }
     opcodes.AdvancePC(text_address + cunit_high_pc);
     opcodes.EndSequence();
-    WriteDebugLineTable(directories, files, opcodes, &debug_line, &debug_line_patches);
+    std::vector<uint8_t> buffer;
+    buffer.reserve(opcodes.data()->size() + KB);
+    size_t offset = builder_->GetDebugLine()->GetSize();
+    WriteDebugLineTable(directories, files, opcodes, offset, &buffer, &debug_line_patches);
+    builder_->GetDebugLine()->WriteFully(buffer.data(), buffer.size());
+    return buffer.size();
   }
-  builder->WriteSection(".debug_line", &debug_line);
-  builder->WritePatches(".debug_line.oat_patches", &debug_line_patches);
-  builder->WriteSection(".debug_info", &debug_info);
-  builder->WritePatches(".debug_info.oat_patches", &debug_info_patches);
-  builder->WriteSection(".debug_abbrev", &debug_abbrev);
-  builder->WriteSection(".debug_str", &debug_str);
+
+  void End() {
+    builder_->GetDebugLine()->End();
+    builder_->WritePatches(".debug_line.oat_patches", &debug_line_patches);
+  }
+
+ private:
+  ElfBuilder<ElfTypes>* builder_;
+  std::vector<uintptr_t> debug_line_patches;
+};
+
+template<typename ElfTypes>
+void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
+                        const std::vector<OatWriter::DebugInfo>& method_infos) {
+  struct CompilationUnit {
+    std::vector<const OatWriter::DebugInfo*> methods_;
+    size_t debug_line_offset_ = 0;
+  };
+
+  // Group the methods into compilation units based on source file.
+  std::vector<CompilationUnit> compilation_units;
+  const char* last_source_file = nullptr;
+  for (const OatWriter::DebugInfo& mi : method_infos) {
+    // Attribute a given instruction range only to a single method.
+    // Otherwise the debugger might get really confused.
+    if (!mi.deduped_) {
+      auto& dex_class_def = mi.dex_file_->GetClassDef(mi.class_def_index_);
+      const char* source_file = mi.dex_file_->GetSourceFile(dex_class_def);
+      if (compilation_units.empty() || source_file != last_source_file) {
+        compilation_units.push_back(CompilationUnit());
+      }
+      compilation_units.back().methods_.push_back(&mi);
+      last_source_file = source_file;
+    }
+  }
+
+  // Write .debug_line section.
+  {
+    DebugLineWriter<ElfTypes> line_writer(builder);
+    line_writer.Start();
+    size_t offset = 0;
+    for (auto& compilation_unit : compilation_units) {
+      compilation_unit.debug_line_offset_ = offset;
+      offset += line_writer.Write(compilation_unit.methods_);
+    }
+    line_writer.End();
+  }
+
+  // Write .debug_info section.
+  {
+    DebugInfoWriter<ElfTypes> info_writer(builder);
+    info_writer.Start();
+    for (const auto& compilation_unit : compilation_units) {
+      info_writer.Write(compilation_unit.methods_, compilation_unit.debug_line_offset_);
+    }
+    info_writer.End();
+  }
 }
 
 // Explicit instantiations
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index a38e1f5..6df1527 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -181,7 +181,7 @@
   ASSERT_TRUE(heap->HasImageSpace());
   ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace());
 
-  gc::space::ImageSpace* image_space = heap->GetImageSpace();
+  gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
   ASSERT_TRUE(image_space != nullptr);
   ASSERT_LE(image_space->Size(), image_file_size);
 
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index dd2ea9e..3f18d9a 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1589,7 +1589,7 @@
   // If we are compiling an app image, we need to use the stubs of the boot image.
   if (compile_app_image_) {
     // Use the current image pointers.
-    gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+    gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
     DCHECK(image_space != nullptr);
     const OatFile* oat_file = image_space->GetOatFile();
     CHECK(oat_file != nullptr);
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 3f2271e..40a3f14 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -899,7 +899,7 @@
       // NOTE: We're using linker patches for app->boot references when the image can
       // be relocated and therefore we need to emit .oat_patches. We're not using this
       // for app->app references, so check that the method is an image method.
-      gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+      gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
       size_t method_offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
       CHECK(image_space->GetImageHeader().GetMethodsSection().Contains(method_offset));
     }
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ce92470..5188e11 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -208,6 +208,7 @@
 void CodeGenerator::GenerateSlowPaths() {
   size_t code_start = 0;
   for (SlowPathCode* slow_path : slow_paths_) {
+    current_slow_path_ = slow_path;
     if (disasm_info_ != nullptr) {
       code_start = GetAssembler()->CodeSize();
     }
@@ -216,6 +217,7 @@
       disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
     }
   }
+  current_slow_path_ = nullptr;
 }
 
 void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index a92014d..2108abe 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -490,6 +490,7 @@
         compiler_options_(compiler_options),
         src_map_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
         slow_paths_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+        current_slow_path_(nullptr),
         current_block_index_(0),
         is_leaf_(true),
         requires_current_method_(false) {
@@ -557,6 +558,10 @@
     return raw_pointer_to_labels_array + block->GetBlockId();
   }
 
+  SlowPathCode* GetCurrentSlowPath() {
+    return current_slow_path_;
+  }
+
   // Frame size required for this method.
   uint32_t frame_size_;
   uint32_t core_spill_mask_;
@@ -605,6 +610,9 @@
   ArenaVector<SrcMapElem> src_map_;
   ArenaVector<SlowPathCode*> slow_paths_;
 
+  // The current slow path that we're generating code for.
+  SlowPathCode* current_slow_path_;
+
   // The current block index in `block_order_` of the block
   // we are generating code for.
   size_t current_block_index_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e30aa6e..54c6cc8 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2694,7 +2694,7 @@
     case Primitive::kPrimInt: {
       if (div->InputAt(1)->IsConstant()) {
         locations->SetInAt(0, Location::RequiresRegister());
-        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
+        locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
         locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
         int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
         if (abs_imm <= 1) {
@@ -2818,7 +2818,7 @@
     case Primitive::kPrimInt: {
       if (rem->InputAt(1)->IsConstant()) {
         locations->SetInAt(0, Location::RequiresRegister());
-        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
+        locations->SetInAt(1, Location::ConstantLocation(rem->InputAt(1)->AsConstant()));
         locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
         int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
         if (abs_imm <= 1) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index b0be446..7e248b4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -598,7 +598,7 @@
       call_patches_(MethodReferenceComparator(),
                     graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+      pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   // Save the link register (containing the return address) to mimic Quick.
   AddAllocatedRegister(LocationFrom(lr));
 }
@@ -2872,21 +2872,21 @@
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
       // Add ADRP with its PC-relative DexCache access patch.
-      pc_rel_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
-                                             invoke->GetDexCacheArrayOffset());
-      vixl::Label* pc_insn_label = &pc_rel_dex_cache_patches_.back().label;
+      pc_relative_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+                                                  invoke->GetDexCacheArrayOffset());
+      vixl::Label* pc_insn_label = &pc_relative_dex_cache_patches_.back().label;
       {
         vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
         __ adrp(XRegisterFrom(temp).X(), 0);
       }
       __ Bind(pc_insn_label);  // Bind after ADRP.
-      pc_rel_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
+      pc_relative_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
       // Add LDR with its PC-relative DexCache access patch.
-      pc_rel_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
-                                             invoke->GetDexCacheArrayOffset());
+      pc_relative_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+                                                  invoke->GetDexCacheArrayOffset());
       __ Ldr(XRegisterFrom(temp).X(), MemOperand(XRegisterFrom(temp).X(), 0));
-      __ Bind(&pc_rel_dex_cache_patches_.back().label);  // Bind after LDR.
-      pc_rel_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
+      __ Bind(&pc_relative_dex_cache_patches_.back().label);  // Bind after LDR.
+      pc_relative_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
@@ -2973,7 +2973,7 @@
       method_patches_.size() +
       call_patches_.size() +
       relative_call_patches_.size() +
-      pc_rel_dex_cache_patches_.size();
+      pc_relative_dex_cache_patches_.size();
   linker_patches->reserve(size);
   for (const auto& entry : method_patches_) {
     const MethodReference& target_method = entry.first;
@@ -2994,7 +2994,7 @@
                                                              info.target_method.dex_file,
                                                              info.target_method.dex_method_index));
   }
-  for (const PcRelativeDexCacheAccessInfo& info : pc_rel_dex_cache_patches_) {
+  for (const PcRelativeDexCacheAccessInfo& info : pc_relative_dex_cache_patches_) {
     linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.location() - 4u,
                                                               &info.target_dex_file,
                                                               info.pc_insn_label->location() - 4u,
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index ab684ea..aa5ad38 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -447,7 +447,7 @@
   // Using ArenaDeque<> which retains element addresses on push/emplace_back().
   ArenaDeque<MethodPatchInfo<vixl::Label>> relative_call_patches_;
   // PC-relative DexCache access info.
-  ArenaDeque<PcRelativeDexCacheAccessInfo> pc_rel_dex_cache_patches_;
+  ArenaDeque<PcRelativeDexCacheAccessInfo> pc_relative_dex_cache_patches_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
 };
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index b36a042..9b78dec 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2977,7 +2977,7 @@
   CodeGenerator::CreateLoadClassLocationSummary(
       cls,
       Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-      Location::RegisterLocation(A0));
+      calling_convention.GetReturnLocation(cls->GetType()));
 }
 
 void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 58c6e0f..ac3162f 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -119,9 +119,12 @@
   Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
     return Location::RegisterLocation(V0);
   }
-  Location GetSetValueLocation(
-      Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE {
-    return is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1);
+  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterLocation(A2)
+        : (is_instance
+            ? Location::RegisterLocation(A2)
+            : Location::RegisterLocation(A1));
   }
   Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
     return Location::FpuRegisterLocation(F0);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8308d9e..0147b01 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -19,7 +19,6 @@
 #include "art_method.h"
 #include "code_generator_utils.h"
 #include "compiled_method.h"
-#include "constant_area_fixups_x86.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "gc/accounting/card_table.h"
@@ -27,6 +26,7 @@
 #include "intrinsics_x86.h"
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
+#include "pc_relative_fixups_x86.h"
 #include "thread.h"
 #include "utils/assembler.h"
 #include "utils/stack_checks.h"
@@ -533,6 +533,7 @@
       isa_features_(isa_features),
       method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   // Use a fake return address register to mimic Quick.
   AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
@@ -1696,11 +1697,20 @@
 
   IntrinsicLocationsBuilderX86 intrinsic(codegen_);
   if (intrinsic.TryDispatch(invoke)) {
+    if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeDexCache()) {
+      invoke->GetLocations()->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::Any());
+    }
     return;
   }
 
   HandleInvoke(invoke);
 
+  // For PC-relative dex cache accesses, the invoke has an extra input, the PC-relative address base.
+  if (invoke->HasPcRelativeDexCache()) {
+    invoke->GetLocations()->SetInAt(invoke->GetCurrentMethodInputIndex(),
+                                    Location::RequiresRegister());
+  }
+
   if (codegen_->IsBaseline()) {
     // Baseline does not have enough registers if the current method also
     // needs a register. We therefore do not require a register for it, and let
@@ -3014,7 +3024,7 @@
       DCHECK_EQ(EAX, first.AsRegister<Register>());
       DCHECK_EQ(is_div ? EAX : EDX, out.AsRegister<Register>());
 
-      if (instruction->InputAt(1)->IsIntConstant()) {
+      if (second.IsConstant()) {
         int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
 
         if (imm == 0) {
@@ -3779,16 +3789,6 @@
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
       MethodReference target_method ATTRIBUTE_UNUSED) {
-  if (desired_dispatch_info.method_load_kind ==
-      HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative) {
-    // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
-    return HInvokeStaticOrDirect::DispatchInfo {
-      HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
-      HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
-      0u,
-      0u
-    };
-  }
   switch (desired_dispatch_info.code_ptr_location) {
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
@@ -3805,6 +3805,32 @@
   }
 }
 
+Register CodeGeneratorX86::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
+                                                                 Register temp) {
+  DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
+  Location location = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+  if (!invoke->GetLocations()->Intrinsified()) {
+    return location.AsRegister<Register>();
+  }
+  // For intrinsics we allow any location, so it may be on the stack.
+  if (!location.IsRegister()) {
+    __ movl(temp, Address(ESP, location.GetStackIndex()));
+    return temp;
+  }
+  // For register locations, check if the register was saved. If so, get it from the stack.
+  // Note: There is a chance that the register was saved but not overwritten, so we could
+  // save one load. However, since this is just an intrinsic slow path we prefer this
+  // simple and more robust approach rather than trying to determine if that's the case.
+  SlowPathCode* slow_path = GetCurrentSlowPath();
+  DCHECK(slow_path != nullptr);  // For intrinsified invokes the call is emitted on the slow path.
+  if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
+    int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
+    __ movl(temp, Address(ESP, stack_offset));
+    return temp;
+  }
+  return location.AsRegister<Register>();
+}
+
 void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   switch (invoke->GetMethodLoadKind()) {
@@ -3823,11 +3849,16 @@
       method_patches_.emplace_back(invoke->GetTargetMethod());
       __ Bind(&method_patches_.back().label);  // Bind the label at the end of the "movl" insn.
       break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
-      // TODO: Implement this type.
-      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
-      LOG(FATAL) << "Unsupported";
-      UNREACHABLE();
+    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
+      Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
+                                                                temp.AsRegister<Register>());
+      uint32_t offset = invoke->GetDexCacheArrayOffset();
+      __ movl(temp.AsRegister<Register>(), Address(base_reg, kDummy32BitOffset));
+      // Add the patch entry and bind its label at the end of the instruction.
+      pc_relative_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file, offset);
+      __ Bind(&pc_relative_dex_cache_patches_.back().label);
+      break;
+    }
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
       Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
       Register method_reg;
@@ -3898,23 +3929,33 @@
 
 void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
-  linker_patches->reserve(method_patches_.size() + relative_call_patches_.size());
+  size_t size =
+      method_patches_.size() +
+      relative_call_patches_.size() +
+      pc_relative_dex_cache_patches_.size();
+  linker_patches->reserve(size);
+  // The label points to the end of the "movl" insn but the literal offset for the
+  // method patch needs to point to the embedded constant which occupies the last 4 bytes.
+  constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
   for (const MethodPatchInfo<Label>& info : method_patches_) {
-    // The label points to the end of the "movl" insn but the literal offset for method
-    // patch x86 needs to point to the embedded constant which occupies the last 4 bytes.
-    uint32_t literal_offset = info.label.Position() - 4;
+    uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
     linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
                                                        info.target_method.dex_file,
                                                        info.target_method.dex_method_index));
   }
   for (const MethodPatchInfo<Label>& info : relative_call_patches_) {
-    // The label points to the end of the "call" insn but the literal offset for method
-    // patch x86 needs to point to the embedded constant which occupies the last 4 bytes.
-    uint32_t literal_offset = info.label.Position() - 4;
+    uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
     linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset,
                                                              info.target_method.dex_file,
                                                              info.target_method.dex_method_index));
   }
+  for (const PcRelativeDexCacheAccessInfo& info : pc_relative_dex_cache_patches_) {
+    uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
+    linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(literal_offset,
+                                                              &info.target_dex_file,
+                                                              GetMethodAddressOffset(),
+                                                              info.element_offset));
+  }
 }
 
 void CodeGeneratorX86::MarkGCCard(Register temp,
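
The shared kLabelPositionToLiteralOffsetAdjustment works because each patched
instruction ends in a 32-bit immediate and the Label is bound just past the
instruction's last byte. A worked example at a hypothetical code offset
(movl $imm32, %eax assembles to B8 followed by the four immediate bytes):

  // movl $0x12345678, %eax placed at code offset 0x20:
  //   0x20: B8 78 56 34 12        (5 bytes, immediate in the last 4)
  uint32_t label_position = 0x25;  // Label bound at the end of the insn
  constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
  uint32_t literal_offset =
      label_position - kLabelPositionToLiteralOffsetAdjustment;  // 0x21
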
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index ac3d06c..177a059 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -398,6 +398,18 @@
   void Finalize(CodeAllocator* allocator) OVERRIDE;
 
  private:
+  Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
+
+  struct PcRelativeDexCacheAccessInfo {
+    PcRelativeDexCacheAccessInfo(const DexFile& dex_file, uint32_t element_off)
+        : target_dex_file(dex_file), element_offset(element_off), label() { }
+
+    const DexFile& target_dex_file;
+    uint32_t element_offset;
+    // NOTE: Label is bound to the end of the instruction that has an embedded 32-bit offset.
+    Label label;
+  };
+
   // Labels for each block that will be compiled.
   Label* block_labels_;  // Indexed by block id.
   Label frame_entry_label_;
@@ -410,6 +422,8 @@
   // Method patch info. Using ArenaDeque<> which retains element addresses on push/emplace_back().
   ArenaDeque<MethodPatchInfo<Label>> method_patches_;
   ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
+  // PC-relative DexCache access info.
+  ArenaDeque<PcRelativeDexCacheAccessInfo> pc_relative_dex_cache_patches_;
 
   // Offset to the start of the constant area in the assembled code.
   // Used for fixups to the constant area.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ee8a299..e2ad667 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -514,12 +514,12 @@
       __ Bind(&method_patches_.back().label);  // Bind the label at the end of the "movl" insn.
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
-      pc_rel_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
-                                             invoke->GetDexCacheArrayOffset());
+      pc_relative_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+                                                  invoke->GetDexCacheArrayOffset());
       __ movq(temp.AsRegister<CpuRegister>(),
               Address::Absolute(kDummy32BitOffset, false /* no_rip */));
       // Bind the label at the end of the "movl" insn.
-      __ Bind(&pc_rel_dex_cache_patches_.back().label);
+      __ Bind(&pc_relative_dex_cache_patches_.back().label);
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
       Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
@@ -593,28 +593,27 @@
 void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
-      method_patches_.size() + relative_call_patches_.size() + pc_rel_dex_cache_patches_.size();
+      method_patches_.size() +
+      relative_call_patches_.size() +
+      pc_relative_dex_cache_patches_.size();
   linker_patches->reserve(size);
+  // The label points to the end of the "movl" insn but the literal offset for the
+  // method patch needs to point to the embedded constant which occupies the last 4 bytes.
+  constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
   for (const MethodPatchInfo<Label>& info : method_patches_) {
-    // The label points to the end of the "movl" instruction but the literal offset for method
-    // patch x86 needs to point to the embedded constant which occupies the last 4 bytes.
-    uint32_t literal_offset = info.label.Position() - 4;
+    uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
     linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
                                                        info.target_method.dex_file,
                                                        info.target_method.dex_method_index));
   }
   for (const MethodPatchInfo<Label>& info : relative_call_patches_) {
-    // The label points to the end of the "call" instruction but the literal offset for method
-    // patch x86 needs to point to the embedded constant which occupies the last 4 bytes.
-    uint32_t literal_offset = info.label.Position() - 4;
+    uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
     linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset,
                                                              info.target_method.dex_file,
                                                              info.target_method.dex_method_index));
   }
-  for (const PcRelativeDexCacheAccessInfo& info : pc_rel_dex_cache_patches_) {
-    // The label points to the end of the "mov" instruction but the literal offset for method
-    // patch x86 needs to point to the embedded constant which occupies the last 4 bytes.
-    uint32_t literal_offset = info.label.Position() - 4;
+  for (const PcRelativeDexCacheAccessInfo& info : pc_relative_dex_cache_patches_) {
+    uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
     linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(literal_offset,
                                                               &info.target_dex_file,
                                                               info.label.Position(),
@@ -695,7 +694,7 @@
         constant_area_start_(0),
         method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
         relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-        pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+        pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
         fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
 }
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 7a52473..7084508 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -401,7 +401,7 @@
   ArenaDeque<MethodPatchInfo<Label>> method_patches_;
   ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
   // PC-relative DexCache access info.
-  ArenaDeque<PcRelativeDexCacheAccessInfo> pc_rel_dex_cache_patches_;
+  ArenaDeque<PcRelativeDexCacheAccessInfo> pc_relative_dex_cache_patches_;
 
   // When we don't know the proper offset for the value, we use kDummy32BitOffset.
   // We will fix this up in the linker later to have the right value.
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 716888a..73a44ee 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2048,6 +2048,16 @@
   return !opt.GetDoesNotNeedDexCache();
 }
 
+void HInvokeStaticOrDirect::InsertInputAt(size_t index, HInstruction* input) {
+  inputs_.insert(inputs_.begin() + index, HUserRecord<HInstruction*>(input));
+  input->AddUseAt(this, index);
+  // Update indexes in use nodes of inputs that have been pushed further back by the insert().
+  for (size_t i = index + 1u, size = inputs_.size(); i != size; ++i) {
+    DCHECK_EQ(InputRecordAt(i).GetUseNode()->GetIndex(), i - 1u);
+    InputRecordAt(i).GetUseNode()->SetIndex(i);
+  }
+}
+
 void HInvokeStaticOrDirect::RemoveInputAt(size_t index) {
   RemoveAsUserOfInput(index);
   inputs_.erase(inputs_.begin() + index);
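
InsertInputAt() must renumber the use records because every input after the
insertion point moves one slot to the right; a use node still carrying its old
index would later unlink the wrong input. A simplified stand-alone illustration
of that invariant, not the real HInstruction machinery:

  #include <cassert>
  #include <cstddef>
  #include <vector>

  struct Use { size_t index; };  // stand-in for a use node caching its input index

  void InsertUseAt(std::vector<Use>* uses, size_t index) {
    uses->insert(uses->begin() + index, Use{index});
    // Everything behind the new entry shifted right; fix the cached indexes.
    for (size_t i = index + 1; i != uses->size(); ++i) {
      assert((*uses)[i].index == i - 1);  // stale value from before the insert
      (*uses)[i].index = i;
    }
  }
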
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 79b79d7..2878ac9 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3440,6 +3440,7 @@
     dispatch_info_ = dispatch_info;
   }
 
+  void InsertInputAt(size_t index, HInstruction* input);
   void RemoveInputAt(size_t index);
 
   bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
@@ -3460,7 +3461,7 @@
   bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
   uint32_t GetCurrentMethodInputIndex() const { return GetNumberOfArguments(); }
   bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kDirectAddress; }
-  bool HasPcRelDexCache() const {
+  bool HasPcRelativeDexCache() const {
     return GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative;
   }
   bool HasCurrentMethodInput() const {
@@ -3488,7 +3489,7 @@
   }
 
   uint32_t GetDexCacheArrayOffset() const {
-    DCHECK(HasPcRelDexCache());
+    DCHECK(HasPcRelativeDexCache());
     return dispatch_info_.method_load_data;
   }
 
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 368a30e..2be0680 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -24,7 +24,7 @@
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_x86
-#include "constant_area_fixups_x86.h"
+#include "pc_relative_fixups_x86.h"
 #endif
 
 #include "art_method-inl.h"
@@ -435,10 +435,9 @@
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case kX86: {
-      x86::ConstantAreaFixups* constant_area_fixups =
-          new (arena) x86::ConstantAreaFixups(graph, stats);
+      x86::PcRelativeFixups* pc_relative_fixups = new (arena) x86::PcRelativeFixups(graph, stats);
       HOptimization* x86_optimizations[] = {
-        constant_area_fixups
+          pc_relative_fixups
       };
       RunOptimizations(x86_optimizations, arraysize(x86_optimizations), pass_observer);
       break;
diff --git a/compiler/optimizing/constant_area_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
similarity index 75%
rename from compiler/optimizing/constant_area_fixups_x86.cc
rename to compiler/optimizing/pc_relative_fixups_x86.cc
index c347000..c2894c7 100644
--- a/compiler/optimizing/constant_area_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "constant_area_fixups_x86.h"
+#include "pc_relative_fixups_x86.h"
 
 namespace art {
 namespace x86 {
@@ -22,9 +22,9 @@
 /**
  * Finds instructions that need the constant area base as an input.
  */
-class ConstantHandlerVisitor : public HGraphVisitor {
+class PCRelativeHandlerVisitor : public HGraphVisitor {
  public:
-  explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
+  explicit PCRelativeHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
 
  private:
   void VisitAdd(HAdd* add) OVERRIDE {
@@ -72,7 +72,7 @@
   void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
     // We need to replace the HPackedSwitch with a HX86PackedSwitch in order to
     // address the constant area.
-    InitializeConstantAreaPointer(switch_insn);
+    InitializePCRelativeBasePointer(switch_insn);
     HGraph* graph = GetGraph();
     HBasicBlock* block = switch_insn->GetBlock();
     HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
@@ -84,7 +84,7 @@
     block->ReplaceAndRemoveInstructionWith(switch_insn, x86_switch);
   }
 
-  void InitializeConstantAreaPointer(HInstruction* user) {
+  void InitializePCRelativeBasePointer(HInstruction* user) {
     // Ensure we only initialize the pointer once.
     if (base_ != nullptr) {
       return;
@@ -99,16 +99,24 @@
   }
 
   void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
-    InitializeConstantAreaPointer(insn);
-    HGraph* graph = GetGraph();
-    HBasicBlock* block = insn->GetBlock();
+    InitializePCRelativeBasePointer(insn);
     HX86LoadFromConstantTable* load_constant =
-        new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
-    block->InsertInstructionBefore(load_constant, insn);
+        new (GetGraph()->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
+    insn->GetBlock()->InsertInstructionBefore(load_constant, insn);
     insn->ReplaceInput(load_constant, input_index);
   }
 
   void HandleInvoke(HInvoke* invoke) {
+    // If this is an invoke-static/-direct with PC-relative dex cache array
+    // addressing, we need the PC-relative address base.
+    HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
+    if (invoke_static_or_direct != nullptr && invoke_static_or_direct->HasPcRelativeDexCache()) {
+      InitializePCRelativeBasePointer(invoke);
+      // Add the extra parameter base_.
+      uint32_t index = invoke_static_or_direct->GetCurrentMethodInputIndex();
+      DCHECK(!invoke_static_or_direct->HasCurrentMethodInput());
+      invoke_static_or_direct->InsertInputAt(index, base_);
+    }
     // Ensure that we can load FP arguments from the constant area.
     for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
       HConstant* input = invoke->InputAt(i)->AsConstant();
@@ -123,8 +131,8 @@
   HX86ComputeBaseMethodAddress* base_;
 };
 
-void ConstantAreaFixups::Run() {
-  ConstantHandlerVisitor visitor(graph_);
+void PcRelativeFixups::Run() {
+  PCRelativeHandlerVisitor visitor(graph_);
   visitor.VisitInsertionOrder();
 }
 
diff --git a/compiler/optimizing/constant_area_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
similarity index 67%
rename from compiler/optimizing/constant_area_fixups_x86.h
rename to compiler/optimizing/pc_relative_fixups_x86.h
index 4138039..af708ac 100644
--- a/compiler/optimizing/constant_area_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
-#define ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
+#ifndef ART_COMPILER_OPTIMIZING_PC_RELATIVE_FIXUPS_X86_H_
+#define ART_COMPILER_OPTIMIZING_PC_RELATIVE_FIXUPS_X86_H_
 
 #include "nodes.h"
 #include "optimization.h"
@@ -23,10 +23,10 @@
 namespace art {
 namespace x86 {
 
-class ConstantAreaFixups : public HOptimization {
+class PcRelativeFixups : public HOptimization {
  public:
-  ConstantAreaFixups(HGraph* graph, OptimizingCompilerStats* stats)
-      : HOptimization(graph, "constant_area_fixups_x86", stats) {}
+  PcRelativeFixups(HGraph* graph, OptimizingCompilerStats* stats)
+      : HOptimization(graph, "pc_relative_fixups_x86", stats) {}
 
   void Run() OVERRIDE;
 };
@@ -34,4 +34,4 @@
 }  // namespace x86
 }  // namespace art
 
-#endif  // ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
+#endif  // ART_COMPILER_OPTIMIZING_PC_RELATIVE_FIXUPS_X86_H_
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 92ed58c..68cf6d9 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1357,7 +1357,7 @@
       int32_t image_patch_delta = 0;
 
       if (app_image_ && image_base_ == 0) {
-        gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+        gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
         image_base_ = RoundUp(
             reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatFileEnd()),
             kPageSize);
@@ -1370,7 +1370,7 @@
 
       if (!IsBootImage()) {
         TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
-        gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+        gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
         image_file_location_oat_checksum = image_space->GetImageHeader().GetOatChecksum();
         image_file_location_oat_data_begin =
             reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatDataBegin());
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 304d4e5..5e71053 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -814,7 +814,7 @@
 
   static const ImageHeader& GetBootImageHeader() {
     gc::Heap* heap = Runtime::Current()->GetHeap();
-    gc::space::ImageSpace* image_space = heap->GetImageSpace();
+    gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
     CHECK(image_space != nullptr);
     const ImageHeader& image_header = image_space->GetImageHeader();
     return image_header;
@@ -838,7 +838,7 @@
                      std::ostream* os, pid_t image_diff_pid) {
   ScopedObjectAccess soa(Thread::Current());
   gc::Heap* heap = runtime->GetHeap();
-  gc::space::ImageSpace* image_space = heap->GetImageSpace();
+  gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
   CHECK(image_space != nullptr);
   const ImageHeader& image_header = image_space->GetImageHeader();
   if (!image_header.IsValid()) {
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 82bc8b9..0d6a8c9 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -42,7 +42,7 @@
     CommonRuntimeTest::SetUp();
 
     // We loaded the runtime with an explicit image. Therefore the image space must exist.
-    gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+    gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
     ASSERT_TRUE(image_space != nullptr);
     boot_image_location_ = image_space->GetImageLocation();
   }
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index a163380..5a060af 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -2374,7 +2374,7 @@
 
   ScopedObjectAccess soa(Thread::Current());
   gc::Heap* heap = runtime->GetHeap();
-  gc::space::ImageSpace* image_space = heap->GetImageSpace();
+  gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
   CHECK(image_space != nullptr);
   const ImageHeader& image_header = image_space->GetImageHeader();
   if (!image_header.IsValid()) {
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 64b4722..c587f68 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -189,7 +189,7 @@
     LOG(ERROR) << "unable to map image file " << input_image->GetPath() << " : " << error_msg;
     return false;
   }
-  gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace();
+  gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetBootImageSpace();
 
   PatchOat p(isa, image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(), delta, timings);
   t.NewTiming("Patching files");
@@ -288,7 +288,7 @@
     LOG(ERROR) << "unable to map image file " << input_image->GetPath() << " : " << error_msg;
     return false;
   }
-  gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace();
+  gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetBootImageSpace();
 
   std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat,
                                              PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index ace6c38..36334c4 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -288,6 +288,11 @@
   DISALLOW_COPY_AND_ASSIGN(ArenaPool);
 };
 
+// Fast single-threaded allocator for zero-initialized memory chunks.
+//
+// Memory is allocated from ArenaPool in large chunks and then rationed through
+// the ArenaAllocator. It's returned to the ArenaPool only when the ArenaAllocator
+// is destroyed.
 class ArenaAllocator
     : private DebugStackRefCounter, private ArenaAllocatorStats, private ArenaAllocatorMemoryTool {
  public:
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index a30c73d..a87153b 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -42,6 +42,7 @@
 static constexpr size_t kArenaAlignment = 8;
 
 // Holds a list of Arenas for use by ScopedArenaAllocator stack.
+// The memory is returned to the ArenaPool when the ArenaStack is destroyed.
 class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryTool {
  public:
   explicit ArenaStack(ArenaPool* arena_pool);
@@ -121,6 +122,12 @@
   DISALLOW_COPY_AND_ASSIGN(ArenaStack);
 };
 
+// Fast single-threaded allocator. Allocated chunks are _not_ guaranteed to be zero-initialized.
+//
+// Unlike the ArenaAllocator, ScopedArenaAllocator is intended for relatively short-lived
+// objects and allows nesting multiple allocators. Only the top allocator can be used but
+// once it's destroyed, its memory can be reused by the next ScopedArenaAllocator on the
+// stack. This is facilitated by returning the memory to the ArenaStack.
 class ScopedArenaAllocator
     : private DebugStackReference, private DebugStackRefCounter, private ArenaAllocatorStats {
  public:
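
A usage sketch of the nesting described in the new comment; the constructors
and Alloc() calls follow the headers shown here, but treat the exact signatures
as assumptions:

  ArenaPool pool;
  ArenaStack stack(&pool);
  {
    ScopedArenaAllocator outer(&stack);
    void* a = outer.Alloc(64, kArenaAllocMisc);    // owned by `outer`
    {
      ScopedArenaAllocator inner(&stack);          // `inner` becomes the top allocator
      void* b = inner.Alloc(128, kArenaAllocMisc); // only `inner` may allocate now
    }                                              // `inner`'s memory returns to the stack
    void* c = outer.Alloc(32, kArenaAllocMisc);    // may reuse the space `b` occupied
  }
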
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f252c1e..f649972 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -303,7 +303,7 @@
 ClassLinker::ClassLinker(InternTable* intern_table)
     // dex_lock_ is recursive as it may be used in stack dumping.
     : dex_lock_("ClassLinker dex lock", kDefaultMutexLevel),
-      dex_cache_image_class_lookup_required_(false),
+      dex_cache_boot_image_class_lookup_required_(false),
       failed_dex_cache_class_lookups_(0),
       class_roots_(nullptr),
       array_iftable_(nullptr),
@@ -794,7 +794,7 @@
       CHECK_EQ(field.GetDeclaringClass(), klass);
     }
     auto* runtime = Runtime::Current();
-    auto* image_space = runtime->GetHeap()->GetImageSpace();
+    auto* image_space = runtime->GetHeap()->GetBootImageSpace();
     auto pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
     for (auto& m : klass->GetDirectMethods(pointer_size)) {
       SanityCheckArtMethod(&m, klass, image_space);
@@ -855,10 +855,10 @@
   Runtime* const runtime = Runtime::Current();
   Thread* const self = Thread::Current();
   gc::Heap* const heap = runtime->GetHeap();
-  gc::space::ImageSpace* const space = heap->GetImageSpace();
+  gc::space::ImageSpace* const space = heap->GetBootImageSpace();
   CHECK(space != nullptr);
   image_pointer_size_ = space->GetImageHeader().GetPointerSize();
-  dex_cache_image_class_lookup_required_ = true;
+  dex_cache_boot_image_class_lookup_required_ = true;
   const OatFile* oat_file = runtime->GetOatFileManager().RegisterImageOatFile(space);
   DCHECK(oat_file != nullptr);
   CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
@@ -1086,8 +1086,8 @@
 }
 
 void ClassLinker::VisitClasses(ClassVisitor* visitor) {
-  if (dex_cache_image_class_lookup_required_) {
-    MoveImageClassesToClassTable();
+  if (dex_cache_boot_image_class_lookup_required_) {
+    AddBootImageClassesToClassTable();
   }
   Thread* const self = Thread::Current();
   ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -2643,11 +2643,13 @@
   if (existing != nullptr) {
     return existing;
   }
-  if (kIsDebugBuild && !klass->IsTemp() && class_loader == nullptr &&
-      dex_cache_image_class_lookup_required_) {
+  if (kIsDebugBuild &&
+      !klass->IsTemp() &&
+      class_loader == nullptr &&
+      dex_cache_boot_image_class_lookup_required_) {
     // Check that a class loaded with the system class loader matches one in the image if the
     // class is in the image.
-    existing = LookupClassFromImage(descriptor);
+    existing = LookupClassFromBootImage(descriptor);
     if (existing != nullptr) {
       CHECK_EQ(klass, existing);
     }
@@ -2691,11 +2693,11 @@
       }
     }
   }
-  if (class_loader != nullptr || !dex_cache_image_class_lookup_required_) {
+  if (class_loader != nullptr || !dex_cache_boot_image_class_lookup_required_) {
     return nullptr;
   }
   // Lookup failed but need to search dex_caches_.
-  mirror::Class* result = LookupClassFromImage(descriptor);
+  mirror::Class* result = LookupClassFromBootImage(descriptor);
   if (result != nullptr) {
     result = InsertClass(descriptor, result, hash);
   } else {
@@ -2704,37 +2706,43 @@
     // classes into the class table.
     constexpr uint32_t kMaxFailedDexCacheLookups = 1000;
     if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) {
-      MoveImageClassesToClassTable();
+      AddBootImageClassesToClassTable();
     }
   }
   return result;
 }
 
-static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches()
+static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches(gc::space::ImageSpace* image_space)
     SHARED_REQUIRES(Locks::mutator_lock_) {
-  gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
-  CHECK(image != nullptr);
-  mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
+  CHECK(image_space != nullptr);
+  mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
+  DCHECK(root != nullptr);
   return root->AsObjectArray<mirror::DexCache>();
 }
 
-void ClassLinker::MoveImageClassesToClassTable() {
+void ClassLinker::AddBootImageClassesToClassTable() {
+  if (dex_cache_boot_image_class_lookup_required_) {
+    AddImageClassesToClassTable(Runtime::Current()->GetHeap()->GetBootImageSpace(),
+                                /*class_loader*/nullptr);
+    dex_cache_boot_image_class_lookup_required_ = false;
+  }
+}
+
+void ClassLinker::AddImageClassesToClassTable(gc::space::ImageSpace* image_space,
+                                              mirror::ClassLoader* class_loader) {
   Thread* self = Thread::Current();
   WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
-  if (!dex_cache_image_class_lookup_required_) {
-    return;  // All dex cache classes are already in the class table.
-  }
   ScopedAssertNoThreadSuspension ants(self, "Moving image classes to class table");
-  mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches();
+  mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches(image_space);
   std::string temp;
-  ClassTable* const class_table = InsertClassTableForClassLoader(nullptr);
+  ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
   for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
     mirror::DexCache* dex_cache = dex_caches->Get(i);
     GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes();
     for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
       mirror::Class* klass = types[j].Read();
       if (klass != nullptr) {
-        DCHECK(klass->GetClassLoader() == nullptr);
+        DCHECK_EQ(klass->GetClassLoader(), class_loader);
         const char* descriptor = klass->GetDescriptor(&temp);
         size_t hash = ComputeModifiedUtf8Hash(descriptor);
         mirror::Class* existing = class_table->Lookup(descriptor, hash);
@@ -2750,7 +2758,6 @@
       }
     }
   }
-  dex_cache_image_class_lookup_required_ = false;
 }
 
 class MoveClassTableToPreZygoteVisitor : public ClassLoaderVisitor {
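
Read together, the class_linker.cc hunks above split the old MoveImageClassesToClassTable() into
a boot-image wrapper plus a reusable helper. A sketch of the resulting call pattern (the
app-image case is an assumption suggested by the new class_loader parameter, not something this
change exercises):

  // Boot path: bulk-insert boot image classes under the null class loader and
  // clear dex_cache_boot_image_class_lookup_required_ so future lookups stop
  // scanning dex caches.
  AddBootImageClassesToClassTable();

  // General helper: pair any image space with the class loader that defines
  // its classes (intended for future app images).
  AddImageClassesToClassTable(image_space, class_loader);
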
@@ -2774,9 +2781,10 @@
   VisitClassLoaders(&visitor);
 }
 
-mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
+mirror::Class* ClassLinker::LookupClassFromBootImage(const char* descriptor) {
   ScopedAssertNoThreadSuspension ants(Thread::Current(), "Image class lookup");
-  mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches();
+  mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches(
+      Runtime::Current()->GetHeap()->GetBootImageSpace());
   for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
     mirror::DexCache* dex_cache = dex_caches->Get(i);
     const DexFile* dex_file = dex_cache->GetDexFile();
@@ -2818,8 +2826,8 @@
 
 void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Class*>& result) {
   result.clear();
-  if (dex_cache_image_class_lookup_required_) {
-    MoveImageClassesToClassTable();
+  if (dex_cache_boot_image_class_lookup_required_) {
+    AddBootImageClassesToClassTable();
   }
   Thread* const self = Thread::Current();
   ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -4142,10 +4150,10 @@
         Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
       }
       CHECK_EQ(existing, klass.Get());
-      if (kIsDebugBuild && class_loader == nullptr && dex_cache_image_class_lookup_required_) {
+      if (kIsDebugBuild && class_loader == nullptr && dex_cache_boot_image_class_lookup_required_) {
         // Check that a class loaded with the system class loader matches one in the image if
         // the class is in the image.
-        mirror::Class* const image_class = LookupClassFromImage(descriptor);
+        mirror::Class* const image_class = LookupClassFromBootImage(descriptor);
         if (image_class != nullptr) {
           CHECK_EQ(klass.Get(), existing) << descriptor;
         }
@@ -6423,8 +6431,8 @@
 
 void ClassLinker::DumpForSigQuit(std::ostream& os) {
   ScopedObjectAccess soa(Thread::Current());
-  if (dex_cache_image_class_lookup_required_) {
-    MoveImageClassesToClassTable();
+  if (dex_cache_boot_image_class_lookup_required_) {
+    AddBootImageClassesToClassTable();
   }
   ReaderMutexLock mu(soa.Self(), *Locks::classlinker_classes_lock_);
   os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes="
@@ -6461,8 +6469,8 @@
 }
 
 size_t ClassLinker::NumLoadedClasses() {
-  if (dex_cache_image_class_lookup_required_) {
-    MoveImageClassesToClassTable();
+  if (dex_cache_boot_image_class_lookup_required_) {
+    AddBootImageClassesToClassTable();
   }
   ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
   // Only return non-zygote classes since these are the ones that apps care about.
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d6a27b1..21f9e7b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -487,10 +487,17 @@
     return class_roots;
   }
 
-  // Move all of the image classes into the class table for faster lookups.
-  void MoveImageClassesToClassTable()
+  // Move all of the boot image classes into the class table for faster lookups.
+  void AddBootImageClassesToClassTable()
       REQUIRES(!Locks::classlinker_classes_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Add image classes to the class table.
+  void AddImageClassesToClassTable(gc::space::ImageSpace* image_space,
+                                   mirror::ClassLoader* class_loader)
+      REQUIRES(!Locks::classlinker_classes_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
   // that no more classes are ever added to the pre-zygote table, which keeps its pages shared
   // dirty instead of private dirty.
@@ -909,7 +916,7 @@
   void EnsurePreverifiedMethods(Handle<mirror::Class> c)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::Class* LookupClassFromImage(const char* descriptor)
+  mirror::Class* LookupClassFromBootImage(const char* descriptor)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns null if not found.
@@ -973,8 +980,8 @@
   // New class roots, only used by CMS since the GC needs to mark these in the pause.
   std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
 
-  // Do we need to search dex caches to find image classes?
-  bool dex_cache_image_class_lookup_required_;
+  // Do we need to search dex caches to find boot image classes?
+  bool dex_cache_boot_image_class_lookup_required_;
   // Number of times we've searched dex caches for a class. After a certain number of misses we move
   // the classes into the class_table_ to avoid dex cache based searches.
   Atomic<uint32_t> failed_dex_cache_class_lookups_;
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 1361f7b..8f7bb94 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -487,7 +487,7 @@
 
 // Mark all references to the alloc space(s).
 void ModUnionTableCardCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor) {
-  auto* image_space = heap_->GetImageSpace();
+  auto* image_space = heap_->GetBootImageSpace();
   // If we don't have an image space, just pass in space_ as the immune space. Pass in the same
   // space_ instead of image_space to avoid a null check in ModUnionUpdateObjectReferencesVisitor.
   CardBitVisitor bit_visitor(visitor, space_, image_space != nullptr ? image_space : space_,
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 4a49712..f4cf3ae 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -23,7 +23,7 @@
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/reference_processor.h"
 #include "gc/space/image_space.h"
-#include "gc/space/space.h"
+#include "gc/space/space-inl.h"
 #include "intern_table.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
@@ -358,13 +358,17 @@
     // scan the image objects from roots by relying on the card table,
     // but it's necessary for the RB to-space invariant to hold.
     TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
-    gc::space::ImageSpace* image = heap_->GetImageSpace();
-    if (image != nullptr) {
-      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
-      mirror::Object* marked_image_root = Mark(image_root);
-      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
-      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
-        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
+    for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
+      if (space->IsImageSpace()) {
+        gc::space::ImageSpace* image = space->AsImageSpace();
+        if (image != nullptr) {
+          mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
+          mirror::Object* marked_image_root = Mark(image_root);
+          CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
+          if (ReadBarrier::kEnableToSpaceInvariantChecks) {
+            AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
+          }
+        }
       }
     }
   }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index fd2426b..da9a79e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -233,7 +233,8 @@
       backtrace_lock_(nullptr),
       seen_backtrace_count_(0u),
       unique_backtrace_count_(0u),
-      gc_disabled_for_shutdown_(false) {
+      gc_disabled_for_shutdown_(false),
+      boot_image_space_(nullptr) {
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
@@ -262,15 +263,16 @@
   if (!image_file_name.empty()) {
     ATRACE_BEGIN("ImageSpace::Create");
     std::string error_msg;
-    auto* image_space = space::ImageSpace::Create(image_file_name.c_str(), image_instruction_set,
+    boot_image_space_ = space::ImageSpace::Create(image_file_name.c_str(),
+                                                  image_instruction_set,
                                                   &error_msg);
     ATRACE_END();
-    if (image_space != nullptr) {
-      AddSpace(image_space);
+    if (boot_image_space_ != nullptr) {
+      AddSpace(boot_image_space_);
       // Oat files referenced by image files immediately follow them in memory; ensure the alloc
       // space isn't going to end up in the middle.
-      uint8_t* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
-      CHECK_GT(oat_file_end_addr, image_space->End());
+      uint8_t* oat_file_end_addr = boot_image_space_->GetImageHeader().GetOatFileEnd();
+      CHECK_GT(oat_file_end_addr, boot_image_space_->End());
       requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
     } else {
       LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
@@ -454,11 +456,11 @@
     rb_table_.reset(new accounting::ReadBarrierTable());
     DCHECK(rb_table_->IsAllCleared());
   }
-  if (GetImageSpace() != nullptr) {
+  if (GetBootImageSpace() != nullptr) {
     // Don't add the image mod union table if we are running without an image, as this can crash
     // if we use the CardCache implementation.
     accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
-        "Image mod-union table", this, GetImageSpace());
+        "Image mod-union table", this, GetBootImageSpace());
     CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
     AddModUnionTable(mod_union_table);
   }
@@ -523,12 +525,12 @@
       garbage_collectors_.push_back(mark_compact_collector_);
     }
   }
-  if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
+  if (GetBootImageSpace() != nullptr && non_moving_space_ != nullptr &&
       (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
     // Check that there's no gap between the image space and the non-moving space so that the
     // immune region won't break (e.g. due to a large object allocated in the gap). This is
     // only required when we're the zygote or using GSS.
-    bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
+    bool no_gap = MemMap::CheckNoGaps(GetBootImageSpace()->GetMemMap(),
                                       non_moving_space_->GetMemMap());
     if (!no_gap) {
       PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
@@ -748,15 +750,6 @@
   return true;
 }
 
-bool Heap::HasImageSpace() const {
-  for (const auto& space : continuous_spaces_) {
-    if (space->IsImageSpace()) {
-      return true;
-    }
-  }
-  return false;
-}
-
 void Heap::IncrementDisableMovingGC(Thread* self) {
   // Need to do this holding the lock to prevent races where the GC is about to run or is
   // running when we attempt to disable it.
@@ -1209,13 +1202,8 @@
   return FindDiscontinuousSpaceFromObject(obj, fail_ok);
 }
 
-space::ImageSpace* Heap::GetImageSpace() const {
-  for (const auto& space : continuous_spaces_) {
-    if (space->IsImageSpace()) {
-      return space->AsImageSpace();
-    }
-  }
-  return nullptr;
+space::ImageSpace* Heap::GetBootImageSpace() const {
+  return boot_image_space_;
 }
 
 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index cc48172..e23b1a3 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -580,9 +580,9 @@
   // Unbind any bound bitmaps.
   void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
 
-  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
-  // Assumes there is only one image space.
-  space::ImageSpace* GetImageSpace() const;
+  // Returns the boot image space. There may be multiple image spaces, but there is only one boot
+  // image space.
+  space::ImageSpace* GetBootImageSpace() const;
 
   // Permanently disable moving garbage collection.
   void DisableMovingGc() REQUIRES(!*gc_complete_lock_);
@@ -660,7 +660,9 @@
   void RemoveRememberedSet(space::Space* space);
 
   bool IsCompilingBoot() const;
-  bool HasImageSpace() const;
+  bool HasImageSpace() const {
+    return boot_image_space_ != nullptr;
+  }
 
   ReferenceProcessor* GetReferenceProcessor() {
     return reference_processor_.get();
@@ -1320,6 +1322,9 @@
   // allocating.
   bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
 
+  // Boot image space.
+  space::ImageSpace* boot_image_space_;
+
   friend class CollectorTransitionTask;
   friend class collector::GarbageCollector;
   friend class collector::MarkCompact;
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index f4658d5..e2e4782 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -187,7 +187,7 @@
   if (image_added_to_intern_table_) {
     return nullptr;
   }
-  gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
+  gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetBootImageSpace();
   if (image == nullptr) {
     return nullptr;  // No image present.
   }
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index df6936b..f1f4a03 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -745,6 +745,15 @@
   return ERR_NONE;
 }
 
+// Default implementation for IDEs relying on this command.
+static JdwpError M_IsObsolete(JdwpState*, Request* request, ExpandBuf* reply)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  request->ReadRefTypeId();  // unused reference type ID
+  request->ReadMethodId();   // unused method ID
+  expandBufAdd1(reply, false);  // a method is never obsolete.
+  return ERR_NONE;
+}
+
 /*
  * Given an object reference, return the runtime type of the object
  * (class or array).
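
In JDWP wire terms the new handler is deliberately minimal (a sketch of the exchange for command
set 6, command 4; the "never obsolete" claim holds because ART does not redefine methods):

  // Request: refTypeId + methodId (both read and discarded above).
  // Reply:   a single boolean, always false.
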
@@ -1477,7 +1486,7 @@
   { 6,    1,  M_LineTable,                "Method.LineTable" },
   { 6,    2,  M_VariableTable,            "Method.VariableTable" },
   { 6,    3,  M_Bytecodes,                "Method.Bytecodes" },
-  { 6,    4,  nullptr,                    "Method.IsObsolete" },
+  { 6,    4,  M_IsObsolete,               "Method.IsObsolete" },
   { 6,    5,  M_VariableTableWithGeneric, "Method.VariableTableWithGeneric" },
 
   /* Field command set (8) */
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 45610dc..be869d4 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -254,7 +254,11 @@
   StackHandleScope<1> hs(self);
   Handle<String> string(hs.NewHandle(this));
   CharArray* result = CharArray::Alloc(self, GetLength());
-  memcpy(result->GetData(), string->GetValue(), string->GetLength() * sizeof(uint16_t));
+  if (result != nullptr) {
+    memcpy(result->GetData(), string->GetValue(), string->GetLength() * sizeof(uint16_t));
+  } else {
+    self->AssertPendingOOMException();
+  }
   return result;
 }
 
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index da21fee..19c71f6 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -485,10 +485,7 @@
         DCHECK(why == kTimedWaiting || why == kSleeping) << why;
         self->GetWaitConditionVariable()->TimedWait(self, ms, ns);
       }
-      if (self->IsInterruptedLocked()) {
-        was_interrupted = true;
-      }
-      self->SetInterruptedLocked(false);
+      was_interrupted = self->IsInterruptedLocked();
     }
   }
 
@@ -522,7 +519,7 @@
 
   monitor_lock_.Unlock(self);
 
-  if (was_interrupted) {
+  if (was_interrupted && interruptShouldThrow) {
     /*
      * We were interrupted while waiting, or somebody interrupted an
      * un-interruptible thread earlier and we're bailing out immediately.
@@ -534,9 +531,7 @@
       MutexLock mu(self, *self->GetWaitMutex());
       self->SetInterruptedLocked(false);
     }
-    if (interruptShouldThrow) {
-      self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
-    }
+    self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
   }
 }
 
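Combining the monitor.cc hunks above: the interrupt flag is now consumed only on the path that
throws, so a wait with interruptShouldThrow == false leaves the thread's interrupt status set. A
condensed reading of the new control flow (not code from the commit):

  was_interrupted = self->IsInterruptedLocked();  // observe the flag, don't clear it
  // ... wake up and release the monitor ...
  if (was_interrupted && interruptShouldThrow) {
    {
      MutexLock mu(self, *self->GetWaitMutex());
      self->SetInterruptedLocked(false);  // consume the flag only when throwing
    }
    self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
  }
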
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 8b2f4d8..e85434ee 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -149,8 +149,13 @@
   void operator=(const NullableScopedUtfChars&);
 };
 
-static jobject DexFile_openDexFileNative(
-    JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
+static jobject DexFile_openDexFileNative(JNIEnv* env,
+                                         jclass,
+                                         jstring javaSourceName,
+                                         jstring javaOutputName,
+                                         jint flags ATTRIBUTE_UNUSED,
+                                         // class_loader will be used for app images.
+                                         jobject class_loader ATTRIBUTE_UNUSED) {
   ScopedUtfChars sourceName(env, javaSourceName);
   if (sourceName.c_str() == nullptr) {
     return 0;
@@ -441,7 +446,7 @@
   NATIVE_METHOD(DexFile, getDexOptNeeded,
                 "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)I"),
   NATIVE_METHOD(DexFile, openDexFileNative,
-                "(Ljava/lang/String;Ljava/lang/String;I)Ljava/lang/Object;"),
+                "(Ljava/lang/String;Ljava/lang/String;ILjava/lang/ClassLoader;)Ljava/lang/Object;"),
 };
 
 void register_dalvik_system_DexFile(JNIEnv* env) {
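
For readers decoding the updated registration string: each descriptor component maps to one
native parameter of DexFile_openDexFileNative (standard JNI, spelled out for convenience):

  // (Ljava/lang/String;Ljava/lang/String;ILjava/lang/ClassLoader;)Ljava/lang/Object;
  //   Ljava/lang/String;       -> jstring javaSourceName
  //   Ljava/lang/String;       -> jstring javaOutputName
  //   I                        -> jint    flags        (ATTRIBUTE_UNUSED for now)
  //   Ljava/lang/ClassLoader;  -> jobject class_loader (reserved for app images)
  //   Ljava/lang/Object;       -> jobject return value
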
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 99080f6..0f3a013 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -846,7 +846,7 @@
 
 std::string OatFileAssistant::ImageLocation() {
   Runtime* runtime = Runtime::Current();
-  const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+  const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetBootImageSpace();
   if (image_space == nullptr) {
     return "";
   }
@@ -949,21 +949,23 @@
     image_info_load_attempted_ = true;
 
     Runtime* runtime = Runtime::Current();
-    const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+    const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetBootImageSpace();
     if (image_space != nullptr) {
       cached_image_info_.location = image_space->GetImageLocation();
 
       if (isa_ == kRuntimeISA) {
         const ImageHeader& image_header = image_space->GetImageHeader();
         cached_image_info_.oat_checksum = image_header.GetOatChecksum();
-        cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
+        cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
+            image_header.GetOatDataBegin());
         cached_image_info_.patch_delta = image_header.GetPatchDelta();
       } else {
         std::unique_ptr<ImageHeader> image_header(
             gc::space::ImageSpace::ReadImageHeaderOrDie(
                 cached_image_info_.location.c_str(), isa_));
         cached_image_info_.oat_checksum = image_header->GetOatChecksum();
-        cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
+        cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
+            image_header->GetOatDataBegin());
         cached_image_info_.patch_delta = image_header->GetPatchDelta();
       }
     }
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index c54d7f8..8c7efb2 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -223,7 +223,7 @@
         false, dex_location.c_str(), &error_msg));
     ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
 
-    const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+    const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetBootImageSpace();
     ASSERT_TRUE(image_space != nullptr);
     const ImageHeader& image_header = image_space->GetImageHeader();
     const OatHeader& oat_header = odex_file->GetOatHeader();
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 9eee156..ea6d3ff 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -79,11 +79,8 @@
 }
 
 const OatFile* OatFileManager::GetBootOatFile() const {
-  gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
-  if (image_space == nullptr) {
-    return nullptr;
-  }
-  return image_space->GetOatFile();
+  gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
+  return (image_space == nullptr) ? nullptr : image_space->GetOatFile();
 }
 
 const OatFile* OatFileManager::GetPrimaryOatFile() const {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 556ba56..17f34c0 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -528,13 +528,13 @@
   // Use !IsAotCompiler so that we get test coverage; tests are never the zygote.
   if (!IsAotCompiler()) {
     ScopedObjectAccess soa(self);
-    gc::space::ImageSpace* image_space = heap_->GetImageSpace();
+    gc::space::ImageSpace* image_space = heap_->GetBootImageSpace();
     if (image_space != nullptr) {
       ATRACE_BEGIN("AddImageStringsToTable");
       GetInternTable()->AddImageStringsToTable(image_space);
       ATRACE_END();
       ATRACE_BEGIN("MoveImageClassesToClassTable");
-      GetClassLinker()->MoveImageClassesToClassTable();
+      GetClassLinker()->AddBootImageClassesToClassTable();
       ATRACE_END();
     }
   }
@@ -930,7 +930,7 @@
                        runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));
   ATRACE_END();
 
-  if (heap_->GetImageSpace() == nullptr && !allow_dex_file_fallback_) {
+  if (heap_->GetBootImageSpace() == nullptr && !allow_dex_file_fallback_) {
     LOG(ERROR) << "Dex file fallback disabled, cannot continue without image.";
     ATRACE_END();
     return false;
@@ -1043,7 +1043,7 @@
     class_linker_->InitFromImage();
     ATRACE_END();
     if (kIsDebugBuild) {
-      GetHeap()->GetImageSpace()->VerifyImageAllocations();
+      GetHeap()->GetBootImageSpace()->VerifyImageAllocations();
     }
     if (boot_class_path_string_.empty()) {
       // The bootclasspath is not explicitly specified: construct it from the loaded dex files.
diff --git a/test/061-out-of-memory/expected.txt b/test/061-out-of-memory/expected.txt
index ca87629..c31980c 100644
--- a/test/061-out-of-memory/expected.txt
+++ b/test/061-out-of-memory/expected.txt
@@ -4,4 +4,5 @@
 testOomeLarge succeeded
 testOomeSmall beginning
 testOomeSmall succeeded
+Got expected toCharArray OOM
 tests succeeded
diff --git a/test/061-out-of-memory/src/Main.java b/test/061-out-of-memory/src/Main.java
index c812c81..bda978e 100644
--- a/test/061-out-of-memory/src/Main.java
+++ b/test/061-out-of-memory/src/Main.java
@@ -26,6 +26,7 @@
         testHugeArray();
         testOomeLarge();
         testOomeSmall();
+        testOomeToCharArray();
         System.out.println("tests succeeded");
     }
 
@@ -106,4 +107,21 @@
         }
         System.out.println("testOomeSmall succeeded");
     }
+
+    private static void testOomeToCharArray() {
+        Object[] o = new Object[2000000];
+        String test = "test";
+        int i = 0;
+        try {
+            for (; i < o.length; ++i) o[i] = new char[1000000];
+        } catch (OutOfMemoryError oom) {}
+        try {
+            for (; i < o.length; ++i) {
+                o[i] = test.toCharArray();
+            }
+        } catch (OutOfMemoryError oom) {
+            o = null;
+            System.out.println("Got expected toCharArray OOM");
+        }
+    }
 }
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
index 3e533ad..b6b1c43 100644
--- a/test/117-nopatchoat/nopatchoat.cc
+++ b/test/117-nopatchoat/nopatchoat.cc
@@ -35,7 +35,7 @@
   }
 
   static bool isRelocationDeltaZero() {
-    gc::space::ImageSpace* space = Runtime::Current()->GetHeap()->GetImageSpace();
+    gc::space::ImageSpace* space = Runtime::Current()->GetHeap()->GetBootImageSpace();
     return space != nullptr && space->GetImageHeader().GetPatchDelta() == 0;
   }
 
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 78f8842..7762b2d 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -92,7 +92,7 @@
 // detecting this.
 #if __linux__
 static bool IsPicImage() {
-  gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+  gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
   CHECK(image_space != nullptr);  // We should be running with an image.
   const OatFile* oat_file = image_space->GetOatFile();
   CHECK(oat_file != nullptr);     // We should have an oat file to go with the image.
diff --git a/test/964-default-iface-init-generated/util-src/generate_smali.py b/test/964-default-iface-init-generated/util-src/generate_smali.py
index 3c138ab..c0ba157 100755
--- a/test/964-default-iface-init-generated/util-src/generate_smali.py
+++ b/test/964-default-iface-init-generated/util-src/generate_smali.py
@@ -477,12 +477,12 @@
       ifaces = []
       for sub in split:
         ifaces.append(list(create_interface_trees(sub)))
-    for supers in itertools.product(*ifaces):
-      yield TestClass(clone_all(supers))
-      for i in range(len(set(dump_tree(supers)) - set(supers))):
-        ns = clone_all(supers)
-        selected = sorted(set(dump_tree(ns)) - set(ns))[i]
-        yield TestClass(tuple([selected] + list(ns)))
+      for supers in itertools.product(*ifaces):
+        yield TestClass(clone_all(supers))
+        for i in range(len(set(dump_tree(supers)) - set(supers))):
+          ns = clone_all(supers)
+          selected = sorted(set(dump_tree(ns)) - set(ns))[i]
+          yield TestClass(tuple([selected] + list(ns)))
 
 def create_interface_trees(num):
   """
diff --git a/test/968-default-partial-compilation-generated/build b/test/968-default-partial-compile-generated/build
similarity index 100%
rename from test/968-default-partial-compilation-generated/build
rename to test/968-default-partial-compile-generated/build
diff --git a/test/968-default-partial-compilation-generated/expected.txt b/test/968-default-partial-compile-generated/expected.txt
similarity index 100%
rename from test/968-default-partial-compilation-generated/expected.txt
rename to test/968-default-partial-compile-generated/expected.txt
diff --git a/test/968-default-partial-compilation-generated/info.txt b/test/968-default-partial-compile-generated/info.txt
similarity index 100%
rename from test/968-default-partial-compilation-generated/info.txt
rename to test/968-default-partial-compile-generated/info.txt
diff --git a/test/968-default-partial-compilation-generated/run b/test/968-default-partial-compile-generated/run
similarity index 100%
rename from test/968-default-partial-compilation-generated/run
rename to test/968-default-partial-compile-generated/run
diff --git a/test/968-default-partial-compilation-generated/util-src/generate_java.py b/test/968-default-partial-compile-generated/util-src/generate_java.py
similarity index 100%
rename from test/968-default-partial-compilation-generated/util-src/generate_java.py
rename to test/968-default-partial-compile-generated/util-src/generate_java.py
diff --git a/test/968-default-partial-compilation-generated/util-src/generate_smali.py b/test/968-default-partial-compile-generated/util-src/generate_smali.py
similarity index 100%
rename from test/968-default-partial-compilation-generated/util-src/generate_smali.py
rename to test/968-default-partial-compile-generated/util-src/generate_smali.py
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 0a0b6c4..9d13da7 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -226,7 +226,7 @@
   960-default-smali \
   961-default-iface-resolution-generated \
   964-default-iface-init-generated \
-  968-default-partial-compilation-generated
+  968-default-partial-compile-generated
 
 # Check if we have python3 to run our tests.
 ifeq ($(wildcard /usr/bin/python3),)
diff --git a/test/run-test b/test/run-test
index 9b0261e..b2f6546 100755
--- a/test/run-test
+++ b/test/run-test
@@ -708,10 +708,7 @@
 # To cause tests to fail fast, limit the file sizes created by dx, dex2oat and ART output to 2MB.
 build_file_size_limit=2048
 run_file_size_limit=2048
-if echo "$test_dir" | grep 089; then
-  build_file_size_limit=5120
-  run_file_size_limit=5120
-elif echo "$test_dir" | grep 083; then
+if echo "$test_dir" | grep -Eq "(083|089|964)" > /dev/null; then
   build_file_size_limit=5120
   run_file_size_limit=5120
 fi