Merge "Ensure that BitTableAccessor refers to non-null table."
diff --git a/compiler/Android.bp b/compiler/Android.bp
index be963fb..11521e6 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -63,6 +63,7 @@
         "optimizing/inliner.cc",
         "optimizing/instruction_builder.cc",
         "optimizing/instruction_simplifier.cc",
+        "optimizing/intrinsic_objects.cc",
         "optimizing/intrinsics.cc",
         "optimizing/licm.cc",
         "optimizing/linear_order.cc",
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 1d4f020..c37d452 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -136,9 +136,8 @@
 
-// Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
-// driver assumes ownership of the set, so the test should properly release the set.
+// Get the set of image classes given to the compiler-driver in SetUp.
-std::unordered_set<std::string>* CommonCompilerTest::GetImageClasses() {
+std::unique_ptr<HashSet<std::string>> CommonCompilerTest::GetImageClasses() {
   // Empty set: by default no classes are retained in the image.
-  return new std::unordered_set<std::string>();
+  return std::make_unique<HashSet<std::string>>();
 }
 
 // Get ProfileCompilationInfo that should be passed to the driver.
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 39c8bd8..46b59a3 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -18,9 +18,9 @@
 #define ART_COMPILER_COMMON_COMPILER_TEST_H_
 
 #include <list>
-#include <unordered_set>
 #include <vector>
 
+#include "base/hash_set.h"
 #include "common_runtime_test.h"
 #include "compiler.h"
 #include "oat_file.h"
@@ -63,9 +63,8 @@
 
   InstructionSet GetInstructionSet() const;
 
-  // Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
-  // driver assumes ownership of the set, so the test should properly release the set.
-  virtual std::unordered_set<std::string>* GetImageClasses();
+  // Get the set of image classes given to the compiler-driver in SetUp.
+  virtual std::unique_ptr<HashSet<std::string>> GetImageClasses();
 
   virtual ProfileCompilationInfo* GetProfileCompilationInfo();
 
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index aa8277e..d56b135 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -21,6 +21,7 @@
 
 #include <android-base/logging.h>
 
+#include "base/data_hash.h"
 #include "base/utils.h"
 #include "compiled_method.h"
 #include "linker/linker_patch.h"
@@ -80,65 +81,7 @@
 
  public:
   size_t operator()(const ArrayRef<ContentType>& array) const {
-    const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
-    // TODO: More reasonable assertion.
-    // static_assert(IsPowerOfTwo(sizeof(ContentType)),
-    //    "ContentType is not power of two, don't know whether array layout is as assumed");
-    uint32_t len = sizeof(ContentType) * array.size();
-    if (kUseMurmur3Hash) {
-      static constexpr uint32_t c1 = 0xcc9e2d51;
-      static constexpr uint32_t c2 = 0x1b873593;
-      static constexpr uint32_t r1 = 15;
-      static constexpr uint32_t r2 = 13;
-      static constexpr uint32_t m = 5;
-      static constexpr uint32_t n = 0xe6546b64;
-
-      uint32_t hash = 0;
-
-      const int nblocks = len / 4;
-      typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
-      const unaligned_uint32_t *blocks = reinterpret_cast<const uint32_t*>(data);
-      int i;
-      for (i = 0; i < nblocks; i++) {
-        uint32_t k = blocks[i];
-        k *= c1;
-        k = (k << r1) | (k >> (32 - r1));
-        k *= c2;
-
-        hash ^= k;
-        hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
-      }
-
-      const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
-      uint32_t k1 = 0;
-
-      switch (len & 3) {
-        case 3:
-          k1 ^= tail[2] << 16;
-          FALLTHROUGH_INTENDED;
-        case 2:
-          k1 ^= tail[1] << 8;
-          FALLTHROUGH_INTENDED;
-        case 1:
-          k1 ^= tail[0];
-
-          k1 *= c1;
-          k1 = (k1 << r1) | (k1 >> (32 - r1));
-          k1 *= c2;
-          hash ^= k1;
-      }
-
-      hash ^= len;
-      hash ^= (hash >> 16);
-      hash *= 0x85ebca6b;
-      hash ^= (hash >> 13);
-      hash *= 0xc2b2ae35;
-      hash ^= (hash >> 16);
-
-      return hash;
-    } else {
-      return HashBytes(data, len);
-    }
+    return DataHash()(array);
   }
 };
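
The dropped Murmur3 block is replaced by a call to the shared DataHash functor, keeping byte hashing in one place. A minimal standalone sketch of the delegation pattern (plain C++; the FNV-1a body below is an illustrative stand-in, not necessarily DataHash's real algorithm):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stand-in for art::DataHash: any byte-oriented hash works for the sketch.
    struct DataHash {
      size_t operator()(const uint8_t* data, size_t len) const {
        uint64_t hash = 0xcbf29ce484222325u;  // FNV-1a offset basis.
        for (size_t i = 0; i != len; ++i) {
          hash = (hash ^ data[i]) * 0x100000001b3u;  // FNV-1a prime.
        }
        return static_cast<size_t>(hash);
      }
    };

    // The deduplication functor reinterprets the array as raw bytes and
    // forwards to the shared hasher, as the new operator() above does.
    template <typename ContentType>
    struct DedupHash {
      size_t operator()(const std::vector<ContentType>& array) const {
        return DataHash()(reinterpret_cast<const uint8_t*>(array.data()),
                          array.size() * sizeof(ContentType));
      }
    };

    int main() {
      std::vector<uint32_t> a = {1u, 2u, 3u};
      std::vector<uint32_t> b = {1u, 2u, 3u};
      assert(DedupHash<uint32_t>()(a) == DedupHash<uint32_t>()(b));  // dedup requirement
      return 0;
    }
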
 
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6cb3936..bd2b107 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -264,7 +264,7 @@
     Compiler::Kind compiler_kind,
     InstructionSet instruction_set,
     const InstructionSetFeatures* instruction_set_features,
-    std::unordered_set<std::string>* image_classes,
+    std::unique_ptr<HashSet<std::string>>&& image_classes,
     size_t thread_count,
     int swap_fd,
     const ProfileCompilationInfo* profile_compilation_info)
@@ -277,7 +277,7 @@
       instruction_set_features_(instruction_set_features),
       requires_constructor_barrier_lock_("constructor barrier lock"),
       non_relative_linker_patch_count_(0u),
-      image_classes_(image_classes),
+      image_classes_(std::move(image_classes)),
       number_of_soft_verifier_failures_(0),
       had_hard_verifier_failure_(false),
       parallel_thread_count_(thread_count),
@@ -991,7 +991,7 @@
 bool CompilerDriver::IsImageClass(const char* descriptor) const {
   if (image_classes_ != nullptr) {
     // If we have a set of image classes, use those.
-    return image_classes_->find(descriptor) != image_classes_->end();
+    return image_classes_->find(StringPiece(descriptor)) != image_classes_->end();
   }
   // No set of image classes, assume we include all the classes.
   // NOTE: Currently only reachable from InitImageMethodVisitor for the app image case.
@@ -1002,7 +1002,7 @@
   if (classes_to_compile_ == nullptr) {
     return true;
   }
-  return classes_to_compile_->find(descriptor) != classes_to_compile_->end();
+  return classes_to_compile_->find(StringPiece(descriptor)) != classes_to_compile_->end();
 }
 
 bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_ref) const {
@@ -1091,7 +1091,7 @@
 
 class RecordImageClassesVisitor : public ClassVisitor {
  public:
-  explicit RecordImageClassesVisitor(std::unordered_set<std::string>* image_classes)
+  explicit RecordImageClassesVisitor(HashSet<std::string>* image_classes)
       : image_classes_(image_classes) {}
 
   bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1101,7 +1101,7 @@
   }
 
  private:
-  std::unordered_set<std::string>* const image_classes_;
+  HashSet<std::string>* const image_classes_;
 };
 
 // Make a list of descriptors for classes to include in the image
@@ -1124,7 +1124,7 @@
         hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
     if (klass == nullptr) {
       VLOG(compiler) << "Failed to find class " << descriptor;
-      image_classes_->erase(it++);
+      it = image_classes_->erase(it);
       self->ClearException();
     } else {
       ++it;
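
The erase idiom changes here because the set is now an art::HashSet, an open-addressing table; the change suggests (an assumption of this sketch) that its erase() may relocate elements, so the loop must continue from erase()'s return value rather than use erase(it++). Illustration with std::unordered_set, whose erase() also returns the next iterator:

    #include <cassert>
    #include <string>
    #include <unordered_set>

    int main() {
      std::unordered_set<std::string> classes = {"LFoo;", "LBar;", "LBaz;"};
      for (auto it = classes.begin(); it != classes.end(); ) {
        if (*it == "LBar;") {
          it = classes.erase(it);  // Continue from the returned iterator.
        } else {
          ++it;
        }
      }
      assert(classes.size() == 2u);
      return 0;
    }
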
@@ -1177,12 +1177,12 @@
   RecordImageClassesVisitor visitor(image_classes_.get());
   class_linker->VisitClasses(&visitor);
 
-  CHECK_NE(image_classes_->size(), 0U);
+  CHECK(!image_classes_->empty());
 }
 
 static void MaybeAddToImageClasses(Thread* self,
                                    ObjPtr<mirror::Class> klass,
-                                   std::unordered_set<std::string>* image_classes)
+                                   HashSet<std::string>* image_classes)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK_EQ(self, Thread::Current());
   StackHandleScope<1> hs(self);
@@ -1190,11 +1190,10 @@
   const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
   while (!klass->IsObjectClass()) {
     const char* descriptor = klass->GetDescriptor(&temp);
-    std::pair<std::unordered_set<std::string>::iterator, bool> result =
-        image_classes->insert(descriptor);
-    if (!result.second) {  // Previously inserted.
-      break;
+    if (image_classes->find(StringPiece(descriptor)) != image_classes->end()) {
+      break;  // Previously inserted.
     }
+    image_classes->insert(descriptor);
     VLOG(compiler) << "Adding " << descriptor << " to image classes";
     for (size_t i = 0, num_interfaces = klass->NumDirectInterfaces(); i != num_interfaces; ++i) {
       ObjPtr<mirror::Class> interface = mirror::Class::GetDirectInterface(self, klass, i);
@@ -1216,7 +1215,7 @@
 class ClinitImageUpdate {
  public:
   static ClinitImageUpdate* Create(VariableSizedHandleScope& hs,
-                                   std::unordered_set<std::string>* image_class_descriptors,
+                                   HashSet<std::string>* image_class_descriptors,
                                    Thread* self,
                                    ClassLinker* linker) {
     std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(hs,
@@ -1273,7 +1272,7 @@
 
     bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
       std::string temp;
-      const char* name = klass->GetDescriptor(&temp);
+      StringPiece name(klass->GetDescriptor(&temp));
       if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
         data_->image_classes_.push_back(hs_.NewHandle(klass));
       } else {
@@ -1292,7 +1291,7 @@
   };
 
   ClinitImageUpdate(VariableSizedHandleScope& hs,
-                    std::unordered_set<std::string>* image_class_descriptors,
+                    HashSet<std::string>* image_class_descriptors,
                     Thread* self,
                     ClassLinker* linker) REQUIRES_SHARED(Locks::mutator_lock_)
       : hs_(hs),
@@ -1339,7 +1338,7 @@
   VariableSizedHandleScope& hs_;
   mutable std::vector<Handle<mirror::Class>> to_insert_;
   mutable std::unordered_set<mirror::Object*> marked_objects_;
-  std::unordered_set<std::string>* const image_class_descriptors_;
+  HashSet<std::string>* const image_class_descriptors_;
   std::vector<Handle<mirror::Class>> image_classes_;
   Thread* const self_;
   const char* old_cause_;
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 55f3561..ff70d96 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -20,7 +20,6 @@
 #include <atomic>
 #include <set>
 #include <string>
-#include <unordered_set>
 #include <vector>
 
 #include "android-base/strings.h"
@@ -28,6 +27,7 @@
 #include "arch/instruction_set.h"
 #include "base/array_ref.h"
 #include "base/bit_utils.h"
+#include "base/hash_set.h"
 #include "base/mutex.h"
 #include "base/os.h"
 #include "base/quasi_atomic.h"
@@ -99,7 +99,7 @@
                  Compiler::Kind compiler_kind,
                  InstructionSet instruction_set,
                  const InstructionSetFeatures* instruction_set_features,
-                 std::unordered_set<std::string>* image_classes,
+                 std::unique_ptr<HashSet<std::string>>&& image_classes,
                  size_t thread_count,
                  int swap_fd,
                  const ProfileCompilationInfo* profile_compilation_info);
@@ -144,7 +144,7 @@
     return compiler_.get();
   }
 
-  const std::unordered_set<std::string>* GetImageClasses() const {
+  const HashSet<std::string>* GetImageClasses() const {
     return image_classes_.get();
   }
 
@@ -493,12 +493,12 @@
 
   // If image_ is true, specifies the classes that will be included in the image.
   // Note if image_classes_ is null, all classes are included in the image.
-  std::unique_ptr<std::unordered_set<std::string>> image_classes_;
+  std::unique_ptr<HashSet<std::string>> image_classes_;
 
   // Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
   // all classes are eligible for compilation (duplication filters etc. will still apply).
   // This option may be restricted to the boot image, depending on a flag in the implementation.
-  std::unique_ptr<std::unordered_set<std::string>> classes_to_compile_;
+  std::unique_ptr<HashSet<std::string>> classes_to_compile_;
 
   std::atomic<uint32_t> number_of_soft_verifier_failures_;
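
Taken together, the CompilerDriver changes replace a raw owning pointer with an explicit std::unique_ptr handoff. A minimal sketch of the pattern outside ART (std::unordered_set standing in for art::HashSet):

    #include <memory>
    #include <string>
    #include <unordered_set>
    #include <utility>

    using ClassSet = std::unordered_set<std::string>;

    class Driver {
     public:
      // Rvalue-reference parameter: the caller must std::move() ownership in.
      explicit Driver(std::unique_ptr<ClassSet>&& image_classes)
          : image_classes_(std::move(image_classes)) {}

      const ClassSet* GetImageClasses() const { return image_classes_.get(); }

     private:
      std::unique_ptr<ClassSet> image_classes_;
    };

    int main() {
      auto classes = std::make_unique<ClassSet>();
      classes->insert("Ljava/lang/Object;");
      Driver driver(std::move(classes));  // `classes` is now null.
      return driver.GetImageClasses()->count("Ljava/lang/Object;") == 1 ? 0 : 1;
    }
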
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 8aa790db..ad4b5cf 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -27,6 +27,7 @@
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
 #include "heap_poisoning.h"
 #include "intrinsics.h"
 #include "intrinsics_arm64.h"
@@ -3343,61 +3344,25 @@
 #undef DEFINE_CONDITION_VISITORS
 #undef FOR_EACH_CONDITION_INSTRUCTION
 
-void InstructionCodeGeneratorARM64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
-  DCHECK(instruction->IsDiv() || instruction->IsRem());
-
-  LocationSummary* locations = instruction->GetLocations();
-  Location second = locations->InAt(1);
-  DCHECK(second.IsConstant());
-
-  Register out = OutputRegister(instruction);
-  Register dividend = InputRegisterAt(instruction, 0);
-  int64_t imm = Int64FromConstant(second.GetConstant());
-  DCHECK(imm == 1 || imm == -1);
-
-  if (instruction->IsRem()) {
-    __ Mov(out, 0);
-  } else {
-    if (imm == 1) {
-      __ Mov(out, dividend);
-    } else {
-      __ Neg(out, dividend);
-    }
-  }
-}
-
-void InstructionCodeGeneratorARM64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
-  DCHECK(instruction->IsDiv() || instruction->IsRem());
-
-  LocationSummary* locations = instruction->GetLocations();
-  Location second = locations->InAt(1);
-  DCHECK(second.IsConstant());
-
-  Register out = OutputRegister(instruction);
-  Register dividend = InputRegisterAt(instruction, 0);
-  int64_t imm = Int64FromConstant(second.GetConstant());
+void InstructionCodeGeneratorARM64::GenerateIntDivForPower2Denom(HDiv* instruction) {
+  int64_t imm = Int64ConstantFrom(instruction->GetLocations()->InAt(1));
   uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
+  DCHECK(IsPowerOfTwo(abs_imm)) << abs_imm;
+
+  Register out = OutputRegister(instruction);
+  Register dividend = InputRegisterAt(instruction, 0);
   int ctz_imm = CTZ(abs_imm);
 
   UseScratchRegisterScope temps(GetVIXLAssembler());
   Register temp = temps.AcquireSameSizeAs(out);
 
-  if (instruction->IsDiv()) {
-    __ Add(temp, dividend, abs_imm - 1);
-    __ Cmp(dividend, 0);
-    __ Csel(out, temp, dividend, lt);
-    if (imm > 0) {
-      __ Asr(out, out, ctz_imm);
-    } else {
-      __ Neg(out, Operand(out, ASR, ctz_imm));
-    }
+  __ Add(temp, dividend, abs_imm - 1);
+  __ Cmp(dividend, 0);
+  __ Csel(out, temp, dividend, lt);
+  if (imm > 0) {
+    __ Asr(out, out, ctz_imm);
   } else {
-    int bits = instruction->GetResultType() == DataType::Type::kInt32 ? 32 : 64;
-    __ Asr(temp, dividend, bits - 1);
-    __ Lsr(temp, temp, bits - ctz_imm);
-    __ Add(out, dividend, temp);
-    __ And(out, out, abs_imm - 1);
-    __ Sub(out, out, temp);
+    __ Neg(out, Operand(out, ASR, ctz_imm));
   }
 }
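
The Add/Cmp/Csel prologue in GenerateIntDivForPower2Denom biases negative dividends by abs_imm - 1 so that the arithmetic shift rounds toward zero. A self-contained sketch of the arithmetic being emitted (illustration only, not ART code):

    #include <cassert>
    #include <cstdint>

    int64_t DivByPowerOfTwo(int64_t dividend, int ctz_imm) {
      int64_t abs_imm = int64_t{1} << ctz_imm;
      // Csel: use the biased value only when the dividend is negative,
      // so the arithmetic shift rounds toward zero, not toward -infinity.
      int64_t biased = (dividend < 0) ? dividend + (abs_imm - 1) : dividend;
      return biased >> ctz_imm;  // Asr; the emitted code negates when imm < 0.
    }

    int main() {
      assert(DivByPowerOfTwo(7, 2) == 1);    //  7 / 4
      assert(DivByPowerOfTwo(-7, 2) == -1);  // -7 / 4, not -2
      return 0;
    }
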
 
@@ -3453,39 +3418,34 @@
   }
 }
 
-void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
-  DCHECK(instruction->IsDiv() || instruction->IsRem());
-  DataType::Type type = instruction->GetResultType();
-  DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
+void InstructionCodeGeneratorARM64::GenerateIntDivForConstDenom(HDiv* instruction) {
+  int64_t imm = Int64ConstantFrom(instruction->GetLocations()->InAt(1));
 
-  LocationSummary* locations = instruction->GetLocations();
-  Register out = OutputRegister(instruction);
-  Location second = locations->InAt(1);
+  if (imm == 0) {
+    // Do not generate anything. DivZeroCheck would prevent any code from being executed.
+    return;
+  }
 
-  if (second.IsConstant()) {
-    int64_t imm = Int64FromConstant(second.GetConstant());
-
-    if (imm == 0) {
-      // Do not generate anything. DivZeroCheck would prevent any code to be executed.
-    } else if (imm == 1 || imm == -1) {
-      DivRemOneOrMinusOne(instruction);
-    } else if (IsPowerOfTwo(AbsOrMin(imm))) {
-      DivRemByPowerOfTwo(instruction);
-    } else {
-      DCHECK(imm <= -2 || imm >= 2);
-      GenerateDivRemWithAnyConstant(instruction);
-    }
+  if (IsPowerOfTwo(AbsOrMin(imm))) {
+    GenerateIntDivForPower2Denom(instruction);
   } else {
+    // Cases imm == -1 or imm == 1 are handled by InstructionSimplifier.
+    DCHECK(imm < -2 || imm > 2) << imm;
+    GenerateDivRemWithAnyConstant(instruction);
+  }
+}
+
+void InstructionCodeGeneratorARM64::GenerateIntDiv(HDiv* instruction) {
+  DCHECK(DataType::IsIntOrLongType(instruction->GetResultType()))
+      << instruction->GetResultType();
+
+  if (instruction->GetLocations()->InAt(1).IsConstant()) {
+    GenerateIntDivForConstDenom(instruction);
+  } else {
+    Register out = OutputRegister(instruction);
     Register dividend = InputRegisterAt(instruction, 0);
     Register divisor = InputRegisterAt(instruction, 1);
-    if (instruction->IsDiv()) {
-      __ Sdiv(out, dividend, divisor);
-    } else {
-      UseScratchRegisterScope temps(GetVIXLAssembler());
-      Register temp = temps.AcquireSameSizeAs(out);
-      __ Sdiv(temp, dividend, divisor);
-      __ Msub(out, temp, divisor, dividend);
-    }
+    __ Sdiv(out, dividend, divisor);
   }
 }
 
@@ -3517,7 +3477,7 @@
   switch (type) {
     case DataType::Type::kInt32:
     case DataType::Type::kInt64:
-      GenerateDivRemIntegral(div);
+      GenerateIntDiv(div);
       break;
 
     case DataType::Type::kFloat32:
@@ -4828,6 +4788,25 @@
   __ ldr(out, MemOperand(base, /* offset placeholder */ 0));
 }
 
+void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
+                                              uint32_t boot_image_offset) {
+  DCHECK(!GetCompilerOptions().IsBootImage());
+  if (GetCompilerOptions().GetCompilePic()) {
+    DCHECK(Runtime::Current()->IsAotCompiler());
+    // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
+    vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_offset);
+    EmitAdrpPlaceholder(adrp_label, reg.X());
+    // Add LDR with its PC-relative .data.bimg.rel.ro patch.
+    vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_offset, adrp_label);
+    EmitLdrOffsetPlaceholder(ldr_label, reg.W(), reg.X());
+  } else {
+    gc::Heap* heap = Runtime::Current()->GetHeap();
+    DCHECK(!heap->GetBootImageSpaces().empty());
+    const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+    __ Ldr(reg.W(), DeduplicateBootImageAddressLiteral(reinterpret_cast<uintptr_t>(address)));
+  }
+}
+
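
LoadBootImageAddress chooses between a PC-relative load from a patched .data.bimg.rel.ro entry (PIC AOT) and a literal holding the absolute address. A rough sketch of what the two emitted sequences compute at runtime; the real entries are 32-bit since the boot image lives in the low 4GiB, but the sketch uses uintptr_t so it also runs on 64-bit hosts:

    #include <cassert>
    #include <cstdint>

    const uint8_t* ResolveBootImageReference(bool pic,
                                             const uintptr_t* rel_ro_entry,
                                             const uint8_t* boot_image_begin,
                                             uint32_t boot_image_offset) {
      if (pic) {
        // ADRP + LDR: read the address the linker patched into the entry.
        return reinterpret_cast<const uint8_t*>(*rel_ro_entry);
      }
      // Non-PIC: the absolute address was baked into a literal at compile time.
      return boot_image_begin + boot_image_offset;
    }

    int main() {
      const uint8_t boot_image[64] = {};
      const uintptr_t entry = reinterpret_cast<uintptr_t>(boot_image + 16);  // "patched"
      assert(ResolveBootImageReference(true, &entry, boot_image, 16u) ==
             ResolveBootImageReference(false, &entry, boot_image, 16u));
      return 0;
    }
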
 template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
 inline void CodeGeneratorARM64::EmitPcRelativeLinkerPatches(
     const ArenaDeque<PcRelativePatchInfo>& infos,
@@ -5649,13 +5628,78 @@
   }
 }
 
+void InstructionCodeGeneratorARM64::GenerateIntRemForPower2Denom(HRem* instruction) {
+  int64_t imm = Int64ConstantFrom(instruction->GetLocations()->InAt(1));
+  uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
+  DCHECK(IsPowerOfTwo(abs_imm)) << abs_imm;
+
+  Register out = OutputRegister(instruction);
+  Register dividend = InputRegisterAt(instruction, 0);
+  int ctz_imm = CTZ(abs_imm);
+
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  Register temp = temps.AcquireSameSizeAs(out);
+
+  int bits = (instruction->GetResultType() == DataType::Type::kInt32) ? 32 : 64;
+  __ Asr(temp, dividend, bits - 1);
+  __ Lsr(temp, temp, bits - ctz_imm);
+  __ Add(out, dividend, temp);
+  __ And(out, out, abs_imm - 1);
+  __ Sub(out, out, temp);
+}
+
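
The Asr/Lsr/Add/And/Sub sequence computes a signed remainder by building the same abs_imm - 1 bias from the sign bit: Lsr turns the all-ones sign mask of a negative dividend into the bias and leaves zero otherwise. A worked 64-bit sketch (illustration only):

    #include <cassert>
    #include <cstdint>

    int64_t RemByPowerOfTwo(int64_t dividend, int ctz_imm) {
      const int bits = 64;
      int64_t temp = dividend >> (bits - 1);  // Asr: -1 if negative, else 0.
      temp = static_cast<int64_t>(
          static_cast<uint64_t>(temp) >> (bits - ctz_imm));  // Lsr: abs_imm - 1 or 0.
      int64_t out = dividend + temp;           // Add the bias.
      out &= (int64_t{1} << ctz_imm) - 1;      // And: keep the low bits.
      return out - temp;                       // Sub: remove the bias again.
    }

    int main() {
      assert(RemByPowerOfTwo(7, 2) == 3);    //  7 % 4
      assert(RemByPowerOfTwo(-7, 2) == -3);  // -7 % 4 keeps the dividend's sign.
      return 0;
    }
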
+void InstructionCodeGeneratorARM64::GenerateIntRemForOneOrMinusOneDenom(HRem* instruction) {
+  int64_t imm = Int64ConstantFrom(instruction->GetLocations()->InAt(1));
+  DCHECK(imm == 1 || imm == -1) << imm;
+
+  Register out = OutputRegister(instruction);
+  __ Mov(out, 0);
+}
+
+void InstructionCodeGeneratorARM64::GenerateIntRemForConstDenom(HRem* instruction) {
+  int64_t imm = Int64ConstantFrom(instruction->GetLocations()->InAt(1));
+
+  if (imm == 0) {
+    // Do not generate anything.
+    // DivZeroCheck would prevent any code from being executed.
+    return;
+  }
+
+  if (imm == 1 || imm == -1) {
+    // TODO: These cases need to be optimized in InstructionSimplifier.
+    GenerateIntRemForOneOrMinusOneDenom(instruction);
+  } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+    GenerateIntRemForPower2Denom(instruction);
+  } else {
+    DCHECK(imm < -2 || imm > 2) << imm;
+    GenerateDivRemWithAnyConstant(instruction);
+  }
+}
+
+void InstructionCodeGeneratorARM64::GenerateIntRem(HRem* instruction) {
+  DCHECK(DataType::IsIntOrLongType(instruction->GetResultType()))
+      << instruction->GetResultType();
+
+  if (instruction->GetLocations()->InAt(1).IsConstant()) {
+    GenerateIntRemForConstDenom(instruction);
+  } else {
+    Register out = OutputRegister(instruction);
+    Register dividend = InputRegisterAt(instruction, 0);
+    Register divisor = InputRegisterAt(instruction, 1);
+    UseScratchRegisterScope temps(GetVIXLAssembler());
+    Register temp = temps.AcquireSameSizeAs(out);
+    __ Sdiv(temp, dividend, divisor);
+    __ Msub(out, temp, divisor, dividend);
+  }
+}
+
 void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
   DataType::Type type = rem->GetResultType();
 
   switch (type) {
     case DataType::Type::kInt32:
     case DataType::Type::kInt64: {
-      GenerateDivRemIntegral(rem);
+      GenerateIntRem(rem);
       break;
     }
 
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 5afb712..dc4964d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -322,7 +322,13 @@
   void DivRemOneOrMinusOne(HBinaryOperation* instruction);
   void DivRemByPowerOfTwo(HBinaryOperation* instruction);
   void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
-  void GenerateDivRemIntegral(HBinaryOperation* instruction);
+  void GenerateIntDiv(HDiv* instruction);
+  void GenerateIntDivForConstDenom(HDiv* instruction);
+  void GenerateIntDivForPower2Denom(HDiv* instruction);
+  void GenerateIntRem(HRem* instruction);
+  void GenerateIntRemForConstDenom(HRem* instruction);
+  void GenerateIntRemForOneOrMinusOneDenom(HRem* instruction);
+  void GenerateIntRemForPower2Denom(HRem* instruction);
   void HandleGoto(HInstruction* got, HBasicBlock* successor);
 
   vixl::aarch64::MemOperand VecAddress(
@@ -630,6 +636,8 @@
                                 vixl::aarch64::Register out,
                                 vixl::aarch64::Register base);
 
+  void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_offset);
+
   void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
   bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
   void EmitThunkCode(const linker::LinkerPatch& patch,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 91c1315..6804340 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -27,6 +27,7 @@
 #include "compiled_method.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
 #include "heap_poisoning.h"
 #include "intrinsics_arm_vixl.h"
 #include "linker/linker_patch.h"
@@ -9536,6 +9537,22 @@
       });
 }
 
+void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg, uint32_t boot_image_offset) {
+  DCHECK(!GetCompilerOptions().IsBootImage());
+  if (GetCompilerOptions().GetCompilePic()) {
+    DCHECK(Runtime::Current()->IsAotCompiler());
+    CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = NewBootImageRelRoPatch(boot_image_offset);
+    EmitMovwMovtPlaceholder(labels, reg);
+    __ Ldr(reg, MemOperand(reg, /* offset */ 0));
+  } else {
+    gc::Heap* heap = Runtime::Current()->GetHeap();
+    DCHECK(!heap->GetBootImageSpaces().empty());
+    uintptr_t address =
+        reinterpret_cast<uintptr_t>(heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset);
+    __ Ldr(reg, DeduplicateBootImageAddressLiteral(dchecked_integral_cast<uint32_t>(address)));
+  }
+}
+
 template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
 inline void CodeGeneratorARMVIXL::EmitPcRelativeLinkerPatches(
     const ArenaDeque<PcRelativePatchInfo>& infos,
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index d5b739b..4893d3c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -600,6 +600,8 @@
                                                 dex::TypeIndex type_index,
                                                 Handle<mirror::Class> handle);
 
+  void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_offset);
+
   void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
   bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
   void EmitThunkCode(const linker::LinkerPatch& patch,
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 507db36..112eb51 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -26,6 +26,7 @@
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
 #include "heap_poisoning.h"
 #include "intrinsics.h"
 #include "intrinsics_mips.h"
@@ -1739,6 +1740,22 @@
   // offset to `out` (e.g. lw, jialc, addiu).
 }
 
+void CodeGeneratorMIPS::LoadBootImageAddress(Register reg, uint32_t boot_image_offset) {
+  DCHECK(!GetCompilerOptions().IsBootImage());
+  if (GetCompilerOptions().GetCompilePic()) {
+    DCHECK(Runtime::Current()->IsAotCompiler());
+    PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
+    PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
+    EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO);
+    __ Lw(reg, reg, /* placeholder */ 0x5678, &info_low->label);
+  } else {
+    gc::Heap* heap = Runtime::Current()->GetHeap();
+    DCHECK(!heap->GetBootImageSpaces().empty());
+    const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+    __ LoadConst32(reg, dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address)));
+  }
+}
+
 CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
     const DexFile& dex_file,
     dex::StringIndex string_index,
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 2e7c736..9fdb385 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -645,6 +645,8 @@
                                             Register out,
                                             Register base);
 
+  void LoadBootImageAddress(Register reg, uint32_t boot_image_offset);
+
   // The JitPatchInfo is used for JIT string and class loads.
   struct JitPatchInfo {
     JitPatchInfo(const DexFile& dex_file, uint64_t idx)
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 08a6512..9f86364 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -24,6 +24,7 @@
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
 #include "heap_poisoning.h"
 #include "intrinsics.h"
 #include "intrinsics_mips64.h"
@@ -1638,6 +1639,24 @@
   }
 }
 
+void CodeGeneratorMIPS64::LoadBootImageAddress(GpuRegister reg, uint32_t boot_image_offset) {
+  DCHECK(!GetCompilerOptions().IsBootImage());
+  if (GetCompilerOptions().GetCompilePic()) {
+    DCHECK(Runtime::Current()->IsAotCompiler());
+    PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
+    PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
+    EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
+    // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
+    __ Lwu(reg, AT, /* placeholder */ 0x5678);
+  } else {
+    gc::Heap* heap = Runtime::Current()->GetHeap();
+    DCHECK(!heap->GetBootImageSpaces().empty());
+    uintptr_t address =
+        reinterpret_cast<uintptr_t>(heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset);
+    __ LoadLiteral(reg, kLoadDoubleword, DeduplicateBootImageAddressLiteral(address));
+  }
+}
+
 Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
                                                           dex::StringIndex string_index,
                                                           Handle<mirror::String> handle) {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 6e69e46..25c886f 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -615,6 +615,8 @@
                                             GpuRegister out,
                                             PcRelativePatchInfo* info_low = nullptr);
 
+  void LoadBootImageAddress(GpuRegister reg, uint32_t boot_image_offset);
+
   void PatchJitRootUse(uint8_t* code,
                        const uint8_t* roots_data,
                        const Literal* literal,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 9f42ac7..12872ed 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -23,6 +23,7 @@
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
 #include "heap_poisoning.h"
 #include "intrinsics.h"
 #include "intrinsics_x86.h"
@@ -2188,7 +2189,9 @@
 
   IntrinsicLocationsBuilderX86 intrinsic(codegen_);
   if (intrinsic.TryDispatch(invoke)) {
-    if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeMethodLoadKind()) {
+    if (invoke->GetLocations()->CanCall() &&
+        invoke->HasPcRelativeMethodLoadKind() &&
+        invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).IsInvalid()) {
       invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
     }
     return;
@@ -4969,6 +4972,28 @@
   return &string_bss_entry_patches_.back().label;
 }
 
+void CodeGeneratorX86::LoadBootImageAddress(Register reg,
+                                            uint32_t boot_image_offset,
+                                            HInvokeStaticOrDirect* invoke) {
+  DCHECK(!GetCompilerOptions().IsBootImage());
+  if (GetCompilerOptions().GetCompilePic()) {
+    DCHECK(Runtime::Current()->IsAotCompiler());
+    DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
+    HX86ComputeBaseMethodAddress* method_address =
+        invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
+    DCHECK(method_address != nullptr);
+    Register method_address_reg =
+        invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
+    __ movl(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
+    RecordBootImageRelRoPatch(method_address, boot_image_offset);
+  } else {
+    gc::Heap* heap = Runtime::Current()->GetHeap();
+    DCHECK(!heap->GetBootImageSpaces().empty());
+    const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+    __ movl(reg, Immediate(dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address))));
+  }
+}
+
 // The label points to the end of the "movl" or another instruction but the literal offset
 // for method patch needs to point to the embedded constant which occupies the last 4 bytes.
 constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 6c76e27..7d18e2b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -426,6 +426,11 @@
   Label* NewTypeBssEntryPatch(HLoadClass* load_class);
   void RecordBootImageStringPatch(HLoadString* load_string);
   Label* NewStringBssEntryPatch(HLoadString* load_string);
+
+  void LoadBootImageAddress(Register reg,
+                            uint32_t boot_image_offset,
+                            HInvokeStaticOrDirect* invoke);
+
   Label* NewJitRootStringPatch(const DexFile& dex_file,
                                dex::StringIndex string_index,
                                Handle<mirror::String> handle);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 05194b1..9631c15 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -22,6 +22,7 @@
 #include "compiled_method.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
 #include "heap_poisoning.h"
 #include "intrinsics.h"
 #include "intrinsics_x86_64.h"
@@ -1107,6 +1108,20 @@
   return &string_bss_entry_patches_.back().label;
 }
 
+void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_offset) {
+  DCHECK(!GetCompilerOptions().IsBootImage());
+  if (GetCompilerOptions().GetCompilePic()) {
+    DCHECK(Runtime::Current()->IsAotCompiler());
+    __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+    RecordBootImageRelRoPatch(boot_image_offset);
+  } else {
+    gc::Heap* heap = Runtime::Current()->GetHeap();
+    DCHECK(!heap->GetBootImageSpaces().empty());
+    const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+    __ movl(reg, Immediate(dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address))));
+  }
+}
+
 // The label points to the end of the "movl" or another instruction but the literal offset
 // for method patch needs to point to the embedded constant which occupies the last 4 bytes.
 constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 9a4c53b..cf862d3 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -429,7 +429,7 @@
                               dex::TypeIndex type_index,
                               Handle<mirror::Class> handle);
 
-  void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+  void LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_offset);
 
   void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
 
@@ -566,6 +566,8 @@
   // Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
   void Store64BitValueToStack(Location dest, int64_t value);
 
+  void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+
   // Assign a 64 bit constant to an address.
   void MoveInt64ToAddress(const Address& addr_low,
                           const Address& addr_high,
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 1a7f926..54bff22 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -47,7 +47,7 @@
     candidate_fences_.push_back(constructor_fence);
 
     for (size_t input_idx = 0; input_idx < constructor_fence->InputCount(); ++input_idx) {
-      candidate_fence_targets_.Insert(constructor_fence->InputAt(input_idx));
+      candidate_fence_targets_.insert(constructor_fence->InputAt(input_idx));
     }
   }
 
@@ -208,13 +208,13 @@
     // there is no benefit to this extra complexity unless we also reordered
     // the stores to come later.
     candidate_fences_.clear();
-    candidate_fence_targets_.Clear();
+    candidate_fence_targets_.clear();
   }
 
   // A publishing 'store' is only interesting if the value being stored
   // is one of the fence `targets` in `candidate_fences`.
   bool IsInterestingPublishTarget(HInstruction* store_input) const {
-    return candidate_fence_targets_.Find(store_input) != candidate_fence_targets_.end();
+    return candidate_fence_targets_.find(store_input) != candidate_fence_targets_.end();
   }
 
   void MaybeMerge(HConstructorFence* target, HConstructorFence* src) {
diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc
new file mode 100644
index 0000000..3c20ad6
--- /dev/null
+++ b/compiler/optimizing/intrinsic_objects.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "intrinsic_objects.h"
+
+#include "art_field-inl.h"
+#include "base/logging.h"
+#include "class_root.h"
+#include "handle.h"
+#include "obj_ptr-inl.h"
+#include "mirror/object_array-inl.h"
+
+namespace art {
+
+static ObjPtr<mirror::ObjectArray<mirror::Object>> LookupIntegerCache(Thread* self,
+                                                                      ClassLinker* class_linker)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
+      self, "Ljava/lang/Integer$IntegerCache;", /* class_linker */ nullptr);
+  if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
+    return nullptr;
+  }
+  ArtField* cache_field =
+      integer_cache_class->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
+  CHECK(cache_field != nullptr);
+  ObjPtr<mirror::ObjectArray<mirror::Object>> integer_cache =
+      ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(
+          cache_field->GetObject(integer_cache_class));
+  CHECK(integer_cache != nullptr);
+  return integer_cache;
+}
+
+ObjPtr<mirror::ObjectArray<mirror::Object>> IntrinsicObjects::AllocateBootImageLiveObjects(
+    Thread* self,
+    ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_) {
+  // The objects used for the Integer.valueOf() intrinsic must remain live even if references
+  // to them are removed using reflection. Image roots are not accessible through reflection,
+  // so the array we construct here shall keep them alive.
+  StackHandleScope<1> hs(self);
+  Handle<mirror::ObjectArray<mirror::Object>> integer_cache =
+      hs.NewHandle(LookupIntegerCache(self, class_linker));
+  size_t live_objects_size =
+      (integer_cache != nullptr) ? (/* cache */ 1u + integer_cache->GetLength()) : 0u;
+  ObjPtr<mirror::ObjectArray<mirror::Object>> live_objects =
+      mirror::ObjectArray<mirror::Object>::Alloc(
+          self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker), live_objects_size);
+  int32_t index = 0;
+  if (integer_cache != nullptr) {
+    live_objects->Set(index++, integer_cache.Get());
+    for (int32_t i = 0, length = integer_cache->GetLength(); i != length; ++i) {
+      live_objects->Set(index++, integer_cache->Get(i));
+    }
+  }
+  CHECK_EQ(index, live_objects->GetLength());
+
+  if (kIsDebugBuild && integer_cache != nullptr) {
+    CHECK_EQ(integer_cache.Get(), GetIntegerValueOfCache(live_objects));
+    for (int32_t i = 0, len = integer_cache->GetLength(); i != len; ++i) {
+      CHECK_EQ(integer_cache->GetWithoutChecks(i), GetIntegerValueOfObject(live_objects, i));
+    }
+  }
+  return live_objects;
+}
+
+ObjPtr<mirror::ObjectArray<mirror::Object>> IntrinsicObjects::GetIntegerValueOfCache(
+    ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) {
+  DCHECK(boot_image_live_objects != nullptr);
+  if (boot_image_live_objects->GetLength() == 0u) {
+    return nullptr;  // No intrinsic objects.
+  }
+  // No need for read barrier for boot image object or for verifying the value that was just stored.
+  ObjPtr<mirror::Object> result =
+      boot_image_live_objects->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(0);
+  DCHECK(result != nullptr);
+  DCHECK(result->IsObjectArray());
+  DCHECK(result->GetClass()->DescriptorEquals("[Ljava/lang/Integer;"));
+  return ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(result);
+}
+
+ObjPtr<mirror::Object> IntrinsicObjects::GetIntegerValueOfObject(
+    ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+    uint32_t index) {
+  DCHECK(boot_image_live_objects != nullptr);
+  DCHECK_NE(boot_image_live_objects->GetLength(), 0);
+  DCHECK_LT(index,
+            static_cast<uint32_t>(GetIntegerValueOfCache(boot_image_live_objects)->GetLength()));
+
+  // No need for read barrier for boot image object or for verifying the value that was just stored.
+  ObjPtr<mirror::Object> result =
+      boot_image_live_objects->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(
+          /* skip the IntegerCache.cache */ 1u + index);
+  DCHECK(result != nullptr);
+  DCHECK(result->GetClass()->DescriptorEquals("Ljava/lang/Integer;"));
+  return result;
+}
+
+MemberOffset IntrinsicObjects::GetIntegerValueOfArrayDataOffset(
+    ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) {
+  DCHECK_NE(boot_image_live_objects->GetLength(), 0);
+  MemberOffset result = mirror::ObjectArray<mirror::Object>::OffsetOfElement(1u);
+  DCHECK_EQ(GetIntegerValueOfObject(boot_image_live_objects, 0u),
+            (boot_image_live_objects
+                 ->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(result)));
+  return result;
+}
+
+}  // namespace art
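
GetIntegerValueOfCache() and GetIntegerValueOfObject() rely on the fixed layout built by AllocateBootImageLiveObjects(): slot 0 holds the IntegerCache.cache array itself and slots 1..n hold its elements in order. A toy model of the indexing (plain C++, not ART mirror types):

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> cache = {-128, -127, -126};  // Boxed values, toy stand-in.
      std::vector<const void*> live_objects;
      live_objects.push_back(&cache);               // [0] = IntegerCache.cache itself.
      for (const int& boxed : cache) {
        live_objects.push_back(&boxed);             // [1 + i] = cache[i].
      }
      // GetIntegerValueOfObject(live_objects, i) reads live_objects[1 + i]:
      assert(live_objects[1 + 2] == &cache[2]);
      return 0;
    }
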
diff --git a/compiler/optimizing/intrinsic_objects.h b/compiler/optimizing/intrinsic_objects.h
new file mode 100644
index 0000000..ffadd03
--- /dev/null
+++ b/compiler/optimizing/intrinsic_objects.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
+#define ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
+
+#include "base/mutex.h"
+
+namespace art {
+
+class ClassLinker;
+class MemberOffset;
+template <class MirrorType> class ObjPtr;
+class Thread;
+
+namespace mirror {
+class Object;
+template <class T> class ObjectArray;
+}  // namespace mirror
+
+class IntrinsicObjects {
+ public:
+  static ObjPtr<mirror::ObjectArray<mirror::Object>> AllocateBootImageLiveObjects(
+      Thread* self,
+      ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Functions for retrieving data for Integer.valueOf().
+  static ObjPtr<mirror::ObjectArray<mirror::Object>> GetIntegerValueOfCache(
+      ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  static ObjPtr<mirror::Object> GetIntegerValueOfObject(
+      ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+      uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_);
+  static MemberOffset GetIntegerValueOfArrayDataOffset(
+      ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index f0c91f3..81b2b7b 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -21,10 +21,12 @@
 #include "base/utils.h"
 #include "class_linker.h"
 #include "dex/invoke_type.h"
-#include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
-#include "mirror/dex_cache-inl.h"
+#include "gc/space/image_space.h"
+#include "image-inl.h"
+#include "intrinsic_objects.h"
 #include "nodes.h"
+#include "obj_ptr-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 
@@ -221,105 +223,223 @@
   return os;
 }
 
+static ObjPtr<mirror::ObjectArray<mirror::Object>> GetBootImageLiveObjects()
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  const std::vector<gc::space::ImageSpace*>& boot_image_spaces = heap->GetBootImageSpaces();
+  DCHECK(!boot_image_spaces.empty());
+  const ImageHeader& main_header = boot_image_spaces[0]->GetImageHeader();
+  ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects =
+      ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(
+          main_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kBootImageLiveObjects));
+  DCHECK(boot_image_live_objects != nullptr);
+  DCHECK(heap->ObjectIsInBootImageSpace(boot_image_live_objects));
+  return boot_image_live_objects;
+}
+
+static bool CheckIntegerCache(Thread* self,
+                              ClassLinker* class_linker,
+                              ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+                              ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_cache)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(boot_image_cache != nullptr);
+
+  // Since we have a cache in the boot image, both java.lang.Integer and
+  // java.lang.Integer$IntegerCache must be initialized in the boot image.
+  ObjPtr<mirror::Class> cache_class = class_linker->LookupClass(
+      self, "Ljava/lang/Integer$IntegerCache;", /* class_loader */ nullptr);
+  DCHECK(cache_class != nullptr);
+  DCHECK(cache_class->IsInitialized());
+  ObjPtr<mirror::Class> integer_class =
+      class_linker->LookupClass(self, "Ljava/lang/Integer;", /* class_loader */ nullptr);
+  DCHECK(integer_class != nullptr);
+  DCHECK(integer_class->IsInitialized());
+
+  // Check that the current cache is the same as the `boot_image_cache`.
+  ArtField* cache_field = cache_class->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
+  DCHECK(cache_field != nullptr);
+  ObjPtr<mirror::ObjectArray<mirror::Object>> current_cache =
+      ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(cache_field->GetObject(cache_class));
+  if (current_cache != boot_image_cache) {
+    return false;  // Messed up IntegerCache.cache.
+  }
+
+  // Check that the range matches the boot image cache length.
+  ArtField* low_field = cache_class->FindDeclaredStaticField("low", "I");
+  DCHECK(low_field != nullptr);
+  int32_t low = low_field->GetInt(cache_class);
+  ArtField* high_field = cache_class->FindDeclaredStaticField("high", "I");
+  DCHECK(high_field != nullptr);
+  int32_t high = high_field->GetInt(cache_class);
+  if (boot_image_cache->GetLength() != high - low + 1) {
+    return false;  // Messed up IntegerCache.low or IntegerCache.high.
+  }
+
+  // Check that the elements match the boot image intrinsic objects and check their values as well.
+  ArtField* value_field = integer_class->FindDeclaredInstanceField("value", "I");
+  DCHECK(value_field != nullptr);
+  for (int32_t i = 0, len = boot_image_cache->GetLength(); i != len; ++i) {
+    ObjPtr<mirror::Object> boot_image_object =
+        IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, i);
+    DCHECK(Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boot_image_object));
+    // No need for read barrier for comparison with a boot image object.
+    ObjPtr<mirror::Object> current_object =
+        boot_image_cache->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(i);
+    if (boot_image_object != current_object) {
+      return false;  // Messed up IntegerCache.cache[i].
+    }
+    if (value_field->GetInt(boot_image_object) != low + i) {
+      return false;  // Messed up IntegerCache.cache[i].value.
+    }
+  }
+
+  return true;
+}
+
 void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
                                                       CodeGenerator* codegen,
                                                       Location return_location,
                                                       Location first_argument_location) {
-  if (Runtime::Current()->IsAotCompiler()) {
-    if (codegen->GetCompilerOptions().IsBootImage() ||
-        codegen->GetCompilerOptions().GetCompilePic()) {
-      // TODO(ngeoffray): Support boot image compilation.
-      return;
-    }
-  }
-
-  IntegerValueOfInfo info = ComputeIntegerValueOfInfo();
-
-  // Most common case is that we have found all we needed (classes are initialized
-  // and in the boot image). Bail if not.
-  if (info.integer_cache == nullptr ||
-      info.integer == nullptr ||
-      info.cache == nullptr ||
-      info.value_offset == 0 ||
-      // low and high cannot be 0, per the spec.
-      info.low == 0 ||
-      info.high == 0) {
-    LOG(INFO) << "Integer.valueOf will not be optimized";
+  if (codegen->GetCompilerOptions().IsBootImage()) {
+    // TODO: Implement for boot image. We need access to CompilerDriver::IsImageClass()
+    // to verify that the IntegerCache shall be in the image.
     return;
   }
+  Runtime* runtime = Runtime::Current();
+  gc::Heap* heap = runtime->GetHeap();
+  if (heap->GetBootImageSpaces().empty()) {
+    return;  // Running without boot image, cannot use required boot image objects.
+  }
 
   // The intrinsic will call the runtime if it needs to allocate a j.l.Integer.
-  LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
-      invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
-  if (!invoke->InputAt(0)->IsConstant()) {
-    locations->SetInAt(0, Location::RequiresRegister());
+  LocationSummary::CallKind call_kind = LocationSummary::kCallOnMainOnly;
+  {
+    Thread* self = Thread::Current();
+    ScopedObjectAccess soa(self);
+    ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects = GetBootImageLiveObjects();
+    ObjPtr<mirror::ObjectArray<mirror::Object>> cache =
+        IntrinsicObjects::GetIntegerValueOfCache(boot_image_live_objects);
+    if (cache == nullptr) {
+      return;  // No cache in the boot image.
+    }
+    if (runtime->UseJitCompilation()) {
+      if (!CheckIntegerCache(self, runtime->GetClassLinker(), boot_image_live_objects, cache)) {
+        return;  // The cache was somehow messed up, probably by using reflection.
+      }
+    } else {
+      DCHECK(runtime->IsAotCompiler());
+      DCHECK(CheckIntegerCache(self, runtime->GetClassLinker(), boot_image_live_objects, cache));
+      if (invoke->InputAt(0)->IsIntConstant()) {
+        int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+        // Retrieve the `value` from the lowest cached Integer.
+        ObjPtr<mirror::Object> low_integer =
+            IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, 0u);
+        ObjPtr<mirror::Class> integer_class =
+            low_integer->GetClass<kVerifyNone, kWithoutReadBarrier>();
+        ArtField* value_field = integer_class->FindDeclaredInstanceField("value", "I");
+        DCHECK(value_field != nullptr);
+        int32_t low = value_field->GetInt(low_integer);
+        if (static_cast<uint32_t>(value) - static_cast<uint32_t>(low) <
+            static_cast<uint32_t>(cache->GetLength())) {
+          // No call, we shall use a direct pointer to the Integer object. Note that we cannot
+          // do this for JIT as the "low" can change through reflection before emitting the code.
+          call_kind = LocationSummary::kNoCall;
+        }
+      }
+    }
   }
-  locations->AddTemp(first_argument_location);
-  locations->SetOut(return_location);
+
+  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  LocationSummary* locations = new (allocator) LocationSummary(invoke, call_kind, kIntrinsified);
+  if (call_kind == LocationSummary::kCallOnMainOnly) {
+    locations->SetInAt(0, Location::RegisterOrConstant(invoke->InputAt(0)));
+    locations->AddTemp(first_argument_location);
+    locations->SetOut(return_location);
+  } else {
+    locations->SetInAt(0, Location::ConstantLocation(invoke->InputAt(0)->AsConstant()));
+    locations->SetOut(Location::RequiresRegister());
+  }
 }
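
The bounds test `static_cast<uint32_t>(value) - static_cast<uint32_t>(low) < length` above is the standard single-comparison range check: in unsigned arithmetic any value below `low` wraps around to a large number, so one compare covers both bounds. A worked sketch:

    #include <cassert>
    #include <cstdint>

    bool InCacheRange(int32_t value, int32_t low, uint32_t length) {
      return static_cast<uint32_t>(value) - static_cast<uint32_t>(low) < length;
    }

    int main() {
      assert(InCacheRange(-128, -128, 256));   // Lowest cached value.
      assert(InCacheRange(127, -128, 256));    // Highest cached value.
      assert(!InCacheRange(128, -128, 256));   // One past the end.
      assert(!InCacheRange(-129, -128, 256));  // Below: wraps to 0xFFFFFFFF.
      return 0;
    }
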
 
-IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo() {
+static int32_t GetIntegerCacheLowFromIntegerCache(Thread* self)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<mirror::Class> cache_class = Runtime::Current()->GetClassLinker()->LookupClass(
+      self, "Ljava/lang/Integer$IntegerCache;", /* class_loader */ nullptr);
+  DCHECK(cache_class != nullptr);
+  DCHECK(cache_class->IsInitialized());
+  ArtField* low_field = cache_class->FindDeclaredStaticField("low", "I");
+  DCHECK(low_field != nullptr);
+  return low_field->GetInt(cache_class);
+}
+
+static uint32_t CalculateBootImageOffset(ObjPtr<mirror::Object> object)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  DCHECK(heap->ObjectIsInBootImageSpace(object));
+  return reinterpret_cast<const uint8_t*>(object.Ptr()) - heap->GetBootImageSpaces()[0]->Begin();
+}
+
+inline IntrinsicVisitor::IntegerValueOfInfo::IntegerValueOfInfo()
+    : integer_boot_image_offset(0u),
+      value_offset(0),
+      low(0),
+      length(0u),
+      value_boot_image_offset(0u) {}
+
+IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo(HInvoke* invoke) {
   // Note that we could cache all of the data looked up here, but there's no good
   // location for it. We don't want to add it to WellKnownClasses, to avoid creating global
   // jni values. Adding it as state to the compiler singleton seems like wrong
   // separation of concerns.
   // The need for this data should be pretty rare though.
 
-  // The most common case is that the classes are in the boot image and initialized,
-  // which is easy to generate code for. We bail if not.
+  // Note that at this point we can no longer abort the code generation. Therefore,
+  // we need to provide data that shall not lead to a crash even if the fields were
+  // modified through reflection since ComputeIntegerValueOfLocations() when JITting.
+
+  Runtime* runtime = Runtime::Current();
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
-  Runtime* runtime = Runtime::Current();
-  ClassLinker* class_linker = runtime->GetClassLinker();
-  gc::Heap* heap = runtime->GetHeap();
+  ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects = GetBootImageLiveObjects();
+  ObjPtr<mirror::Object> low_integer =
+      IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, 0u);
+  ObjPtr<mirror::Class> integer_class = low_integer->GetClass<kVerifyNone, kWithoutReadBarrier>();
+  ArtField* value_field = integer_class->FindDeclaredInstanceField("value", "I");
+  DCHECK(value_field != nullptr);
+
   IntegerValueOfInfo info;
-  info.integer_cache = class_linker->LookupClass(self,
-                                                 "Ljava/lang/Integer$IntegerCache;",
-                                                 /* class_loader */ nullptr).Ptr();
-  if (info.integer_cache == nullptr || !info.integer_cache->IsInitialized()) {
-    // Optimization only works if the class is initialized.
-    return info;
+  info.integer_boot_image_offset = CalculateBootImageOffset(integer_class);
+  info.value_offset = value_field->GetOffset().Uint32Value();
+  if (runtime->UseJitCompilation()) {
+    // Use the current `IntegerCache.low` for JIT to avoid truly surprising behavior if the
+    // code messes up the `value` field in the lowest cached Integer using reflection.
+    info.low = GetIntegerCacheLowFromIntegerCache(self);
+  } else {
+    // For AOT, `low_integer->value` should be the same as `IntegerCache.low`.
+    info.low = value_field->GetInt(low_integer);
+    DCHECK_EQ(info.low, GetIntegerCacheLowFromIntegerCache(self));
   }
-  if (!heap->ObjectIsInBootImageSpace(info.integer_cache)) {
-    // Optimization only works if the class is in the boot image.
-    // TODO: Implement the intrinsic for boot image compilation.
-    return info;
-  }
-  info.integer =
-      class_linker->LookupClass(self, "Ljava/lang/Integer;", /* class_loader */ nullptr).Ptr();
-  DCHECK(info.integer != nullptr);
-  DCHECK(info.integer->IsInitialized());  // Must be initialized since IntegerCache is initialized.
-  if (!heap->ObjectIsInBootImageSpace(info.integer)) {
-    // Optimization only works if the class is in the boot image.
-    return info;
+  // Do not look at `IntegerCache.high`, use the immutable length of the cache array instead.
+  info.length = dchecked_integral_cast<uint32_t>(
+      IntrinsicObjects::GetIntegerValueOfCache(boot_image_live_objects)->GetLength());
+
+  if (invoke->InputAt(0)->IsIntConstant()) {
+    int32_t input_value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+    uint32_t index = static_cast<uint32_t>(input_value) - static_cast<uint32_t>(info.low);
+    if (index < static_cast<uint32_t>(info.length)) {
+      ObjPtr<mirror::Object> integer =
+          IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, index);
+      DCHECK(runtime->GetHeap()->ObjectIsInBootImageSpace(integer));
+      info.value_boot_image_offset = CalculateBootImageOffset(integer);
+    } else {
+      info.value_boot_image_offset = 0u;  // Not in the cache.
+    }
+  } else {
+    info.array_data_boot_image_offset =
+        CalculateBootImageOffset(boot_image_live_objects) +
+        IntrinsicObjects::GetIntegerValueOfArrayDataOffset(boot_image_live_objects).Uint32Value();
   }
 
-  ArtField* field = info.integer_cache->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
-  CHECK(field != nullptr);
-  info.cache = static_cast<mirror::ObjectArray<mirror::Object>*>(
-      field->GetObject(info.integer_cache).Ptr());
-  if (info.cache == nullptr) {
-    return info;  // Did someone mess up the IntegerCache using reflection?
-  }
-
-  if (!heap->ObjectIsInBootImageSpace(info.cache)) {
-    // Optimization only works if the object is in the boot image.
-    return info;
-  }
-
-  field = info.integer->FindDeclaredInstanceField("value", "I");
-  CHECK(field != nullptr);
-  info.value_offset = field->GetOffset().Int32Value();
-
-  field = info.integer_cache->FindDeclaredStaticField("low", "I");
-  CHECK(field != nullptr);
-  info.low = field->GetInt(info.integer_cache);
-
-  field = info.integer_cache->FindDeclaredStaticField("high", "I");
-  CHECK(field != nullptr);
-  info.high = field->GetInt(info.integer_cache);
-
-  DCHECK_EQ(info.cache->GetLength(), info.high - info.low + 1);
   return info;
 }
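
Note on the range check used above (and by all of the per-architecture code generators below): subtracting `low` and comparing the result as an unsigned quantity folds the two-sided test `low <= value && value < low + length` into a single branch. A standalone sketch of the idiom, with illustrative names only:

    #include <cassert>
    #include <cstdint>

    // True iff value is in [low, low + length), using one unsigned compare.
    // Mirrors the "Add out, in, -low; Cmp out, length" sequences emitted below.
    bool InCacheRange(int32_t value, int32_t low, uint32_t length) {
      uint32_t index = static_cast<uint32_t>(value) - static_cast<uint32_t>(low);
      return index < length;
    }

    int main() {
      // With the default IntegerCache bounds [-128, 127], length == 256.
      assert(InCacheRange(-128, -128, 256u));
      assert(InCacheRange(127, -128, 256u));
      assert(!InCacheRange(128, -128, 256u));
      assert(!InCacheRange(-129, -128, 256u));
      return 0;
    }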
 
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 30cffac..f2b7823 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -126,33 +126,32 @@
                                              Location return_location,
                                              Location first_argument_location);
 
-  // Temporary data structure for holding Integer.valueOf useful data. We only
-  // use it if the mirror::Class* are in the boot image, so it is fine to keep raw
-  // mirror::Class pointers in this structure.
+  // Temporary data structure for holding Integer.valueOf data for generating code.
+  // We only use it if the boot image contains the IntegerCache objects.
   struct IntegerValueOfInfo {
-    IntegerValueOfInfo()
-        : integer_cache(nullptr),
-          integer(nullptr),
-          cache(nullptr),
-          low(0),
-          high(0),
-          value_offset(0) {}
+    IntegerValueOfInfo();
 
-    // The java.lang.IntegerCache class.
-    mirror::Class* integer_cache;
-    // The java.lang.Integer class.
-    mirror::Class* integer;
-    // Value of java.lang.IntegerCache#cache.
-    mirror::ObjectArray<mirror::Object>* cache;
-    // Value of java.lang.IntegerCache#low.
+    // Boot image offset of java.lang.Integer for allocating an instance.
+    uint32_t integer_boot_image_offset;
+    // Offset of the Integer.value field for initializing a newly allocated instance.
+    uint32_t value_offset;
+    // The low value in the cache.
     int32_t low;
-    // Value of java.lang.IntegerCache#high.
-    int32_t high;
-    // The offset of java.lang.Integer.value.
-    int32_t value_offset;
+    // The length of the cache array.
+    uint32_t length;
+
+    union {
+      // Boot image offset of the target Integer object for constant input in the cache range.
+      // If the input is out of range, this is set to 0u and the code must allocate a new Integer.
+      uint32_t value_boot_image_offset;
+
+      // Boot image offset of the cache array data used for non-constant input in the cache range.
+      // If the input is out of range, the code must allocate a new Integer.
+      uint32_t array_data_boot_image_offset;
+    };
   };
 
-  static IntegerValueOfInfo ComputeIntegerValueOfInfo();
+  static IntegerValueOfInfo ComputeIntegerValueOfInfo(HInvoke* invoke);
 
  protected:
   IntrinsicVisitor() {}
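
A sketch of how a backend consumes the union above: for a constant input it reads `value_boot_image_offset` (0u meaning "out of the cache range, allocate"), otherwise `array_data_boot_image_offset`. The struct below is a simplified stand-in for illustration, not ART's declaration:

    #include <cstdint>
    #include <iostream>

    struct Info {  // Simplified stand-in for IntrinsicVisitor::IntegerValueOfInfo.
      uint32_t integer_boot_image_offset;
      uint32_t value_offset;
      int32_t low;
      uint32_t length;
      union {
        uint32_t value_boot_image_offset;       // Constant input: target object, or 0u.
        uint32_t array_data_boot_image_offset;  // Non-constant input: cache array data.
      };
    };

    void Describe(const Info& info, bool input_is_constant) {
      if (input_is_constant) {
        if (info.value_boot_image_offset != 0u) {
          std::cout << "embed object at offset " << info.value_boot_image_offset << "\n";
        } else {
          std::cout << "allocate a new Integer\n";
        }
      } else {
        std::cout << "index cache data at offset " << info.array_data_boot_image_offset << "\n";
      }
    }

    int main() {
      Info info = {};  // Zero-initialized: the "allocate" case.
      Describe(info, /* input_is_constant */ true);
      info.array_data_boot_image_offset = 0x2000u;
      Describe(info, /* input_is_constant */ false);
      return 0;
    }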
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index c3d643a..b4890e4 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2791,7 +2791,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
   LocationSummary* locations = invoke->GetLocations();
   MacroAssembler* masm = GetVIXLAssembler();
 
@@ -2802,20 +2802,15 @@
   Register argument = calling_convention.GetRegisterAt(0);
   if (invoke->InputAt(0)->IsConstant()) {
     int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (value >= info.low && value <= info.high) {
+    if (info.value_boot_image_offset != 0u) {
       // Just embed the j.l.Integer in the code.
-      ScopedObjectAccess soa(Thread::Current());
-      mirror::Object* boxed = info.cache->Get(value + (-info.low));
-      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
-      __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+      codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
     } else {
+      DCHECK(locations->CanCall());
       // Allocate and initialize a new j.l.Integer.
       // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
       // JIT object table.
-      uint32_t address =
-          dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-      __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+      codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
       codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
       CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
       __ Mov(temp.W(), value);
@@ -2825,16 +2820,15 @@
       codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
     }
   } else {
+    DCHECK(locations->CanCall());
     Register in = RegisterFrom(locations->InAt(0), DataType::Type::kInt32);
     // Check bounds of our cache.
     __ Add(out.W(), in.W(), -info.low);
-    __ Cmp(out.W(), info.high - info.low + 1);
+    __ Cmp(out.W(), info.length);
     vixl::aarch64::Label allocate, done;
     __ B(&allocate, hs);
     // If the value is within the bounds, load the j.l.Integer directly from the array.
-    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
-    __ Ldr(temp.W(), codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+    codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_offset);
     MemOperand source = HeapOperand(
         temp, out.X(), LSL, DataType::SizeShift(DataType::Type::kReference));
     codegen_->Load(DataType::Type::kReference, out, source);
@@ -2842,8 +2836,7 @@
     __ B(&done);
     __ Bind(&allocate);
     // Otherwise allocate and initialize a new j.l.Integer.
-    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-    __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+    codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
     codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
     CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
     __ Str(in.W(), HeapOperand(out.W(), info.value_offset));
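
For orientation, the sequence emitted above implements the usual Integer.valueOf contract: return the shared boxed object for values in the cache range, otherwise allocate. A C++ model of the control flow (illustrative only; the real fast path loads directly from the boot image):

    #include <cstdint>

    struct Integer { int32_t value; };

    // Model of the emitted code: a cache hit returns an existing boxed value,
    // a cache miss allocates (kQuickAllocObjectInitialized) and stores `value`.
    Integer* ValueOf(int32_t v, Integer* const* cache, int32_t low, uint32_t length) {
      uint32_t index = static_cast<uint32_t>(v) - static_cast<uint32_t>(low);
      if (index < length) {
        return cache[index];    // Fast path: load from the cache array.
      }
      return new Integer{v};    // Slow path: allocate and initialize.
    }

    int main() {
      Integer zero{0};
      Integer* cache[1] = { &zero };
      return (ValueOf(0, cache, 0, 1u) == &zero) ? 0 : 1;
    }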
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index fecf1cc..0835060 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2940,7 +2940,7 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
   LocationSummary* locations = invoke->GetLocations();
   ArmVIXLAssembler* const assembler = GetAssembler();
 
@@ -2951,20 +2951,15 @@
   vixl32::Register argument = calling_convention.GetRegisterAt(0);
   if (invoke->InputAt(0)->IsConstant()) {
     int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (value >= info.low && value <= info.high) {
+    if (info.value_boot_image_offset != 0u) {
       // Just embed the j.l.Integer in the code.
-      ScopedObjectAccess soa(Thread::Current());
-      mirror::Object* boxed = info.cache->Get(value + (-info.low));
-      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
-      __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
+      codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
     } else {
+      DCHECK(locations->CanCall());
       // Allocate and initialize a new j.l.Integer.
       // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
       // JIT object table.
-      uint32_t address =
-          dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-      __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+      codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
       codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
       CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
       __ Mov(temp, value);
@@ -2974,23 +2969,21 @@
       codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
     }
   } else {
+    DCHECK(locations->CanCall());
     vixl32::Register in = RegisterFrom(locations->InAt(0));
     // Check bounds of our cache.
     __ Add(out, in, -info.low);
-    __ Cmp(out, info.high - info.low + 1);
+    __ Cmp(out, info.length);
     vixl32::Label allocate, done;
     __ B(hs, &allocate, /* is_far_target */ false);
     // If the value is within the bounds, load the j.l.Integer directly from the array.
-    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
-    __ Ldr(temp, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+    codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_offset);
     codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out);
     assembler->MaybeUnpoisonHeapReference(out);
     __ B(&done);
     __ Bind(&allocate);
     // Otherwise allocate and initialize a new j.l.Integer.
-    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-    __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+    codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
     codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
     CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
     assembler->StoreToOffset(kStoreWord, in, out, info.value_offset);
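
The GenerateMemoryBarrier(kStoreStore) calls in these paths keep the store to `value` from being reordered past the publication of the newly allocated object, matching final-field semantics. A rough C++11 analogy (the real code uses a hardware barrier, not C++ atomics), where release ordering plays the barrier's role:

    #include <atomic>
    #include <cstdint>

    struct Integer { int32_t value; };
    std::atomic<Integer*> published{nullptr};

    void Publish(int32_t v) {
      Integer* boxed = new Integer{v};
      // Release ordering ensures the `value` store is visible to any thread
      // that observes the reference -- the role of the StoreStore barrier.
      published.store(boxed, std::memory_order_release);
    }

    int main() {
      Publish(42);
      Integer* seen = published.load(std::memory_order_acquire);
      int result = (seen != nullptr && seen->value == 42) ? 0 : 1;
      delete seen;
      return result;
    }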
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index ae248a3..a3eb42b 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2601,7 +2601,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
   LocationSummary* locations = invoke->GetLocations();
   MipsAssembler* assembler = GetAssembler();
   InstructionCodeGeneratorMIPS* icodegen =
@@ -2609,22 +2609,18 @@
 
   Register out = locations->Out().AsRegister<Register>();
   InvokeRuntimeCallingConvention calling_convention;
+  Register argument = calling_convention.GetRegisterAt(0);
   if (invoke->InputAt(0)->IsConstant()) {
     int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (value >= info.low && value <= info.high) {
+    if (info.value_boot_image_offset != 0u) {
       // Just embed the j.l.Integer in the code.
-      ScopedObjectAccess soa(Thread::Current());
-      mirror::Object* boxed = info.cache->Get(value + (-info.low));
-      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
-      __ LoadConst32(out, address);
+      codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
     } else {
+      DCHECK(locations->CanCall());
       // Allocate and initialize a new j.l.Integer.
       // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
       // JIT object table.
-      uint32_t address =
-          dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-      __ LoadConst32(calling_convention.GetRegisterAt(0), address);
+      codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
       codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
       CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
       __ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
@@ -2633,27 +2629,23 @@
       icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
     }
   } else {
+    DCHECK(locations->CanCall());
     Register in = locations->InAt(0).AsRegister<Register>();
     MipsLabel allocate, done;
-    int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;
 
-    // Is (info.low <= in) && (in <= info.high)?
     __ Addiu32(out, in, -info.low);
-    // As unsigned quantities is out < (info.high - info.low + 1)?
-    if (IsInt<16>(count)) {
-      __ Sltiu(AT, out, count);
+    // As unsigned quantities, is out < info.length?
+    if (IsUint<15>(info.length)) {
+      __ Sltiu(AT, out, info.length);
     } else {
-      __ LoadConst32(AT, count);
+      __ LoadConst32(AT, info.length);
       __ Sltu(AT, out, AT);
     }
-    // Branch if out >= (info.high - info.low + 1).
-    // This means that "in" is outside of the range [info.low, info.high].
+    // Branch if out >= info.length. This means that "in" is outside of the valid range.
     __ Beqz(AT, &allocate);
 
     // If the value is within the bounds, load the j.l.Integer directly from the array.
-    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
-    __ LoadConst32(TMP, data_offset + address);
+    codegen_->LoadBootImageAddress(TMP, info.array_data_boot_image_offset);
     __ ShiftAndAdd(out, out, TMP, TIMES_4);
     __ Lw(out, out, 0);
     __ MaybeUnpoisonHeapReference(out);
@@ -2661,8 +2653,7 @@
 
     __ Bind(&allocate);
     // Otherwise allocate and initialize a new j.l.Integer.
-    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-    __ LoadConst32(calling_convention.GetRegisterAt(0), address);
+    codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
     codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
     CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
     __ StoreToOffset(kStoreWord, in, out, info.value_offset);
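
The IsUint<15>(info.length) test above exists because sltiu sign-extends its 16-bit immediate before the unsigned comparison, so immediates with bit 15 set would actually compare against values near 2^32. A compile-time sketch of the constraint:

    #include <cstdint>

    // sltiu's immediate is sign-extended; only [0, 2^15) survives unchanged
    // as an unsigned bound, hence IsUint<15> rather than IsUint<16>.
    constexpr bool FitsSltiuImmediate(uint32_t length) {
      return length < (1u << 15);
    }

    static_assert(FitsSltiuImmediate(256u), "default IntegerCache length fits");
    static_assert(!FitsSltiuImmediate(40000u), "larger caches need LoadConst32 + sltu");

    int main() { return 0; }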
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 9a9ae71..510040b 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2267,7 +2267,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
   LocationSummary* locations = invoke->GetLocations();
   Mips64Assembler* assembler = GetAssembler();
   InstructionCodeGeneratorMIPS64* icodegen =
@@ -2275,22 +2275,18 @@
 
   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   InvokeRuntimeCallingConvention calling_convention;
+  GpuRegister argument = calling_convention.GetRegisterAt(0);
   if (invoke->InputAt(0)->IsConstant()) {
     int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (value >= info.low && value <= info.high) {
+    if (info.value_boot_image_offset != 0u) {
       // Just embed the j.l.Integer in the code.
-      ScopedObjectAccess soa(Thread::Current());
-      mirror::Object* boxed = info.cache->Get(value + (-info.low));
-      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
-      __ LoadConst64(out, address);
+      codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
     } else {
+      DCHECK(locations->CanCall());
       // Allocate and initialize a new j.l.Integer.
       // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
       // JIT object table.
-      uint32_t address =
-          dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-      __ LoadConst64(calling_convention.GetRegisterAt(0), address);
+      codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
       codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
       CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
       __ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
@@ -2299,22 +2295,18 @@
       icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
     }
   } else {
+    DCHECK(locations->CanCall());
     GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
     Mips64Label allocate, done;
-    int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;
 
-    // Is (info.low <= in) && (in <= info.high)?
     __ Addiu32(out, in, -info.low);
-    // As unsigned quantities is out < (info.high - info.low + 1)?
-    __ LoadConst32(AT, count);
-    // Branch if out >= (info.high - info.low + 1).
-    // This means that "in" is outside of the range [info.low, info.high].
+    // As unsigned quantities, is out < info.length?
+    __ LoadConst32(AT, info.length);
+    // Branch if out >= info.length. This means that "in" is outside of the valid range.
     __ Bgeuc(out, AT, &allocate);
 
     // If the value is within the bounds, load the j.l.Integer directly from the array.
-    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
-    __ LoadConst64(TMP, data_offset + address);
+    codegen_->LoadBootImageAddress(TMP, info.array_data_boot_image_offset);
     __ Dlsa(out, out, TMP, TIMES_4);
     __ Lwu(out, out, 0);
     __ MaybeUnpoisonHeapReference(out);
@@ -2322,8 +2314,7 @@
 
     __ Bind(&allocate);
     // Otherwise allocate and initialize a new j.l.Integer.
-    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-    __ LoadConst64(calling_convention.GetRegisterAt(0), address);
+    codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
     codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
     CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
     __ StoreToOffset(kStoreWord, in, out, info.value_offset);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index f84a33b..645ca49 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2851,57 +2851,76 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitIntegerValueOf(HInvoke* invoke) {
+  DCHECK(invoke->IsInvokeStaticOrDirect());
   InvokeRuntimeCallingConvention calling_convention;
   IntrinsicVisitor::ComputeIntegerValueOfLocations(
       invoke,
       codegen_,
       Location::RegisterLocation(EAX),
       Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+
+  LocationSummary* locations = invoke->GetLocations();
+  if (locations != nullptr) {
+    HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
+    if (invoke_static_or_direct->HasSpecialInput() &&
+        invoke->InputAt(invoke_static_or_direct->GetSpecialInputIndex())
+            ->IsX86ComputeBaseMethodAddress()) {
+      locations->SetInAt(invoke_static_or_direct->GetSpecialInputIndex(),
+                         Location::RequiresRegister());
+    }
+  }
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+  DCHECK(invoke->IsInvokeStaticOrDirect());
+  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
   LocationSummary* locations = invoke->GetLocations();
   X86Assembler* assembler = GetAssembler();
 
   Register out = locations->Out().AsRegister<Register>();
   InvokeRuntimeCallingConvention calling_convention;
+  Register argument = calling_convention.GetRegisterAt(0);
   if (invoke->InputAt(0)->IsConstant()) {
     int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (value >= info.low && value <= info.high) {
+    if (info.value_boot_image_offset != 0u) {
       // Just embed the j.l.Integer in the code.
-      ScopedObjectAccess soa(Thread::Current());
-      mirror::Object* boxed = info.cache->Get(value + (-info.low));
-      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
-      __ movl(out, Immediate(address));
+      codegen_->LoadBootImageAddress(
+          out, info.value_boot_image_offset, invoke->AsInvokeStaticOrDirect());
     } else {
+      DCHECK(locations->CanCall());
       // Allocate and initialize a new j.l.Integer.
       // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
       // JIT object table.
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-      __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
+      codegen_->LoadBootImageAddress(
+          argument, info.integer_boot_image_offset, invoke->AsInvokeStaticOrDirect());
       codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
       CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
       __ movl(Address(out, info.value_offset), Immediate(value));
     }
   } else {
+    DCHECK(locations->CanCall());
     Register in = locations->InAt(0).AsRegister<Register>();
     // Check bounds of our cache.
     __ leal(out, Address(in, -info.low));
-    __ cmpl(out, Immediate(info.high - info.low + 1));
+    __ cmpl(out, Immediate(info.length));
     NearLabel allocate, done;
     __ j(kAboveEqual, &allocate);
     // If the value is within the bounds, load the j.l.Integer directly from the array.
-    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
-    __ movl(out, Address(out, TIMES_4, data_offset + address));
+    constexpr size_t kElementSize = sizeof(mirror::HeapReference<mirror::Object>);
+    uint32_t mid_array_boot_image_offset =
+        info.array_data_boot_image_offset - info.low * kElementSize;
+    codegen_->LoadBootImageAddress(
+        out, mid_array_boot_image_offset, invoke->AsInvokeStaticOrDirect());
+    DCHECK_NE(out, in);
+    static_assert((1u << TIMES_4) == sizeof(mirror::HeapReference<mirror::Object>),
+                  "Check heap reference size.");
+    __ movl(out, Address(out, in, TIMES_4, 0));
     __ MaybeUnpoisonHeapReference(out);
     __ jmp(&done);
     __ Bind(&allocate);
     // Otherwise allocate and initialize a new j.l.Integer.
-    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-    __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
+    codegen_->LoadBootImageAddress(
+        argument, info.integer_boot_image_offset, invoke->AsInvokeStaticOrDirect());
     codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
     CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
     __ movl(Address(out, info.value_offset), in);
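
The x86 path above biases the array base by -info.low * kElementSize so that the raw input value can serve directly as the scaled index in the addressing mode; the subtraction of `low` then happens only once, for the bounds check. A small model of the arithmetic (unsigned wraparound makes the bias exact):

    #include <cassert>
    #include <cstdint>

    // mid_array = array_data - low * 4, so mid_array + in * 4 == array_data + (in - low) * 4.
    uint32_t EffectiveAddress(uint32_t array_data, int32_t low, int32_t in) {
      constexpr uint32_t kElementSize = 4u;  // sizeof(mirror::HeapReference<mirror::Object>)
      uint32_t mid_array = array_data - static_cast<uint32_t>(low) * kElementSize;
      return mid_array + static_cast<uint32_t>(in) * kElementSize;
    }

    int main() {
      assert(EffectiveAddress(0x1000u, -128, -128) == 0x1000u);           // First element.
      assert(EffectiveAddress(0x1000u, -128, 0) == 0x1000u + 128u * 4u);  // Middle element.
      return 0;
    }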
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7627dc9..6d85f3a 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2660,56 +2660,47 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
   LocationSummary* locations = invoke->GetLocations();
   X86_64Assembler* assembler = GetAssembler();
 
   CpuRegister out = locations->Out().AsRegister<CpuRegister>();
   InvokeRuntimeCallingConvention calling_convention;
-  if (invoke->InputAt(0)->IsConstant()) {
+  CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
+  if (invoke->InputAt(0)->IsIntConstant()) {
     int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (value >= info.low && value <= info.high) {
+    if (info.value_boot_image_offset != 0u) {
       // Just embed the j.l.Integer in the code.
-      ScopedObjectAccess soa(Thread::Current());
-      mirror::Object* boxed = info.cache->Get(value + (-info.low));
-      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
-      __ movl(out, Immediate(static_cast<int32_t>(address)));
+      codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
     } else {
+      DCHECK(locations->CanCall());
       // Allocate and initialize a new j.l.Integer.
       // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
       // JIT object table.
-      CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
-      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-      __ movl(argument, Immediate(static_cast<int32_t>(address)));
+      codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
       codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
       CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
       __ movl(Address(out, info.value_offset), Immediate(value));
     }
   } else {
+    DCHECK(locations->CanCall());
     CpuRegister in = locations->InAt(0).AsRegister<CpuRegister>();
     // Check bounds of our cache.
     __ leal(out, Address(in, -info.low));
-    __ cmpl(out, Immediate(info.high - info.low + 1));
+    __ cmpl(out, Immediate(info.length));
     NearLabel allocate, done;
     __ j(kAboveEqual, &allocate);
     // If the value is within the bounds, load the j.l.Integer directly from the array.
-    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
-    if (data_offset + address <= std::numeric_limits<int32_t>::max()) {
-      __ movl(out, Address(out, TIMES_4, data_offset + address));
-    } else {
-      CpuRegister temp = CpuRegister(calling_convention.GetRegisterAt(0));
-      __ movl(temp, Immediate(static_cast<int32_t>(data_offset + address)));
-      __ movl(out, Address(temp, out, TIMES_4, 0));
-    }
+    DCHECK_NE(out.AsRegister(), argument.AsRegister());
+    codegen_->LoadBootImageAddress(argument, info.array_data_boot_image_offset);
+    static_assert((1u << TIMES_4) == sizeof(mirror::HeapReference<mirror::Object>),
+                  "Check heap reference size.");
+    __ movl(out, Address(argument, out, TIMES_4, 0));
     __ MaybeUnpoisonHeapReference(out);
     __ jmp(&done);
     __ Bind(&allocate);
     // Otherwise allocate and initialize a new j.l.Integer.
-    CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
-    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
-    __ movl(argument, Immediate(static_cast<int32_t>(address)));
+    codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
     codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
     CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
     __ movl(Address(out, info.value_offset), in);
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 9049457..05ec765 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -193,18 +193,19 @@
   }
 
   void HandleInvoke(HInvoke* invoke) {
-    // If this is an invoke-static/-direct with PC-relative dex cache array
-    // addressing, we need the PC-relative address base.
     HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
-    // We can't add a pointer to the constant area if we already have a current
-    // method pointer. This may arise when sharpening doesn't remove the current
-    // method pointer from the invoke.
-    if (invoke_static_or_direct != nullptr &&
-        invoke_static_or_direct->HasCurrentMethodInput()) {
+
+    // We can't add the method address if we already have a current method pointer.
+    // This may arise when sharpening doesn't remove the current method pointer from the invoke.
+    if (invoke_static_or_direct != nullptr && invoke_static_or_direct->HasCurrentMethodInput()) {
+      // Note: This happens only for recursive calls (including compiling an intrinsic
+      // by faking a call to itself; we use kRuntimeCall for this case).
       DCHECK(!invoke_static_or_direct->HasPcRelativeMethodLoadKind());
       return;
     }
 
+    // If this is an invoke-static/-direct with PC-relative addressing (within boot image
+    // or using .bss or .data.bimg.rel.ro), we need the PC-relative address base.
     bool base_added = false;
     if (invoke_static_or_direct != nullptr &&
         invoke_static_or_direct->HasPcRelativeMethodLoadKind() &&
@@ -224,7 +225,6 @@
       }
     }
 
-    // These intrinsics need the constant area.
     switch (invoke->GetIntrinsic()) {
       case Intrinsics::kMathAbsDouble:
       case Intrinsics::kMathAbsFloat:
@@ -235,7 +235,15 @@
         LOG(FATAL) << "Unreachable min/max/abs: intrinsics should have been lowered "
                       "to IR nodes by instruction simplifier";
         UNREACHABLE();
+      case Intrinsics::kIntegerValueOf:
+        // This intrinsic can be call-free when it loads the address of the boot image object.
+        // If we're compiling PIC, we need the address base for loading from .data.bimg.rel.ro.
+        if (!codegen_->GetCompilerOptions().GetCompilePic()) {
+          break;
+        }
+        FALLTHROUGH_INTENDED;
       case Intrinsics::kMathRoundFloat:
+        // This intrinsic needs the constant area.
         if (!base_added) {
           DCHECK(invoke_static_or_direct != nullptr);
           DCHECK(!invoke_static_or_direct->HasCurrentMethodInput());
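
A condensed model of the decision above (a hypothetical helper standing in for the switch): kMathRoundFloat always needs the constant-area base, while kIntegerValueOf needs it only under PIC, where boot image addresses are loaded from .data.bimg.rel.ro:

    #include <cassert>

    enum class Intrinsic { kIntegerValueOf, kMathRoundFloat, kOther };

    bool NeedsAddressBase(Intrinsic intrinsic, bool compile_pic) {
      switch (intrinsic) {
        case Intrinsic::kIntegerValueOf: return compile_pic;  // .data.bimg.rel.ro load.
        case Intrinsic::kMathRoundFloat: return true;         // Constant area.
        default: return false;
      }
    }

    int main() {
      assert(!NeedsAddressBase(Intrinsic::kIntegerValueOf, /* compile_pic */ false));
      assert(NeedsAddressBase(Intrinsic::kIntegerValueOf, /* compile_pic */ true));
      assert(NeedsAddressBase(Intrinsic::kMathRoundFloat, /* compile_pic */ false));
      return 0;
    }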
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index fa7ad82..42e6498 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1183,7 +1183,7 @@
 void ColoringIteration::BuildInterferenceGraph(
     const ScopedArenaVector<LiveInterval*>& intervals,
     const ScopedArenaVector<InterferenceNode*>& physical_nodes) {
-  DCHECK(interval_node_map_.Empty() && prunable_nodes_.empty());
+  DCHECK(interval_node_map_.empty() && prunable_nodes_.empty());
   // Build the interference graph efficiently by ordering range endpoints
   // by position and doing a linear sweep to find interferences. (That is, we
   // jump from endpoint to endpoint, maintaining a set of intervals live at each
@@ -1208,7 +1208,7 @@
       if (range != nullptr) {
         InterferenceNode* node =
             new (allocator_) InterferenceNode(sibling, register_allocator_->liveness_);
-        interval_node_map_.Insert(std::make_pair(sibling, node));
+        interval_node_map_.insert(std::make_pair(sibling, node));
 
         if (sibling->HasRegister()) {
           // Fixed nodes should alias the canonical node for the corresponding register.
@@ -1303,7 +1303,7 @@
     // Coalesce siblings.
     LiveInterval* next_sibling = interval->GetNextSibling();
     if (next_sibling != nullptr && interval->GetEnd() == next_sibling->GetStart()) {
-      auto it = interval_node_map_.Find(next_sibling);
+      auto it = interval_node_map_.find(next_sibling);
       if (it != interval_node_map_.end()) {
         InterferenceNode* sibling_node = it->second;
         CreateCoalesceOpportunity(node,
@@ -1318,7 +1318,7 @@
     if (parent->HasRegister()
         && parent->GetNextSibling() == interval
         && parent->GetEnd() == interval->GetStart()) {
-      auto it = interval_node_map_.Find(parent);
+      auto it = interval_node_map_.find(parent);
       if (it != interval_node_map_.end()) {
         InterferenceNode* parent_node = it->second;
         CreateCoalesceOpportunity(node,
@@ -1341,7 +1341,7 @@
         size_t position = predecessor->GetLifetimeEnd() - 1;
         LiveInterval* existing = interval->GetParent()->GetSiblingAt(position);
         if (existing != nullptr) {
-          auto it = interval_node_map_.Find(existing);
+          auto it = interval_node_map_.find(existing);
           if (it != interval_node_map_.end()) {
             InterferenceNode* existing_node = it->second;
             CreateCoalesceOpportunity(node,
@@ -1364,7 +1364,7 @@
         size_t position = predecessors[i]->GetLifetimeEnd() - 1;
         LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(position);
 
-        auto it = interval_node_map_.Find(input_interval);
+        auto it = interval_node_map_.find(input_interval);
         if (it != interval_node_map_.end()) {
           InterferenceNode* input_node = it->second;
           CreateCoalesceOpportunity(node, input_node, CoalesceKind::kPhi, position);
@@ -1380,7 +1380,7 @@
             = defined_by->InputAt(0)->GetLiveInterval()->GetSiblingAt(interval->GetStart() - 1);
         // TODO: Could we consider lifetime holes here?
         if (input_interval->GetEnd() == interval->GetStart()) {
-          auto it = interval_node_map_.Find(input_interval);
+          auto it = interval_node_map_.find(input_interval);
           if (it != interval_node_map_.end()) {
             InterferenceNode* input_node = it->second;
             CreateCoalesceOpportunity(node,
@@ -1407,7 +1407,7 @@
           LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(def_point);
           if (input_interval != nullptr &&
               input_interval->HasHighInterval() == interval->HasHighInterval()) {
-            auto it = interval_node_map_.Find(input_interval);
+            auto it = interval_node_map_.find(input_interval);
             if (it != interval_node_map_.end()) {
               InterferenceNode* input_node = it->second;
               CreateCoalesceOpportunity(node,
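
The Find/Insert/Size/Clear renames here and in the files below bring ART's arena containers in line with STL naming, so generic helpers can treat them like standard containers. A sketch of the shared shape, using std::unordered_map as the stand-in:

    #include <cassert>
    #include <unordered_map>

    // Works for any container exposing STL-style find()/end(), which now
    // includes ART's ArenaHashMap/ArenaHashSet after these renames.
    template <typename Map, typename Key>
    bool Contains(const Map& map, const Key& key) {
      return map.find(key) != map.end();
    }

    int main() {
      std::unordered_map<int, const char*> m;
      m.insert(std::make_pair(1, "one"));
      assert(Contains(m, 1));
      assert(!Contains(m, 2));
      m.clear();
      assert(m.size() == 0u);
      return 0;
    }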
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 8e98f19..c7683e0 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -262,14 +262,14 @@
     std::unique_ptr<SchedulingNode> node(
         new (allocator_) SchedulingNode(instr, allocator_, is_scheduling_barrier));
     SchedulingNode* result = node.get();
-    nodes_map_.Insert(std::make_pair(instr, std::move(node)));
+    nodes_map_.insert(std::make_pair(instr, std::move(node)));
     contains_scheduling_barrier_ |= is_scheduling_barrier;
     AddDependencies(instr, is_scheduling_barrier);
     return result;
   }
 
   void Clear() {
-    nodes_map_.Clear();
+    nodes_map_.clear();
     contains_scheduling_barrier_ = false;
   }
 
@@ -278,7 +278,7 @@
   }
 
   SchedulingNode* GetNode(const HInstruction* instr) const {
-    auto it = nodes_map_.Find(instr);
+    auto it = nodes_map_.find(instr);
     if (it == nodes_map_.end()) {
       return nullptr;
     } else {
@@ -294,7 +294,7 @@
   bool HasImmediateOtherDependency(const HInstruction* node, const HInstruction* other) const;
 
   size_t Size() const {
-    return nodes_map_.Size();
+    return nodes_map_.size();
   }
 
   // Dump the scheduling graph, in dot file format, appending it to the file
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index e8b3330..57f47af 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -301,16 +301,16 @@
     }
   }
 
-  size_t bit_offset = 0;
-  stack_maps_.Encode(&out_, &bit_offset);
-  register_masks_.Encode(&out_, &bit_offset);
-  stack_masks_.Encode(&out_, &bit_offset);
-  invoke_infos_.Encode(&out_, &bit_offset);
-  inline_infos_.Encode(&out_, &bit_offset);
-  dex_register_masks_.Encode(&out_, &bit_offset);
-  dex_register_maps_.Encode(&out_, &bit_offset);
-  dex_register_catalog_.Encode(&out_, &bit_offset);
-  EncodeVarintBits(&out_, &bit_offset, num_dex_registers_);
+  BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&out_);
+  stack_maps_.Encode(out);
+  register_masks_.Encode(out);
+  stack_masks_.Encode(out);
+  invoke_infos_.Encode(out);
+  inline_infos_.Encode(out);
+  dex_register_masks_.Encode(out);
+  dex_register_maps_.Encode(out);
+  dex_register_catalog_.Encode(out);
+  EncodeVarintBits(out, num_dex_registers_);
 
   return UnsignedLeb128Size(out_.size()) +  out_.size();
 }
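
The BitMemoryWriter change replaces threading an explicit `bit_offset` in/out parameter through every Encode() call with a writer object that carries its own position. A minimal sketch of the pattern (illustrative, not ART's BitMemoryWriter):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class BitWriter {
     public:
      explicit BitWriter(std::vector<uint8_t>* out) : out_(out) {}

      // The write position travels with the writer; callers no longer
      // pass and update a bit_offset themselves.
      void WriteBit(bool bit) {
        if (bit_offset_ % 8u == 0u) {
          out_->push_back(0u);
        }
        if (bit) {
          out_->back() |= static_cast<uint8_t>(1u << (bit_offset_ % 8u));
        }
        ++bit_offset_;
      }

     private:
      std::vector<uint8_t>* out_;
      size_t bit_offset_ = 0u;
    };

    int main() {
      std::vector<uint8_t> buffer;
      BitWriter writer(&buffer);
      writer.WriteBit(true);
      writer.WriteBit(false);
      writer.WriteBit(true);  // buffer[0] == 0b101.
      return (buffer.size() == 1u && buffer[0] == 5u) ? 0 : 1;
    }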
diff --git a/compiler/optimizing/superblock_cloner.cc b/compiler/optimizing/superblock_cloner.cc
index 1b43618..878967c 100644
--- a/compiler/optimizing/superblock_cloner.cc
+++ b/compiler/optimizing/superblock_cloner.cc
@@ -72,12 +72,12 @@
 
 // Returns whether two Edge sets are equal (ArenaHashSet doesn't have "Equal" method).
 static bool EdgeHashSetsEqual(const HEdgeSet* set1, const HEdgeSet* set2) {
-  if (set1->Size() != set2->Size()) {
+  if (set1->size() != set2->size()) {
     return false;
   }
 
   for (auto e : *set1) {
-    if (set2->Find(e) == set2->end()) {
+    if (set2->find(e) == set2->end()) {
       return false;
     }
   }
@@ -472,8 +472,8 @@
         continue;
       }
 
-      auto orig_redir = remap_orig_internal_->Find(HEdge(orig_block_id, orig_succ_id));
-      auto copy_redir = remap_copy_internal_->Find(HEdge(orig_block_id, orig_succ_id));
+      auto orig_redir = remap_orig_internal_->find(HEdge(orig_block_id, orig_succ_id));
+      auto copy_redir = remap_copy_internal_->find(HEdge(orig_block_id, orig_succ_id));
 
       // Due to construction all successors of copied block were set to original.
       if (copy_redir != remap_copy_internal_->end()) {
@@ -864,9 +864,9 @@
                           EdgeHashSetsEqual(&remap_copy_internal, remap_copy_internal_) &&
                           EdgeHashSetsEqual(&remap_incoming, remap_incoming_);
 
-  remap_orig_internal.Clear();
-  remap_copy_internal.Clear();
-  remap_incoming.Clear();
+  remap_orig_internal.clear();
+  remap_copy_internal.clear();
+  remap_incoming.clear();
 
   // Check whether remapping info corresponds to loop peeling.
   CollectRemappingInfoForPeelUnroll(/* to_unroll*/ false,
@@ -1022,16 +1022,16 @@
   for (HBasicBlock* back_edge_block : loop_info->GetBackEdges()) {
     HEdge e = HEdge(back_edge_block, loop_header);
     if (to_unroll) {
-      remap_orig_internal->Insert(e);
-      remap_copy_internal->Insert(e);
+      remap_orig_internal->insert(e);
+      remap_copy_internal->insert(e);
     } else {
-      remap_copy_internal->Insert(e);
+      remap_copy_internal->insert(e);
     }
   }
 
   // Set up remap_incoming edges set.
   if (!to_unroll) {
-    remap_incoming->Insert(HEdge(loop_info->GetPreHeader(), loop_header));
+    remap_incoming->insert(HEdge(loop_info->GetPreHeader(), loop_header));
   }
 }
 
diff --git a/compiler/optimizing/superblock_cloner_test.cc b/compiler/optimizing/superblock_cloner_test.cc
index df2e517..6f3bcda 100644
--- a/compiler/optimizing/superblock_cloner_test.cc
+++ b/compiler/optimizing/superblock_cloner_test.cc
@@ -708,8 +708,8 @@
   orig_bb_set.SetBit(preheader->GetBlockId());
 
   // Adjust incoming edges.
-  remap_incoming.Clear();
-  remap_incoming.Insert(HEdge(preheader->GetSinglePredecessor(), preheader));
+  remap_incoming.clear();
+  remap_incoming.insert(HEdge(preheader->GetSinglePredecessor(), preheader));
 
   HBasicBlockMap bb_map(std::less<HBasicBlock*>(), arena->Adapter(kArenaAllocSuperblockCloner));
   HInstructionMap hir_map(std::less<HInstruction*>(), arena->Adapter(kArenaAllocSuperblockCloner));
diff --git a/compiler/utils/dedupe_set-inl.h b/compiler/utils/dedupe_set-inl.h
index c866504..4e892f2 100644
--- a/compiler/utils/dedupe_set-inl.h
+++ b/compiler/utils/dedupe_set-inl.h
@@ -71,13 +71,13 @@
   const StoreKey* Add(Thread* self, size_t hash, const InKey& in_key) REQUIRES(!lock_) {
     MutexLock lock(self, lock_);
     HashedKey<InKey> hashed_in_key(hash, &in_key);
-    auto it = keys_.Find(hashed_in_key);
+    auto it = keys_.find(hashed_in_key);
     if (it != keys_.end()) {
       DCHECK(it->Key() != nullptr);
       return it->Key();
     }
     const StoreKey* store_key = alloc_.Copy(in_key);
-    keys_.Insert(HashedKey<StoreKey> { hash, store_key });
+    keys_.insert(HashedKey<StoreKey> { hash, store_key });
     return store_key;
   }
 
@@ -90,7 +90,7 @@
       // Note: The total_probe_distance will be updated with the current state.
       // It may have been higher before a re-hash.
       global_stats->total_probe_distance += keys_.TotalProbeDistance();
-      global_stats->total_size += keys_.Size();
+      global_stats->total_size += keys_.size();
       for (const HashedKey<StoreKey>& key : keys_) {
         auto it = stats.find(key.Hash());
         if (it == stats.end()) {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 00c893a..6cd947c 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -27,7 +27,6 @@
 #include <sstream>
 #include <string>
 #include <type_traits>
-#include <unordered_set>
 #include <vector>
 
 #if defined(__linux__) && defined(__arm__)
@@ -954,9 +953,9 @@
     compiler_options_->force_determinism_ = force_determinism_;
 
     if (passes_to_run_filename_ != nullptr) {
-      passes_to_run_.reset(ReadCommentedInputFromFile<std::vector<std::string>>(
+      passes_to_run_ = ReadCommentedInputFromFile<std::vector<std::string>>(
           passes_to_run_filename_,
-          nullptr));         // No post-processing.
+          nullptr);         // No post-processing.
       if (passes_to_run_.get() == nullptr) {
         Usage("Failed to read list of passes to run.");
       }
@@ -1493,13 +1492,12 @@
     // If we don't have a profile, treat it as an empty set of classes. b/77340429
     if (image_classes_ == nullptr) {
       // May be non-null when --image-classes is passed in, in that case avoid clearing the list.
-      image_classes_.reset(new std::unordered_set<std::string>());
+      image_classes_.reset(new HashSet<std::string>());
     }
     if (profile_compilation_info_ != nullptr) {
       // Filter out class path classes since we don't want to include these in the image.
       image_classes_.reset(
-          new std::unordered_set<std::string>(
-              profile_compilation_info_->GetClassDescriptors(dex_files_)));
+          new HashSet<std::string>(profile_compilation_info_->GetClassDescriptors(dex_files_)));
       VLOG(compiler) << "Loaded " << image_classes_->size()
                      << " image class descriptors from profile";
       if (VLOG_IS_ON(compiler)) {
@@ -1850,7 +1848,7 @@
                                      compiler_kind_,
                                      instruction_set_,
                                      instruction_set_features_.get(),
-                                     image_classes_.release(),
+                                     std::move(image_classes_),
                                      thread_count_,
                                      swap_fd_,
                                      profile_compilation_info_.get()));
@@ -2390,20 +2388,20 @@
         return false;
       }
     } else if (IsBootImage()) {
-      image_classes_.reset(new std::unordered_set<std::string>);
+      image_classes_.reset(new HashSet<std::string>);
     }
     return true;
   }
 
-  static std::unique_ptr<std::unordered_set<std::string>> ReadClasses(const char* zip_filename,
-                                                                      const char* classes_filename,
-                                                                      const char* tag) {
-    std::unique_ptr<std::unordered_set<std::string>> classes;
+  static std::unique_ptr<HashSet<std::string>> ReadClasses(const char* zip_filename,
+                                                           const char* classes_filename,
+                                                           const char* tag) {
+    std::unique_ptr<HashSet<std::string>> classes;
     std::string error_msg;
     if (zip_filename != nullptr) {
-      classes.reset(ReadImageClassesFromZip(zip_filename, classes_filename, &error_msg));
+      classes = ReadImageClassesFromZip(zip_filename, classes_filename, &error_msg);
     } else {
-      classes.reset(ReadImageClassesFromFile(classes_filename));
+      classes = ReadImageClassesFromFile(classes_filename);
     }
     if (classes == nullptr) {
       LOG(ERROR) << "Failed to create list of " << tag << " classes from '"
@@ -2414,9 +2412,9 @@
 
   bool PrepareDirtyObjects() {
     if (dirty_image_objects_filename_ != nullptr) {
-      dirty_image_objects_.reset(ReadCommentedInputFromFile<std::unordered_set<std::string>>(
+      dirty_image_objects_ = ReadCommentedInputFromFile<HashSet<std::string>>(
           dirty_image_objects_filename_,
-          nullptr));
+          nullptr);
       if (dirty_image_objects_ == nullptr) {
         LOG(ERROR) << "Failed to create list of dirty objects from '"
             << dirty_image_objects_filename_ << "'";
@@ -2678,29 +2676,28 @@
   }
 
   // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
-  static std::unordered_set<std::string>* ReadImageClassesFromFile(
+  static std::unique_ptr<HashSet<std::string>> ReadImageClassesFromFile(
       const char* image_classes_filename) {
     std::function<std::string(const char*)> process = DotToDescriptor;
-    return ReadCommentedInputFromFile<std::unordered_set<std::string>>(image_classes_filename,
-                                                                       &process);
+    return ReadCommentedInputFromFile<HashSet<std::string>>(image_classes_filename, &process);
   }
 
   // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
-  static std::unordered_set<std::string>* ReadImageClassesFromZip(
+  static std::unique_ptr<HashSet<std::string>> ReadImageClassesFromZip(
         const char* zip_filename,
         const char* image_classes_filename,
         std::string* error_msg) {
     std::function<std::string(const char*)> process = DotToDescriptor;
-    return ReadCommentedInputFromZip<std::unordered_set<std::string>>(zip_filename,
-                                                                      image_classes_filename,
-                                                                      &process,
-                                                                      error_msg);
+    return ReadCommentedInputFromZip<HashSet<std::string>>(zip_filename,
+                                                           image_classes_filename,
+                                                           &process,
+                                                           error_msg);
   }
 
   // Read lines from the given file, dropping comments and empty lines. Post-process each line with
   // the given function.
   template <typename T>
-  static T* ReadCommentedInputFromFile(
+  static std::unique_ptr<T> ReadCommentedInputFromFile(
       const char* input_filename, std::function<std::string(const char*)>* process) {
     std::unique_ptr<std::ifstream> input_file(new std::ifstream(input_filename, std::ifstream::in));
     if (input_file.get() == nullptr) {
@@ -2710,13 +2707,13 @@
     std::unique_ptr<T> result(
         ReadCommentedInputStream<T>(*input_file, process));
     input_file->close();
-    return result.release();
+    return result;
   }
 
   // Read lines from the given file from the given zip file, dropping comments and empty lines.
   // Post-process each line with the given function.
   template <typename T>
-  static T* ReadCommentedInputFromZip(
+  static std::unique_ptr<T> ReadCommentedInputFromZip(
       const char* zip_filename,
       const char* input_filename,
       std::function<std::string(const char*)>* process,
@@ -2748,7 +2745,7 @@
   // Read lines from the given stream, dropping comments and empty lines. Post-process each line
   // with the given function.
   template <typename T>
-  static T* ReadCommentedInputStream(
+  static std::unique_ptr<T> ReadCommentedInputStream(
       std::istream& in_stream,
       std::function<std::string(const char*)>* process) {
     std::unique_ptr<T> output(new T());
@@ -2765,7 +2762,7 @@
         output->insert(output->end(), dot);
       }
     }
-    return output.release();
+    return output;
   }
 
   void LogCompletionTime() {
@@ -2854,10 +2851,8 @@
   ImageHeader::StorageMode image_storage_mode_;
   const char* passes_to_run_filename_;
   const char* dirty_image_objects_filename_;
-  std::unique_ptr<std::unordered_set<std::string>> image_classes_;
-  std::unique_ptr<std::unordered_set<std::string>> compiled_classes_;
-  std::unique_ptr<std::unordered_set<std::string>> compiled_methods_;
-  std::unique_ptr<std::unordered_set<std::string>> dirty_image_objects_;
+  std::unique_ptr<HashSet<std::string>> image_classes_;
+  std::unique_ptr<HashSet<std::string>> dirty_image_objects_;
   std::unique_ptr<std::vector<std::string>> passes_to_run_;
   bool multi_image_;
   bool is_host_;
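
The unique_ptr conversions in this file follow one pattern: factories return std::unique_ptr<T> instead of raw T*, and ownership transfers use std::move rather than release()/reset() pairs. A self-contained sketch of that shape:

    #include <memory>
    #include <string>
    #include <vector>

    // Factory returning ownership directly; callers write
    //   auto classes = ReadLines();
    // instead of wrapping a raw pointer with reset()/release().
    std::unique_ptr<std::vector<std::string>> ReadLines() {
      auto result = std::make_unique<std::vector<std::string>>();
      result->push_back("Ljava/lang/Object;");
      return result;  // Implicit move; no release() needed.
    }

    // Consumers that assume ownership take the pointer by value and move into it.
    void TakeOwnership(std::unique_ptr<std::vector<std::string>> classes) {
      classes->clear();  // Owned here; destroyed at end of scope.
    }

    int main() {
      std::unique_ptr<std::vector<std::string>> classes = ReadLines();
      TakeOwnership(std::move(classes));  // Mirrors std::move(image_classes_) above.
      return 0;
    }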
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index a060fd2..ad44624 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -1768,7 +1768,7 @@
     writer.Finish();
     ASSERT_EQ(apk_file.GetFile()->Flush(), 0);
   }
-  const std::string dex_location = apk_file.GetFilename();
+  const std::string& dex_location = apk_file.GetFilename();
   const std::string odex_location = GetOdexDir() + "/output.odex";
   GenerateOdexForTest(dex_location,
                       odex_location,
@@ -1974,7 +1974,7 @@
         << "Failed to find candidate code item with only one code unit in last instruction.";
   });
 
-  std::string dex_location = temp_dex.GetFilename();
+  const std::string& dex_location = temp_dex.GetFilename();
   std::string odex_location = GetOdexDir() + "/quickened.odex";
   std::string vdex_location = GetOdexDir() + "/quickened.vdex";
   std::unique_ptr<File> vdex_output(OS::CreateEmptyFile(vdex_location.c_str()));
@@ -2049,7 +2049,7 @@
     writer.Finish();
     ASSERT_EQ(invalid_dex.GetFile()->Flush(), 0);
   }
-  const std::string dex_location = invalid_dex.GetFilename();
+  const std::string& dex_location = invalid_dex.GetFilename();
   const std::string odex_location = GetOdexDir() + "/output.odex";
   std::string error_msg;
   int status = GenerateOdexForTestWithStatus(
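
The `const std::string&` bindings above avoid copying the getter's return value; this is safe because the file objects outlive the references. A minimal illustration:

    #include <string>

    struct ScratchFile {  // Hypothetical stand-in for the test fixture's file type.
      const std::string& GetFilename() const { return filename_; }
      std::string filename_ = "/tmp/output.apk";
    };

    int main() {
      ScratchFile apk_file;
      // Binds to the member without a copy; valid while apk_file is alive.
      const std::string& dex_location = apk_file.GetFilename();
      return dex_location.empty() ? 1 : 0;
    }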
diff --git a/dex2oat/linker/arm/relative_patcher_arm_base.cc b/dex2oat/linker/arm/relative_patcher_arm_base.cc
index 7cb8ae5..a2ba339 100644
--- a/dex2oat/linker/arm/relative_patcher_arm_base.cc
+++ b/dex2oat/linker/arm/relative_patcher_arm_base.cc
@@ -251,7 +251,7 @@
       continue;
     }
     // Get the base name to use for the first occurrence of the thunk.
-    std::string base_name = data.GetDebugName();
+    const std::string& base_name = data.GetDebugName();
     for (size_t i = start, num = data.NumberOfThunks(); i != num; ++i) {
       debug::MethodDebugInfo info = {};
       if (i == 0u) {
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index f0daf69..12ecd3a 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -27,6 +27,7 @@
 
 #include "art_method-inl.h"
 #include "base/file_utils.h"
+#include "base/hash_set.h"
 #include "base/unix_file/fd_file.h"
 #include "base/utils.h"
 #include "class_linker-inl.h"
@@ -93,8 +94,8 @@
     options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
   }
 
-  std::unordered_set<std::string>* GetImageClasses() OVERRIDE {
-    return new std::unordered_set<std::string>(image_classes_);
+  std::unique_ptr<HashSet<std::string>> GetImageClasses() OVERRIDE {
+    return std::make_unique<HashSet<std::string>>(image_classes_);
   }
 
   ArtMethod* FindCopiedMethod(ArtMethod* origin, ObjPtr<mirror::Class> klass)
@@ -110,7 +111,7 @@
   }
 
  private:
-  std::unordered_set<std::string> image_classes_;
+  HashSet<std::string> image_classes_;
 };
 
 inline CompilationHelper::~CompilationHelper() {
@@ -426,7 +427,7 @@
   }
 
   ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
-  std::unordered_set<std::string> image_classes(*compiler_driver_->GetImageClasses());
+  HashSet<std::string> image_classes(*compiler_driver_->GetImageClasses());
 
   // Need to delete the compiler since it has worker threads which are attached to runtime.
   compiler_driver_.reset();
@@ -496,7 +497,7 @@
       ObjPtr<mirror::Class> klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
       EXPECT_TRUE(klass != nullptr) << descriptor;
       uint8_t* raw_klass = reinterpret_cast<uint8_t*>(klass.Ptr());
-      if (image_classes.find(descriptor) == image_classes.end()) {
+      if (image_classes.find(StringPiece(descriptor)) == image_classes.end()) {
         EXPECT_TRUE(raw_klass >= image_end || raw_klass < image_begin) << descriptor;
       } else {
         // Image classes should be located inside the image.
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index da69b83..a61ad8f 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -52,7 +52,6 @@
 #include "handle_scope-inl.h"
 #include "image.h"
 #include "imt_conflict_table.h"
-#include "subtype_check.h"
 #include "jni/jni_internal.h"
 #include "linear_alloc.h"
 #include "lock_word.h"
@@ -71,8 +70,10 @@
 #include "oat.h"
 #include "oat_file.h"
 #include "oat_file_manager.h"
+#include "optimizing/intrinsic_objects.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
+#include "subtype_check.h"
 #include "utils/dex_cache_arrays_layout-inl.h"
 #include "well_known_classes.h"
 
@@ -1332,47 +1333,6 @@
   return dex_caches;
 }
 
-static ObjPtr<mirror::ObjectArray<mirror::Object>> LookupIntegerCache(Thread* self,
-                                                                      ClassLinker* class_linker)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
-      self, "Ljava/lang/Integer$IntegerCache;", /* class_linker */ nullptr);
-  if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
-    return nullptr;
-  }
-  ArtField* cache_field =
-      integer_cache_class->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
-  CHECK(cache_field != nullptr);
-  ObjPtr<ObjectArray<mirror::Object>> integer_cache =
-      ObjPtr<ObjectArray<mirror::Object>>::DownCast(cache_field->GetObject(integer_cache_class));
-  CHECK(integer_cache != nullptr);
-  return integer_cache;
-}
-
-static ObjPtr<mirror::ObjectArray<mirror::Object>> CollectBootImageLiveObjects(
-    Thread* self,
-    ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_) {
-  // The objects used for the Integer.valueOf() intrinsic must remain live even if references
-  // to them are removed using reflection. Image roots are not accessible through reflection,
-  // so the array we construct here shall keep them alive.
-  StackHandleScope<1> hs(self);
-  Handle<ObjectArray<mirror::Object>> integer_cache =
-      hs.NewHandle(LookupIntegerCache(self, class_linker));
-  size_t live_objects_size =
-      (integer_cache != nullptr) ? (/* cache */ 1u + integer_cache->GetLength()) : 0u;
-  ObjPtr<mirror::ObjectArray<mirror::Object>> live_objects = ObjectArray<Object>::Alloc(
-      self, GetClassRoot<ObjectArray<Object>>(class_linker), live_objects_size);
-  int32_t index = 0;
-  if (integer_cache != nullptr) {
-    live_objects->Set(index++, integer_cache.Get());
-    for (int32_t i = 0, length = integer_cache->GetLength(); i != length; ++i) {
-      live_objects->Set(index++, integer_cache->Get(i));
-    }
-  }
-  CHECK_EQ(index, live_objects->GetLength());
-  return live_objects;
-}
-
 ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
   Runtime* runtime = Runtime::Current();
   ClassLinker* class_linker = runtime->GetClassLinker();
@@ -1397,7 +1357,7 @@
                           runtime->GetPreAllocatedNoClassDefFoundError());
   if (!compile_app_image_) {
     ObjPtr<ObjectArray<Object>> boot_image_live_objects =
-        CollectBootImageLiveObjects(self, class_linker);
+        IntrinsicObjects::AllocateBootImageLiveObjects(self, class_linker);
     image_roots->Set<false>(ImageHeader::kBootImageLiveObjects, boot_image_live_objects);
   }
   for (int32_t i = 0, num = ImageHeader::NumberOfImageRoots(compile_app_image_); i != num; ++i) {
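
The removed CollectBootImageLiveObjects() logic moves into the new optimizing/intrinsic_objects.cc (added to compiler/Android.bp above). Judging only from the call site, the new header plausibly exposes something like the following sketch; the real header may declare further helpers for the Integer.valueOf() intrinsic:

```cpp
// Hypothetical shape of optimizing/intrinsic_objects.h, inferred from the call above.
class IntrinsicObjects {
 public:
  // Keeps the Integer.valueOf() cache and its boxed entries live via the
  // BootImageLiveObjects image root, even if reflection clears the cache field.
  static ObjPtr<mirror::ObjectArray<mirror::Object>> AllocateBootImageLiveObjects(
      Thread* self,
      ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_);
};
```
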
@@ -2869,7 +2829,7 @@
     ImageHeader::StorageMode image_storage_mode,
     const std::vector<const char*>& oat_filenames,
     const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
-    const std::unordered_set<std::string>* dirty_image_objects)
+    const HashSet<std::string>* dirty_image_objects)
     : compiler_driver_(compiler_driver),
       global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
       image_objects_offset_begin_(0),
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index c282a2a..2fcf5fd 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -31,6 +31,7 @@
 #include "base/bit_utils.h"
 #include "base/dchecked_vector.h"
 #include "base/enums.h"
+#include "base/hash_set.h"
 #include "base/length_prefixed_array.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
@@ -80,7 +81,7 @@
               ImageHeader::StorageMode image_storage_mode,
               const std::vector<const char*>& oat_filenames,
               const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
-              const std::unordered_set<std::string>* dirty_image_objects);
+              const HashSet<std::string>* dirty_image_objects);
 
   bool PrepareImageAddressSpace(TimingLogger* timings);
 
@@ -644,7 +645,7 @@
   const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
 
   // Set of objects known to be dirty in the image. Can be nullptr if there are none.
-  const std::unordered_set<std::string>* dirty_image_objects_;
+  const HashSet<std::string>* dirty_image_objects_;
 
   class ComputeLazyFieldsForClassesVisitor;
   class FixupClassVisitor;
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
index 4b142a8..e7d5ed9 100644
--- a/dexlayout/compact_dex_writer.h
+++ b/dexlayout/compact_dex_writer.h
@@ -22,7 +22,7 @@
 #include <memory>  // For unique_ptr
 #include <unordered_map>
 
-#include "base/utils.h"
+#include "base/data_hash.h"
 #include "dex_writer.h"
 
 namespace art {
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index f148b94..2b1352d 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -468,7 +468,7 @@
     }
 
     std::vector<std::string> test_files = { dex_file, profile_file, output_dex, second_output_dex };
-    for (auto test_file : test_files) {
+    for (const std::string& test_file : test_files) {
       if (!UnlinkFile(test_file)) {
         return false;
       }
@@ -501,7 +501,7 @@
     }
 
     std::vector<std::string> dex_files = { input_dex, output_dex };
-    for (auto dex_file : dex_files) {
+    for (const std::string& dex_file : dex_files) {
       if (!UnlinkFile(dex_file)) {
         return false;
       }
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index ddb8fe1..dea92e0 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -338,7 +338,7 @@
   ImgObjectVisitor(ComputeDirtyFunc dirty_func,
                    const uint8_t* begin_image_ptr,
                    const std::set<size_t>& dirty_pages) :
-    dirty_func_(dirty_func),
+    dirty_func_(std::move(dirty_func)),
     begin_image_ptr_(begin_image_ptr),
     dirty_pages_(dirty_pages) { }
 
@@ -356,7 +356,7 @@
   }
 
  private:
-  ComputeDirtyFunc dirty_func_;
+  const ComputeDirtyFunc dirty_func_;
   const uint8_t* begin_image_ptr_;
   const std::set<size_t>& dirty_pages_;
 };
@@ -649,7 +649,7 @@
   ImgArtMethodVisitor(ComputeDirtyFunc dirty_func,
                       const uint8_t* begin_image_ptr,
                       const std::set<size_t>& dirty_pages) :
-    dirty_func_(dirty_func),
+    dirty_func_(std::move(dirty_func)),
     begin_image_ptr_(begin_image_ptr),
     dirty_pages_(dirty_pages) { }
   virtual ~ImgArtMethodVisitor() OVERRIDE { }
@@ -658,7 +658,7 @@
   }
 
  private:
-  ComputeDirtyFunc dirty_func_;
+  const ComputeDirtyFunc dirty_func_;
   const uint8_t* begin_image_ptr_;
   const std::set<size_t>& dirty_pages_;
 };
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index adf0ad6..4ee48da 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -31,6 +31,8 @@
         "base/malloc_arena_pool.cc",
         "base/memory_region.cc",
         "base/mem_map.cc",
+        // "base/mem_map_fuchsia.cc", put in target when fuchsia supported by soong
+        "base/mem_map_unix.cc",
         "base/os_linux.cc",
         "base/runtime_debug.cc",
         "base/safe_copy.cc",
diff --git a/libartbase/base/arena_containers.h b/libartbase/base/arena_containers.h
index bd57fb1..41b3bb9 100644
--- a/libartbase/base/arena_containers.h
+++ b/libartbase/base/arena_containers.h
@@ -70,15 +70,15 @@
 
 template <typename T,
           typename EmptyFn = DefaultEmptyFn<T>,
-          typename HashFn = std::hash<T>,
-          typename Pred = std::equal_to<T>>
+          typename HashFn = DefaultHashFn<T>,
+          typename Pred = DefaultPred<T>>
 using ArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ArenaAllocatorAdapter<T>>;
 
 template <typename Key,
           typename Value,
           typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>,
-          typename HashFn = std::hash<Key>,
-          typename Pred = std::equal_to<Key>>
+          typename HashFn = DefaultHashFn<Key>,
+          typename Pred = DefaultPred<Key>>
 using ArenaHashMap = HashMap<Key,
                              Value,
                              EmptyFn,
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index b4764fd..6e491b0 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -99,13 +99,6 @@
     return value & mask;
   }
 
-  // Load bits starting at given `bit_offset`, and advance the `bit_offset`.
-  ALWAYS_INLINE uint32_t LoadBitsAndAdvance(size_t* bit_offset, size_t bit_length) const {
-    uint32_t result = LoadBits(*bit_offset, bit_length);
-    *bit_offset += bit_length;
-    return result;
-  }
-
   // Store `bit_length` bits in `data` starting at given `bit_offset`.
   // The least significant bit is stored in the smallest memory offset.
   ALWAYS_INLINE void StoreBits(size_t bit_offset, uint32_t value, size_t bit_length) {
@@ -132,12 +125,6 @@
     DCHECK_EQ(value, LoadBits(bit_offset, bit_length));
   }
 
-  // Store bits starting at given `bit_offset`, and advance the `bit_offset`.
-  ALWAYS_INLINE void StoreBitsAndAdvance(size_t* bit_offset, uint32_t value, size_t bit_length) {
-    StoreBits(*bit_offset, value, bit_length);
-    *bit_offset += bit_length;
-  }
-
   // Store bits from other bit region.
   ALWAYS_INLINE void StoreBits(size_t bit_offset, const BitMemoryRegion& src, size_t bit_length) {
     DCHECK_LE(bit_offset, bit_size_);
@@ -178,6 +165,62 @@
   size_t bit_size_ = 0;
 };
 
+class BitMemoryReader {
+ public:
+  explicit BitMemoryReader(BitMemoryRegion region, size_t bit_offset = 0)
+      : region_(region), bit_offset_(bit_offset) { }
+
+  size_t GetBitOffset() const { return bit_offset_; }
+
+  ALWAYS_INLINE BitMemoryRegion Skip(size_t bit_length) {
+    BitMemoryRegion result = region_.Subregion(bit_offset_, bit_length);
+    bit_offset_ += bit_length;
+    return result;
+  }
+
+  ALWAYS_INLINE uint32_t ReadBits(size_t bit_length) {
+    uint32_t result = region_.LoadBits(bit_offset_, bit_length);
+    bit_offset_ += bit_length;
+    return result;
+  }
+
+ private:
+  BitMemoryRegion region_;
+  size_t bit_offset_;
+
+  DISALLOW_COPY_AND_ASSIGN(BitMemoryReader);
+};
+
+template<typename Vector>
+class BitMemoryWriter {
+ public:
+  explicit BitMemoryWriter(Vector* out, size_t bit_offset = 0)
+      : out_(out), bit_offset_(bit_offset) { }
+
+  size_t GetBitOffset() const { return bit_offset_; }
+
+  ALWAYS_INLINE BitMemoryRegion Allocate(size_t bit_length) {
+    out_->resize(BitsToBytesRoundUp(bit_offset_ + bit_length));
+    BitMemoryRegion region(MemoryRegion(out_->data(), out_->size()), bit_offset_, bit_length);
+    bit_offset_ += bit_length;
+    return region;
+  }
+
+  ALWAYS_INLINE void WriteBits(uint32_t value, size_t bit_length) {
+    Allocate(bit_length).StoreBits(0, value, bit_length);
+  }
+
+  BitMemoryRegion GetWrittenRegion() const {
+    return BitMemoryRegion(MemoryRegion(out_->data(), out_->size()), 0, bit_offset_);
+  }
+
+ private:
+  Vector* out_;
+  size_t bit_offset_;
+
+  DISALLOW_COPY_AND_ASSIGN(BitMemoryWriter);
+};
+
 }  // namespace art
 
 #endif  // ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_
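
The two new classes replace the LoadBitsAndAdvance/StoreBitsAndAdvance pattern deleted above: the bit cursor now lives inside the reader/writer instead of being threaded through every call as a `size_t*`. A minimal round trip, using only the API shown in this patch:

```cpp
std::vector<uint8_t> buffer;
BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
writer.WriteBits(0x2a, /* bit_length */ 6);  // Resizes `buffer` as needed via Allocate().
writer.WriteBits(0x3, /* bit_length */ 2);

BitMemoryReader reader(writer.GetWrittenRegion());
CHECK_EQ(reader.ReadBits(6), 0x2au);
CHECK_EQ(reader.ReadBits(2), 0x3u);
CHECK_EQ(reader.GetBitOffset(), writer.GetBitOffset());  // Both cursors advanced by 8 bits.
```
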
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
index ef2cf21..418d7c4 100644
--- a/libartbase/base/bit_table.h
+++ b/libartbase/base/bit_table.h
@@ -39,28 +39,24 @@
 // The first four bits determine the variable length of the encoded integer:
 //   Values 0..11 represent the result as-is, with no further following bits.
 //   Values 12..15 mean the result is in the next 8/16/24/32-bits respectively.
-ALWAYS_INLINE static inline uint32_t DecodeVarintBits(BitMemoryRegion region, size_t* bit_offset) {
-  uint32_t x = region.LoadBitsAndAdvance(bit_offset, kVarintHeaderBits);
+ALWAYS_INLINE static inline uint32_t DecodeVarintBits(BitMemoryReader& reader) {
+  uint32_t x = reader.ReadBits(kVarintHeaderBits);
   if (x > kVarintSmallValue) {
-    x = region.LoadBitsAndAdvance(bit_offset, (x - kVarintSmallValue) * kBitsPerByte);
+    x = reader.ReadBits((x - kVarintSmallValue) * kBitsPerByte);
   }
   return x;
 }
 
-// Store variable-length bit-packed integer from `data` starting at `bit_offset`.
+// Store a variable-length bit-packed integer into the writer, advancing its bit offset.
 template<typename Vector>
-ALWAYS_INLINE static inline void EncodeVarintBits(Vector* out, size_t* bit_offset, uint32_t value) {
+ALWAYS_INLINE static inline void EncodeVarintBits(BitMemoryWriter<Vector>& out, uint32_t value) {
   if (value <= kVarintSmallValue) {
-    out->resize(BitsToBytesRoundUp(*bit_offset + kVarintHeaderBits));
-    BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
-    region.StoreBitsAndAdvance(bit_offset, value, kVarintHeaderBits);
+    out.WriteBits(value, kVarintHeaderBits);
   } else {
     uint32_t num_bits = RoundUp(MinimumBitsToStore(value), kBitsPerByte);
-    out->resize(BitsToBytesRoundUp(*bit_offset + kVarintHeaderBits + num_bits));
-    BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
     uint32_t header = kVarintSmallValue + num_bits / kBitsPerByte;
-    region.StoreBitsAndAdvance(bit_offset, header, kVarintHeaderBits);
-    region.StoreBitsAndAdvance(bit_offset, value, num_bits);
+    out.WriteBits(header, kVarintHeaderBits);
+    out.WriteBits(value, num_bits);
   }
 }
 
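
Concretely, assuming kVarintHeaderBits == 4 and kVarintSmallValue == 11 (as the format comment above implies): 7 encodes as the single nibble 7, while 1000 needs MinimumBitsToStore(1000) = 10 bits, rounded up to 16, so it encodes as the header nibble 13 (11 + 16/8) followed by 16 payload bits. A sketch of the round trip through the new writer/reader API:

```cpp
std::vector<uint8_t> buffer;
BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
EncodeVarintBits(writer, 7u);     // 4 bits: the header nibble 7 carries the value itself.
EncodeVarintBits(writer, 1000u);  // 20 bits: header nibble 13, then 1000 in 16 bits.
CHECK_EQ(writer.GetBitOffset(), 24u);

BitMemoryReader reader(writer.GetWrittenRegion());
CHECK_EQ(DecodeVarintBits(reader), 7u);
CHECK_EQ(DecodeVarintBits(reader), 1000u);
```
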
@@ -74,26 +70,25 @@
   static constexpr uint32_t kValueBias = kNoValue;  // Bias so that -1 is encoded as 0.
 
   BitTableBase() {}
-  BitTableBase(void* data, size_t size, size_t* bit_offset) {
-    Decode(BitMemoryRegion(MemoryRegion(data, size)), bit_offset);
+  explicit BitTableBase(BitMemoryReader& reader) {
+    Decode(reader);
   }
 
-  ALWAYS_INLINE void Decode(BitMemoryRegion region, size_t* bit_offset) {
+  ALWAYS_INLINE void Decode(BitMemoryReader& reader) {
     // Decode row count and column sizes from the table header.
-    size_t initial_bit_offset = *bit_offset;
-    num_rows_ = DecodeVarintBits(region, bit_offset);
+    size_t initial_bit_offset = reader.GetBitOffset();
+    num_rows_ = DecodeVarintBits(reader);
     if (num_rows_ != 0) {
       column_offset_[0] = 0;
       for (uint32_t i = 0; i < kNumColumns; i++) {
-        size_t column_end = column_offset_[i] + DecodeVarintBits(region, bit_offset);
+        size_t column_end = column_offset_[i] + DecodeVarintBits(reader);
         column_offset_[i + 1] = dchecked_integral_cast<uint16_t>(column_end);
       }
     }
-    header_bit_size_ = *bit_offset - initial_bit_offset;
+    header_bit_size_ = reader.GetBitOffset() - initial_bit_offset;
 
     // Record the region which contains the table data and skip past it.
-    table_data_ = region.Subregion(*bit_offset, num_rows_ * NumRowBits());
-    *bit_offset += table_data_.size_in_bits();
+    table_data_ = reader.Skip(num_rows_ * NumRowBits());
   }
 
   ALWAYS_INLINE uint32_t Get(uint32_t row, uint32_t column = 0) const {
@@ -337,25 +332,22 @@
 
   // Encode the stored data into a BitTable.
   template<typename Vector>
-  void Encode(Vector* out, size_t* bit_offset) const {
-    size_t initial_bit_offset = *bit_offset;
+  void Encode(BitMemoryWriter<Vector>& out) const {
+    size_t initial_bit_offset = out.GetBitOffset();
 
     std::array<uint32_t, kNumColumns> column_bits;
     Measure(&column_bits);
-    EncodeVarintBits(out, bit_offset, size());
+    EncodeVarintBits(out, size());
     if (size() != 0) {
       // Write table header.
       for (uint32_t c = 0; c < kNumColumns; c++) {
-        EncodeVarintBits(out, bit_offset, column_bits[c]);
+        EncodeVarintBits(out, column_bits[c]);
       }
 
       // Write table data.
-      uint32_t row_bits = std::accumulate(column_bits.begin(), column_bits.end(), 0u);
-      out->resize(BitsToBytesRoundUp(*bit_offset + row_bits * size()));
-      BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
       for (uint32_t r = 0; r < size(); r++) {
         for (uint32_t c = 0; c < kNumColumns; c++) {
-          region.StoreBitsAndAdvance(bit_offset, rows_[r][c] - kValueBias, column_bits[c]);
+          out.WriteBits(rows_[r][c] - kValueBias, column_bits[c]);
         }
       }
     }
@@ -363,8 +355,8 @@
     // Verify the written data.
     if (kIsDebugBuild) {
       BitTableBase<kNumColumns> table;
-      BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
-      table.Decode(region, &initial_bit_offset);
+      BitMemoryReader reader(out.GetWrittenRegion(), initial_bit_offset);
+      table.Decode(reader);
       DCHECK_EQ(size(), table.NumRows());
       for (uint32_t c = 0; c < kNumColumns; c++) {
         DCHECK_EQ(column_bits[c], table.NumColumnBits(c));
@@ -431,28 +423,26 @@
 
   // Encode the stored data into a BitTable.
   template<typename Vector>
-  void Encode(Vector* out, size_t* bit_offset) const {
-    size_t initial_bit_offset = *bit_offset;
+  void Encode(BitMemoryWriter<Vector>& out) const {
+    size_t initial_bit_offset = out.GetBitOffset();
 
-    EncodeVarintBits(out, bit_offset, size());
+    EncodeVarintBits(out, size());
     if (size() != 0) {
-      EncodeVarintBits(out, bit_offset, max_num_bits_);
+      EncodeVarintBits(out, max_num_bits_);
 
       // Write table data.
-      out->resize(BitsToBytesRoundUp(*bit_offset + max_num_bits_ * size()));
-      BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
       for (MemoryRegion row : rows_) {
         BitMemoryRegion src(row);
-        region.StoreBits(*bit_offset, src, std::min(max_num_bits_, src.size_in_bits()));
-        *bit_offset += max_num_bits_;
+        BitMemoryRegion dst = out.Allocate(max_num_bits_);
+        dst.StoreBits(/* bit_offset */ 0, src, std::min(max_num_bits_, src.size_in_bits()));
       }
     }
 
     // Verify the written data.
     if (kIsDebugBuild) {
       BitTableBase<1> table;
-      BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
-      table.Decode(region, &initial_bit_offset);
+      BitMemoryReader reader(out.GetWrittenRegion(), initial_bit_offset);
+      table.Decode(reader);
       DCHECK_EQ(size(), table.NumRows());
       DCHECK_EQ(max_num_bits_, table.NumColumnBits(0));
       for (uint32_t r = 0; r < size(); r++) {
diff --git a/libartbase/base/bit_table_test.cc b/libartbase/base/bit_table_test.cc
index ee7cb3a..a694148 100644
--- a/libartbase/base/bit_table_test.cc
+++ b/libartbase/base/bit_table_test.cc
@@ -31,13 +31,12 @@
     uint32_t values[] = { 0, 1, 11, 12, 15, 16, 255, 256, ~1u, ~0u };
     for (uint32_t value : values) {
       std::vector<uint8_t> buffer;
-      size_t encode_bit_offset = start_bit_offset;
-      EncodeVarintBits(&buffer, &encode_bit_offset, value);
+      BitMemoryWriter<std::vector<uint8_t>> writer(&buffer, start_bit_offset);
+      EncodeVarintBits(writer, value);
 
-      size_t decode_bit_offset = start_bit_offset;
-      BitMemoryRegion region(MemoryRegion(buffer.data(), buffer.size()));
-      uint32_t result = DecodeVarintBits(region, &decode_bit_offset);
-      EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+      BitMemoryReader reader(writer.GetWrittenRegion(), start_bit_offset);
+      uint32_t result = DecodeVarintBits(reader);
+      EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
       EXPECT_EQ(value, result);
     }
   }
@@ -49,13 +48,13 @@
   ScopedArenaAllocator allocator(&arena_stack);
 
   std::vector<uint8_t> buffer;
-  size_t encode_bit_offset = 0;
+  BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
   BitTableBuilderBase<1> builder(&allocator);
-  builder.Encode(&buffer, &encode_bit_offset);
+  builder.Encode(writer);
 
-  size_t decode_bit_offset = 0;
-  BitTableBase<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
-  EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+  BitMemoryReader reader(writer.GetWrittenRegion());
+  BitTableBase<1> table(reader);
+  EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
   EXPECT_EQ(0u, table.NumRows());
 }
 
@@ -66,17 +65,17 @@
 
   constexpr uint32_t kNoValue = -1;
   std::vector<uint8_t> buffer;
-  size_t encode_bit_offset = 0;
+  BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
   BitTableBuilderBase<1> builder(&allocator);
   builder.Add({42u});
   builder.Add({kNoValue});
   builder.Add({1000u});
   builder.Add({kNoValue});
-  builder.Encode(&buffer, &encode_bit_offset);
+  builder.Encode(writer);
 
-  size_t decode_bit_offset = 0;
-  BitTableBase<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
-  EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+  BitMemoryReader reader(writer.GetWrittenRegion());
+  BitTableBase<1> table(reader);
+  EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
   EXPECT_EQ(4u, table.NumRows());
   EXPECT_EQ(42u, table.Get(0));
   EXPECT_EQ(kNoValue, table.Get(1));
@@ -92,14 +91,14 @@
 
   for (size_t start_bit_offset = 0; start_bit_offset <= 32; start_bit_offset++) {
     std::vector<uint8_t> buffer;
-    size_t encode_bit_offset = start_bit_offset;
+    BitMemoryWriter<std::vector<uint8_t>> writer(&buffer, start_bit_offset);
     BitTableBuilderBase<1> builder(&allocator);
     builder.Add({42u});
-    builder.Encode(&buffer, &encode_bit_offset);
+    builder.Encode(writer);
 
-    size_t decode_bit_offset = start_bit_offset;
-    BitTableBase<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
-    EXPECT_EQ(encode_bit_offset, decode_bit_offset) << " start_bit_offset=" << start_bit_offset;
+    BitMemoryReader reader(writer.GetWrittenRegion(), start_bit_offset);
+    BitTableBase<1> table(reader);
+    EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
     EXPECT_EQ(1u, table.NumRows());
     EXPECT_EQ(42u, table.Get(0));
   }
@@ -112,15 +111,15 @@
 
   constexpr uint32_t kNoValue = -1;
   std::vector<uint8_t> buffer;
-  size_t encode_bit_offset = 0;
+  BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
   BitTableBuilderBase<4> builder(&allocator);
   builder.Add({42u, kNoValue, 0u, static_cast<uint32_t>(-2)});
   builder.Add({62u, kNoValue, 63u, static_cast<uint32_t>(-3)});
-  builder.Encode(&buffer, &encode_bit_offset);
+  builder.Encode(writer);
 
-  size_t decode_bit_offset = 0;
-  BitTableBase<4> table(buffer.data(), buffer.size(), &decode_bit_offset);
-  EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+  BitMemoryReader reader(writer.GetWrittenRegion());
+  BitTableBase<4> table(reader);
+  EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
   EXPECT_EQ(2u, table.NumRows());
   EXPECT_EQ(42u, table.Get(0, 0));
   EXPECT_EQ(kNoValue, table.Get(0, 1));
@@ -157,7 +156,7 @@
   ScopedArenaAllocator allocator(&arena_stack);
 
   std::vector<uint8_t> buffer;
-  size_t encode_bit_offset = 0;
+  BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
   const uint64_t value = 0xDEADBEEF0BADF00Dull;
   BitmapTableBuilder builder(&allocator);
   std::multimap<uint64_t, size_t> indicies;  // bitmap -> row.
@@ -165,12 +164,12 @@
     uint64_t bitmap = value & MaxInt<uint64_t>(bit_length);
     indicies.emplace(bitmap, builder.Dedup(&bitmap, MinimumBitsToStore(bitmap)));
   }
-  builder.Encode(&buffer, &encode_bit_offset);
+  builder.Encode(writer);
   EXPECT_EQ(1 + static_cast<uint32_t>(POPCOUNT(value)), builder.size());
 
-  size_t decode_bit_offset = 0;
-  BitTableBase<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
-  EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+  BitMemoryReader reader(writer.GetWrittenRegion());
+  BitTableBase<1> table(reader);
+  EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
   for (auto it : indicies) {
     uint64_t expected = it.first;
     BitMemoryRegion actual = table.GetBitMemoryRegion(it.second);
diff --git a/libartbase/base/data_hash.h b/libartbase/base/data_hash.h
new file mode 100644
index 0000000..5ad7779
--- /dev/null
+++ b/libartbase/base/data_hash.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_DATA_HASH_H_
+#define ART_LIBARTBASE_BASE_DATA_HASH_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+// Hash bytes using a relatively fast hash.
+static inline size_t HashBytes(const uint8_t* data, size_t len) {
+  size_t hash = 0x811c9dc5;
+  for (uint32_t i = 0; i < len; ++i) {
+    hash = (hash * 16777619) ^ data[i];
+  }
+  hash += hash << 13;
+  hash ^= hash >> 7;
+  hash += hash << 3;
+  hash ^= hash >> 17;
+  hash += hash << 5;
+  return hash;
+}
+
+class DataHash {
+ private:
+  static constexpr bool kUseMurmur3Hash = true;
+
+ public:
+  template <class Container>
+  size_t operator()(const Container& array) const {
+    // Containers that provide the data() function use contiguous storage.
+    const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
+    uint32_t len = sizeof(typename Container::value_type) * array.size();
+    if (kUseMurmur3Hash) {
+      static constexpr uint32_t c1 = 0xcc9e2d51;
+      static constexpr uint32_t c2 = 0x1b873593;
+      static constexpr uint32_t r1 = 15;
+      static constexpr uint32_t r2 = 13;
+      static constexpr uint32_t m = 5;
+      static constexpr uint32_t n = 0xe6546b64;
+
+      uint32_t hash = 0;
+
+      const int nblocks = len / 4;
+      typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+      const unaligned_uint32_t *blocks = reinterpret_cast<const unaligned_uint32_t*>(data);
+      int i;
+      for (i = 0; i < nblocks; i++) {
+        uint32_t k = blocks[i];
+        k *= c1;
+        k = (k << r1) | (k >> (32 - r1));
+        k *= c2;
+
+        hash ^= k;
+        hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
+      }
+
+      const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
+      uint32_t k1 = 0;
+
+      switch (len & 3) {
+        case 3:
+          k1 ^= tail[2] << 16;
+          FALLTHROUGH_INTENDED;
+        case 2:
+          k1 ^= tail[1] << 8;
+          FALLTHROUGH_INTENDED;
+        case 1:
+          k1 ^= tail[0];
+
+          k1 *= c1;
+          k1 = (k1 << r1) | (k1 >> (32 - r1));
+          k1 *= c2;
+          hash ^= k1;
+      }
+
+      hash ^= len;
+      hash ^= (hash >> 16);
+      hash *= 0x85ebca6b;
+      hash ^= (hash >> 13);
+      hash *= 0xc2b2ae35;
+      hash ^= (hash >> 16);
+
+      return hash;
+    } else {
+      return HashBytes(data, len);
+    }
+  }
+};
+
+}  // namespace art
+
+#endif  // ART_LIBARTBASE_BASE_DATA_HASH_H_
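
DataHash accepts any contiguous container exposing data() and size(), which is what lets it serve both the ArrayRef use in compiled_method_storage.cc and, below, the new DefaultHashFn for std::string keys. A small usage sketch:

```cpp
std::vector<uint8_t> bytes = {0xde, 0xad, 0xbe, 0xef};
std::string name = "Ljava/lang/Integer;";
DataHash hash;
size_t h1 = hash(bytes);  // Murmur3 over the four raw bytes.
size_t h2 = hash(name);   // The same hash a HashSet<std::string> now uses by default.
```
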
diff --git a/libartbase/base/fuchsia_compat.h b/libartbase/base/fuchsia_compat.h
new file mode 100644
index 0000000..018bac0
--- /dev/null
+++ b/libartbase/base/fuchsia_compat.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
+#define ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
+
+// stubs for features lacking in Fuchsia
+
+struct rlimit {
+  int rlim_cur;
+};
+
+#define RLIMIT_FSIZE (1)
+#define RLIM_INFINITY (-1)
+static int getrlimit(int resource, struct rlimit *rlim) {
+  LOG(FATAL) << "getrlimit not available for Fuchsia";
+}
+
+static int ashmem_create_region(const char *name, size_t size) {
+  LOG(FATAL) << "ashmem_create_region not available for Fuchsia";
+}
+
+#endif  // ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
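
One wrinkle in these stubs: both are declared to return int but never do, and the compiler cannot see that LOG(FATAL) aborts, so -Wreturn-type will typically fire. A hardened variant in the usual ART style would look like the sketch below (a suggestion, not what this patch does; UNREACHABLE() and ATTRIBUTE_UNUSED come from base/macros.h, and the header also leans on its includer for the LOG() macro itself):

```cpp
static int getrlimit(int resource ATTRIBUTE_UNUSED, struct rlimit *rlim ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "getrlimit not available for Fuchsia";
  UNREACHABLE();  // Silences -Wreturn-type; LOG(FATAL) never returns.
}
```

The same treatment would apply to ashmem_create_region().
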
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index 39e0c50..cd0bf8f 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -74,7 +74,9 @@
 // ART_TARGET - Defined for target builds of ART.
 // ART_TARGET_LINUX - Defined for target Linux builds of ART.
 // ART_TARGET_ANDROID - Defined for target Android builds of ART.
-// Note: Either ART_TARGET_LINUX or ART_TARGET_ANDROID need to be set when ART_TARGET is set.
+// ART_TARGET_FUCHSIA - Defined for Fuchsia builds of ART.
+// Note: Either ART_TARGET_LINUX, ART_TARGET_ANDROID or ART_TARGET_FUCHSIA
+//       need to be set when ART_TARGET is set.
 // Note: When ART_TARGET_LINUX is defined mem_map.h will not be using Ashmem for memory mappings
 // (usually only available on Android kernels).
 #if defined(ART_TARGET)
@@ -82,10 +84,16 @@
 static constexpr bool kIsTargetBuild = true;
 # if defined(ART_TARGET_LINUX)
 static constexpr bool kIsTargetLinux = true;
+static constexpr bool kIsTargetFuchsia = false;
 # elif defined(ART_TARGET_ANDROID)
 static constexpr bool kIsTargetLinux = false;
+static constexpr bool kIsTargetFuchsia = false;
+# elif defined(ART_TARGET_FUCHSIA)
+static constexpr bool kIsTargetLinux = false;
+static constexpr bool kIsTargetFuchsia = true;
 # else
-# error "Either ART_TARGET_LINUX or ART_TARGET_ANDROID needs to be defined for target builds."
+# error "Either ART_TARGET_LINUX, ART_TARGET_ANDROID or ART_TARGET_FUCHSIA " \
+        "needs to be defined for target builds."
 # endif
 #else
 static constexpr bool kIsTargetBuild = false;
@@ -93,8 +101,11 @@
 # error "ART_TARGET_LINUX defined for host build."
 # elif defined(ART_TARGET_ANDROID)
 # error "ART_TARGET_ANDROID defined for host build."
+# elif defined(ART_TARGET_FUCHSIA)
+# error "ART_TARGET_FUCHSIA defined for host build."
 # else
 static constexpr bool kIsTargetLinux = false;
+static constexpr bool kIsTargetFuchsia = false;
 # endif
 #endif
 
diff --git a/libartbase/base/hash_map.h b/libartbase/base/hash_map.h
index 0d7198c..a3bb5b5 100644
--- a/libartbase/base/hash_map.h
+++ b/libartbase/base/hash_map.h
@@ -48,9 +48,12 @@
   Fn fn_;
 };
 
-template <class Key, class Value, class EmptyFn,
-    class HashFn = std::hash<Key>, class Pred = std::equal_to<Key>,
-    class Alloc = std::allocator<std::pair<Key, Value>>>
+template <class Key,
+          class Value,
+          class EmptyFn,
+          class HashFn = DefaultHashFn<Key>,
+          class Pred = DefaultPred<Key>,
+          class Alloc = std::allocator<std::pair<Key, Value>>>
 class HashMap : public HashSet<std::pair<Key, Value>,
                                EmptyFn,
                                HashMapWrapper<HashFn>,
diff --git a/libartbase/base/hash_set.h b/libartbase/base/hash_set.h
index 2f810ea..2b1a5eb 100644
--- a/libartbase/base/hash_set.h
+++ b/libartbase/base/hash_set.h
@@ -22,16 +22,94 @@
 #include <functional>
 #include <iterator>
 #include <memory>
+#include <string>
 #include <type_traits>
 #include <utility>
 
 #include <android-base/logging.h>
 
+#include "base/data_hash.h"
 #include "bit_utils.h"
 #include "macros.h"
 
 namespace art {
 
+template <class Elem, class HashSetType>
+class HashSetIterator : std::iterator<std::forward_iterator_tag, Elem> {
+ public:
+  HashSetIterator(const HashSetIterator&) = default;
+  HashSetIterator(HashSetIterator&&) = default;
+  HashSetIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {}
+
+  // Conversion from iterator to const_iterator.
+  template <class OtherElem,
+            class OtherHashSetType,
+            typename = typename std::enable_if<
+                std::is_same<Elem, const OtherElem>::value &&
+                std::is_same<HashSetType, const OtherHashSetType>::value>::type>
+  HashSetIterator(const HashSetIterator<OtherElem, OtherHashSetType>& other)
+      : index_(other.index_), hash_set_(other.hash_set_) {}
+
+  HashSetIterator& operator=(const HashSetIterator&) = default;
+  HashSetIterator& operator=(HashSetIterator&&) = default;
+
+  bool operator==(const HashSetIterator& other) const {
+    return hash_set_ == other.hash_set_ && this->index_ == other.index_;
+  }
+
+  bool operator!=(const HashSetIterator& other) const {
+    return !(*this == other);
+  }
+
+  HashSetIterator operator++() {  // Value after modification.
+    this->index_ = hash_set_->NextNonEmptySlot(index_);
+    return *this;
+  }
+
+  HashSetIterator operator++(int) {
+    HashSetIterator temp = *this;
+    ++*this;
+    return temp;
+  }
+
+  Elem& operator*() const {
+    DCHECK(!hash_set_->IsFreeSlot(this->index_));
+    return hash_set_->ElementForIndex(this->index_);
+  }
+
+  Elem* operator->() const {
+    return &**this;
+  }
+
+ private:
+  size_t index_;
+  HashSetType* hash_set_;
+
+  template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+  friend bool operator==(const HashSetIterator<Elem1, HashSetType1>& lhs,
+                         const HashSetIterator<Elem2, HashSetType2>& rhs);
+  template <class T, class EmptyFn, class HashFn, class Pred, class Alloc> friend class HashSet;
+  template <class OtherElem, class OtherHashSetType> friend class HashSetIterator;
+};
+
+template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+bool operator==(const HashSetIterator<Elem1, HashSetType1>& lhs,
+                const HashSetIterator<Elem2, HashSetType2>& rhs) {
+  static_assert(
+      std::is_convertible<HashSetIterator<Elem1, HashSetType1>,
+                          HashSetIterator<Elem2, HashSetType2>>::value ||
+      std::is_convertible<HashSetIterator<Elem2, HashSetType2>,
+                          HashSetIterator<Elem1, HashSetType1>>::value, "Bad iterator types.");
+  DCHECK_EQ(lhs.hash_set_, rhs.hash_set_);
+  return lhs.index_ == rhs.index_;
+}
+
+template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+bool operator!=(const HashSetIterator<Elem1, HashSetType1>& lhs,
+                const HashSetIterator<Elem2, HashSetType2>& rhs) {
+  return !(lhs == rhs);
+}
+
 // Returns true if an item is empty.
 template <class T>
 class DefaultEmptyFn {
@@ -55,70 +133,35 @@
   }
 };
 
-// Low memory version of a hash set, uses less memory than std::unordered_set since elements aren't
-// boxed. Uses linear probing to resolve collisions.
+template <class T>
+using DefaultHashFn = typename std::conditional<std::is_same<T, std::string>::value,
+                                                DataHash,
+                                                std::hash<T>>::type;
+
+struct DefaultStringEquals {
+  // Allow comparison with anything that can be compared to std::string, for example StringPiece.
+  template <typename T>
+  bool operator()(const std::string& lhs, const T& rhs) const {
+    return lhs == rhs;
+  }
+};
+
+template <class T>
+using DefaultPred = typename std::conditional<std::is_same<T, std::string>::value,
+                                              DefaultStringEquals,
+                                              std::equal_to<T>>::type;
+
+// Low memory version of a hash set, uses less memory than std::unordered_multiset since elements
+// aren't boxed. Uses linear probing to resolve collisions.
 // EmptyFn needs to implement two functions MakeEmpty(T& item) and IsEmpty(const T& item).
 // TODO: We could get rid of this requirement by using a bitmap, though maybe this would be slower
 // and more complicated.
-template <class T, class EmptyFn = DefaultEmptyFn<T>, class HashFn = std::hash<T>,
-    class Pred = std::equal_to<T>, class Alloc = std::allocator<T>>
+template <class T,
+          class EmptyFn = DefaultEmptyFn<T>,
+          class HashFn = DefaultHashFn<T>,
+          class Pred = DefaultPred<T>,
+          class Alloc = std::allocator<T>>
 class HashSet {
-  template <class Elem, class HashSetType>
-  class BaseIterator : std::iterator<std::forward_iterator_tag, Elem> {
-   public:
-    BaseIterator(const BaseIterator&) = default;
-    BaseIterator(BaseIterator&&) = default;
-    BaseIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {
-    }
-    BaseIterator& operator=(const BaseIterator&) = default;
-    BaseIterator& operator=(BaseIterator&&) = default;
-
-    bool operator==(const BaseIterator& other) const {
-      return hash_set_ == other.hash_set_ && this->index_ == other.index_;
-    }
-
-    bool operator!=(const BaseIterator& other) const {
-      return !(*this == other);
-    }
-
-    BaseIterator operator++() {  // Value after modification.
-      this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
-      return *this;
-    }
-
-    BaseIterator operator++(int) {
-      BaseIterator temp = *this;
-      this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
-      return temp;
-    }
-
-    Elem& operator*() const {
-      DCHECK(!hash_set_->IsFreeSlot(this->index_));
-      return hash_set_->ElementForIndex(this->index_);
-    }
-
-    Elem* operator->() const {
-      return &**this;
-    }
-
-    // TODO: Operator -- --(int)  (and use std::bidirectional_iterator_tag)
-
-   private:
-    size_t index_;
-    HashSetType* hash_set_;
-
-    size_t NextNonEmptySlot(size_t index, const HashSet* hash_set) const {
-      const size_t num_buckets = hash_set->NumBuckets();
-      DCHECK_LT(index, num_buckets);
-      do {
-        ++index;
-      } while (index < num_buckets && hash_set->IsFreeSlot(index));
-      return index;
-    }
-
-    friend class HashSet;
-  };
-
  public:
   using value_type = T;
   using allocator_type = Alloc;
@@ -126,8 +169,8 @@
   using const_reference = const T&;
   using pointer = T*;
   using const_pointer = const T*;
-  using iterator = BaseIterator<T, HashSet>;
-  using const_iterator = BaseIterator<const T, const HashSet>;
+  using iterator = HashSetIterator<T, HashSet>;
+  using const_iterator = HashSetIterator<const T, const HashSet>;
   using size_type = size_t;
   using difference_type = ptrdiff_t;
 
@@ -136,7 +179,7 @@
   static constexpr size_t kMinBuckets = 1000;
 
   // If we don't own the data, this will create a new array which owns the data.
-  void Clear() {
+  void clear() {
     DeallocateStorage();
     num_elements_ = 0;
     elements_until_expand_ = 0;
@@ -300,13 +343,12 @@
     return const_iterator(this, NumBuckets());
   }
 
-  bool Empty() const {
-    return Size() == 0;
+  size_t size() const {
+    return num_elements_;
   }
 
-  // Return true if the hash set has ownership of the underlying data.
-  bool OwnsData() const {
-    return owns_data_;
+  bool empty() const {
+    return size() == 0;
   }
 
   // Erase algorithm:
@@ -317,7 +359,7 @@
   // and set the empty slot to be the location we just moved from.
   // Relies on maintaining the invariant that there's no empty slots from the 'ideal' index of an
   // element to its actual location/index.
-  iterator Erase(iterator it) {
+  iterator erase(iterator it) {
     // empty_index is the index that will become empty.
     size_t empty_index = it.index_;
     DCHECK(!IsFreeSlot(empty_index));
@@ -368,12 +410,12 @@
-  // Set of Class* sorted by name, want to find a class with a name but can't allocate a dummy
-  // object in the heap for performance solution.
+  // Heterogeneous lookup: e.g. a set of Class* keyed by name, where we want to find a class by
+  // name without allocating a dummy Class object in the heap just to query the set.
   template <typename K>
-  iterator Find(const K& key) {
+  iterator find(const K& key) {
     return FindWithHash(key, hashfn_(key));
   }
 
   template <typename K>
-  const_iterator Find(const K& key) const {
+  const_iterator find(const K& key) const {
     return FindWithHash(key, hashfn_(key));
   }
 
@@ -387,14 +429,26 @@
     return const_iterator(this, FindIndex(key, hash));
   }
 
+  // Insert an element with hint, allows duplicates.
+  // Note: The hint is not very useful for a HashSet<> unless there are many hash conflicts
+  // and in that case the use of HashSet<> itself should be reconsidered.
+  iterator insert(const_iterator hint ATTRIBUTE_UNUSED, const T& element) {
+    return insert(element);
+  }
+  iterator insert(const_iterator hint ATTRIBUTE_UNUSED, T&& element) {
+    return insert(std::move(element));
+  }
+
   // Insert an element, allows duplicates.
-  template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
-  void Insert(U&& element) {
-    InsertWithHash(std::forward<U>(element), hashfn_(element));
+  iterator insert(const T& element) {
+    return InsertWithHash(element, hashfn_(element));
+  }
+  iterator insert(T&& element) {
+    return InsertWithHash(std::move(element), hashfn_(element));
   }
 
   template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
-  void InsertWithHash(U&& element, size_t hash) {
+  iterator InsertWithHash(U&& element, size_t hash) {
     DCHECK_EQ(hash, hashfn_(element));
     if (num_elements_ >= elements_until_expand_) {
       Expand();
@@ -403,10 +457,7 @@
     const size_t index = FirstAvailableSlot(IndexForHash(hash));
     data_[index] = std::forward<U>(element);
     ++num_elements_;
-  }
-
-  size_t Size() const {
-    return num_elements_;
+    return iterator(this, index);
   }
 
   void swap(HashSet& other) {
@@ -430,12 +481,12 @@
   }
 
   void ShrinkToMaximumLoad() {
-    Resize(Size() / max_load_factor_);
+    Resize(size() / max_load_factor_);
   }
 
-  // Reserve enough room to insert until Size() == num_elements without requiring to grow the hash
+  // Reserve enough room to insert until size() == num_elements without requiring to grow the hash
   // set. No-op if the hash set is already large enough to do this.
-  void Reserve(size_t num_elements) {
+  void reserve(size_t num_elements) {
     size_t num_buckets = num_elements / max_load_factor_;
     // Deal with rounding errors. Add one for rounding.
     while (static_cast<size_t>(num_buckets * max_load_factor_) <= num_elements + 1u) {
@@ -466,7 +517,7 @@
 
   // Calculate the current load factor and return it.
   double CalculateLoadFactor() const {
-    return static_cast<double>(Size()) / static_cast<double>(NumBuckets());
+    return static_cast<double>(size()) / static_cast<double>(NumBuckets());
   }
 
   // Make sure that everything reinserts in the right spot. Returns the number of errors.
@@ -510,7 +561,7 @@
     // maximum load factor.
     const double load_factor = CalculateLoadFactor();
     if (load_factor > max_load_factor_) {
-      Resize(Size() / ((min_load_factor_ + max_load_factor_) * 0.5));
+      Resize(size() / ((min_load_factor_ + max_load_factor_) * 0.5));
     }
   }
 
@@ -605,7 +656,7 @@
 
   // Expand the set based on the load factors.
   void Expand() {
-    size_t min_index = static_cast<size_t>(Size() / min_load_factor_);
+    size_t min_index = static_cast<size_t>(size() / min_load_factor_);
     // Resize based on the minimum load factor.
     Resize(min_index);
   }
@@ -615,7 +666,7 @@
     if (new_size < kMinBuckets) {
       new_size = kMinBuckets;
     }
-    DCHECK_GE(new_size, Size());
+    DCHECK_GE(new_size, size());
     T* const old_data = data_;
     size_t old_num_buckets = num_buckets_;
     // Reinsert all of the old elements.
@@ -649,6 +700,15 @@
     return index;
   }
 
+  size_t NextNonEmptySlot(size_t index) const {
+    const size_t num_buckets = NumBuckets();
+    DCHECK_LT(index, num_buckets);
+    do {
+      ++index;
+    } while (index < num_buckets && IsFreeSlot(index));
+    return index;
+  }
+
   // Return new offset.
   template <typename Elem>
   static size_t WriteToBytes(uint8_t* ptr, size_t offset, Elem n) {
@@ -679,6 +739,9 @@
   double min_load_factor_;
   double max_load_factor_;
 
+  template <class Elem, class HashSetType>
+  friend class HashSetIterator;
+
   ART_FRIEND_TEST(InternTableTest, CrossHash);
 };
 
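
The effect of the new defaults can be pinned down directly from the aliases above; illustrative static_asserts, not part of the patch:

```cpp
static_assert(std::is_same<DefaultHashFn<std::string>, DataHash>::value,
              "std::string keys hash their raw bytes via DataHash");
static_assert(std::is_same<DefaultHashFn<int>, std::hash<int>>::value,
              "all other key types keep std::hash");
static_assert(std::is_same<DefaultPred<std::string>, DefaultStringEquals>::value,
              "string keys compare against any type comparable to std::string");
```

This combination is what lets hash_set.find(StringPiece(...)) in the updated tests below work without first constructing a temporary std::string.
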
diff --git a/libartbase/base/hash_set_test.cc b/libartbase/base/hash_set_test.cc
index ff745b4..782a68b 100644
--- a/libartbase/base/hash_set_test.cc
+++ b/libartbase/base/hash_set_test.cc
@@ -24,6 +24,8 @@
 #include <vector>
 
 #include <gtest/gtest.h>
+
+#include "base/stringpiece.h"
 #include "hash_map.h"
 
 namespace art {
@@ -66,16 +68,16 @@
 TEST_F(HashSetTest, TestSmoke) {
   HashSet<std::string, IsEmptyFnString> hash_set;
   const std::string test_string = "hello world 1234";
-  ASSERT_TRUE(hash_set.Empty());
-  ASSERT_EQ(hash_set.Size(), 0U);
-  hash_set.Insert(test_string);
-  auto it = hash_set.Find(test_string);
+  ASSERT_TRUE(hash_set.empty());
+  ASSERT_EQ(hash_set.size(), 0U);
+  hash_set.insert(test_string);
+  auto it = hash_set.find(test_string);
   ASSERT_EQ(*it, test_string);
-  auto after_it = hash_set.Erase(it);
+  auto after_it = hash_set.erase(it);
   ASSERT_TRUE(after_it == hash_set.end());
-  ASSERT_TRUE(hash_set.Empty());
-  ASSERT_EQ(hash_set.Size(), 0U);
-  it = hash_set.Find(test_string);
+  ASSERT_TRUE(hash_set.empty());
+  ASSERT_EQ(hash_set.size(), 0U);
+  it = hash_set.find(test_string);
   ASSERT_TRUE(it == hash_set.end());
 }
 
@@ -86,26 +88,26 @@
   for (size_t i = 0; i < count; ++i) {
     // Insert a bunch of elements and make sure we can find them.
     strings.push_back(RandomString(10));
-    hash_set.Insert(strings[i]);
-    auto it = hash_set.Find(strings[i]);
+    hash_set.insert(strings[i]);
+    auto it = hash_set.find(strings[i]);
     ASSERT_TRUE(it != hash_set.end());
     ASSERT_EQ(*it, strings[i]);
   }
-  ASSERT_EQ(strings.size(), hash_set.Size());
+  ASSERT_EQ(strings.size(), hash_set.size());
   // Try to erase the odd strings.
   for (size_t i = 1; i < count; i += 2) {
-    auto it = hash_set.Find(strings[i]);
+    auto it = hash_set.find(strings[i]);
     ASSERT_TRUE(it != hash_set.end());
     ASSERT_EQ(*it, strings[i]);
-    hash_set.Erase(it);
+    hash_set.erase(it);
   }
   // Test removed.
   for (size_t i = 1; i < count; i += 2) {
-    auto it = hash_set.Find(strings[i]);
+    auto it = hash_set.find(strings[i]);
     ASSERT_TRUE(it == hash_set.end());
   }
   for (size_t i = 0; i < count; i += 2) {
-    auto it = hash_set.Find(strings[i]);
+    auto it = hash_set.find(strings[i]);
     ASSERT_TRUE(it != hash_set.end());
     ASSERT_EQ(*it, strings[i]);
   }
@@ -119,7 +121,7 @@
   for (size_t i = 0; i < count; ++i) {
     // Insert a bunch of elements and make sure we can find them.
     strings.push_back(RandomString(10));
-    hash_set.Insert(strings[i]);
+    hash_set.insert(strings[i]);
   }
   // Make sure we visit each string exactly once.
   std::map<std::string, size_t> found_count;
@@ -133,7 +135,7 @@
   // Remove all the elements with iterator erase.
   for (auto it = hash_set.begin(); it != hash_set.end();) {
     ++found_count[*it];
-    it = hash_set.Erase(it);
+    it = hash_set.erase(it);
     ASSERT_EQ(hash_set.Verify(), 0U);
   }
   for (size_t i = 0; i < count; ++i) {
@@ -147,14 +149,14 @@
   static constexpr size_t count = 1000;
   for (size_t i = 0; i < count; ++i) {
     strings.push_back(RandomString(10));
-    hash_seta.Insert(strings[i]);
+    hash_seta.insert(strings[i]);
   }
   std::swap(hash_seta, hash_setb);
-  hash_seta.Insert("TEST");
-  hash_setb.Insert("TEST2");
+  hash_seta.insert("TEST");
+  hash_setb.insert("TEST2");
   for (size_t i = 0; i < count; ++i) {
     strings.push_back(RandomString(10));
-    hash_seta.Insert(strings[i]);
+    hash_seta.insert(strings[i]);
   }
 }
 
@@ -163,7 +165,7 @@
   std::vector<std::string> strings = {"a", "b", "c", "d", "e", "f", "g"};
   for (size_t i = 0; i < strings.size(); ++i) {
     // Insert some strings into the beginning of our hash set to establish an initial size
-    hash_set.Insert(strings[i]);
+    hash_set.insert(strings[i]);
   }
 
   hash_set.ShrinkToMaximumLoad();
@@ -174,12 +176,12 @@
   static constexpr size_t count = 1000;
   for (size_t i = 0; i < count; ++i) {
     random_strings.push_back(RandomString(10));
-    hash_set.Insert(random_strings[i]);
+    hash_set.insert(random_strings[i]);
   }
 
   // Erase all the extra strings which guarantees that our load factor will be really bad.
   for (size_t i = 0; i < count; ++i) {
-    hash_set.Erase(hash_set.Find(random_strings[i]));
+    hash_set.erase(hash_set.find(random_strings[i]));
   }
 
   const double bad_load = hash_set.CalculateLoadFactor();
@@ -191,7 +193,7 @@
 
   // Make sure all the initial elements we had are still there
   for (const std::string& initial_string : strings) {
-    EXPECT_NE(hash_set.end(), hash_set.Find(initial_string))
+    EXPECT_NE(hash_set.end(), hash_set.find(initial_string))
         << "expected to find " << initial_string;
   }
 }
@@ -201,7 +203,7 @@
   static constexpr size_t kStringCount = 1000;
   static constexpr double kEpsilon = 0.01;
   for (size_t i = 0; i < kStringCount; ++i) {
-    hash_set.Insert(RandomString(i % 10 + 1));
+    hash_set.insert(RandomString(i % 10 + 1));
   }
   // Check that changing the load factor resizes the table to be within the target range.
   EXPECT_GE(hash_set.CalculateLoadFactor() + kEpsilon, hash_set.GetMinLoadFactor());
@@ -228,29 +230,29 @@
   SetSeed(seed);
   LOG(INFO) << "Starting stress test with seed " << seed;
   for (size_t i = 0; i < operations; ++i) {
-    ASSERT_EQ(hash_set.Size(), std_set.size());
+    ASSERT_EQ(hash_set.size(), std_set.size());
     size_t delta = std::abs(static_cast<ssize_t>(target_size) -
-                            static_cast<ssize_t>(hash_set.Size()));
+                            static_cast<ssize_t>(hash_set.size()));
     size_t n = PRand();
     if (n % target_size == 0) {
-      hash_set.Clear();
+      hash_set.clear();
       std_set.clear();
-      ASSERT_TRUE(hash_set.Empty());
+      ASSERT_TRUE(hash_set.empty());
       ASSERT_TRUE(std_set.empty());
     } else  if (n % target_size < delta) {
       // Skew towards adding elements until we are at the desired size.
       const std::string& s = strings[PRand() % string_count];
-      hash_set.Insert(s);
+      hash_set.insert(s);
       std_set.insert(s);
-      ASSERT_EQ(*hash_set.Find(s), *std_set.find(s));
+      ASSERT_EQ(*hash_set.find(s), *std_set.find(s));
     } else {
       const std::string& s = strings[PRand() % string_count];
-      auto it1 = hash_set.Find(s);
+      auto it1 = hash_set.find(s);
       auto it2 = std_set.find(s);
       ASSERT_EQ(it1 == hash_set.end(), it2 == std_set.end());
       if (it1 != hash_set.end()) {
         ASSERT_EQ(*it1, *it2);
-        hash_set.Erase(it1);
+        hash_set.erase(it1);
         std_set.erase(it2);
       }
     }
@@ -268,13 +270,13 @@
 
 TEST_F(HashSetTest, TestHashMap) {
   HashMap<std::string, int, IsEmptyStringPair> hash_map;
-  hash_map.Insert(std::make_pair(std::string("abcd"), 123));
-  hash_map.Insert(std::make_pair(std::string("abcd"), 124));
-  hash_map.Insert(std::make_pair(std::string("bags"), 444));
-  auto it = hash_map.Find(std::string("abcd"));
+  hash_map.insert(std::make_pair(std::string("abcd"), 123));
+  hash_map.insert(std::make_pair(std::string("abcd"), 124));
+  hash_map.insert(std::make_pair(std::string("bags"), 444));
+  auto it = hash_map.find(std::string("abcd"));
   ASSERT_EQ(it->second, 123);
-  hash_map.Erase(it);
-  it = hash_map.Find(std::string("abcd"));
+  hash_map.erase(it);
+  it = hash_map.find(std::string("abcd"));
   ASSERT_EQ(it->second, 124);
 }
 
@@ -325,33 +327,50 @@
 
 TEST_F(HashSetTest, TestLookupByAlternateKeyType) {
   HashSet<std::vector<int>, IsEmptyFnVectorInt, VectorIntHashEquals, VectorIntHashEquals> hash_set;
-  hash_set.Insert(std::vector<int>({1, 2, 3, 4}));
-  hash_set.Insert(std::vector<int>({4, 2}));
-  ASSERT_EQ(hash_set.end(), hash_set.Find(std::vector<int>({1, 1, 1, 1})));
-  ASSERT_NE(hash_set.end(), hash_set.Find(std::vector<int>({1, 2, 3, 4})));
-  ASSERT_EQ(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 1, 1, 1})));
-  ASSERT_NE(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 2, 3, 4})));
+  hash_set.insert(std::vector<int>({1, 2, 3, 4}));
+  hash_set.insert(std::vector<int>({4, 2}));
+  ASSERT_EQ(hash_set.end(), hash_set.find(std::vector<int>({1, 1, 1, 1})));
+  ASSERT_NE(hash_set.end(), hash_set.find(std::vector<int>({1, 2, 3, 4})));
+  ASSERT_EQ(hash_set.end(), hash_set.find(std::forward_list<int>({1, 1, 1, 1})));
+  ASSERT_NE(hash_set.end(), hash_set.find(std::forward_list<int>({1, 2, 3, 4})));
 }
 
 TEST_F(HashSetTest, TestReserve) {
   HashSet<std::string, IsEmptyFnString> hash_set;
   std::vector<size_t> sizes = {1, 10, 25, 55, 128, 1024, 4096};
   for (size_t size : sizes) {
-    hash_set.Reserve(size);
+    hash_set.reserve(size);
     const size_t buckets_before = hash_set.NumBuckets();
     // Check that we expanded enough.
     CHECK_GE(hash_set.ElementsUntilExpand(), size);
     // Try inserting elements until we are at our reserve size and ensure the hash set did not
     // expand.
-    while (hash_set.Size() < size) {
-      hash_set.Insert(std::to_string(hash_set.Size()));
+    while (hash_set.size() < size) {
+      hash_set.insert(std::to_string(hash_set.size()));
     }
     CHECK_EQ(hash_set.NumBuckets(), buckets_before);
   }
   // Check the behaviour for shrinking, it does not necessarily resize down.
   constexpr size_t size = 100;
-  hash_set.Reserve(size);
+  hash_set.reserve(size);
   CHECK_GE(hash_set.ElementsUntilExpand(), size);
 }
 
+TEST_F(HashSetTest, IteratorConversion) {
+  const char* test_string = "dummy";
+  HashSet<std::string> hash_set;
+  HashSet<std::string>::iterator it = hash_set.insert(test_string);
+  HashSet<std::string>::const_iterator cit = it;
+  ASSERT_TRUE(it == cit);
+  ASSERT_EQ(*it, *cit);
+}
+
+TEST_F(HashSetTest, StringSearchStringPiece) {
+  const char* test_string = "dummy";
+  HashSet<std::string> hash_set;
+  HashSet<std::string>::iterator insert_pos = hash_set.insert(test_string);
+  HashSet<std::string>::iterator it = hash_set.find(StringPiece(test_string));
+  ASSERT_TRUE(it == insert_pos);
+}
+
 }  // namespace art
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index d53480d..5cea869 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -19,7 +19,7 @@
 #include <inttypes.h>
 #include <stdlib.h>
 #include <sys/mman.h>  // For the PROT_* and MAP_* constants.
-#ifndef ANDROID_OS
+#if !defined(ANDROID_OS) && !defined(__Fuchsia__)
 #include <sys/resource.h>
 #endif
 
@@ -29,7 +29,12 @@
 
 #include "android-base/stringprintf.h"
 #include "android-base/unique_fd.h"
+
+#if !defined(__Fuchsia__)
 #include "cutils/ashmem.h"
+#else
+#include "fuchsia_compat.h"
+#endif
 
 #include "allocator.h"
 #include "bit_utils.h"
@@ -161,7 +166,7 @@
 // non-null, we check that pointer is the actual_ptr == expected_ptr,
 // and if not, report in error_msg what the conflict mapping was if
 // found, or a generic error in other cases.
-static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
+bool MemMap::CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
-                            std::string* error_msg) {
+                             std::string* error_msg) {
   // Handled first by caller for more specific error messages.
   CHECK(actual_ptr != MAP_FAILED);
@@ -178,7 +183,7 @@
   }
 
   // We asked for an address but didn't get what we wanted, all paths below here should fail.
-  int result = munmap(actual_ptr, byte_count);
+  int result = TargetMUnmap(actual_ptr, byte_count);
   if (result == -1) {
     PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
   }
@@ -207,18 +212,18 @@
 }
 
 #if USE_ART_LOW_4G_ALLOCATOR
-static inline void* TryMemMapLow4GB(void* ptr,
+void* MemMap::TryMemMapLow4GB(void* ptr,
-                                    size_t page_aligned_byte_count,
-                                    int prot,
-                                    int flags,
-                                    int fd,
-                                    off_t offset) {
+                              size_t page_aligned_byte_count,
+                              int prot,
+                              int flags,
+                              int fd,
+                              off_t offset) {
-  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
+  void* actual = TargetMMap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
   if (actual != MAP_FAILED) {
     // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
     // 4GB. If this is the case, unmap and retry.
     if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
-      munmap(actual, page_aligned_byte_count);
+      TargetMUnmap(actual, page_aligned_byte_count);
       actual = MAP_FAILED;
     }
   }
@@ -237,7 +242,7 @@
 #ifndef __LP64__
   UNUSED(low_4gb);
 #endif
-  use_ashmem = use_ashmem && !kIsTargetLinux;
+  use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
   if (byte_count == 0) {
     return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
   }
@@ -521,7 +526,7 @@
   if (!reuse_) {
     MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
     if (!already_unmapped_) {
-      int result = munmap(base_begin_, base_size_);
+      int result = TargetMUnmap(base_begin_, base_size_);
       if (result == -1) {
         PLOG(FATAL) << "munmap failed";
       }
@@ -565,7 +570,7 @@
 
 MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                            std::string* error_msg, bool use_ashmem) {
-  use_ashmem = use_ashmem && !kIsTargetLinux;
+  use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
   DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
@@ -607,7 +612,7 @@
 
   MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
   // Unmap/map the tail region.
-  int result = munmap(tail_base_begin, tail_base_size);
+  int result = TargetMUnmap(tail_base_begin, tail_base_size);
   if (result == -1) {
     PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
     *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
@@ -618,12 +623,12 @@
   // calls. Otherwise, libc (or something else) might take this memory
   // region. Note this isn't perfect as there's no way to prevent
   // other threads from trying to take this memory region here.
-  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin,
-                                                    tail_base_size,
-                                                    tail_prot,
-                                                    flags,
-                                                    fd.get(),
-                                                    0));
+  uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
+                                                          tail_base_size,
+                                                          tail_prot,
+                                                          flags,
+                                                          fd.get(),
+                                                          0));
   if (actual == MAP_FAILED) {
     PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
     *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
@@ -790,6 +795,8 @@
   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
   DCHECK(gMaps == nullptr);
   gMaps = new Maps;
+
+  TargetMMapInit();
 }
 
 void MemMap::Shutdown() {
@@ -821,8 +828,10 @@
       reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
                               new_base_size),
       base_size_ - new_base_size);
-  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
-                  base_size_ - new_base_size), 0) << new_base_size << " " << base_size_;
+  CHECK_EQ(TargetMUnmap(reinterpret_cast<void*>(
+                        reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
+                        base_size_ - new_base_size), 0)
+                        << new_base_size << " " << base_size_;
   base_size_ = new_base_size;
   size_ = new_size;
 }
@@ -968,7 +977,7 @@
     if (orig_prot != prot_non_exec) {
       if (mprotect(actual, length, orig_prot) != 0) {
         PLOG(ERROR) << "Could not protect to requested prot: " << orig_prot;
-        munmap(actual, length);
+        TargetMUnmap(actual, length);
         errno = ENOMEM;
         return MAP_FAILED;
       }
@@ -976,14 +985,14 @@
     return actual;
   }
 
-  actual = mmap(addr, length, prot, flags, fd, offset);
+  actual = TargetMMap(addr, length, prot, flags, fd, offset);
 #else
 #if defined(__LP64__)
   if (low_4gb && addr == nullptr) {
     flags |= MAP_32BIT;
   }
 #endif
-  actual = mmap(addr, length, prot, flags, fd, offset);
+  actual = TargetMMap(addr, length, prot, flags, fd, offset);
 #endif
   return actual;
 }
@@ -1059,13 +1068,13 @@
   // Unmap the unaligned parts.
   if (base_begin < aligned_base_begin) {
     MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
-    CHECK_EQ(munmap(base_begin, aligned_base_begin - base_begin), 0)
+    CHECK_EQ(TargetMUnmap(base_begin, aligned_base_begin - base_begin), 0)
         << "base_begin=" << reinterpret_cast<void*>(base_begin)
         << " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
   }
   if (aligned_base_end < base_end) {
     MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
-    CHECK_EQ(munmap(aligned_base_end, base_end - aligned_base_end), 0)
+    CHECK_EQ(TargetMUnmap(aligned_base_end, base_end - aligned_base_end), 0)
         << "base_end=" << reinterpret_cast<void*>(base_end)
         << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
   }
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 3a324b2..1979357 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -29,10 +29,11 @@
 
 namespace art {
 
-#if defined(__LP64__) && (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
+#if defined(__LP64__) && !defined(__Fuchsia__) && \
+    (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
 #define USE_ART_LOW_4G_ALLOCATOR 1
 #else
-#if defined(__LP64__) && !defined(__x86_64__)
+#if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
 #error "Unrecognized 64-bit architecture."
 #endif
 #define USE_ART_LOW_4G_ALLOCATOR 0
@@ -264,6 +265,12 @@
                                              off_t offset)
       REQUIRES(!MemMap::mem_maps_lock_);
 
+  // Member function so that it can call the target-specific TargetMUnmap.
+  static bool CheckMapRequest(uint8_t* expected_ptr,
+                              void* actual_ptr,
+                              size_t byte_count,
+                              std::string* error_msg);
+
   const std::string name_;
   uint8_t* begin_;  // Start of data. May be changed by AlignBy.
   size_t size_;  // Length of data.
@@ -284,8 +291,19 @@
 
 #if USE_ART_LOW_4G_ALLOCATOR
   static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
+
+  static void* TryMemMapLow4GB(void* ptr,
+                               size_t page_aligned_byte_count,
+                               int prot,
+                               int flags,
+                               int fd,
+                               off_t offset);
 #endif
 
+  static void TargetMMapInit();
+  static void* TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off);
+  static int TargetMUnmap(void* start, size_t len);
+
   static std::mutex* mem_maps_lock_;
 
   friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
diff --git a/libartbase/base/mem_map_fuchsia.cc b/libartbase/base/mem_map_fuchsia.cc
new file mode 100644
index 0000000..db31efb
--- /dev/null
+++ b/libartbase/base/mem_map_fuchsia.cc
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mem_map.h"
+#include <sys/mman.h>
+#include "logging.h"
+
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+
+namespace art {
+
+static zx_handle_t fuchsia_lowmem_vmar = ZX_HANDLE_INVALID;
+static zx_vaddr_t fuchsia_lowmem_base = 0;
+static size_t fuchsia_lowmem_size = 0;
+
+static const char map_name[] = "mmap-android";
+static constexpr uintptr_t FUCHSIA_LOWER_MEM_START = 0x80000000;
+static constexpr uintptr_t FUCHSIA_LOWER_MEM_SIZE  = 0x60000000;
+
+void MemMap::TargetMMapInit() {
+  if (fuchsia_lowmem_vmar != ZX_HANDLE_INVALID) {
+    return;
+  }
+
+  zx_info_vmar_t vmarinfo;
+  CHECK_EQ(zx_object_get_info(zx_vmar_root_self(),
+                              ZX_INFO_VMAR,
+                              &vmarinfo,
+                              sizeof(vmarinfo),
+                              NULL,
+                              NULL), ZX_OK) << "could not find info from root vmar";
+
+  uintptr_t lower_mem_start = FUCHSIA_LOWER_MEM_START - vmarinfo.base;
+  fuchsia_lowmem_size = FUCHSIA_LOWER_MEM_SIZE;
+  uint32_t allocflags = ZX_VM_FLAG_CAN_MAP_READ |
+                        ZX_VM_FLAG_CAN_MAP_WRITE |
+                        ZX_VM_FLAG_CAN_MAP_EXECUTE |
+                        ZX_VM_FLAG_SPECIFIC;
+  CHECK_EQ(zx_vmar_allocate(zx_vmar_root_self(),
+                            lower_mem_start,
+                            fuchsia_lowmem_size,
+                            allocflags,
+                            &fuchsia_lowmem_vmar,
+                            &fuchsia_lowmem_base), ZX_OK) << "could not allocate lowmem vmar";
+}
+
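+// Emulates the subset of mmap() that ART needs. File-backed requests fall
+// through to the system mmap(); anonymous requests are backed by a fresh VMO
+// mapped into the root VMAR or, for MAP_32BIT, into the reserved low-memory
+// sub-VMAR.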
+void* MemMap::TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off) {
+  zx_status_t status;
+  uintptr_t mem = 0;
+
+  bool mmap_lower = (flags & MAP_32BIT) != 0;
+
+  // For file-based mappings, use the system library.
+  if ((flags & MAP_ANONYMOUS) == 0) {
+    if (start != nullptr) {
+      flags |= MAP_FIXED;
+    }
+    CHECK(!mmap_lower) << "cannot map files into low memory for Fuchsia";
+    return mmap(start, len, prot, flags, fd, fd_off);
+  }
+
+  uint32_t vmarflags = 0;
+  if ((prot & PROT_READ) != 0) {
+    vmarflags |= ZX_VM_FLAG_PERM_READ;
+  }
+  if ((prot & PROT_WRITE) != 0) {
+    vmarflags |= ZX_VM_FLAG_PERM_WRITE;
+  }
+  if ((prot & PROT_EXEC) != 0) {
+    vmarflags |= ZX_VM_FLAG_PERM_EXECUTE;
+  }
+
+  if (len == 0) {
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+
+  zx_info_vmar_t vmarinfo;
+  size_t vmaroffset = 0;
+  if (start != nullptr) {
+    vmarflags |= ZX_VM_FLAG_SPECIFIC;
+    status = zx_object_get_info((mmap_lower ? fuchsia_lowmem_vmar : zx_vmar_root_self()),
+                                ZX_INFO_VMAR,
+                                &vmarinfo,
+                                sizeof(vmarinfo),
+                                NULL,
+                                NULL);
+    if (status < 0 || reinterpret_cast<uintptr_t>(start) < vmarinfo.base) {
+      errno = EINVAL;
+      return MAP_FAILED;
+    }
+    vmaroffset = reinterpret_cast<uintptr_t>(start) - vmarinfo.base;
+  }
+
+  zx_handle_t vmo;
+  if (zx_vmo_create(len, 0, &vmo) < 0) {
+    errno = ENOMEM;
+    return MAP_FAILED;
+  }
+  zx_vmo_get_size(vmo, &len);
+  zx_object_set_property(vmo, ZX_PROP_NAME, map_name, strlen(map_name));
+
+  if (mmap_lower) {
+    status = zx_vmar_map(fuchsia_lowmem_vmar, vmaroffset, vmo, fd_off, len, vmarflags, &mem);
+  } else {
+    status = zx_vmar_map(zx_vmar_root_self(), vmaroffset, vmo, fd_off, len, vmarflags, &mem);
+  }
+  zx_handle_close(vmo);
+  if (status != ZX_OK) {
+    return MAP_FAILED;
+  }
+
+  return reinterpret_cast<void *>(mem);
+}
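+
+// Illustrative sketch only: an anonymous low-memory request such as
+//   void* p = TargetMMap(nullptr, kRequestSize, PROT_READ | PROT_WRITE,
+//                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
+// takes the VMO path above and is placed inside fuchsia_lowmem_vmar, i.e.
+// within [FUCHSIA_LOWER_MEM_START,
+// FUCHSIA_LOWER_MEM_START + FUCHSIA_LOWER_MEM_SIZE), assuming the root VMAR
+// covers that range (kRequestSize is a hypothetical page-aligned size).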
+
+int MemMap::TargetMUnmap(void* start, size_t len) {
+  uintptr_t addr = reinterpret_cast<uintptr_t>(start);
+  zx_handle_t alloc_vmar = zx_vmar_root_self();
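+  // Mappings inside the reserved low-memory range were created through the
+  // sub-VMAR and must be unmapped through that same VMAR.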
+  if (addr >= fuchsia_lowmem_base && addr < fuchsia_lowmem_base + fuchsia_lowmem_size) {
+    alloc_vmar = fuchsia_lowmem_vmar;
+  }
+  zx_status_t status = zx_vmar_unmap(alloc_vmar, addr, len);
+  if (status < 0) {
+    errno = EINVAL;
+    return -1;
+  }
+  return 0;
+}
+
+}  // namespace art
diff --git a/libartbase/base/mem_map_unix.cc b/libartbase/base/mem_map_unix.cc
new file mode 100644
index 0000000..601b049
--- /dev/null
+++ b/libartbase/base/mem_map_unix.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mem_map.h"
+
+#include <sys/mman.h>
+
+namespace art {
+
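+// Unix-like targets need no indirection: TargetMMap/TargetMUnmap forward
+// straight to the libc mmap()/munmap(), and TargetMMapInit() is a no-op.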
+void MemMap::TargetMMapInit() {
+  // no-op for unix
+}
+
+void* MemMap::TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off) {
+  return mmap(start, len, prot, flags, fd, fd_off);
+}
+
+int MemMap::TargetMUnmap(void* start, size_t len) {
+  return munmap(start, len);
+}
+
+}  // namespace art
diff --git a/libartbase/base/scoped_arena_containers.h b/libartbase/base/scoped_arena_containers.h
index 6c78bad..80144d2 100644
--- a/libartbase/base/scoped_arena_containers.h
+++ b/libartbase/base/scoped_arena_containers.h
@@ -66,15 +66,15 @@
 
 template <typename T,
           typename EmptyFn = DefaultEmptyFn<T>,
-          typename HashFn = std::hash<T>,
-          typename Pred = std::equal_to<T>>
+          typename HashFn = DefaultHashFn<T>,
+          typename Pred = DefaultPred<T>>
 using ScopedArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ScopedArenaAllocatorAdapter<T>>;
 
 template <typename Key,
           typename Value,
           typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>,
-          typename HashFn = std::hash<Key>,
-          typename Pred = std::equal_to<Key>>
+          typename HashFn = DefaultHashFn<Key>,
+          typename Pred = DefaultPred<Key>>
 using ScopedArenaHashMap = HashMap<Key,
                                    Value,
                                    EmptyFn,
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index 73c1c22..6e3b78e 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -244,20 +244,6 @@
   }
 }
 
-// Hash bytes using a relatively fast hash.
-static inline size_t HashBytes(const uint8_t* data, size_t len) {
-  size_t hash = 0x811c9dc5;
-  for (uint32_t i = 0; i < len; ++i) {
-    hash = (hash * 16777619) ^ data[i];
-  }
-  hash += hash << 13;
-  hash ^= hash >> 7;
-  hash += hash << 3;
-  hash ^= hash >> 17;
-  hash += hash << 5;
-  return hash;
-}
-
 }  // namespace art
 
 #endif  // ART_LIBARTBASE_BASE_UTILS_H_
diff --git a/libartbase/base/variant_map_test.cc b/libartbase/base/variant_map_test.cc
index 4677b6d..f2da338 100644
--- a/libartbase/base/variant_map_test.cc
+++ b/libartbase/base/variant_map_test.cc
@@ -108,7 +108,7 @@
   EXPECT_EQ(size_t(2), fmFilled.Size());
 
   // Test copy constructor
-  FruitMap fmEmptyCopy(fmEmpty);
+  FruitMap fmEmptyCopy(fmEmpty);  // NOLINT
   EXPECT_EQ(size_t(0), fmEmptyCopy.Size());
 
   // Test copy constructor
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index 78db8b9..d435945 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -1746,8 +1746,8 @@
         ErrorStringPrintf("Item %d offset is 0", i);
         return false;
       }
-      DCHECK(offset_to_type_map_.Find(aligned_offset) == offset_to_type_map_.end());
-      offset_to_type_map_.Insert(std::pair<uint32_t, uint16_t>(aligned_offset, kType));
+      DCHECK(offset_to_type_map_.find(aligned_offset) == offset_to_type_map_.end());
+      offset_to_type_map_.insert(std::pair<uint32_t, uint16_t>(aligned_offset, kType));
     }
 
     aligned_offset = ptr_ - begin_;
@@ -1951,7 +1951,7 @@
 
 bool DexFileVerifier::CheckOffsetToTypeMap(size_t offset, uint16_t type) {
   DCHECK_NE(offset, 0u);
-  auto it = offset_to_type_map_.Find(offset);
+  auto it = offset_to_type_map_.find(offset);
   if (UNLIKELY(it == offset_to_type_map_.end())) {
     ErrorStringPrintf("No data map entry found @ %zx; expected %x", offset, type);
     return false;
diff --git a/libdexfile/dex/dex_instruction_test.cc b/libdexfile/dex/dex_instruction_test.cc
index c944085..6ce9dba 100644
--- a/libdexfile/dex/dex_instruction_test.cc
+++ b/libdexfile/dex/dex_instruction_test.cc
@@ -135,7 +135,7 @@
 static void Build35c(uint16_t* out,
                      Instruction::Code code,
                      uint16_t method_idx,
-                     std::vector<uint16_t> args) {
+                     const std::vector<uint16_t>& args) {
   out[0] = 0;
   out[0] |= (args.size() << 12);
   out[0] |= static_cast<uint16_t>(code);
@@ -152,7 +152,7 @@
 
 static std::string DumpInst35c(Instruction::Code code,
                                uint16_t method_idx,
-                               std::vector<uint16_t> args) {
+                               const std::vector<uint16_t>& args) {
   uint16_t inst[6] = {};
   Build35c(inst, code, method_idx, args);
   return Instruction::At(inst)->DumpString(nullptr);
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 748e24e..6f49adf 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -57,7 +57,7 @@
 
 // The name of the profile entry in the dex metadata file.
 // DO NOT CHANGE THIS! (it's similar to classes.dex in the apk files).
-const char* ProfileCompilationInfo::kDexMetadataProfileEntry = "primary.prof";
+const char ProfileCompilationInfo::kDexMetadataProfileEntry[] = "primary.prof";
 
 static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
 
@@ -1181,8 +1181,8 @@
       // Allow archives without the profile entry. In this case, create an empty profile.
       // This gives more flexibility when re-using archives that may miss the entry.
       // (e.g. dex metadata files)
-      LOG(WARNING) << std::string("Could not find entry ") + kDexMetadataProfileEntry +
-            " in the zip archive. Creating an empty profile.";
+      LOG(WARNING) << "Could not find entry " << kDexMetadataProfileEntry
+          << " in the zip archive. Creating an empty profile.";
       source->reset(ProfileSource::Create(nullptr));
       return kProfileLoadSuccess;
     }
@@ -1383,7 +1383,7 @@
   // the current profile info.
   // Note that the number of elements should be very small, so this should not
   // be a performance issue.
-  for (const ProfileLineHeader other_profile_line_header : profile_line_headers) {
+  for (const ProfileLineHeader& other_profile_line_header : profile_line_headers) {
     if (!filter_fn(other_profile_line_header.dex_location, other_profile_line_header.checksum)) {
       continue;
     }
@@ -2021,9 +2021,9 @@
   return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&allocator_))->second);
 }
 
-std::unordered_set<std::string> ProfileCompilationInfo::GetClassDescriptors(
+HashSet<std::string> ProfileCompilationInfo::GetClassDescriptors(
     const std::vector<const DexFile*>& dex_files) {
-  std::unordered_set<std::string> ret;
+  HashSet<std::string> ret;
   for (const DexFile* dex_file : dex_files) {
     const DexFileData* data = FindDexData(dex_file);
     if (data != nullptr) {
@@ -2032,7 +2032,7 @@
           // Something went wrong. The profile is probably corrupted. Abort and return an empty set.
           LOG(WARNING) << "Corrupted profile: invalid type index "
               << type_idx.index_ << " in dex " << dex_file->GetLocation();
-          return std::unordered_set<std::string>();
+          return HashSet<std::string>();
         }
         const DexFile::TypeId& type_id = dex_file->GetTypeId(type_idx);
         ret.insert(dex_file->GetTypeDescriptor(type_id));
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index e28c5f1..3596f3e 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -24,6 +24,7 @@
 #include "base/arena_object.h"
 #include "base/atomic.h"
 #include "base/bit_memory_region.h"
+#include "base/hash_set.h"
 #include "base/malloc_arena_pool.h"
 #include "base/mem_map.h"
 #include "base/safe_map.h"
@@ -73,7 +74,7 @@
   static const uint8_t kProfileMagic[];
   static const uint8_t kProfileVersion[];
 
-  static const char* kDexMetadataProfileEntry;
+  static const char kDexMetadataProfileEntry[];
 
   static constexpr uint8_t kIndividualInlineCacheSize = 5;
 
@@ -426,7 +427,7 @@
   ArenaAllocator* GetAllocator() { return &allocator_; }
 
   // Return all of the class descriptors in the profile for a set of dex files.
-  std::unordered_set<std::string> GetClassDescriptors(const std::vector<const DexFile*>& dex_files);
+  HashSet<std::string> GetClassDescriptors(const std::vector<const DexFile*>& dex_files);
 
   // Return true if the fd points to a profile file.
   bool IsProfileFile(int fd);
diff --git a/profman/profman.cc b/profman/profman.cc
index 5fbce66..9b47097 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -389,7 +389,7 @@
   }
 
   bool OpenApkFilesFromLocations(
-      std::function<void(std::unique_ptr<const DexFile>&&)> process_fn) {
+      const std::function<void(std::unique_ptr<const DexFile>&&)>& process_fn) {
     bool use_apk_fd_list = !apks_fd_.empty();
     if (use_apk_fd_list) {
       // Get the APKs from the collection of FDs.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c374e03..19554cd 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1242,12 +1242,12 @@
           ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
           if (space->HasAddress(klass.Ptr())) {
             DCHECK(!klass->IsErroneous()) << klass->GetStatus();
-            auto it = new_class_set->Find(ClassTable::TableSlot(klass));
+            auto it = new_class_set->find(ClassTable::TableSlot(klass));
             DCHECK(it != new_class_set->end());
             DCHECK_EQ(it->Read(), klass);
             ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
             if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
-              auto it2 = new_class_set->Find(ClassTable::TableSlot(super_class));
+              auto it2 = new_class_set->find(ClassTable::TableSlot(super_class));
               DCHECK(it2 != new_class_set->end());
               DCHECK_EQ(it2->Read(), super_class);
             }
@@ -2528,7 +2528,7 @@
       old = result_ptr;  // For the comparison below, after releasing the lock.
       if (descriptor_equals) {
         class_table->InsertWithHash(result_ptr, hash);
-        Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
+        WriteBarrier::ForEveryFieldWrite(class_loader.Get());
       }  // else throw below, after releasing the lock.
     }
   }
@@ -3143,7 +3143,7 @@
     DCHECK_EQ(klass->NumInstanceFields(), num_ifields);
   }
   // Ensure that the card is marked so that remembered sets pick up native roots.
-  Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass.Get());
+  WriteBarrier::ForEveryFieldWrite(klass.Get());
   self->AllowThreadSuspension();
 }
 
@@ -3330,7 +3330,7 @@
   if (class_loader != nullptr) {
     // Since we added a strong root to the class table, do the write barrier as required for
     // remembered sets and generational GCs.
-    Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+    WriteBarrier::ForEveryFieldWrite(class_loader);
   }
   dex_caches_.push_back(data);
 }
@@ -3390,7 +3390,7 @@
   if (h_class_loader.Get() != nullptr) {
     // Since we added a strong root to the class table, do the write barrier as required for
     // remembered sets and generational GCs.
-    Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(h_class_loader.Get());
+    WriteBarrier::ForEveryFieldWrite(h_class_loader.Get());
   }
 }
 
@@ -3461,7 +3461,7 @@
   if (h_class_loader.Get() != nullptr) {
     // Since we added a strong root to the class table, do the write barrier as required for
     // remembered sets and generational GCs.
-    Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(h_class_loader.Get());
+    WriteBarrier::ForEveryFieldWrite(h_class_loader.Get());
   }
   return h_dex_cache.Get();
 }
@@ -3761,7 +3761,7 @@
     class_table->InsertWithHash(klass, hash);
     if (class_loader != nullptr) {
       // This is necessary because we need to have the card dirtied for remembered sets.
-      Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+      WriteBarrier::ForEveryFieldWrite(class_loader);
     }
     if (log_new_roots_) {
       new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
@@ -3791,7 +3791,7 @@
                                 klass->NumDirectMethods(),
                                 klass->NumDeclaredVirtualMethods());
   // Need to mark the card so that the remembered sets and mod union tables get updated.
-  Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass);
+  WriteBarrier::ForEveryFieldWrite(klass);
 }
 
 ObjPtr<mirror::Class> ClassLinker::LookupClass(Thread* self,
@@ -5195,7 +5195,7 @@
 
   // Make sure the remembered set and mod-union tables know that we updated some of the native
   // roots.
-  Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(new_class);
+  WriteBarrier::ForEveryFieldWrite(new_class);
 }
 
 void ClassLinker::RegisterClassLoader(ObjPtr<mirror::ClassLoader> class_loader) {
@@ -5353,7 +5353,7 @@
       if (class_loader != nullptr) {
         // We updated the class in the class table, perform the write barrier so that the GC knows
         // about the change.
-        Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+        WriteBarrier::ForEveryFieldWrite(class_loader);
       }
       CHECK_EQ(existing, klass.Get());
       if (log_new_roots_) {
@@ -8760,7 +8760,7 @@
   if (table->InsertStrongRoot(dex_file) && class_loader != nullptr) {
     // It was not already inserted, perform the write barrier to let the GC know the class loader's
     // class table was modified.
-    Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+    WriteBarrier::ForEveryFieldWrite(class_loader);
   }
 }
 
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index e313ec5..a233357 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -37,7 +37,7 @@
   ReaderMutexLock mu(Thread::Current(), lock_);
   TableSlot slot(klass);
   for (ClassSet& class_set : classes_) {
-    auto it = class_set.Find(slot);
+    auto it = class_set.find(slot);
     if (it != class_set.end()) {
       return it->Read() == klass;
     }
@@ -49,7 +49,7 @@
   ReaderMutexLock mu(Thread::Current(), lock_);
   TableSlot slot(klass);
   for (ClassSet& class_set : classes_) {
-    auto it = class_set.Find(slot);
+    auto it = class_set.find(slot);
     if (it != class_set.end()) {
       return it->Read();
     }
@@ -119,14 +119,14 @@
   ReaderMutexLock mu(Thread::Current(), lock_);
   size_t sum = 0;
   for (size_t i = 0; i < classes_.size() - 1; ++i) {
-    sum += classes_[i].Size();
+    sum += classes_[i].size();
   }
   return sum;
 }
 
 size_t ClassTable::NumReferencedNonZygoteClasses() const {
   ReaderMutexLock mu(Thread::Current(), lock_);
-  return classes_.back().Size();
+  return classes_.back().size();
 }
 
 mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
@@ -145,12 +145,12 @@
   TableSlot slot(klass);
   WriterMutexLock mu(Thread::Current(), lock_);
   for (ClassSet& class_set : classes_) {
-    auto it = class_set.Find(slot);
+    auto it = class_set.find(slot);
     if (it != class_set.end()) {
       return it->Read();
     }
   }
-  classes_.back().Insert(slot);
+  classes_.back().insert(slot);
   return klass;
 }
 
@@ -163,12 +163,12 @@
 void ClassTable::CopyWithoutLocks(const ClassTable& source_table) {
   if (kIsDebugBuild) {
     for (ClassSet& class_set : classes_) {
-      CHECK(class_set.Empty());
+      CHECK(class_set.empty());
     }
   }
   for (const ClassSet& class_set : source_table.classes_) {
     for (const TableSlot& slot : class_set) {
-      classes_.back().Insert(slot);
+      classes_.back().insert(slot);
     }
   }
 }
@@ -187,9 +187,9 @@
   DescriptorHashPair pair(descriptor, ComputeModifiedUtf8Hash(descriptor));
   WriterMutexLock mu(Thread::Current(), lock_);
   for (ClassSet& class_set : classes_) {
-    auto it = class_set.Find(pair);
+    auto it = class_set.find(pair);
     if (it != class_set.end()) {
-      class_set.Erase(it);
+      class_set.erase(it);
       return true;
     }
   }
@@ -268,7 +268,7 @@
   // default in case classes were pruned.
   for (const ClassSet& class_set : classes_) {
     for (const TableSlot& root : class_set) {
-      combined.Insert(root);
+      combined.insert(root);
     }
   }
   const size_t ret = combined.WriteToMemory(ptr);
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 6275612..85d633f 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -64,7 +64,7 @@
           << oat_file->GetLocation();
     }
     if (class_loader != nullptr) {
-      runtime->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+      WriteBarrier::ForEveryFieldWrite(class_loader);
     } else {
       runtime->GetClassLinker()->WriteBarrierForBootOatFileBssRoots(oat_file);
     }
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 10fa8c5..cbd9880 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1822,7 +1822,7 @@
 
 std::string ConcurrentCopying::DumpReferenceInfo(mirror::Object* ref,
                                                  const char* ref_name,
-                                                 std::string indent) {
+                                                 const char* indent) {
   std::ostringstream oss;
   oss << indent << heap_->GetVerification()->DumpObjectInfo(ref, ref_name) << '\n';
   if (ref != nullptr) {
@@ -1846,13 +1846,13 @@
                                                  MemberOffset offset,
                                                  mirror::Object* ref) {
   std::ostringstream oss;
-  std::string indent = "  ";
-  oss << indent << "Invalid reference: ref=" << ref
+  constexpr const char* kIndent = "  ";
+  oss << kIndent << "Invalid reference: ref=" << ref
       << " referenced from: object=" << obj << " offset= " << offset << '\n';
   // Information about `obj`.
-  oss << DumpReferenceInfo(obj, "obj", indent) << '\n';
+  oss << DumpReferenceInfo(obj, "obj", kIndent) << '\n';
   // Information about `ref`.
-  oss << DumpReferenceInfo(ref, "ref", indent);
+  oss << DumpReferenceInfo(ref, "ref", kIndent);
   return oss.str();
 }
 
@@ -1928,10 +1928,10 @@
 
 std::string ConcurrentCopying::DumpGcRoot(mirror::Object* ref) {
   std::ostringstream oss;
-  std::string indent = "  ";
-  oss << indent << "Invalid GC root: ref=" << ref << '\n';
+  constexpr const char* kIndent = "  ";
+  oss << kIndent << "Invalid GC root: ref=" << ref << '\n';
   // Information about `ref`.
-  oss << DumpReferenceInfo(ref, "ref", indent);
+  oss << DumpReferenceInfo(ref, "ref", kIndent);
   return oss.str();
 }
 
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index f1e7e2f..448525d 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -242,7 +242,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
   // Dump information about reference `ref` and return it as a string.
   // Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
-  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, std::string indent = "")
+  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, const char* indent = "")
       REQUIRES_SHARED(Locks::mutator_lock_);
   // Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
   // and return it as a string.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 681ac2e..8cd484f 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -48,6 +48,7 @@
 #include "runtime.h"
 #include "thread-inl.h"
 #include "thread_list.h"
+#include "write_barrier-inl.h"
 
 using ::art::mirror::Object;
 
@@ -531,7 +532,7 @@
       // Dirty the card at the destination as it may contain
       // references (including the class pointer) to the bump pointer
       // space.
-      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
+      WriteBarrier::ForEveryFieldWrite(forward_address);
       // Handle the bitmaps marking.
       accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
       DCHECK(live_bitmap != nullptr);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 6756868..791d037 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -37,6 +37,7 @@
 #include "runtime.h"
 #include "thread-inl.h"
 #include "verify_object.h"
+#include "write_barrier-inl.h"
 
 namespace art {
 namespace gc {
@@ -151,7 +152,7 @@
       // enabled. We don't need this for kAllocatorTypeRosAlloc/DlMalloc
       // cases because we don't directly allocate into the main alloc
       // space (besides promotions) under the SS/GSS collector.
-      WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
+      WriteBarrier::ForFieldWrite(obj, mirror::Object::ClassOffset(), klass);
     }
     pre_fence_visitor(obj, usable_size);
     QuasiAtomic::ThreadFenceForConstructor();
@@ -418,22 +419,6 @@
   }
 }
 
-inline void Heap::WriteBarrierField(ObjPtr<mirror::Object> dst,
-                                    MemberOffset offset ATTRIBUTE_UNUSED,
-                                    ObjPtr<mirror::Object> new_value ATTRIBUTE_UNUSED) {
-  card_table_->MarkCard(dst.Ptr());
-}
-
-inline void Heap::WriteBarrierArray(ObjPtr<mirror::Object> dst,
-                                    int start_offset ATTRIBUTE_UNUSED,
-                                    size_t length ATTRIBUTE_UNUSED) {
-  card_table_->MarkCard(dst.Ptr());
-}
-
-inline void Heap::WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj) {
-  card_table_->MarkCard(obj.Ptr());
-}
-
 }  // namespace gc
 }  // namespace art
 
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 609d2ab..d014372 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -466,23 +466,6 @@
   // Record the bytes freed by thread-local buffer revoke.
   void RecordFreeRevoke();
 
-  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
-  // The call is not needed if null is stored in the field.
-  ALWAYS_INLINE void WriteBarrierField(ObjPtr<mirror::Object> dst,
-                                       MemberOffset offset,
-                                       ObjPtr<mirror::Object> new_value)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Write barrier for array operations that update many field positions
-  ALWAYS_INLINE void WriteBarrierArray(ObjPtr<mirror::Object> dst,
-                                       int start_offset,
-                                       // TODO: element_count or byte_count?
-                                       size_t length)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  ALWAYS_INLINE void WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   accounting::CardTable* GetCardTable() const {
     return card_table_.get();
   }
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 2db8815..c8aaa21 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -366,7 +366,7 @@
 size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
   size_t read_count = 0;
   UnorderedSet set(ptr, /*make copy*/false, &read_count);
-  if (set.Empty()) {
+  if (set.empty()) {
     // Avoid inserting empty sets.
     return read_count;
   }
@@ -392,7 +392,7 @@
     table_to_write = &combined;
     for (UnorderedSet& table : tables_) {
       for (GcRoot<mirror::String>& string : table) {
-        combined.Insert(string);
+        combined.insert(string);
       }
     }
   } else {
@@ -403,9 +403,9 @@
 
 void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
   for (UnorderedSet& table : tables_) {
-    auto it = table.Find(GcRoot<mirror::String>(s));
+    auto it = table.find(GcRoot<mirror::String>(s));
     if (it != table.end()) {
-      table.Erase(it);
+      table.erase(it);
       return;
     }
   }
@@ -415,7 +415,7 @@
 ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
   Locks::intern_table_lock_->AssertHeld(Thread::Current());
   for (UnorderedSet& table : tables_) {
-    auto it = table.Find(GcRoot<mirror::String>(s));
+    auto it = table.find(GcRoot<mirror::String>(s));
     if (it != table.end()) {
       return it->Read();
     }
@@ -426,7 +426,7 @@
 ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
   Locks::intern_table_lock_->AssertHeld(Thread::Current());
   for (UnorderedSet& table : tables_) {
-    auto it = table.Find(string);
+    auto it = table.find(string);
     if (it != table.end()) {
       return it->Read();
     }
@@ -442,7 +442,7 @@
   // Always insert into the last table; the image tables come before it and we
   // avoid inserting into those to prevent dirty pages.
   DCHECK(!tables_.empty());
-  tables_.back().Insert(GcRoot<mirror::String>(s));
+  tables_.back().insert(GcRoot<mirror::String>(s));
 }
 
 void InternTable::Table::VisitRoots(RootVisitor* visitor) {
@@ -467,7 +467,7 @@
     mirror::Object* object = it->Read<kWithoutReadBarrier>();
     mirror::Object* new_object = visitor->IsMarked(object);
     if (new_object == nullptr) {
-      it = set->Erase(it);
+      it = set->erase(it);
     } else {
       *it = GcRoot<mirror::String>(new_object->AsString());
       ++it;
@@ -480,7 +480,7 @@
                          tables_.end(),
                          0U,
                          [](size_t sum, const UnorderedSet& set) {
-                           return sum + set.Size();
+                           return sum + set.size();
                          });
 }
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b010650..b1cd5c0 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -167,8 +167,8 @@
 
   // Generating debug information is for using the Linux perf tool on
   // host which does not work with ashmem.
-  // Also, target linux does not support ashmem.
-  bool use_ashmem = !generate_debug_info && !kIsTargetLinux;
+  // Also, the Linux and Fuchsia targets do not support ashmem.
+  bool use_ashmem = !generate_debug_info && !kIsTargetLinux && !kIsTargetFuchsia;
 
   // With 'perf', we want a 1-1 mapping between an address and a method.
   bool garbage_collect_code = !generate_debug_info;
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index e64a325..faec6e6 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -34,6 +34,7 @@
 #include "mirror/method_type.h"
 #include "obj_ptr.h"
 #include "runtime.h"
+#include "write_barrier-inl.h"
 
 #include <atomic>
 
@@ -76,7 +77,7 @@
     runtime->RecordResolveString(this, string_idx);
   }
   // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
-  runtime->GetHeap()->WriteBarrierEveryFieldOf(this);
+  WriteBarrier::ForEveryFieldWrite(this);
 }
 
 inline void DexCache::ClearString(dex::StringIndex string_idx) {
@@ -113,7 +114,7 @@
   GetResolvedTypes()[TypeSlotIndex(type_idx)].store(
       TypeDexCachePair(resolved, type_idx.index_), std::memory_order_release);
   // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
-  Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
+  WriteBarrier::ForEveryFieldWrite(this);
 }
 
 inline void DexCache::ClearResolvedType(dex::TypeIndex type_idx) {
@@ -145,7 +146,7 @@
   GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].store(
       MethodTypeDexCachePair(resolved, proto_idx.index_), std::memory_order_relaxed);
   // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
-  Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
+  WriteBarrier::ForEveryFieldWrite(this);
 }
 
 inline CallSite* DexCache::GetResolvedCallSite(uint32_t call_site_idx) {
@@ -171,7 +172,7 @@
       reinterpret_cast<Atomic<GcRoot<mirror::CallSite>>&>(target);
   if (ref.CompareAndSetStrongSequentiallyConsistent(null_call_site, candidate)) {
     // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
-    Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
+    WriteBarrier::ForEveryFieldWrite(this);
     return call_site;
   } else {
     return target.Read();
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index a5603e0..d94ded0 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -39,6 +39,7 @@
 #include "runtime.h"
 #include "string.h"
 #include "throwable.h"
+#include "write_barrier-inl.h"
 
 namespace art {
 namespace mirror {
@@ -635,7 +636,7 @@
   SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction, kVerifyFlags,
       kIsVolatile>(field_offset, new_value);
   if (new_value != nullptr) {
-    Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
+    WriteBarrier::ForFieldWrite<WriteBarrier::kWithoutNullCheck>(this, field_offset, new_value);
     // TODO: Check field assignment could theoretically cause thread suspension, TODO: fix this.
     CheckFieldAssignment(field_offset, new_value);
   }
@@ -670,7 +671,7 @@
   bool success = CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
       kTransactionActive, kCheckTransaction, kVerifyFlags>(field_offset, old_value, new_value);
   if (success) {
-    Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
+    WriteBarrier::ForFieldWrite(this, field_offset, new_value);
   }
   return success;
 }
@@ -701,7 +702,7 @@
   bool success = CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<
       kTransactionActive, kCheckTransaction, kVerifyFlags>(field_offset, old_value, new_value);
   if (success) {
-    Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
+    WriteBarrier::ForFieldWrite(this, field_offset, new_value);
   }
   return success;
 }
@@ -783,7 +784,7 @@
     if (kTransactionActive) {
       Runtime::Current()->RecordWriteFieldReference(this, field_offset, witness_value, true);
     }
-    Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
+    WriteBarrier::ForFieldWrite(this, field_offset, new_value);
   }
   VerifyRead<kVerifyFlags>(witness_value);
   return witness_value;
@@ -807,7 +808,7 @@
   if (kTransactionActive) {
     Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
   }
-  Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
+  WriteBarrier::ForFieldWrite(this, field_offset, new_value);
   VerifyRead<kVerifyFlags>(old_value);
   return old_value;
 }
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 3f507e8..025c10b 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -121,16 +121,15 @@
     CopyReferenceFieldsWithReadBarrierVisitor visitor(dest);
     src->VisitReferences(visitor, visitor);
   }
-  gc::Heap* heap = Runtime::Current()->GetHeap();
   // Perform write barriers on copied object references.
   ObjPtr<Class> c = src->GetClass();
   if (c->IsArrayClass()) {
     if (!c->GetComponentType()->IsPrimitive()) {
       ObjectArray<Object>* array = dest->AsObjectArray<Object>();
-      heap->WriteBarrierArray(dest, 0, array->GetLength());
+      WriteBarrier::ForArrayWrite(dest, 0, array->GetLength());
     }
   } else {
-    heap->WriteBarrierEveryFieldOf(dest);
+    WriteBarrier::ForEveryFieldWrite(dest);
   }
   return dest.Ptr();
 }
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index ed3c567..1d2f47f 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -32,6 +32,7 @@
 #include "object-inl.h"
 #include "runtime.h"
 #include "thread.h"
+#include "write_barrier-inl.h"
 
 namespace art {
 namespace mirror {
@@ -197,7 +198,7 @@
       }
     }
   }
-  Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
+  WriteBarrier::ForArrayWrite(this, dst_pos, count);
   if (kIsDebugBuild) {
     for (int i = 0; i < count; ++i) {
       // The get will perform the VerifyObject.
@@ -246,7 +247,7 @@
       SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
     }
   }
-  Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
+  WriteBarrier::ForArrayWrite(this, dst_pos, count);
   if (kIsDebugBuild) {
     for (int i = 0; i < count; ++i) {
       // The get will perform the VerifyObject.
@@ -328,7 +329,7 @@
       }
     }
   }
-  Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
+  WriteBarrier::ForArrayWrite(this, dst_pos, count);
   if (UNLIKELY(i != count)) {
     std::string actualSrcType(mirror::Object::PrettyTypeOf(o));
     std::string dstType(PrettyTypeOf());
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 2b05b0e..9355ae7 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -1063,7 +1063,8 @@
     dlopen_handle_ = android_dlopen_ext(absolute_path.get(), RTLD_NOW, &extinfo);
 #else
     UNUSED(oat_file_begin);
-    static_assert(!kIsTargetBuild || kIsTargetLinux, "host_dlopen_handles_ will leak handles");
+    static_assert(!kIsTargetBuild || kIsTargetLinux || kIsTargetFuchsia,
+                  "host_dlopen_handles_ will leak handles");
     MutexLock mu(Thread::Current(), *Locks::host_dlopen_handles_lock_);
     dlopen_handle_ = dlopen(absolute_path.get(), RTLD_NOW);
     if (dlopen_handle_ != nullptr) {
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 5472d4c..7f7f6fc 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -26,6 +26,23 @@
 
 namespace art {
 
+void CodeInfo::Decode(const uint8_t* data) {
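+  // Header: a ULEB128 byte count for the bit-packed tables that follow; the
+  // total size_ includes the header itself.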
+  size_t non_header_size = DecodeUnsignedLeb128(&data);
+  size_ = UnsignedLeb128Size(non_header_size) + non_header_size;
+  MemoryRegion region(const_cast<uint8_t*>(data), non_header_size);
+  BitMemoryReader reader(BitMemoryRegion(region), /* bit_offset */ 0);
+  stack_maps_.Decode(reader);
+  register_masks_.Decode(reader);
+  stack_masks_.Decode(reader);
+  invoke_infos_.Decode(reader);
+  inline_infos_.Decode(reader);
+  dex_register_masks_.Decode(reader);
+  dex_register_maps_.Decode(reader);
+  dex_register_catalog_.Decode(reader);
+  number_of_dex_registers_ = DecodeVarintBits(reader);
+  CHECK_EQ(non_header_size, BitsToBytesRoundUp(reader.GetBitOffset())) << "Invalid CodeInfo";
+}
+
 BitTable<StackMap>::const_iterator CodeInfo::BinarySearchNativePc(uint32_t packed_pc) const {
   return std::partition_point(
       stack_maps_.begin(),
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index c17efcf..83f0c05 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -445,22 +445,7 @@
                             uint32_t first_dex_register,
                             /*out*/ DexRegisterMap* map) const;
 
-  void Decode(const uint8_t* data) {
-    size_t non_header_size = DecodeUnsignedLeb128(&data);
-    BitMemoryRegion region(MemoryRegion(const_cast<uint8_t*>(data), non_header_size));
-    size_t bit_offset = 0;
-    size_ = UnsignedLeb128Size(non_header_size) + non_header_size;
-    stack_maps_.Decode(region, &bit_offset);
-    register_masks_.Decode(region, &bit_offset);
-    stack_masks_.Decode(region, &bit_offset);
-    invoke_infos_.Decode(region, &bit_offset);
-    inline_infos_.Decode(region, &bit_offset);
-    dex_register_masks_.Decode(region, &bit_offset);
-    dex_register_maps_.Decode(region, &bit_offset);
-    dex_register_catalog_.Decode(region, &bit_offset);
-    number_of_dex_registers_ = DecodeVarintBits(region, &bit_offset);
-    CHECK_EQ(non_header_size, BitsToBytesRoundUp(bit_offset)) << "Invalid CodeInfo";
-  }
+  void Decode(const uint8_t* data);
 
   size_t size_;
   BitTable<StackMap> stack_maps_;
diff --git a/runtime/subtype_check_info_test.cc b/runtime/subtype_check_info_test.cc
index 91fcc07..e40bca5 100644
--- a/runtime/subtype_check_info_test.cc
+++ b/runtime/subtype_check_info_test.cc
@@ -121,11 +121,11 @@
     return SubtypeCheckInfo::MakeUnchecked(bs, overflow, depth);
   }
 
-  static bool HasNext(SubtypeCheckInfo io) {
+  static bool HasNext(const SubtypeCheckInfo& io) {
     return io.HasNext();
   }
 
-  static BitString GetPathToRoot(SubtypeCheckInfo io) {
+  static BitString GetPathToRoot(const SubtypeCheckInfo& io) {
     return io.GetPathToRoot();
   }
 
diff --git a/runtime/subtype_check_test.cc b/runtime/subtype_check_test.cc
index 979fa42..666bf81 100644
--- a/runtime/subtype_check_test.cc
+++ b/runtime/subtype_check_test.cc
@@ -654,13 +654,15 @@
     MockClass* klass,
     size_t cur_depth,
     size_t total_depth,
-    std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>> transitions) {
+    const std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>>& transitions) {
   MockScopedLockSubtypeCheck lock_a;
   MockScopedLockMutator lock_b;
   using SCTree = MockSubtypeCheck;
 
   ASSERT_EQ(cur_depth, klass->Depth());
-  ApplyTransition(SCTree::Lookup(klass), transitions[cur_depth].first, transitions[cur_depth].second);
+  ApplyTransition(SCTree::Lookup(klass),
+                  transitions[cur_depth].first,
+                  transitions[cur_depth].second);
 
   if (total_depth == cur_depth + 1) {
     return;
@@ -676,7 +678,7 @@
 void EnsureStateChangedTest(
     MockClass* root,
     size_t depth,
-    std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>> transitions) {
+    const std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>>& transitions) {
   ASSERT_EQ(depth, transitions.size());
 
   EnsureStateChangedTestRecursive(root, /*cur_depth*/0u, depth, transitions);
diff --git a/runtime/write_barrier-inl.h b/runtime/write_barrier-inl.h
new file mode 100644
index 0000000..af8c1be
--- /dev/null
+++ b/runtime/write_barrier-inl.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_WRITE_BARRIER_INL_H_
+#define ART_RUNTIME_WRITE_BARRIER_INL_H_
+
+#include "write_barrier.h"
+
+#include "gc/accounting/card_table-inl.h"
+#include "gc/heap.h"
+#include "obj_ptr-inl.h"
+#include "runtime.h"
+
+namespace art {
+
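+// Storing null never creates a reference the GC could miss, so the barrier may
+// be skipped; kWithoutNullCheck callers promise new_value != nullptr (DCHECKed
+// below).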
+template <WriteBarrier::NullCheck kNullCheck>
+inline void WriteBarrier::ForFieldWrite(ObjPtr<mirror::Object> dst,
+                                        MemberOffset offset ATTRIBUTE_UNUSED,
+                                        ObjPtr<mirror::Object> new_value) {
+  if (kNullCheck == kWithNullCheck && new_value == nullptr) {
+    return;
+  }
+  DCHECK(new_value != nullptr);
+  GetCardTable()->MarkCard(dst.Ptr());
+}
+
+inline void WriteBarrier::ForArrayWrite(ObjPtr<mirror::Object> dst,
+                                        int start_offset ATTRIBUTE_UNUSED,
+                                        size_t length ATTRIBUTE_UNUSED) {
+  GetCardTable()->MarkCard(dst.Ptr());
+}
+
+inline void WriteBarrier::ForEveryFieldWrite(ObjPtr<mirror::Object> obj) {
+  GetCardTable()->MarkCard(obj.Ptr());
+}
+
+inline gc::accounting::CardTable* WriteBarrier::GetCardTable() {
+  return Runtime::Current()->GetHeap()->GetCardTable();
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_WRITE_BARRIER_INL_H_
diff --git a/runtime/write_barrier.h b/runtime/write_barrier.h
new file mode 100644
index 0000000..112154e
--- /dev/null
+++ b/runtime/write_barrier.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_WRITE_BARRIER_H_
+#define ART_RUNTIME_WRITE_BARRIER_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+namespace gc {
+namespace accounting {
+class CardTable;
+}  // namespace accounting
+}  // namespace gc
+
+class WriteBarrier {
+ public:
+  enum NullCheck {
+    kWithoutNullCheck,
+    kWithNullCheck,
+  };
+
+  // Must be called if a reference field of an Object in the heap changes, and before any GC
+  // safe-point. The call is not needed if null is stored in the field.
+  template <NullCheck kNullCheck = kWithNullCheck>
+  ALWAYS_INLINE static void ForFieldWrite(ObjPtr<mirror::Object> dst,
+                                          MemberOffset offset ATTRIBUTE_UNUSED,
+                                          ObjPtr<mirror::Object> new_value ATTRIBUTE_UNUSED)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Must be called if a reference field of an ObjectArray in the heap changes, and before any GC
+  // safe-point. The call is not needed if null is stored in the field.
+  ALWAYS_INLINE static void ForArrayWrite(ObjPtr<mirror::Object> dst,
+                                          int start_offset ATTRIBUTE_UNUSED,
+                                          size_t length ATTRIBUTE_UNUSED)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Write barrier for every reference field in an object.
+  ALWAYS_INLINE static void ForEveryFieldWrite(ObjPtr<mirror::Object> obj)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  ALWAYS_INLINE static gc::accounting::CardTable* GetCardTable();
+};
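+
+// Usage sketch (cf. mirror/object-inl.h in this change): a raw reference store
+// is followed by the barrier so that the GC rescans the holder's card:
+//   SetFieldObjectWithoutWriteBarrier<...>(field_offset, new_value);
+//   WriteBarrier::ForFieldWrite(this, field_offset, new_value);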
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_WRITE_BARRIER_H_
diff --git a/sigchainlib/sigchain_test.cc b/sigchainlib/sigchain_test.cc
index 1d1e54f..9584ded 100644
--- a/sigchainlib/sigchain_test.cc
+++ b/sigchainlib/sigchain_test.cc
@@ -70,7 +70,7 @@
 };
 
 
-static void TestSignalBlocking(std::function<void()> fn) {
+static void TestSignalBlocking(const std::function<void()>& fn) {
   // Unblock SIGSEGV, make sure it stays unblocked.
   sigset64_t mask;
   sigemptyset64(&mask);
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index bd1744c..541ae8b 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -255,7 +255,6 @@
   /// CHECK: StaticFieldGet
   /// CHECK: InstanceFieldGet
   /// CHECK: StaticFieldSet
-  /// CHECK: InstanceFieldGet
 
   /// CHECK-START: int Main.test10(TestClass) load_store_elimination (after)
   /// CHECK: StaticFieldGet
diff --git a/test/717-integer-value-of/expected.txt b/test/717-integer-value-of/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/717-integer-value-of/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/717-integer-value-of/info.txt b/test/717-integer-value-of/info.txt
new file mode 100644
index 0000000..b65d679
--- /dev/null
+++ b/test/717-integer-value-of/info.txt
@@ -0,0 +1,2 @@
+Regression test for a JIT crash when compiling the Integer.valueOf() intrinsic
+after the IntegerCache has been tampered with through reflection.
diff --git a/test/717-integer-value-of/src/Main.java b/test/717-integer-value-of/src/Main.java
new file mode 100644
index 0000000..557b65c
--- /dev/null
+++ b/test/717-integer-value-of/src/Main.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+
+public class Main {
+    public static void main(String[] args) throws Exception {
+        if (!isDalvik) {
+            // This test is ART-specific. Just fake the expected output.
+            System.out.println("JNI_OnLoad called");
+            return;
+        }
+        System.loadLibrary(args[0]);
+        if (!hasJit()) {
+            return;
+        }
+        testValueOfArg();
+        testValueOfConst();
+    }
+
+    public static void testValueOfArg() throws Exception {
+        final VolatileFlag start_end = new VolatileFlag();
+        Thread t = new Thread() {
+            @Override
+            public void run() {
+                try {
+                    Class<?> integerCacheClass = Class.forName("java.lang.Integer$IntegerCache");
+                    Field cacheField = integerCacheClass.getDeclaredField("cache");
+                    cacheField.setAccessible(true);
+
+                    Integer[] cache = (Integer[]) cacheField.get(integerCacheClass);
+                    Integer[] alt_cache = new Integer[cache.length];
+                    System.arraycopy(cache, 0, alt_cache, 0, cache.length);
+
+                    // Let the main thread know that everything is set up.
+                    synchronized (start_end) {
+                        start_end.notify();
+                    }
+                    while (!start_end.flag) {
+                        cacheField.set(integerCacheClass, alt_cache);
+                        cacheField.set(integerCacheClass, cache);
+                    }
+                } catch (Throwable t) {
+                    throw new Error(t);
+                }
+            }
+        };
+        synchronized (start_end) {
+            t.start();
+            start_end.wait();  // Wait for the thread to start.
+        }
+        // Previously, the JIT could see the valid boot-image `cache` when requesting
+        // locations but the `alt_cache` (which is not in the boot image) when asked to
+        // emit code, leaving it with an invalid IntegerValueOfInfo.
+        ensureJitCompiled(Main.class, "getAsInteger");
+
+        start_end.flag = true;
+        t.join();
+
+        Runtime.getRuntime().gc();  // Collect the `alt_cache`.
+
+        // If `getAsInteger()` was miscompiled, it will try to retrieve an Integer reference
+        // from the collected array (low = 0, high = 0 means that this happens only for value 0),
+        // reading from a bogus location. Depending on the GC type, this bogus memory access may
+        // yield SIGSEGV or `null` or even a valid reference.
+        Integer new0 = getAsInteger(0);
+        int value = (int) new0;
+
+        if (value != 0) {
+            throw new Error("value is " + value);
+        }
+    }
+
+    public static void testValueOfConst() throws Exception {
+        Class<?> integerCacheClass = Class.forName("java.lang.Integer$IntegerCache");
+        Field cacheField = integerCacheClass.getDeclaredField("cache");
+        cacheField.setAccessible(true);
+        Field lowField = integerCacheClass.getDeclaredField("low");
+        lowField.setAccessible(true);
+
+        Integer[] cache = (Integer[]) cacheField.get(integerCacheClass);
+        int low = (int) lowField.get(integerCacheClass);
+        Integer old42 = cache[42 - low];
+        cache[42 - low] = new Integer(42);
+
+        // This used to hit
+        //     DCHECK(boxed != nullptr &&
+        //            Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+        // when compiling the intrinsic.
+        ensureJitCompiled(Main.class, "get42AsInteger");
+
+        cache[42 - low] = old42;
+        Runtime.getRuntime().gc();
+        Integer new42 = get42AsInteger();
+
+        // If the DCHECK() was removed, MterpInvokeVirtualQuick() used to crash here.
+        // (Note: Our fault handler on x86-64 then also crashed.)
+        int value = (int) new42;
+
+        if (value != (int) old42) {
+            throw new Error("value is " + value);
+        }
+    }
+
+    private static class VolatileFlag {
+        public volatile boolean flag = false;
+    }
+
+    public static Integer get42AsInteger() {
+        return Integer.valueOf(42);
+    }
+
+    public static Integer getAsInteger(int value) {
+        return Integer.valueOf(value);
+    }
+
+    private static native boolean hasJit();
+    private static native void ensureJitCompiled(Class<?> itf, String method_name);
+
+    private static final boolean isDalvik = System.getProperty("java.vm.name").equals("Dalvik");
+}
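To make the race in `testValueOfArg()` concrete: the compiled `Integer.valueOf()` intrinsic is conceptually a range check plus a load from a cache array captured at compile time, so if the compiler reads the `IntegerCache` state once when computing locations and again when emitting code, it can bake in a reference to the transient `alt_cache`. The sketch below uses invented names throughout; it shows the shape of the problem, not ART's actual `IntegerValueOfInfo` or generated code:

```cpp
#include <cstdint>

struct BoxedInt { int32_t value; };

// Hypothetical snapshot of IntegerCache state taken during compilation.
struct ValueOfSnapshot {
  int32_t low;       // lowest cached value
  int32_t length;    // number of cached boxes
  BoxedInt** cache;  // must be the boot-image array to outlive compilation
};

// Slow path for out-of-range values (leaks; illustration only).
static BoxedInt* AllocateBoxedInt(int32_t v) { return new BoxedInt{v}; }

// Shape of the compiled valueOf(v): if `info` was captured while `alt_cache`
// was installed, `cache` points at an array the GC is free to reclaim, so
// this load dangles, which is exactly what the test provokes for value 0.
static BoxedInt* CompiledValueOf(int32_t v, const ValueOfSnapshot& info) {
  if (v >= info.low && v < info.low + info.length) {
    return info.cache[v - info.low];
  }
  return AllocateBoxedInt(v);
}
```

`getAsInteger(0)` in the test exercises exactly this in-range path after `alt_cache` has been collected.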
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index b0a400a..995701d 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -79,7 +79,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -99,7 +99,7 @@
 ---------
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -120,13 +120,13 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
 ---------
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -153,7 +153,7 @@
 ###########################
 From top
 ---------
- printOrWait (IILart/ControlData;)V 45 54
+ printOrWait (IILart/ControlData;)V 43 54
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -188,7 +188,7 @@
  foo (IIILart/ControlData;)I 0 21
  run ()V 4 61
 ---------
- printOrWait (IILart/ControlData;)V 45 54
+ printOrWait (IILart/ControlData;)V 43 54
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -274,7 +274,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -284,7 +284,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -294,7 +294,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -304,7 +304,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -314,7 +314,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -324,7 +324,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -334,7 +334,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -344,7 +344,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -354,7 +354,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -364,7 +364,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -399,7 +399,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -422,7 +422,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -445,7 +445,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -468,7 +468,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -491,7 +491,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -514,7 +514,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -537,7 +537,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -560,7 +560,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -583,7 +583,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -606,7 +606,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -683,7 +683,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -693,7 +693,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -703,7 +703,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -713,7 +713,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -723,7 +723,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -740,7 +740,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -763,7 +763,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -786,7 +786,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -809,7 +809,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -832,7 +832,7 @@
  wait (JI)V -1 -2
  wait (J)V 1 442
  wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
  baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -870,7 +870,7 @@
 [public final native void java.lang.Object.wait(long,int) throws java.lang.InterruptedException, ffffffff]
 [public final void java.lang.Object.wait(long) throws java.lang.InterruptedException, 1]
 [public final void java.lang.Object.wait() throws java.lang.InterruptedException, 2]
-[private static void art.Recurse.printOrWait(int,int,art.ControlData), 18]
+[private static void art.Recurse.printOrWait(int,int,art.ControlData), 16]
 [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 2]
 [private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
 [public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
@@ -894,7 +894,7 @@
 ###########################
 17
 JVMTI_ERROR_ILLEGAL_ARGUMENT
-[private static void art.Recurse.printOrWait(int,int,art.ControlData), 2d]
+[private static void art.Recurse.printOrWait(int,int,art.ControlData), 2b]
 [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 2]
 [private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
 [public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
diff --git a/test/knownfailures.json b/test/knownfailures.json
index c680f53..f6ae0be 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -532,13 +532,6 @@
         "bug": "b/33650497"
     },
     {
-        "tests": "640-checker-integer-valueof",
-        "description": [
-            "The java.lang.Integer.valueOf intrinsic is not supported in PIC mode."
-        ],
-        "variant": "optimizing & pictest | speed-profile & pictest"
-    },
-    {
         "tests": "202-thread-oome",
         "description": "ASAN aborts when large thread stacks are requested.",
         "variant": "host",
diff --git a/tools/ahat/etc/ahat_api.txt b/tools/ahat/etc/ahat_api.txt
index 6fc62e7..84266d9 100644
--- a/tools/ahat/etc/ahat_api.txt
+++ b/tools/ahat/etc/ahat_api.txt
@@ -58,6 +58,7 @@
     method public java.lang.String asString(int);
     method public java.lang.String asString();
     method public com.android.ahat.heapdump.AhatInstance getAssociatedBitmapInstance();
+    method public com.android.ahat.heapdump.AhatClassObj getAssociatedClassForOverhead();
     method public com.android.ahat.heapdump.AhatInstance getBaseline();
     method public java.lang.String getClassName();
     method public com.android.ahat.heapdump.AhatClassObj getClassObj();
diff --git a/tools/ahat/src/main/com/android/ahat/Summarizer.java b/tools/ahat/src/main/com/android/ahat/Summarizer.java
index ae0776a..127ff37 100644
--- a/tools/ahat/src/main/com/android/ahat/Summarizer.java
+++ b/tools/ahat/src/main/com/android/ahat/Summarizer.java
@@ -16,6 +16,7 @@
 
 package com.android.ahat;
 
+import com.android.ahat.heapdump.AhatClassObj;
 import com.android.ahat.heapdump.AhatInstance;
 import com.android.ahat.heapdump.Site;
 import com.android.ahat.heapdump.Value;
@@ -100,11 +101,17 @@
 
     // Annotate bitmaps with a thumbnail.
     AhatInstance bitmap = inst.getAssociatedBitmapInstance();
-    String thumbnail = "";
     if (bitmap != null) {
       URI uri = DocString.formattedUri("bitmap?id=0x%x", bitmap.getId());
       formatted.appendThumbnail(uri, "bitmap image");
     }
+
+    // Annotate $classOverhead arrays with the class they account for.
+    AhatClassObj cls = inst.getAssociatedClassForOverhead();
+    if (cls != null) {
+      formatted.append(" overhead for ");
+      formatted.append(summarize(cls));
+    }
     return formatted;
   }
 
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java
index 9c80802..c574e98 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java
@@ -333,6 +333,28 @@
     return null;
   }
 
+  @Override public AhatClassObj getAssociatedClassForOverhead() {
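+    // A $classOverhead array is a byte[] whose only hard reverse reference is
+    // a class object whose "$classOverhead" static field points back at it.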
+    if (mByteArray != null) {
+      List<AhatInstance> refs = getHardReverseReferences();
+      if (refs.size() == 1) {
+        AhatClassObj ref = refs.get(0).asClassObj();
+        if (ref != null) {
+          for (FieldValue field : ref.getStaticFieldValues()) {
+            if (field.name.equals("$classOverhead")) {
+              if (field.value.asAhatInstance() == this) {
+                return ref;
+              }
+              return null;
+            }
+          }
+        }
+      }
+    }
+    return null;
+  }
+
   @Override public String toString() {
     String className = getClassName();
     if (className.endsWith("[]")) {
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
index 95553a2..a91da82 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
@@ -453,6 +453,18 @@
   }
 
   /**
+   * Returns the class object that this instance represents the overhead for.
+   * ART adds a fake byte[] $classOverhead static field to classes to show
+   * the overhead associated with the class. If this instance is one such
+   * byte[], returns the class it is associated with; otherwise returns null.
+   *
+   * @return the class this instance is the overhead for, or null
+   */
+  public AhatClassObj getAssociatedClassForOverhead() {
+    return null;
+  }
+
+  /**
    * Returns the (bounded-length) string associated with this instance.
    * Applies to instances of java.lang.String, char[], and in some cases
    * byte[]. Returns null if this object cannot be interpreted as a string.
diff --git a/tools/ahat/src/test/com/android/ahat/InstanceTest.java b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
index 8fbb884..65a3fb8 100644
--- a/tools/ahat/src/test/com/android/ahat/InstanceTest.java
+++ b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
@@ -481,4 +481,19 @@
     assertEquals("java.lang.String", str.getClassName());
     assertNull(str.asString());
   }
+
+  @Test
+  public void classOverhead() throws IOException {
+    TestDump dump = TestDump.getTestDump("O.hprof", null, null);
+    AhatSnapshot snapshot = dump.getAhatSnapshot();
+
+    // class libcore.io.IoTracker has byte[124]@12c028d1 as its class overhead.
+    AhatInstance overhead = snapshot.findInstance(0x12c028d1);
+    AhatClassObj cls = overhead.getAssociatedClassForOverhead();
+    assertEquals(0x12c028d0, cls.getId());
+    assertEquals("libcore.io.IoTracker", cls.getName());
+
+    // Other kinds of objects should not have associated classes for overhead.
+    assertNull(cls.getAssociatedClassForOverhead());
+  }
 }
diff --git a/tools/wrapagentproperties/wrapagentproperties.cc b/tools/wrapagentproperties/wrapagentproperties.cc
index 8b4b062..77e19e6 100644
--- a/tools/wrapagentproperties/wrapagentproperties.cc
+++ b/tools/wrapagentproperties/wrapagentproperties.cc
@@ -245,7 +245,7 @@
 
 static jint CallNextAgent(StartType start,
                           ProxyJavaVM* vm,
-                          std::string options,
+                          const std::string& options,
                           void* reserved) {
   // TODO: It might be good to set it up so that the library is unloaded even if no jvmtiEnvs
   // are created, but this isn't expected to be common, so we will just not bother.