Merge "Move image classes options from art to AndroidRuntime [art]"
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 59536e2..7e38157 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -81,6 +81,9 @@
 # Do you want run-tests with prebuild enabled?
 ART_TEST_RUN_TEST_PREBUILD ?= true
 
+# Do you want failed tests to have their artifacts cleaned up?
+ART_TEST_RUN_TEST_ALWAYS_CLEAN ?= true
+
 # Define the command run on test failure. $(1) is the name of the test. Executed by the shell.
 define ART_TEST_FAILED
   ( [ -f $(ART_HOST_TEST_DIR)/skipped/$(1) ] || \
diff --git a/compiler/compilers.cc b/compiler/compilers.cc
index bac1f12..250924a 100644
--- a/compiler/compilers.cc
+++ b/compiler/compilers.cc
@@ -39,7 +39,7 @@
                                                          const art::DexFile& dex_file);
 
 // Hack for CFI CIE initialization
-extern std::vector<uint8_t>* X86CFIInitialization();
+extern std::vector<uint8_t>* X86CFIInitialization(bool is_x86_64);
 
 void QuickCompiler::Init() const {
   ArtInitQuickCompilerContext(GetCompilerDriver());
@@ -129,10 +129,10 @@
 std::vector<uint8_t>* QuickCompiler::GetCallFrameInformationInitialization(
     const CompilerDriver& driver) const {
   if (driver.GetInstructionSet() == kX86) {
-    return X86CFIInitialization();
+    return X86CFIInitialization(false);
   }
   if (driver.GetInstructionSet() == kX86_64) {
-    return X86CFIInitialization();
+    return X86CFIInitialization(true);
   }
   return nullptr;
 }
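
The change above replaces two identical `X86CFIInitialization()` calls with a bitness parameter, so x86 and x86-64 share one entry point while emitting different CIEs. A minimal standalone sketch of that dispatch pattern follows; names and byte values are illustrative stand-ins, not ART's real DWARF encoding.

```cpp
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the shared initializer: one entry point, with the
// target bitness selected by a flag instead of two duplicated functions.
std::vector<uint8_t>* X86CFIInitializationSketch(bool is_x86_64) {
  auto* cie = new std::vector<uint8_t>();
  // Placeholder payload: the real code emits DWARF CIE bytes that differ
  // between x86 and x86-64 (e.g. return-address register, data alignment).
  cie->push_back(is_x86_64 ? 16 : 8);
  return cie;
}

enum SketchInstructionSet { kSketchX86, kSketchX86_64, kSketchOther };

// Caller-side dispatch mirroring GetCallFrameInformationInitialization().
std::vector<uint8_t>* GetCfiInit(SketchInstructionSet isa) {
  if (isa == kSketchX86)    return X86CFIInitializationSketch(false);
  if (isa == kSketchX86_64) return X86CFIInitializationSketch(true);
  return nullptr;
}
```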
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 346fbb8..dcc67c3 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -467,8 +467,13 @@
   kIsQuinOp,
   kIsSextupleOp,
   kIsIT,
+  kIsMoveOp,
   kMemLoad,
   kMemStore,
+  kMemVolatile,
+  kMemScaledx0,
+  kMemScaledx2,
+  kMemScaledx4,
   kPCRelFixup,  // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
   kRegDef0,
   kRegDef1,
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 51446f6..4f8c1d4 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -42,7 +42,7 @@
 
 /* Default optimizer/debug setting for the compiler. */
 static uint32_t kCompilerOptimizerDisableFlags = 0 |  // Disable specific optimizations
-  (1 << kLoadStoreElimination) |  // TODO: this pass has been broken for awhile - fix or delete.
+  // (1 << kLoadStoreElimination) |
   // (1 << kLoadHoisting) |
   // (1 << kSuppressLoads) |
   // (1 << kNullCheckElimination) |
@@ -96,12 +96,12 @@
     ~0U,
     // 1 = kArm, unused (will use kThumb2).
     ~0U,
-    // 2 = kArm64.     TODO(Arm64): enable optimizations once backend is mature enough.
-    (1 << kLoadStoreElimination) |
+    // 2 = kArm64.
     0,
     // 3 = kThumb2.
     0,
     // 4 = kX86.
+    (1 << kLoadStoreElimination) |
     0,
     // 5 = kX86_64.
     (1 << kLoadStoreElimination) |
@@ -747,7 +747,7 @@
 
   /* Free Arenas from the cu.arena_stack for reuse by the cu.arena in the codegen. */
   if (cu.enable_debug & (1 << kDebugShowMemoryUsage)) {
-    if (cu.arena_stack.PeakBytesAllocated() > 256 * 1024) {
+    if (cu.arena_stack.PeakBytesAllocated() > 1 * 1024 * 1024) {
       MemStats stack_stats(cu.arena_stack.GetPeakStats());
       LOG(INFO) << method_name << " " << Dumpable<MemStats>(stack_stats);
     }
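
Note how the per-ISA entries above are bitwise OR chains terminated by a bare `0`, which is why the x86 entry now spans two lines. A small self-contained sketch of the idiom; enum values and table contents are illustrative:

```cpp
#include <cstdint>

enum SketchOptPass { kSketchLoadStoreElimination = 0, kSketchLoadHoisting = 1 };

// Each entry is an OR chain ending in 0, so individual passes can be toggled
// per instruction set by commenting a single "(1 << pass) |" line in or out.
static const uint32_t kSketchDisabledOpts[] = {
  // ISA 0: everything disabled.
  ~0U,
  // ISA 1: load/store elimination disabled; the trailing 0 ends the OR chain.
  (1u << kSketchLoadStoreElimination) |
  0u,
  // ISA 2: everything enabled.
  0u,
};

inline bool SketchPassEnabled(size_t isa, SketchOptPass pass) {
  return (kSketchDisabledOpts[isa] & (1u << pass)) == 0u;
}
```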
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index d86be4e..d7ef6f0 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -43,7 +43,8 @@
   STLDeleteElements(&lvns_);
 }
 
-LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb) {
+LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
+                                                             ScopedArenaAllocator* allocator) {
   if (UNLIKELY(!Good())) {
     return nullptr;
   }
@@ -58,13 +59,17 @@
     last_value_ = kNoValue;  // Make bad.
     return nullptr;
   }
+  if (allocator == nullptr) {
+    allocator = allocator_;
+  }
   DCHECK(work_lvn_.get() == nullptr);
-  work_lvn_.reset(new (allocator_) LocalValueNumbering(this, bb->id));
+  work_lvn_.reset(new (allocator) LocalValueNumbering(this, bb->id, allocator));
   if (bb->block_type == kEntryBlock) {
     if ((cu_->access_flags & kAccStatic) == 0) {
       // If non-static method, mark "this" as non-null
       int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
-      work_lvn_->SetSRegNullChecked(this_reg);
+      uint16_t value_name = work_lvn_->GetSRegValueName(this_reg);
+      work_lvn_->SetValueNameNullChecked(value_name);
     }
   } else {
     // To avoid repeated allocation on the ArenaStack, reuse a single vector kept as a member.
@@ -120,7 +125,9 @@
       work_lvn_->MergeOne(*merge_lvns_[0], merge_type);
       BasicBlock* pred_bb = mir_graph_->GetBasicBlock(merge_lvns_[0]->Id());
       if (HasNullCheckLastInsn(pred_bb, bb->id)) {
-        work_lvn_->SetSRegNullChecked(pred_bb->last_mir_insn->ssa_rep->uses[0]);
+        int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
+        uint16_t value_name = merge_lvns_[0]->GetSRegValueName(s_reg);
+        work_lvn_->SetValueNameNullChecked(value_name);
       }
     } else {
       work_lvn_->Merge(merge_type);
@@ -135,9 +142,14 @@
   ++bbs_processed_;
   merge_lvns_.clear();
 
-  std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
-  lvns_[bb->id] = work_lvn_.release();
-  return (old_lvn == nullptr) || !old_lvn->Equals(*lvns_[bb->id]);
+  bool change = (lvns_[bb->id] == nullptr) || !lvns_[bb->id]->Equals(*work_lvn_);
+  if (change) {
+    std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
+    lvns_[bb->id] = work_lvn_.release();
+  } else {
+    work_lvn_.reset();
+  }
+  return change;
 }
 
 uint16_t GlobalValueNumbering::GetFieldId(const MirFieldInfo& field_info, uint16_t type) {
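
The rewritten tail of `FinishBasicBlock` above only publishes the work LVN when it actually differs from the stored one; otherwise the work copy is discarded and the old snapshot stays valid. A minimal sketch of that change-detection pattern, with a hypothetical `State` type standing in for the LVN:

```cpp
#include <memory>
#include <utility>

// Hypothetical stand-in for a per-block LVN snapshot.
struct State {
  int value = 0;
  bool Equals(const State& other) const { return value == other.value; }
};

// Compare before replacing: unchanged blocks keep their old snapshot, and the
// returned flag drives the fixed-point loop (iterate while any block changed).
bool FinishBlock(std::unique_ptr<State>& stored, std::unique_ptr<State>& work) {
  bool change = (stored == nullptr) || !stored->Equals(*work);
  if (change) {
    stored = std::move(work);  // Publish the new snapshot.
  } else {
    work.reset();              // Discard; the stored snapshot is still valid.
  }
  return change;
}
```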
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index a12a779..c06ff6f 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -32,7 +32,8 @@
   ~GlobalValueNumbering();
 
   // Prepare LVN for the basic block.
-  LocalValueNumbering* PrepareBasicBlock(BasicBlock* bb);
+  LocalValueNumbering* PrepareBasicBlock(BasicBlock* bb,
+                                         ScopedArenaAllocator* allocator = nullptr);
 
   // Finish processing the basic block.
   bool FinishBasicBlock(BasicBlock* bb);
@@ -55,7 +56,7 @@
 
   // GlobalValueNumbering should be allocated on the ArenaStack (or the native stack).
   static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
-    return allocator->Alloc(sizeof(GlobalValueNumbering), kArenaAllocMIR);
+    return allocator->Alloc(sizeof(GlobalValueNumbering), kArenaAllocMisc);
   }
 
   // Allow delete-expression to destroy a GlobalValueNumbering object without deallocation.
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index c82d231..e8501cd 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -212,6 +212,7 @@
       if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
         bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
             cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+        bb->data_flow_info->live_in_v = live_in_v_;
       }
     }
     cu_.mir_graph->num_blocks_ = count;
@@ -333,12 +334,23 @@
         ssa_reps_(),
         allocator_(),
         gvn_(),
-        value_names_() {
+        value_names_(),
+        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)) {
     cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
     cu_.access_flags = kAccStatic;  // Don't let "this" interfere with this test.
     allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+    // Bind all possible sregs to live vregs for test purposes.
+    live_in_v_->SetInitialBits(kMaxSsaRegs);
+    cu_.mir_graph->ssa_base_vregs_ = new (&cu_.arena) GrowableArray<int>(&cu_.arena, kMaxSsaRegs);
+    cu_.mir_graph->ssa_subscripts_ = new (&cu_.arena) GrowableArray<int>(&cu_.arena, kMaxSsaRegs);
+    for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
+      cu_.mir_graph->ssa_base_vregs_->Insert(i);
+      cu_.mir_graph->ssa_subscripts_->Insert(0);
+    }
   }
 
+  static constexpr size_t kMaxSsaRegs = 16384u;
+
   ArenaPool pool_;
   CompilationUnit cu_;
   size_t mir_count_;
@@ -347,6 +359,7 @@
   std::unique_ptr<ScopedArenaAllocator> allocator_;
   std::unique_ptr<GlobalValueNumbering> gvn_;
   std::vector<uint16_t> value_names_;
+  ArenaBitVector* live_in_v_;
 };
 
 class GlobalValueNumberingTestDiamond : public GlobalValueNumberingTest {
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 0e072ec..5997568 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -197,11 +197,7 @@
     Map* map, const typename Map::key_type& key) {
   auto lb = map->lower_bound(key);
   if (lb == map->end() || map->key_comp()(key, lb->first)) {
-    map->PutBefore(lb, key, AliasingValues(gvn_->allocator_));
-    // The new entry was inserted before lb.
-    DCHECK(lb != map->begin());
-    --lb;
-    DCHECK(!map->key_comp()(lb->first, key) && !map->key_comp()(key, lb->first));
+    lb = map->PutBefore(lb, key, AliasingValues(this));
   }
   return &lb->second;
 }
@@ -308,25 +304,37 @@
   return true;
 }
 
-LocalValueNumbering::LocalValueNumbering(GlobalValueNumbering* gvn, uint16_t id)
+template <typename K>
+void LocalValueNumbering::CopyAliasingValuesMap(ScopedArenaSafeMap<K, AliasingValues>* dest,
+                                                const ScopedArenaSafeMap<K, AliasingValues>& src) {
+  // We need each new AliasingValues (or rather its map members) to be constructed
+  // with our allocator, rather than the allocator of the source.
+  for (const auto& entry : src) {
+    auto it = dest->PutBefore(dest->end(), entry.first, AliasingValues(this));
+    it->second = entry.second;  // Map assignments preserve current allocator.
+  }
+}
+
+LocalValueNumbering::LocalValueNumbering(GlobalValueNumbering* gvn, uint16_t id,
+                                         ScopedArenaAllocator* allocator)
     : gvn_(gvn),
       id_(id),
-      sreg_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
-      sreg_wide_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
-      sfield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
-      non_aliasing_ifield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
-      aliasing_ifield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
-      non_aliasing_array_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
-      aliasing_array_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+      sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+      sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+      sfield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+      non_aliasing_ifield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+      aliasing_ifield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+      non_aliasing_array_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+      aliasing_array_value_map_(std::less<uint16_t>(), allocator->Adapter()),
       global_memory_version_(0u),
-      non_aliasing_refs_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
-      escaped_refs_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
-      escaped_ifield_clobber_set_(EscapedIFieldClobberKeyComparator(), gvn->Allocator()->Adapter()),
-      escaped_array_clobber_set_(EscapedArrayClobberKeyComparator(), gvn->Allocator()->Adapter()),
-      range_checked_(RangeCheckKeyComparator() , gvn->Allocator()->Adapter()),
-      null_checked_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
-      merge_names_(gvn->Allocator()->Adapter()),
-      merge_map_(std::less<ScopedArenaVector<BasicBlockId>>(), gvn->Allocator()->Adapter()),
+      non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
+      escaped_refs_(std::less<uint16_t>(), allocator->Adapter()),
+      escaped_ifield_clobber_set_(EscapedIFieldClobberKeyComparator(), allocator->Adapter()),
+      escaped_array_clobber_set_(EscapedArrayClobberKeyComparator(), allocator->Adapter()),
+      range_checked_(RangeCheckKeyComparator(), allocator->Adapter()),
+      null_checked_(std::less<uint16_t>(), allocator->Adapter()),
+      merge_names_(allocator->Adapter()),
+      merge_map_(std::less<ScopedArenaVector<BasicBlockId>>(), allocator->Adapter()),
       merge_new_memory_version_(kNoValue) {
   std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
   std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
@@ -352,8 +360,8 @@
 }
 
 void LocalValueNumbering::MergeOne(const LocalValueNumbering& other, MergeType merge_type) {
-  sreg_value_map_ = other.sreg_value_map_;
-  sreg_wide_value_map_ = other.sreg_wide_value_map_;
+  CopyLiveSregValues(&sreg_value_map_, other.sreg_value_map_);
+  CopyLiveSregValues(&sreg_wide_value_map_, other.sreg_wide_value_map_);
 
   if (merge_type == kReturnMerge) {
     // RETURN or PHI+RETURN. We need only sreg value maps.
@@ -361,7 +369,7 @@
   }
 
   non_aliasing_ifield_value_map_ = other.non_aliasing_ifield_value_map_;
-  non_aliasing_array_value_map_ = other.non_aliasing_array_value_map_;
+  CopyAliasingValuesMap(&non_aliasing_array_value_map_, other.non_aliasing_array_value_map_);
   non_aliasing_refs_ = other.non_aliasing_refs_;
   range_checked_ = other.range_checked_;
   null_checked_ = other.null_checked_;
@@ -380,8 +388,8 @@
   std::copy_n(other.unresolved_ifield_version_, kFieldTypeCount, unresolved_ifield_version_);
   std::copy_n(other.unresolved_sfield_version_, kFieldTypeCount, unresolved_sfield_version_);
   sfield_value_map_ = other.sfield_value_map_;
-  aliasing_ifield_value_map_ = other.aliasing_ifield_value_map_;
-  aliasing_array_value_map_ = other.aliasing_array_value_map_;
+  CopyAliasingValuesMap(&aliasing_ifield_value_map_, other.aliasing_ifield_value_map_);
+  CopyAliasingValuesMap(&aliasing_array_value_map_, other.aliasing_array_value_map_);
   escaped_refs_ = other.escaped_refs_;
   escaped_ifield_clobber_set_ = other.escaped_ifield_clobber_set_;
   escaped_array_clobber_set_ = other.escaped_array_clobber_set_;
@@ -493,8 +501,20 @@
   }
 }
 
-template <typename Map, Map LocalValueNumbering::* map_ptr>
-void LocalValueNumbering::IntersectMaps() {
+void LocalValueNumbering::CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src) {
+  auto dest_end = dest->end();
+  ArenaBitVector* live_in_v = gvn_->GetMirGraph()->GetBasicBlock(id_)->data_flow_info->live_in_v;
+  DCHECK(live_in_v != nullptr);
+  for (const auto& entry : src) {
+    bool live = live_in_v->IsBitSet(gvn_->GetMirGraph()->SRegToVReg(entry.first));
+    if (live) {
+      dest->PutBefore(dest_end, entry.first, entry.second);
+    }
+  }
+}
+
+template <LocalValueNumbering::SregValueMap LocalValueNumbering::* map_ptr>
+void LocalValueNumbering::IntersectSregValueMaps() {
   DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
 
   // Find the LVN with the least entries in the set.
@@ -506,18 +526,22 @@
   }
 
   // For each key check if it's in all the LVNs.
+  ArenaBitVector* live_in_v = gvn_->GetMirGraph()->GetBasicBlock(id_)->data_flow_info->live_in_v;
+  DCHECK(live_in_v != nullptr);
   for (const auto& entry : least_entries_lvn->*map_ptr) {
-    bool checked = true;
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      if (lvn != least_entries_lvn) {
-        auto it = (lvn->*map_ptr).find(entry.first);
-        if (it == (lvn->*map_ptr).end() || !(it->second == entry.second)) {
-          checked = false;
-          break;
+    bool live_and_same = live_in_v->IsBitSet(gvn_->GetMirGraph()->SRegToVReg(entry.first));
+    if (live_and_same) {
+      for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+        if (lvn != least_entries_lvn) {
+          auto it = (lvn->*map_ptr).find(entry.first);
+          if (it == (lvn->*map_ptr).end() || !(it->second == entry.second)) {
+            live_and_same = false;
+            break;
+          }
         }
       }
     }
-    if (checked) {
+    if (live_and_same) {
       (this->*map_ptr).PutBefore((this->*map_ptr).end(), entry.first, entry.second);
     }
   }
@@ -721,11 +745,7 @@
                                               typename Map::iterator hint) {
   const typename Map::key_type& key = entry.first;
 
-  (this->*map_ptr).PutBefore(hint, key, AliasingValues(gvn_->allocator_));
-  DCHECK(hint != (this->*map_ptr).begin());
-  AliasingIFieldValuesMap::iterator it = hint;
-  --it;
-  DCHECK_EQ(it->first, key);
+  auto it = (this->*map_ptr).PutBefore(hint, key, AliasingValues(this));
   AliasingValues* my_values = &it->second;
 
   const AliasingValues* cmp_values = nullptr;
@@ -849,8 +869,8 @@
 void LocalValueNumbering::Merge(MergeType merge_type) {
   DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
 
-  IntersectMaps<SregValueMap, &LocalValueNumbering::sreg_value_map_>();
-  IntersectMaps<SregValueMap, &LocalValueNumbering::sreg_wide_value_map_>();
+  IntersectSregValueMaps<&LocalValueNumbering::sreg_value_map_>();
+  IntersectSregValueMaps<&LocalValueNumbering::sreg_wide_value_map_>();
   if (merge_type == kReturnMerge) {
     // RETURN or PHI+RETURN. We need only sreg value maps.
     return;
@@ -1385,7 +1405,7 @@
         if (kLocalValueNumberingEnableFilledNewArrayTracking && mir->ssa_rep->num_uses != 0u) {
           AliasingValues* values = GetAliasingValues(&non_aliasing_array_value_map_, array);
           // Clear the value if we got a merged version in a loop.
-          *values = AliasingValues(gvn_->allocator_);
+          *values = AliasingValues(this);
           for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
             DCHECK_EQ(High16Bits(i), 0u);
             uint16_t index = gvn_->LookupValue(Instruction::CONST, i, 0u, 0);
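
`CopyLiveSregValues` and the reworked `IntersectSregValueMaps` above both filter s_reg entries through the block's live-in bit vector, so values of dead registers never survive a merge. A standalone sketch of that filter, with `std::map` standing in for the arena-backed `SregValueMap` and an identity `SRegToVReg` assumed:

```cpp
#include <cstdint>
#include <map>
#include <vector>

using SketchSregValueMap = std::map<uint16_t, uint16_t>;

// Copy s_reg -> value-name entries, dropping any whose Dalvik vreg is not in
// the successor block's live-in set.
void CopyLiveSregValuesSketch(SketchSregValueMap* dest,
                              const SketchSregValueMap& src,
                              const std::vector<bool>& live_in_v) {
  for (const auto& entry : src) {
    uint16_t vreg = entry.first;  // Stand-in for SRegToVReg(entry.first).
    if (vreg < live_in_v.size() && live_in_v[vreg]) {
      // Source iteration is ordered, so appending at the end keeps the map
      // sorted cheaply, mirroring PutBefore(dest->end(), ...) in the real code.
      dest->emplace_hint(dest->end(), entry.first, entry.second);
    }
  }
}
```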
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 190eab4..855d66d 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -36,7 +36,7 @@
   static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
 
  public:
-  LocalValueNumbering(GlobalValueNumbering* gvn, BasicBlockId id);
+  LocalValueNumbering(GlobalValueNumbering* gvn, BasicBlockId id, ScopedArenaAllocator* allocator);
 
   BasicBlockId Id() const {
     return id_;
@@ -44,9 +44,11 @@
 
   bool Equals(const LocalValueNumbering& other) const;
 
-  // Set non-static method's "this".
-  void SetSRegNullChecked(uint16_t s_reg) {
-    uint16_t value_name = GetOperandValue(s_reg);
+  uint16_t GetSRegValueName(uint16_t s_reg) const {
+    return GetOperandValue(s_reg);
+  }
+
+  void SetValueNameNullChecked(uint16_t value_name) {
     null_checked_.insert(value_name);
   }
 
@@ -76,7 +78,7 @@
 
   // LocalValueNumbering should be allocated on the ArenaStack (or the native stack).
   static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
-    return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMIR);
+    return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
   }
 
   // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
@@ -225,12 +227,12 @@
   // store or because they contained the last_stored_value before the store and thus could not
   // have changed as a result.
   struct AliasingValues {
-    explicit AliasingValues(ScopedArenaAllocator* allocator)
+    explicit AliasingValues(LocalValueNumbering* lvn)
         : memory_version_before_stores(kNoValue),
           last_stored_value(kNoValue),
-          store_loc_set(std::less<uint16_t>(), allocator->Adapter()),
+          store_loc_set(std::less<uint16_t>(), lvn->null_checked_.get_allocator()),
           last_load_memory_version(kNoValue),
-          load_value_map(std::less<uint16_t>(), allocator->Adapter()) {
+          load_value_map(std::less<uint16_t>(), lvn->null_checked_.get_allocator()) {
     }
 
     uint16_t memory_version_before_stores;  // kNoValue if start version for the field.
@@ -286,6 +288,10 @@
   bool HandleAliasingValuesPut(Map* map, const typename Map::key_type& key,
                                uint16_t location, uint16_t value);
 
+  template <typename K>
+  void CopyAliasingValuesMap(ScopedArenaSafeMap<K, AliasingValues>* dest,
+                             const ScopedArenaSafeMap<K, AliasingValues>& src);
+
   uint16_t MarkNonAliasingNonNull(MIR* mir);
   bool IsNonAliasing(uint16_t reg) const;
   bool IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type) const;
@@ -314,9 +320,11 @@
   template <typename Set, Set LocalValueNumbering::* set_ptr>
   void IntersectSets();
 
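+  // Copy SSA reg values from src, skipping s_regs whose vreg is not live-in.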
+  void CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src);
+
-  // Intersect maps as sets. The value type must be equality-comparable.
+  // Intersect SSA reg value maps as sets, dropping entries for s_regs that are not live-in.
-  template <typename Map, Map LocalValueNumbering::* map_ptr>
-  void IntersectMaps();
+  template <SregValueMap LocalValueNumbering::* map_ptr>
+  void IntersectSregValueMaps();
 
   // Intersect maps as sets. The value type must be equality-comparable.
   template <typename Map>
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index b3eae42..e4e944e 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -196,7 +196,7 @@
     cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
     allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
     gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get()));
-    lvn_.reset(new (allocator_.get()) LocalValueNumbering(gvn_.get(), 0u));
+    lvn_.reset(new (allocator_.get()) LocalValueNumbering(gvn_.get(), 0u, allocator_.get()));
     gvn_->AllowModifications();
   }
 
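The `new (allocator_.get()) LocalValueNumbering(...)` spelling used in these tests relies on the class-scope placement operator new shown earlier, which routes the allocation into an arena and makes delete-expressions no-ops. A sketch of the idiom with a hypothetical bump-pointer `Arena`:

```cpp
#include <cstddef>

// Hypothetical arena: memory is reclaimed wholesale, never freed per object.
struct Arena {
  char buffer[1 << 16];
  size_t used = 0;
  void* Alloc(size_t n) { void* p = buffer + used; used += n; return p; }
};

class ArenaObject {
 public:
  // Route allocation to the arena: "new (&arena) ArenaObject()".
  static void* operator new(size_t size, Arena* arena) {
    return arena->Alloc(size);
  }
  // Matching placement delete, called only if the constructor throws.
  static void operator delete(void* ptr, Arena* arena) {}
  // Allow delete-expressions (e.g. from unique_ptr) without deallocating;
  // the arena owns the memory.
  static void operator delete(void* ptr) {}
};
```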
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index d355ddc..23ceb56 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -329,7 +329,8 @@
   if (use_lvn) {
     allocator.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
     global_valnum.reset(new (allocator.get()) GlobalValueNumbering(cu_, allocator.get()));
-    local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id));
+    local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
+                                                                 allocator.get()));
   }
   while (bb != NULL) {
     for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
@@ -1170,7 +1171,8 @@
     temp_gvn_->AllowModifications();
     PreOrderDfsIterator iter(this);
     for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-      LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb);
+      ScopedArenaAllocator allocator(&cu_->arena_stack);  // Reclaim memory after each LVN.
+      LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb, &allocator);
       if (lvn != nullptr) {
         for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
           lvn->GetValueNumber(mir);
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 5083bbc..35c3597 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -217,7 +217,7 @@
                  "ldmia", "!0C!!, <!1R>", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrRRI5,      0x6800,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4,
                  "ldr", "!0C, [!1C, #!2E]", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrRRR,       0x5800,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -226,14 +226,14 @@
     ENCODING_MAP(kThumbLdrPcRel,    0x4800,
                  kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC
-                 | IS_LOAD | NEEDS_FIXUP, "ldr", "!0C, [pc, #!1E]", 2, kFixupLoad),
+                 | IS_LOAD_OFF4 | NEEDS_FIXUP, "ldr", "!0C, [pc, #!1E]", 2, kFixupLoad),
     ENCODING_MAP(kThumbLdrSpRel,    0x9800,
                  kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP
-                 | IS_LOAD, "ldr", "!0C, [sp, #!2E]", 2, kFixupNone),
+                 | IS_LOAD_OFF4, "ldr", "!0C, [sp, #!2E]", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrbRRI5,     0x7800,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldrb", "!0C, [!1C, #2d]", 2, kFixupNone),
+                 "ldrb", "!0C, [!1C, #!2d]", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrbRRR,      0x5c00,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -241,7 +241,7 @@
                  "ldrb", "!0C, [!1C, !2C]", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrhRRI5,     0x8800,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF2,
                  "ldrh", "!0C, [!1C, #!2F]", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrhRRR,      0x5a00,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -283,19 +283,19 @@
     ENCODING_MAP(kThumbMovRR,        0x1c00,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES | IS_MOVE,
                  "movs", "!0C, !1C", 2, kFixupNone),
     ENCODING_MAP(kThumbMovRR_H2H,    0x46c0,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "mov", "!0C, !1C", 2, kFixupNone),
     ENCODING_MAP(kThumbMovRR_H2L,    0x4640,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "mov", "!0C, !1C", 2, kFixupNone),
     ENCODING_MAP(kThumbMovRR_L2H,    0x4680,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "mov", "!0C, !1C", 2, kFixupNone),
     ENCODING_MAP(kThumbMul,           0x4340,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
@@ -354,7 +354,7 @@
                  "stmia", "!0C!!, <!1R>", 2, kFixupNone),
     ENCODING_MAP(kThumbStrRRI5,      0x6000,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
                  "str", "!0C, [!1C, #!2E]", 2, kFixupNone),
     ENCODING_MAP(kThumbStrRRR,       0x5000,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -363,10 +363,10 @@
     ENCODING_MAP(kThumbStrSpRel,    0x9000,
                  kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | REG_USE_SP
-                 | IS_STORE, "str", "!0C, [sp, #!2E]", 2, kFixupNone),
+                 | IS_STORE_OFF4, "str", "!0C, [sp, #!2E]", 2, kFixupNone),
     ENCODING_MAP(kThumbStrbRRI5,     0x7000,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "strb", "!0C, [!1C, #!2d]", 2, kFixupNone),
     ENCODING_MAP(kThumbStrbRRR,      0x5400,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -374,7 +374,7 @@
                  "strb", "!0C, [!1C, !2C]", 2, kFixupNone),
     ENCODING_MAP(kThumbStrhRRI5,     0x8000,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF2,
                  "strh", "!0C, [!1C, #!2F]", 2, kFixupNone),
     ENCODING_MAP(kThumbStrhRRR,      0x5200,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -423,11 +423,11 @@
      */
     ENCODING_MAP(kThumb2Vldrs,       0xed900a00,
                  kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4 |
                  REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0s, [!1C, #!2E]", 4, kFixupVLoad),
     ENCODING_MAP(kThumb2Vldrd,       0xed900b00,
                  kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF |
                  REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0S, [!1C, #!2E]", 4, kFixupVLoad),
     ENCODING_MAP(kThumb2Vmuls,        0xee200a00,
                  kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
@@ -440,11 +440,11 @@
                  "vmuld", "!0S, !1S, !2S", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vstrs,       0xed800a00,
                  kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
                  "vstr", "!0s, [!1C, #!2E]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vstrd,       0xed800b00,
                  kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
                  "vstr", "!0S, [!1C, #!2E]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vsubs,        0xee300a40,
                  kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
@@ -520,19 +520,19 @@
                  "mov", "!0C, #!1M", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrRRI12,       0xf8c00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "str", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrRRI12,       0xf8d00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldr", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrRRI8Predec,       0xf8400c00,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "str", "!0C, [!1C, #-!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrRRI8Predec,       0xf8500c00,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldr", "!0C, [!1C, #-!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Cbnz,       0xb900, /* Note: does not affect flags */
                  kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
@@ -549,15 +549,15 @@
                  "add", "!0C,!1C,#!2d", 4, kFixupNone),
     ENCODING_MAP(kThumb2MovRR,       0xea4f0000, /* no setflags encoding */
                  kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "mov", "!0C, !1C", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vmovs,       0xeeb00a40,
                  kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "vmov.f32 ", " !0s, !1s", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vmovd,       0xeeb00b40,
                  kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "vmov.f64 ", " !0S, !1S", 4, kFixupNone),
     ENCODING_MAP(kThumb2Ldmia,         0xe8900000,
                  kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
@@ -613,59 +613,59 @@
                  "sbfx", "!0C, !1C, #!2d, #!3d", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrRRR,    0xf8500000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldr", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrhRRR,    0xf8300000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrshRRR,    0xf9300000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrsh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrbRRR,    0xf8100000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrsbRRR,    0xf9100000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrsb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrRRR,    0xf8400000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
                  "str", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrhRRR,    0xf8200000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
                  "strh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrbRRR,    0xf8000000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
                  "strb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrhRRI12,       0xf8b00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrh", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrshRRI12,       0xf9b00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrsh", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrbRRI12,       0xf8900000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrb", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrsbRRI12,       0xf9900000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrsb", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrhRRI12,       0xf8a00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "strh", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrbRRI12,       0xf8800000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "strb", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Pop,           0xe8bd0000,
                  kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
@@ -841,7 +841,7 @@
     ENCODING_MAP(kThumb2LdrPcRel12,       0xf8df0000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+                 IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF | NEEDS_FIXUP,
                  "ldr", "!0C, [r15pc, #!1d]", 4, kFixupLoad),
     ENCODING_MAP(kThumb2BCond,        0xf0008000,
                  kFmtBrOffset, -1, -1, kFmtBitBlt, 25, 22, kFmtUnused, -1, -1,
@@ -899,19 +899,19 @@
                  "umull", "!0C, !1C, !2C, !3C", 4, kFixupNone),
     ENCODING_MAP(kThumb2Ldrex,       0xe8500f00,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOADX,
                  "ldrex", "!0C, [!1C, #!2E]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Ldrexd,      0xe8d0007f,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2 | IS_LOADX,
                  "ldrexd", "!0C, !1C, [!2C]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Strex,       0xe8400000,
                  kFmtBitBlt, 11, 8, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
-                 kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STORE,
+                 kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STOREX,
-                 "strex", "!0C, !1C, [!2C, #!2E]", 4, kFixupNone),
+                 "strex", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Strexd,      0xe8c00070,
                  kFmtBitBlt, 3, 0, kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8,
-                 kFmtBitBlt, 19, 16, IS_QUAD_OP | REG_DEF0_USE123 | IS_STORE,
+                 kFmtBitBlt, 19, 16, IS_QUAD_OP | REG_DEF0_USE123 | IS_STOREX,
                  "strexd", "!0C, !1C, !2C, [!3C]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Clrex,       0xf3bf8f2f,
                  kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
@@ -927,12 +927,12 @@
                  "bfc", "!0C,#!1d,#!2d", 4, kFixupNone),
     ENCODING_MAP(kThumb2Dmb,         0xf3bf8f50,
                  kFmtBitBlt, 3, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_VOLATILE,
                  "dmb", "#!0B", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrPcReln12,       0xf85f0000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
+                 IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF,
                  "ldr", "!0C, [r15pc, -#!1d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Stm,          0xe9000000,
                  kFmtBitBlt, 19, 16, kFmtBitBlt, 12, 0, kFmtUnused, -1, -1,
@@ -1023,17 +1023,17 @@
     ENCODING_MAP(kThumb2LdrdPcRel8,  0xe9df0000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 7, 0,
                  kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0 | REG_DEF1 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+                 IS_TERTIARY_OP | REG_DEF0 | REG_DEF1 | REG_USE_PC | IS_LOAD_OFF4 | NEEDS_FIXUP,
                  "ldrd", "!0C, !1C, [pc, #!2E]", 4, kFixupLoad),
     ENCODING_MAP(kThumb2LdrdI8, 0xe9d00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
                  kFmtBitBlt, 7, 0,
-                 IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | IS_LOAD,
+                 IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | IS_LOAD_OFF4,
                  "ldrd", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrdI8, 0xe9c00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
                  kFmtBitBlt, 7, 0,
-                 IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE,
+                 IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE_OFF4,
                  "strd", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone),
 };
 
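The IS_LOAD_OFF/IS_LOAD_OFF2/IS_LOAD_OFF4, IS_STORE_OFF*, IS_LOADX/IS_STOREX, IS_MOVE, and IS_VOLATILE names used in these tables are presumably composite masks built from the attribute bits added to compiler_enums.h (kIsMoveOp, kMemVolatile, kMemScaledx0/x2/x4). A hedged sketch of that assumed composition; bit positions and the derived names here are illustrative, not the literal ART definitions:

```cpp
#include <cstdint>

// Assumed attribute bit positions (illustrative only).
enum SketchAttr {
  kSketchIsMoveOp = 40,
  kSketchMemLoad,
  kSketchMemStore,
  kSketchMemVolatile,
  kSketchMemScaledx0,
  kSketchMemScaledx2,
  kSketchMemScaledx4,
};

constexpr uint64_t Bit(SketchAttr b) { return UINT64_C(1) << b; }

constexpr uint64_t IS_MOVE          = Bit(kSketchIsMoveOp);
constexpr uint64_t IS_LOAD          = Bit(kSketchMemLoad);
constexpr uint64_t IS_STORE         = Bit(kSketchMemStore);
constexpr uint64_t IS_VOLATILE      = Bit(kSketchMemVolatile);
constexpr uint64_t SCALED_OFFSET_X0 = Bit(kSketchMemScaledx0);
constexpr uint64_t SCALED_OFFSET_X2 = Bit(kSketchMemScaledx2);
constexpr uint64_t SCALED_OFFSET_X4 = Bit(kSketchMemScaledx4);

// Exclusive/volatile accesses vs. plain offset accesses at various scales.
constexpr uint64_t IS_LOADX      = IS_LOAD  | IS_VOLATILE;
constexpr uint64_t IS_STOREX     = IS_STORE | IS_VOLATILE;
constexpr uint64_t IS_LOAD_OFF   = IS_LOAD  | SCALED_OFFSET_X0;
constexpr uint64_t IS_LOAD_OFF2  = IS_LOAD  | SCALED_OFFSET_X2;
constexpr uint64_t IS_LOAD_OFF4  = IS_LOAD  | SCALED_OFFSET_X4;
constexpr uint64_t IS_STORE_OFF  = IS_STORE | SCALED_OFFSET_X0;
constexpr uint64_t IS_STORE_OFF2 = IS_STORE | SCALED_OFFSET_X2;
constexpr uint64_t IS_STORE_OFF4 = IS_STORE | SCALED_OFFSET_X4;
```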
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 9652192..e0b8ec6 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -198,6 +198,7 @@
     }
 
     LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+    size_t GetInstructionOffset(LIR* lir);
 
   private:
     void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index cf21da7..bba1a8c 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -1169,4 +1169,17 @@
   return OpReg(op, r_tgt);
 }
 
+size_t ArmMir2Lir::GetInstructionOffset(LIR* lir) {
+  uint64_t check_flags = GetTargetInstFlags(lir->opcode);
+  DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+  size_t offset = (check_flags & IS_TERTIARY_OP) ? lir->operands[2] : 0;
+
+  if (check_flags & SCALED_OFFSET_X2) {
+    offset = offset * 2;
+  } else if (check_flags & SCALED_OFFSET_X4) {
+    offset = offset * 4;
+  }
+  return offset;
+}
+
 }  // namespace art
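
`GetInstructionOffset` above undoes the Thumb immediate scaling: halfword and word encodings store the offset pre-divided, so the raw operand must be multiplied back to bytes. A standalone sketch with hypothetical flag values:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical flag bits standing in for SCALED_OFFSET_X2 / SCALED_OFFSET_X4.
constexpr uint64_t kSketchScaledX2 = 1u << 0;
constexpr uint64_t kSketchScaledX4 = 1u << 1;

size_t DecodeOffsetBytes(size_t raw_operand, uint64_t flags) {
  if (flags & kSketchScaledX2) return raw_operand * 2;  // ldrh/strh immediates
  if (flags & kSketchScaledX4) return raw_operand * 4;  // ldr/str, vldr/vstr
  return raw_operand;                                   // ldrb/strb: byte offset
}

int main() {
  // kThumbLdrRRI5 encodes a word-scaled immediate: raw 3 means byte offset 12.
  assert(DecodeOffsetBytes(3, kSketchScaledX4) == 12);
  return 0;
}
```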
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 3a8ea3f..90cb156 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -331,6 +331,7 @@
   kA64Stp4ffXD,      // stp [0s10110100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64Stp4rrXD,      // stp [s010100100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64StpPost4rrXD,  // stp [s010100010] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
+  kA64StpPre4ffXD,   // stp [0s10110110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64StpPre4rrXD,   // stp [s010100110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64Str3fXD,       // str [1s11110100] imm_12[21-10] rn[9-5] rt[4-0].
   kA64Str4fXxG,      // str [1s111100001] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 462be54..c46be53 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -214,7 +214,7 @@
                  "csneg", "!0r, !1r, !2r, !3c", kFixupNone),
     ENCODING_MAP(kA64Dmb1B, NO_VARIANTS(0xd50330bf),
                  kFmtBitBlt, 11, 8, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_VOLATILE,
                  "dmb", "#!0B", kFixupNone),
     ENCODING_MAP(WIDE(kA64Eor3Rrl), SF_VARIANTS(0x52000000),
                  kFmtRegROrSp, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 22, 10,
@@ -274,7 +274,7 @@
                  "fmin", "!0f, !1f, !2f", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Fmov2ff), FLOAT_VARIANTS(0x1e204000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "fmov", "!0f, !1f", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Fmov2fI), FLOAT_VARIANTS(0x1e201000),
                  kFmtRegF, 4, 0, kFmtBitBlt, 20, 13, kFmtUnused, -1, -1,
@@ -318,7 +318,7 @@
                  "fsub", "!0f, !1f, !2f", kFixupNone),
     ENCODING_MAP(kA64Ldrb3wXd, NO_VARIANTS(0x39400000),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrb", "!0w, [!1X, #!2d]", kFixupNone),
     ENCODING_MAP(kA64Ldrb3wXx, NO_VARIANTS(0x38606800),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -326,7 +326,7 @@
                  "ldrb", "!0w, [!1X, !2x]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldrsb3rXd), CUSTOM_VARIANTS(0x39c00000, 0x39800000),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrsb", "!0r, [!1X, #!2d]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldrsb3rXx), CUSTOM_VARIANTS(0x38e06800, 0x38a06800),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -334,19 +334,19 @@
                  "ldrsb", "!0r, [!1X, !2x]", kFixupNone),
     ENCODING_MAP(kA64Ldrh3wXF, NO_VARIANTS(0x79400000),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrh", "!0w, [!1X, #!2F]", kFixupNone),
     ENCODING_MAP(kA64Ldrh4wXxd, NO_VARIANTS(0x78606800),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrh", "!0w, [!1X, !2x, lsl #!3d]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldrsh3rXF), CUSTOM_VARIANTS(0x79c00000, 0x79800000),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrsh", "!0r, [!1X, #!2F]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldrsh4rXxd), CUSTOM_VARIANTS(0x78e06800, 0x78906800),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrsh", "!0r, [!1X, !2x, lsl #!3d]", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Ldr2fp), SIZE_VARIANTS(0x1c000000),
                  kFmtRegF, 4, 0, kFmtBitBlt, 23, 5, kFmtUnused, -1, -1,
@@ -360,11 +360,11 @@
                  "ldr", "!0r, !1p", kFixupLoad),
     ENCODING_MAP(FWIDE(kA64Ldr3fXD), SIZE_VARIANTS(0xbd400000),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldr", "!0f, [!1X, #!2D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldr3rXD), SIZE_VARIANTS(0xb9400000),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldr", "!0r, [!1X, #!2D]", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Ldr4fXxG), SIZE_VARIANTS(0xbc606800),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -380,11 +380,11 @@
                  "ldr", "!0r, [!1X], #!2d", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldp4ffXD), CUSTOM_VARIANTS(0x2d400000, 0x6d400000),
                  kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD,
+                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD_OFF,
                  "ldp", "!0f, !1f, [!2X, #!3D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldp4rrXD), SF_VARIANTS(0x29400000),
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD,
+                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD_OFF,
                  "ldp", "!0r, !1r, [!2X, #!3D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64LdpPost4rrXD), CUSTOM_VARIANTS(0x28c00000, 0xa8c00000),
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
@@ -400,11 +400,11 @@
                  "ldur", "!0r, [!1X, #!2d]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldxr2rX), SIZE_VARIANTS(0x885f7c00),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOADX,
                  "ldxr", "!0r, [!1X]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldaxr2rX), SIZE_VARIANTS(0x885ffc00),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOADX,
                  "ldaxr", "!0r, [!1X]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Lsl3rrr), SF_VARIANTS(0x1ac02000),
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
@@ -432,7 +432,7 @@
                  "movz", "!0r, #!1d!2M", kFixupNone),
     ENCODING_MAP(WIDE(kA64Mov2rr), SF_VARIANTS(0x2a0003e0),
                  kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "mov", "!0r, !1r", kFixupNone),
     ENCODING_MAP(WIDE(kA64Mvn2rr), SF_VARIANTS(0x2a2003e0),
                  kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtUnused, -1, -1,
@@ -508,23 +508,27 @@
                  "smulh", "!0x, !1x, !2x", kFixupNone),
     ENCODING_MAP(WIDE(kA64Stp4ffXD), CUSTOM_VARIANTS(0x2d000000, 0x6d000000),
                  kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
                  "stp", "!0f, !1f, [!2X, #!3D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Stp4rrXD), SF_VARIANTS(0x29000000),
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
                  "stp", "!0r, !1r, [!2X, #!3D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64StpPost4rrXD), CUSTOM_VARIANTS(0x28800000, 0xa8800000),
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
                  kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
                  "stp", "!0r, !1r, [!2X], #!3D", kFixupNone),
+    ENCODING_MAP(WIDE(kA64StpPre4ffXD), CUSTOM_VARIANTS(0x2d800000, 0x6d800000),
+                 kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
+                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
+                 "stp", "!0f, !1f, [!2X, #!3D]!!", kFixupNone),
     ENCODING_MAP(WIDE(kA64StpPre4rrXD), CUSTOM_VARIANTS(0x29800000, 0xa9800000),
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
                  kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
                  "stp", "!0r, !1r, [!2X, #!3D]!!", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Str3fXD), CUSTOM_VARIANTS(0xbd000000, 0xfd000000),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "str", "!0f, [!1X, #!2D]", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Str4fXxG), CUSTOM_VARIANTS(0xbc206800, 0xfc206800),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -532,7 +536,7 @@
                  "str", "!0f, [!1X, !2x!3G]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Str3rXD), SIZE_VARIANTS(0xb9000000),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "str", "!0r, [!1X, #!2D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Str4rXxG), SIZE_VARIANTS(0xb8206800),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -540,7 +544,7 @@
                  "str", "!0r, [!1X, !2x!3G]", kFixupNone),
     ENCODING_MAP(kA64Strb3wXd, NO_VARIANTS(0x39000000),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "strb", "!0w, [!1X, #!2d]", kFixupNone),
     ENCODING_MAP(kA64Strb3wXx, NO_VARIANTS(0x38206800),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -548,7 +552,7 @@
                  "strb", "!0w, [!1X, !2x]", kFixupNone),
     ENCODING_MAP(kA64Strh3wXF, NO_VARIANTS(0x79000000),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "strh", "!0w, [!1X, #!2F]", kFixupNone),
     ENCODING_MAP(kA64Strh4wXxd, NO_VARIANTS(0x78206800),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -568,11 +572,11 @@
                  "stur", "!0r, [!1X, #!2d]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Stxr3wrX), SIZE_VARIANTS(0x88007c00),
                  kFmtRegW, 20, 16, kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STOREX,
                  "stxr", "!0w, !1r, [!2X]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Stlxr3wrX), SIZE_VARIANTS(0x8800fc00),
                  kFmtRegW, 20, 16, kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STOREX,
                  "stlxr", "!0w, !1r, [!2X]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Sub4RRdT), SF_VARIANTS(0x51000000),
                  kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
@@ -723,6 +727,7 @@
                              << " @ 0x" << std::hex << lir->dalvik_offset;
                 if (kFailOnSizeError) {
                   LOG(FATAL) << "Bad argument n. " << i << " of " << encoder->name
+                             << "(" << UNWIDE(encoder->opcode) << ", " << encoder->fmt << ")"
                              << ". Expected " << expected << ", got 0x" << std::hex << operand;
                 } else {
                   LOG(WARNING) << "Bad argument n. " << i << " of " << encoder->name
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index e584548..6fa8a4a 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -330,19 +330,14 @@
 
   NewLIR0(kPseudoMethodEntry);
 
-  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm64) -
-      Thread::kStackOverflowSignalReservedBytes;
-  const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
   const int spill_count = num_core_spills_ + num_fp_spills_;
   const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf;  // SP 16 byte alignment.
   const int frame_size_without_spills = frame_size_ - spill_size;
 
   if (!skip_overflow_check) {
     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
-      if (!large_frame) {
-        // Load stack limit
-        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
-      }
+      // Load stack limit
+      LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
     } else {
       // TODO(Arm64) Implement implicit checks.
       // Implicit stack overflow check.
@@ -350,24 +345,21 @@
       // redzone we will get a segmentation fault.
       // Load32Disp(rs_wSP, -Thread::kStackOverflowReservedBytes, rs_wzr);
       // MarkPossibleStackOverflowException();
+      //
+      // TODO: If the frame size is small enough, is it possible to make this a pre-indexed load,
+      //       so that we can avoid the following "sub sp" when spilling?
       LOG(FATAL) << "Implicit stack overflow checks not implemented.";
     }
   }
 
-  if (frame_size_ > 0) {
-    OpRegImm64(kOpSub, rs_sp, spill_size);
+  int spilled_already = 0;
+  if (spill_size > 0) {
+    spilled_already = SpillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
+    DCHECK(spill_size == spilled_already || frame_size_ == spilled_already);
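+    // SpillRegs() consumes either just the 16B-aligned spill area (pre-indexed path) or the
+    // whole frame (pre-sub path, small frames); any remainder is subtracted from SP below.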
   }
 
-  /* Need to spill any FP regs? */
-  if (fp_spill_mask_) {
-    int spill_offset = spill_size - kArm64PointerSize*(num_fp_spills_ + num_core_spills_);
-    SpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
-  }
-
-  /* Spill core callee saves. */
-  if (core_spill_mask_) {
-    int spill_offset = spill_size - kArm64PointerSize*num_core_spills_;
-    SpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
+  if (spilled_already != frame_size_) {
+    OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
   }
 
   if (!skip_overflow_check) {
@@ -396,29 +388,9 @@
         const size_t sp_displace_;
       };
 
-      if (large_frame) {
-        // Compare Expected SP against bottom of stack.
-        // Branch to throw target if there is not enough room.
-        OpRegRegImm(kOpSub, rs_xIP1, rs_sp, frame_size_without_spills);
-        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP0);
-        LIR* branch = OpCmpBranch(kCondUlt, rs_xIP1, rs_xIP0, nullptr);
-        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_size));
-        OpRegCopy(rs_sp, rs_xIP1);  // Establish stack after checks.
-      } else {
-        /*
-         * If the frame is small enough we are guaranteed to have enough space that remains to
-         * handle signals on the user stack.
-         * Establishes stack before checks.
-         */
-        OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size_without_spills);
-        LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
-        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
-      }
-    } else {
-      OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
+      LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
+      AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
     }
-  } else {
-    OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
   }
 
   FlushIns(ArgLocs, rl_method);
@@ -445,57 +417,7 @@
 
   NewLIR0(kPseudoMethodExit);
 
-  // Restore saves and drop stack frame.
-  // 2 versions:
-  //
-  // 1. (Original): Try to address directly, then drop the whole frame.
-  //                Limitation: ldp is a 7b signed immediate. There should have been a DCHECK!
-  //
-  // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
-  //           in range. Then drop the rest.
-  //
-  // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
-  //       in variant 1.
-
-  if (frame_size_ <= 504) {
-    // "Magic" constant, 63 (max signed 7b) * 8. Do variant 1.
-    // Could be tighter, as the last load is below frame_size_ offset.
-    if (fp_spill_mask_) {
-      int spill_offset = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
-      UnSpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
-    }
-    if (core_spill_mask_) {
-      int spill_offset = frame_size_ - kArm64PointerSize * num_core_spills_;
-      UnSpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
-    }
-
-    OpRegImm64(kOpAdd, rs_sp, frame_size_);
-  } else {
-    // Second variant. Drop the frame part.
-    int drop = 0;
-    // TODO: Always use the first formula, as num_fp_spills would be zero?
-    if (fp_spill_mask_) {
-      drop = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
-    } else {
-      drop = frame_size_ - kArm64PointerSize * num_core_spills_;
-    }
-
-    // Drop needs to be 16B aligned, so that SP keeps aligned.
-    drop = RoundDown(drop, 16);
-
-    OpRegImm64(kOpAdd, rs_sp, drop);
-
-    if (fp_spill_mask_) {
-      int offset = frame_size_ - drop - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
-      UnSpillFPRegs(rs_sp, offset, fp_spill_mask_);
-    }
-    if (core_spill_mask_) {
-      int offset = frame_size_ - drop - kArm64PointerSize * num_core_spills_;
-      UnSpillCoreRegs(rs_sp, offset, core_spill_mask_);
-    }
-
-    OpRegImm64(kOpAdd, rs_sp, frame_size_ - drop);
-  }
+  UnspillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
 
   // Finally return.
   NewLIR0(kA64Ret);
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index ac36519..18f2a29 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -219,11 +219,12 @@
     void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
     void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
 
-    uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2);
-    void UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask);
-    void SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask);
-    void UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask);
-    void SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask);
+    // Spill core and FP registers. Returns the SP difference: either spill size, or whole
+    // frame size.
+    int SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+
+    // Unspill core and FP registers.
+    void UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
 
     // Required for target - single operation generators.
     LIR* OpUnconditionalBranch(LIR* target);
@@ -260,7 +261,8 @@
     LIR* OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
                            A64RegExtEncodings ext, uint8_t amount);
     LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
-    LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
+    LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
+                        A64RegExtEncodings ext, uint8_t amount);
     static const ArmEncodingMap EncodingMap[kA64Last];
     int EncodeShift(int code, int amount);
     int EncodeExtend(int extend_type, int amount);
@@ -297,6 +299,7 @@
     bool WideFPRsAreAliases() OVERRIDE {
       return true;  // 64b architecture.
     }
+    size_t GetInstructionOffset(LIR* lir);
 
     LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
 
@@ -381,6 +384,7 @@
     RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
                           bool is_div, bool check_zero);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+    size_t GetLoadStoreSize(LIR* lir);
 };
 
 }  // namespace art
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index f9f85f4..9403d5e 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -22,6 +22,7 @@
 #include "dex/reg_storage_eq.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "mirror/array.h"
+#include "utils.h"
 
 namespace art {
 
@@ -788,6 +789,7 @@
 }
 
 LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
 }
 
@@ -1237,6 +1239,14 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
+static uint32_t ExtractReg(uint32_t reg_mask, int* reg) {
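+  // Like GenPairWise() below, *reg is a running register index: callers initialize it to -1
+  // and the mask is shifted past each bit as it is consumed.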
+  // Find first register.
+  int first_bit_set = CTZ(reg_mask) + 1;
+  *reg = *reg + first_bit_set;
+  reg_mask >>= first_bit_set;
+  return reg_mask;
+}
+
 /**
  * @brief Split a register list in pairs or registers.
  *
@@ -1253,15 +1263,15 @@
  *   }
  * @endcode
  */
-uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
+static uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
   // Find first register.
-  int first_bit_set = __builtin_ctz(reg_mask) + 1;
+  int first_bit_set = CTZ(reg_mask) + 1;
   int reg = *reg1 + first_bit_set;
   reg_mask >>= first_bit_set;
 
   if (LIKELY(reg_mask)) {
     // Save the first register, find the second and use the pair opcode.
-    int second_bit_set = __builtin_ctz(reg_mask) + 1;
+    int second_bit_set = CTZ(reg_mask) + 1;
     *reg2 = reg;
     reg_mask >>= second_bit_set;
     *reg1 = reg + second_bit_set;
@@ -1274,68 +1284,274 @@
   return reg_mask;
 }
 
-void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
-  int reg1 = -1, reg2 = -1;
-  const int reg_log2_size = 3;
-
-  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
-     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
-    if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-    } else {
-      DCHECK_LE(offset, 63);
-      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
-              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-    }
-  }
-}
-
-void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
+static void SpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
   int reg1 = -1, reg2 = -1;
   const int reg_log2_size = 3;
 
   for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
     if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
     } else {
-      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
-              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-    }
-  }
-}
-
-void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
-  int reg1 = -1, reg2 = -1;
-  const int reg_log2_size = 3;
-
-  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
-     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
-    if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
-    } else {
-      NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
-              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
     }
   }
 }
 
 // TODO(Arm64): consider using ld1 and st1?
-void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
+static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
   int reg1 = -1, reg2 = -1;
   const int reg_log2_size = 3;
 
   for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
     if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+                   offset);
     } else {
-      NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
-              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
     }
   }
 }
 
+static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
+                           uint32_t fp_reg_mask, int frame_size) {
+  m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
+
+  int core_count = POPCOUNT(core_reg_mask);
+
+  if (fp_reg_mask != 0) {
+    // Spill FP regs.
+    int fp_count = POPCOUNT(fp_reg_mask);
+    int spill_offset = frame_size - (core_count + fp_count) * kArm64PointerSize;
+    SpillFPRegs(m2l, rs_sp, spill_offset, fp_reg_mask);
+  }
+
+  if (core_reg_mask != 0) {
+    // Spill core regs.
+    int spill_offset = frame_size - (core_count * kArm64PointerSize);
+    SpillCoreRegs(m2l, rs_sp, spill_offset, core_reg_mask);
+  }
+
+  return frame_size;
+}
+
+static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
+                               uint32_t fp_reg_mask, int frame_size) {
+  // Otherwise, spill both core and fp regs at the same time.
+  // The very first instruction will be an stp with pre-indexed address, moving the stack pointer
+  // down. From then on, we fill upwards. This will generate overall the same number of instructions
+  // as the specialized code above in most cases (exception being odd number of core and even
+  // non-zero fp spills), but is more flexible, as the offsets are guaranteed small.
+  //
+  // Some demonstrative fill cases : (c) = core, (f) = fp
+  // cc    44   cc    44   cc    22   cc    33   fc => 1[1/2]
+  // fc => 23   fc => 23   ff => 11   ff => 22
+  // ff    11    f    11               f    11
+  //
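+  // Worked example (1 slot = 8 bytes): core = {x19, x20, x21} and fp = {d8} give combined = 4
+  // and all_offset = 4 slots. The pre-indexed stp drops SP by 32 bytes and fills slot 0 with
+  // d8, the odd core reg is then stored to slot 1, and the last pair fills slots 2-3; 32 is
+  // returned as the number of bytes consumed.
+  //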
+  int reg1 = -1, reg2 = -1;
+  int core_count = POPCOUNT(core_reg_mask);
+  int fp_count = POPCOUNT(fp_reg_mask);
+
+  int combined = fp_count + core_count;
+  int all_offset = RoundUp(combined, 2);  // Needs to be 16B = 2-reg aligned.
+
+  int cur_offset = 2;  // What's the starting offset after the first stp? We expect the base slot
+                       // to be filled.
+
+  // First figure out whether the bottom is FP or core.
+  if (fp_count > 0) {
+    // Some FP spills.
+    //
+    // Four cases: (d0 is dummy to fill up stp)
+    // 1) Single FP, even number of core -> stp d0, fp_reg
+    // 2) Single FP, odd number of core -> stp fp_reg, d0
+    // 3) More FP, even number combined -> stp fp_reg1, fp_reg2
+    // 4) More FP, odd number combined -> stp d0, fp_reg
+    if (fp_count == 1) {
+      fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
+      DCHECK_EQ(fp_reg_mask, 0U);
+      if (core_count % 2 == 0) {
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
+                     rs_d0.GetReg(),
+                     RegStorage::FloatSolo64(reg1).GetReg(),
+                     base.GetReg(), -all_offset);
+      } else {
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
+                     RegStorage::FloatSolo64(reg1).GetReg(),
+                     rs_d0.GetReg(),
+                     base.GetReg(), -all_offset);
+        cur_offset = 0;  // That core reg needs to go into the upper half.
+      }
+    } else {
+      if (combined % 2 == 0) {
+        fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), -all_offset);
+      } else {
+        fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), rs_d0.GetReg(), RegStorage::FloatSolo64(reg1).GetReg(),
+                     base.GetReg(), -all_offset);
+      }
+    }
+  } else {
+    // No FP spills.
+    //
+    // Two cases:
+    // 1) Even number of core -> stp core1, core2
+    // 2) Odd number of core -> stp xzr, core1
+    if (core_count % 2 == 1) {
+      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
+      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), rs_xzr.GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
+    } else {
+      core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
+      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
+    }
+  }
+
+  if (fp_count != 0) {
+    for (; fp_reg_mask != 0;) {
+      // Have some FP regs to do.
+      fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
+      if (UNLIKELY(reg2 < 0)) {
+        m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+                     cur_offset);
+        // Do not increment offset here, as the second half will be filled by a core reg.
+      } else {
+        m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), cur_offset);
+        cur_offset += 2;
+      }
+    }
+
+    // Reset counting.
+    reg1 = -1;
+
+    // If there is an odd number of core registers, we need to store the bottom now.
+    if (core_count % 2 == 1) {
+      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
+      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(),
+                   cur_offset + 1);
+      cur_offset += 2;  // Half-slot filled now.
+    }
+  }
+
+  // Spill the rest of the core regs. They are guaranteed to be even.
+  DCHECK_EQ(POPCOUNT(core_reg_mask) % 2, 0);
+  for (; core_reg_mask != 0; cur_offset += 2) {
+    core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
+    m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), cur_offset);
+  }
+
+  DCHECK_EQ(cur_offset, all_offset);
+
+  return all_offset * 8;
+}
+
+int Arm64Mir2Lir::SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+                            int frame_size) {
+  // If the frame size is small enough that all offsets would fit into the immediates, use that
+  // setup, as it decrements sp early (kind of instruction scheduling), and is not worse
+  // instruction-count wise than the complicated code below.
+  //
+  // This case is also optimal when we have an odd number of core spills, and an even (non-zero)
+  // number of fp spills.
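+  //
+  // Note: stp/ldp encode a signed 7-bit immediate scaled by the register size, so with 8-byte
+  // registers at most 63 positive slots (504 bytes) are directly addressable.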
+  if ((RoundUp(frame_size, 8) / 8 <= 63)) {
+    return SpillRegsPreSub(this, base, core_reg_mask, fp_reg_mask, frame_size);
+  } else {
+    return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask, frame_size);
+  }
+}
+
+static void UnSpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
+  int reg1 = -1, reg2 = -1;
+  const int reg_log2_size = 3;
+
+  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
+    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
+    if (UNLIKELY(reg2 < 0)) {
+      m2l->NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+    } else {
+      DCHECK_LE(offset, 63);
+      m2l->NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+    }
+  }
+}
+
+static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
+  int reg1 = -1, reg2 = -1;
+  const int reg_log2_size = 3;
+
+  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
+    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
+    if (UNLIKELY(reg2 < 0)) {
+      m2l->NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+                   offset);
+    } else {
+      m2l->NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+    }
+  }
+}
+
+void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+                               int frame_size) {
+  // Restore saves and drop stack frame.
+  // 2 versions:
+  //
+  // 1. (Original): Try to address directly, then drop the whole frame.
+  //                Limitation: ldp offsets are 7b signed immediates.
+  //
+  // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
+  //           in range. Then drop the rest.
+  //
+  // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
+  //       in variant 1.
+
+  // "Magic" constant, 63 (max signed 7b) * 8.
+  static constexpr int kMaxFramesizeForOffset = 63 * kArm64PointerSize;
+
+  const int num_core_spills = POPCOUNT(core_reg_mask);
+  const int num_fp_spills = POPCOUNT(fp_reg_mask);
+
+  int early_drop = 0;
+
+  if (frame_size > kMaxFramesizeForOffset) {
+    // Second variant. Drop the frame part.
+
+    // TODO: Always use the first formula, as num_fp_spills would be zero?
+    if (fp_reg_mask != 0) {
+      early_drop = frame_size - kArm64PointerSize * (num_fp_spills + num_core_spills);
+    } else {
+      early_drop = frame_size - kArm64PointerSize * num_core_spills;
+    }
+
+    // Drop needs to be 16B aligned, so that SP keeps aligned.
+    early_drop = RoundDown(early_drop, 16);
+
+    OpRegImm64(kOpAdd, rs_sp, early_drop);
+  }
+
+  // Unspill.
+  if (fp_reg_mask != 0) {
+    int offset = frame_size - early_drop - kArm64PointerSize * (num_fp_spills + num_core_spills);
+    UnSpillFPRegs(this, rs_sp, offset, fp_reg_mask);
+  }
+  if (core_reg_mask != 0) {
+    int offset = frame_size - early_drop - kArm64PointerSize * num_core_spills;
+    UnSpillCoreRegs(this, rs_sp, offset, core_reg_mask);
+  }
+
+  // Drop the (rest of) the frame.
+  OpRegImm64(kOpAdd, rs_sp, frame_size - early_drop);
+}
+
 bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
   ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
   RegLocation rl_src_i = info->args[0];
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index f6c140f..5131bd8 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -87,6 +87,26 @@
   return (bit7 | bit6 | bit5_to_0);
 }
 
+size_t Arm64Mir2Lir::GetLoadStoreSize(LIR* lir) {
+  bool opcode_is_wide = IS_WIDE(lir->opcode);
+  ArmOpcode opcode = UNWIDE(lir->opcode);
+  DCHECK(!IsPseudoLirOp(opcode));
+  const ArmEncodingMap *encoder = &EncodingMap[opcode];
+  uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton;
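+  // Bits [31:30] of an A64 load/store encoding form the size field: log2 of the access size
+  // in bytes, which is also the shift applied to a scaled immediate offset.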
+  return (bits >> 30);
+}
+
+size_t Arm64Mir2Lir::GetInstructionOffset(LIR* lir) {
+  size_t offset = lir->operands[2];
+  uint64_t check_flags = GetTargetInstFlags(lir->opcode);
+  DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+  if (check_flags & SCALED_OFFSET_X0) {
+    DCHECK(check_flags & IS_TERTIARY_OP);
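+    // operands[2] holds the scaled (slot) immediate; convert it back to bytes so that offsets
+    // of differently-sized accesses compare correctly in load/store elimination.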
+    offset = offset * (1 << GetLoadStoreSize(lir));
+  }
+  return offset;
+}
+
 LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
   DCHECK(r_dest.IsSingle());
   if (value == 0) {
@@ -525,7 +545,8 @@
   return NULL;
 }
 
-LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int extend) {
+LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
+                                  A64RegExtEncodings ext, uint8_t amount) {
   ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
   ArmOpcode opcode = kA64Brk1d;
 
@@ -536,6 +557,11 @@
     case kOpCmp:
       opcode = kA64Cmp3Rre;
       break;
+    case kOpAdd:
+      // Note: intentional fallthrough
+    case kOpSub:
+      return OpRegRegRegExtend(op, r_dest_src1, r_dest_src1, r_src2, ext, amount);
+      break;
     default:
       LOG(FATAL) << "Bad Opcode: " << opcode;
       break;
@@ -545,7 +571,8 @@
   if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
     ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
     if (kind == kFmtExtend) {
-      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), extend);
+      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(),
+                     EncodeExtend(ext, amount));
     }
   }
 
@@ -555,10 +582,10 @@
 
 LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
   /* RegReg operations with SP in first parameter need extended register instruction form.
-   * Only CMN and CMP instructions are implemented.
+   * Only CMN, CMP, ADD & SUB instructions are implemented.
    */
   if (r_dest_src1 == rs_sp) {
-    return OpRegRegExtend(op, r_dest_src1, r_src2, ENCODE_NO_EXTEND);
+    return OpRegRegExtend(op, r_dest_src1, r_src2, kA64Uxtx, 0);
   } else {
     return OpRegRegShift(op, r_dest_src1, r_src2, ENCODE_NO_SHIFT);
   }
@@ -825,15 +852,6 @@
     }
     OpRegImm64(op, r_dest_src1, abs_value & (~INT64_C(0xfff)));
     return OpRegImm64(op, r_dest_src1, abs_value & 0xfff);
-  } else if (LIKELY(A64_REG_IS_SP(r_dest_src1.GetReg()) && (op == kOpAdd || op == kOpSub))) {
-    // Note: "sub sp, sp, Xm" is not correct on arm64.
-    // We need special instructions for SP.
-    // Also operation on 32-bit SP should be avoided.
-    DCHECK(IS_WIDE(wide));
-    RegStorage r_tmp = AllocTempWide();
-    OpRegRegImm(kOpAdd, r_tmp, r_dest_src1, 0);
-    OpRegImm64(op, r_tmp, value);
-    return OpRegRegImm(kOpAdd, r_dest_src1, r_tmp, 0);
   } else {
     RegStorage r_tmp;
     LIR* res;
@@ -878,10 +896,14 @@
 }
 
 int Arm64Mir2Lir::EncodeShift(int shift_type, int amount) {
+  DCHECK_EQ(shift_type & 0x3, shift_type);
+  DCHECK_EQ(amount & 0x3f, amount);
   return ((shift_type & 0x3) << 7) | (amount & 0x3f);
 }
 
 int Arm64Mir2Lir::EncodeExtend(int extend_type, int amount) {
+  DCHECK_EQ(extend_type & 0x7, extend_type);
+  DCHECK_EQ(amount & 0x7, amount);
   return  (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
 }
 
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index f0e4d9c..aae9155 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -21,6 +21,7 @@
 #include "mirror/array.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_reference.h"
 #include "verifier/method_verifier.h"
 #include <functional>
 
@@ -722,6 +723,7 @@
   OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
   if (!SLOW_FIELD_PATH && field_info.FastGet()) {
     RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
+    // A load of the class will lead to an iget with offset 0.
     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     rl_obj = LoadValue(rl_obj, kRefReg);
     GenNullCheck(rl_obj.reg, opt_flags);
@@ -768,7 +770,9 @@
   OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
   if (!SLOW_FIELD_PATH && field_info.FastPut()) {
     RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
-    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
+    // Dex code never writes to the class field.
+    DCHECK_GE(static_cast<uint32_t>(field_info.FieldOffset().Int32Value()),
+              sizeof(mirror::HeapReference<mirror::Class>));
     rl_obj = LoadValue(rl_obj, kRefReg);
     if (is_long_or_double) {
       rl_src = LoadValueWide(rl_src, reg_class);
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 2893157..eec2b32 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -15,15 +15,43 @@
  */
 
 #include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir-inl.h"
 
 namespace art {
 
 #define DEBUG_OPT(X)
 
+#define LOAD_STORE_CHECK_REG_DEP(mask, check) (mask.Intersects(*check->u.m.def_mask))
+
 /* Check RAW, WAR, and WAW dependency on the register operands */
 #define CHECK_REG_DEP(use, def, check) (def.Intersects(*check->u.m.use_mask)) || \
                                        (use.Union(def).Intersects(*check->u.m.def_mask))
 
+/* Load Store Elimination filter:
+ *  - Wide Load/Store
+ *  - Exclusive Load/Store
+ *  - Quad operand Load/Store
+ *  - List Load/Store
+ *  - IT blocks
+ *  - Branch
+ *  - Dmb
+ */
+#define LOAD_STORE_FILTER(flags) ((flags & (IS_QUAD_OP|IS_STORE)) == (IS_QUAD_OP|IS_STORE) || \
+                                 (flags & (IS_QUAD_OP|IS_LOAD)) == (IS_QUAD_OP|IS_LOAD) || \
+                                 (flags & REG_USE012) == REG_USE012 || \
+                                 (flags & REG_DEF01) == REG_DEF01 || \
+                                 (flags & REG_DEF_LIST0) || \
+                                 (flags & REG_DEF_LIST1) || \
+                                 (flags & REG_USE_LIST0) || \
+                                 (flags & REG_USE_LIST1) || \
+                                 (flags & REG_DEF_FPCS_LIST0) || \
+                                 (flags & REG_DEF_FPCS_LIST2) || \
+                                 (flags & REG_USE_FPCS_LIST0) || \
+                                 (flags & REG_USE_FPCS_LIST2) || \
+                                 (flags & IS_VOLATILE) || \
+                                 (flags & IS_BRANCH) || \
+                                 (flags & IS_IT))
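+
+/* Everything the filter matches either touches more than one memory word, implies ordering or
+ * other side effects, or alters control flow, so the elimination pass skips such instructions
+ * as candidates and treats them as barriers when scanning. */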
+
 /* Scheduler heuristics */
 #define MAX_HOIST_DISTANCE 20
 #define LDLD_DISTANCE 4
@@ -43,6 +71,7 @@
   /* Insert a move to replace the load */
   LIR* move_lir;
   move_lir = OpRegCopyNoInsert(dest, src);
+  move_lir->dalvik_offset = orig_lir->dalvik_offset;
   /*
    * Insert the converted instruction after the original since the
    * optimization is scanning in the top-down order and the new instruction
@@ -52,8 +81,53 @@
   InsertLIRAfter(orig_lir, move_lir);
 }
 
+void Mir2Lir::DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type) {
+  LOG(INFO) << type;
+  LOG(INFO) << "Check LIR:";
+  DumpLIRInsn(check_lir, 0);
+  LOG(INFO) << "This LIR:";
+  DumpLIRInsn(this_lir, 0);
+}
+
+inline void Mir2Lir::EliminateLoad(LIR* lir, int reg_id) {
+  DCHECK(RegStorage::SameRegType(lir->operands[0], reg_id));
+  RegStorage dest_reg, src_reg;
+
+  /* Same Register - Nop */
+  if (lir->operands[0] == reg_id) {
+    NopLIR(lir);
+    return;
+  }
+
+  /* Different register - Move + Nop */
+  switch (reg_id & RegStorage::kShapeTypeMask) {
+    case RegStorage::k32BitSolo | RegStorage::kCoreRegister:
+      dest_reg = RegStorage::Solo32(lir->operands[0]);
+      src_reg = RegStorage::Solo32(reg_id);
+      break;
+    case RegStorage::k64BitSolo | RegStorage::kCoreRegister:
+      dest_reg = RegStorage::Solo64(lir->operands[0]);
+      src_reg = RegStorage::Solo64(reg_id);
+      break;
+    case RegStorage::k32BitSolo | RegStorage::kFloatingPoint:
+      dest_reg = RegStorage::FloatSolo32(lir->operands[0]);
+      src_reg = RegStorage::FloatSolo32(reg_id);
+      break;
+    case RegStorage::k64BitSolo | RegStorage::kFloatingPoint:
+      dest_reg = RegStorage::FloatSolo64(lir->operands[0]);
+      src_reg = RegStorage::FloatSolo64(reg_id);
+      break;
+    default:
+      LOG(INFO) << "Load Store: Unsupported register type!";
+      return;
+  }
+  ConvertMemOpIntoMove(lir, dest_reg, src_reg);
+  NopLIR(lir);
+  return;
+}
+
 /*
- * Perform a pass of top-down walk, from the second-last instruction in the
+ * Perform a pass of top-down walk, from the first to the last instruction in the
  * superblock, to eliminate redundant loads and stores.
  *
  * An earlier load can eliminate a later load iff
@@ -66,213 +140,172 @@
  *   2) The native register is not clobbered in between
  *   3) The memory location is not written to in between
  *
- * A later store can be eliminated by an earlier store iff
+ * An earlier store can eliminate a later store iff
  *   1) They are must-aliases
  *   2) The memory location is not written to in between
  */
 void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
-  LIR* this_lir;
+  LIR* this_lir, *check_lir;
+  std::vector<int> alias_list;
 
   if (head_lir == tail_lir) {
     return;
   }
 
-  for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) {
-    if (IsPseudoLirOp(this_lir->opcode)) {
+  for (this_lir = head_lir; this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
+    if (this_lir->flags.is_nop || IsPseudoLirOp(this_lir->opcode)) {
       continue;
     }
 
-    int sink_distance = 0;
-
     uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
+    /* Target LIR - skip if instr is:
+     *  - NOP
+     *  - Branch
+     *  - Neither a load nor a store, or both at once
+     *  - Wide load
+     *  - Wide store
+     *  - Exclusive load/store
+     */
+    if (LOAD_STORE_FILTER(target_flags) ||
+        ((target_flags & (IS_LOAD | IS_STORE)) == (IS_LOAD | IS_STORE)) ||
+        !(target_flags & (IS_LOAD | IS_STORE))) {
+      continue;
+    }
+    int native_reg_id = this_lir->operands[0];
+    int dest_reg_id = this_lir->operands[1];
+    bool is_this_lir_load = target_flags & IS_LOAD;
+    ResourceMask this_mem_mask = kEncodeMem.Intersection(this_lir->u.m.use_mask->Union(
+                                                        *this_lir->u.m.def_mask));
 
-    /* Skip non-interesting instructions */
-    if ((this_lir->flags.is_nop == true) ||
-        (target_flags & IS_BRANCH) ||
-        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||  // Skip wide loads.
-        ((target_flags & (REG_USE0 | REG_USE1 | REG_USE2)) ==
-         (REG_USE0 | REG_USE1 | REG_USE2)) ||  // Skip wide stores.
-        // Skip instructions that are neither loads or stores.
-        !(target_flags & (IS_LOAD | IS_STORE)) ||
-        // Skip instructions that do both load and store.
-        ((target_flags & (IS_STORE | IS_LOAD)) == (IS_STORE | IS_LOAD))) {
+    /* Memory region */
+    if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg)) &&
+        !this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeHeapRef))) {
       continue;
     }
 
-    int native_reg_id;
-    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
-      // If x86, location differs depending on whether memory/reg operation.
-      native_reg_id = (target_flags & IS_STORE) ? this_lir->operands[2] : this_lir->operands[0];
-    } else {
-      native_reg_id = this_lir->operands[0];
-    }
-    bool is_this_lir_load = target_flags & IS_LOAD;
-    LIR* check_lir;
-    /* Use the mem mask to determine the rough memory location */
-    ResourceMask this_mem_mask = kEncodeMem.Intersection(
-        this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask));
-
-    /*
-     * Currently only eliminate redundant ld/st for constant and Dalvik
-     * register accesses.
-     */
-    if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg))) {
+    /* Does not redefine the address */
+    if (this_lir->u.m.def_mask->Intersects(*this_lir->u.m.use_mask)) {
       continue;
     }
 
     ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);
+    ResourceMask stop_use_reg_mask = this_lir->u.m.use_mask->Without(kEncodeMem);
 
-    /*
-     * Add pc to the resource mask to prevent this instruction
-     * from sinking past branch instructions. Also take out the memory
-     * region bits since stop_mask is used to check data/control
-     * dependencies.
-     *
-     * Note: on x86(-64) and Arm64 we use the IsBranch bit, as the PC is not exposed.
-     */
-    ResourceMask pc_encoding = GetPCUseDefEncoding();
-    if (pc_encoding == kEncodeNone) {
-      // TODO: Stop the abuse of kIsBranch as a bit specification for ResourceMask.
-      pc_encoding = ResourceMask::Bit(kIsBranch);
+    /* The ARM backend can load/store PC */
+    ResourceMask uses_pc = GetPCUseDefEncoding();
+    if (uses_pc.Intersects(this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask))) {
+      continue;
     }
-    ResourceMask  stop_use_reg_mask = pc_encoding.Union(*this_lir->u.m.use_mask).
-        Without(kEncodeMem);
 
+    /* Initialize alias list */
+    alias_list.clear();
+    ResourceMask alias_reg_list_mask = kEncodeNone;
+    if (!this_mem_mask.Intersects(kEncodeLiteral)) {
+      alias_list.push_back(dest_reg_id);
+      SetupRegMask(&alias_reg_list_mask, dest_reg_id);
+    }
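+    // alias_list tracks registers known to hold the same base address (the original base reg
+    // plus anything it is copied into by a move), so accesses through a copy still match.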
+
+    /* Scan through the BB for possible elimination candidates */
     for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
-      /*
-       * Skip already dead instructions (whose dataflow information is
-       * outdated and misleading).
-       */
       if (check_lir->flags.is_nop || IsPseudoLirOp(check_lir->opcode)) {
         continue;
       }
 
-      ResourceMask check_mem_mask = kEncodeMem.Intersection(
-          check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask));
-      ResourceMask alias_condition = this_mem_mask.Intersection(check_mem_mask);
-      bool stop_here = false;
+      if (uses_pc.Intersects(check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask))) {
+        break;
+      }
 
-      /*
-       * Potential aliases seen - check the alias relations
-       */
+      ResourceMask check_mem_mask = kEncodeMem.Intersection(check_lir->u.m.use_mask->Union(
+                                                          *check_lir->u.m.def_mask));
+      ResourceMask alias_mem_mask = this_mem_mask.Intersection(check_mem_mask);
       uint64_t check_flags = GetTargetInstFlags(check_lir->opcode);
-      // TUNING: Support instructions with multiple register targets.
-      if ((check_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) {
+      bool stop_here = false;
+      bool pass_over = false;
+
+      /* Check LIR - skip if instr is:
+       *  - Wide Load
+       *  - Wide Store
+       *  - Branch
+       *  - Dmb
+       *  - Exclusive load/store
+       *  - IT blocks
+       *  - Quad loads
+       */
+      if (LOAD_STORE_FILTER(check_flags)) {
         stop_here = true;
-      } else if (!check_mem_mask.Equals(kEncodeMem) && !alias_condition.Equals(kEncodeNone)) {
-        bool is_check_lir_load = check_flags & IS_LOAD;
-        if  (alias_condition.Equals(kEncodeLiteral)) {
-          /*
-           * Should only see literal loads in the instruction
-           * stream.
-           */
-          DCHECK(!(check_flags & IS_STORE));
-          /* Same value && same register type */
-          if (check_lir->flags.alias_info == this_lir->flags.alias_info &&
-              RegStorage::SameRegType(check_lir->operands[0], native_reg_id)) {
-            /*
-             * Different destination register - insert
-             * a move
-             */
-            if (check_lir->operands[0] != native_reg_id) {
-              // TODO: update for 64-bit regs.
-              ConvertMemOpIntoMove(check_lir, RegStorage::Solo32(check_lir->operands[0]),
-                                   RegStorage::Solo32(native_reg_id));
-            }
-            NopLIR(check_lir);
-          }
-        } else if (alias_condition.Equals(kEncodeDalvikReg)) {
-          /* Must alias */
-          if (check_lir->flags.alias_info == this_lir->flags.alias_info) {
-            /* Only optimize compatible registers */
-            bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);
-            if ((is_this_lir_load && is_check_lir_load) ||
-                (!is_this_lir_load && is_check_lir_load)) {
-              /* RAR or RAW */
-              if (reg_compatible) {
-                /*
-                 * Different destination register -
-                 * insert a move
-                 */
-                if (check_lir->operands[0] != native_reg_id) {
-                  // TODO: update for 64-bit regs.
-                  ConvertMemOpIntoMove(check_lir, RegStorage::Solo32(check_lir->operands[0]),
-                                       RegStorage::Solo32(native_reg_id));
-                }
-                NopLIR(check_lir);
-              } else {
-                /*
-                 * Destinaions are of different types -
-                 * something complicated going on so
-                 * stop looking now.
-                 */
-                stop_here = true;
-              }
-            } else if (is_this_lir_load && !is_check_lir_load) {
-              /* WAR - register value is killed */
-              stop_here = true;
-            } else if (!is_this_lir_load && !is_check_lir_load) {
-              /* WAW - nuke the earlier store */
-              NopLIR(this_lir);
-              stop_here = true;
-            }
-          /* Partial overlap */
-          } else if (IsDalvikRegisterClobbered(this_lir, check_lir)) {
-            /*
-             * It is actually ok to continue if check_lir
-             * is a read. But it is hard to make a test
-             * case for this so we just stop here to be
-             * conservative.
-             */
-            stop_here = true;
+        /* Possible alias or result of earlier pass */
+      } else if (check_flags & IS_MOVE) {
+        for (auto &reg : alias_list) {
+          if (RegStorage::RegNum(check_lir->operands[1]) == RegStorage::RegNum(reg)) {
+            pass_over = true;
+            alias_list.push_back(check_lir->operands[0]);
+            SetupRegMask(&alias_reg_list_mask, check_lir->operands[0]);
           }
         }
-        /* Memory content may be updated. Stop looking now. */
+      /* Memory regions */
+      } else if (!alias_mem_mask.Equals(kEncodeNone)) {
+        DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+        bool is_check_lir_load = check_flags & IS_LOAD;
+        bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);
+
+        if (alias_mem_mask.Equals(kEncodeLiteral)) {
+          DCHECK(check_flags & IS_LOAD);
+          /* Same value && same register type */
+          if (reg_compatible && (this_lir->target == check_lir->target)) {
+            DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LITERAL"));
+            EliminateLoad(check_lir, native_reg_id);
+          }
+        } else if ((alias_mem_mask.Equals(kEncodeDalvikReg) ||
+                    alias_mem_mask.Equals(kEncodeHeapRef)) &&
+                   alias_reg_list_mask.Intersects((check_lir->u.m.use_mask)->Without(kEncodeMem))) {
+          bool same_offset = (GetInstructionOffset(this_lir) == GetInstructionOffset(check_lir));
+          if (same_offset && !is_check_lir_load) {
+            if (check_lir->operands[0] != native_reg_id) {
+              DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "STORE STOP"));
+              stop_here = true;
+              break;
+            }
+          }
+
+          if (reg_compatible && same_offset &&
+              ((is_this_lir_load && is_check_lir_load)  /* LDR - LDR */ ||
+              (!is_this_lir_load && is_check_lir_load)  /* STR - LDR */ ||
+              (!is_this_lir_load && !is_check_lir_load) /* STR - STR */)) {
+            DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LOAD STORE"));
+            EliminateLoad(check_lir, native_reg_id);
+          }
+        } else {
+          /* Unsupported memory region */
+        }
+      }
+
+      if (pass_over) {
+        continue;
+      }
+
+      if (stop_here == false) {
+        bool stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_list_mask, check_lir);
+        if (stop_alias) {
+          /* Scan through alias list and if alias remove from alias list. */
+          for (auto &reg : alias_list) {
+            stop_alias = false;
+            ResourceMask alias_reg_mask = kEncodeNone;
+            SetupRegMask(&alias_reg_mask, reg);
+            stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_mask, check_lir);
+            if (stop_alias) {
+              ClearRegMask(&alias_reg_list_mask, reg);
+              alias_list.erase(std::remove(alias_list.begin(), alias_list.end(),
+                                           reg), alias_list.end());
+            }
+          }
+        }
+        ResourceMask stop_search_mask = stop_def_reg_mask.Union(stop_use_reg_mask);
+        stop_search_mask = stop_search_mask.Union(alias_reg_list_mask);
+        stop_here = LOAD_STORE_CHECK_REG_DEP(stop_search_mask, check_lir);
         if (stop_here) {
           break;
-        /* The check_lir has been transformed - check the next one */
-        } else if (check_lir->flags.is_nop) {
-          continue;
         }
-      }
-
-
-      /*
-       * this and check LIRs have no memory dependency. Now check if
-       * their register operands have any RAW, WAR, and WAW
-       * dependencies. If so, stop looking.
-       */
-      if (stop_here == false) {
-        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
-      }
-
-      if (stop_here == true) {
-        if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
-          // Prevent stores from being sunk between ops that generate ccodes and
-          // ops that use them.
-          uint64_t flags = GetTargetInstFlags(check_lir->opcode);
-          if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
-            check_lir = PREV_LIR(check_lir);
-            sink_distance--;
-          }
-        }
-        DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
-        /* Only sink store instructions */
-        if (sink_distance && !is_this_lir_load) {
-          LIR* new_store_lir =
-              static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
-          *new_store_lir = *this_lir;
-          /*
-           * Stop point found - insert *before* the check_lir
-           * since the instruction list is scanned in the
-           * top-down order.
-           */
-          InsertLIRBefore(check_lir, new_store_lir);
-          NopLIR(this_lir);
-        }
+      } else {
         break;
-      } else if (!check_lir->flags.is_nop) {
-        sink_distance++;
       }
     }
   }
@@ -385,7 +418,7 @@
 
       /* Found a new place to put the load - move it here */
       if (stop_here == true) {
-        DEBUG_OPT(dump_dependent_insn_pair(check_lir, this_lir "HOIST STOP"));
+        DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "HOIST STOP"));
         break;
       }
     }
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 9ce5bb7..ff5a46f 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -147,6 +147,15 @@
 }
 
 /*
+ * Clear the corresponding bit(s).
+ */
+inline void Mir2Lir::ClearRegMask(ResourceMask* mask, int reg) {
+  DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
+  DCHECK(reginfo_map_.Get(reg) != nullptr) << "No info for 0x" << reg;
+  *mask = mask->ClearBits(reginfo_map_.Get(reg)->DefUseMask());
+}
+
+/*
  * Set up the proper fields in the resource mask
  */
 inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index ed7fcdd..4d8b91e 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1311,4 +1311,9 @@
       rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report);
 }
 
+size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
+  UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
+  return 0;
+}
+
 }  // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index b832223..d03b859 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -51,6 +51,7 @@
 #define IS_BINARY_OP         (1ULL << kIsBinaryOp)
 #define IS_BRANCH            (1ULL << kIsBranch)
 #define IS_IT                (1ULL << kIsIT)
+#define IS_MOVE              (1ULL << kIsMoveOp)
 #define IS_LOAD              (1ULL << kMemLoad)
 #define IS_QUAD_OP           (1ULL << kIsQuadOp)
 #define IS_QUIN_OP           (1ULL << kIsQuinOp)
@@ -58,6 +59,7 @@
 #define IS_STORE             (1ULL << kMemStore)
 #define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
 #define IS_UNARY_OP          (1ULL << kIsUnaryOp)
+#define IS_VOLATILE          (1ULL << kMemVolatile)
 #define NEEDS_FIXUP          (1ULL << kPCRelFixup)
 #define NO_OPERAND           (1ULL << kNoOperand)
 #define REG_DEF0             (1ULL << kRegDef0)
@@ -94,6 +96,20 @@
 #define REG_USE_HI           (1ULL << kUseHi)
 #define REG_DEF_LO           (1ULL << kDefLo)
 #define REG_DEF_HI           (1ULL << kDefHi)
+#define SCALED_OFFSET_X0     (1ULL << kMemScaledx0)
+#define SCALED_OFFSET_X2     (1ULL << kMemScaledx2)
+#define SCALED_OFFSET_X4     (1ULL << kMemScaledx4)
+
+// Special load/stores
+#define IS_LOADX             (IS_LOAD | IS_VOLATILE)
+#define IS_LOAD_OFF          (IS_LOAD | SCALED_OFFSET_X0)
+#define IS_LOAD_OFF2         (IS_LOAD | SCALED_OFFSET_X2)
+#define IS_LOAD_OFF4         (IS_LOAD | SCALED_OFFSET_X4)
+
+#define IS_STOREX            (IS_STORE | IS_VOLATILE)
+#define IS_STORE_OFF         (IS_STORE | SCALED_OFFSET_X0)
+#define IS_STORE_OFF2        (IS_STORE | SCALED_OFFSET_X2)
+#define IS_STORE_OFF4        (IS_STORE | SCALED_OFFSET_X4)
 
 // Common combo register usage patterns.
 #define REG_DEF01            (REG_DEF0 | REG_DEF1)
@@ -552,6 +568,12 @@
 
     virtual ~Mir2Lir() {}
 
+    /**
+     * @brief Decodes the memory offset of a load/store LIR.
+     * @return The offset in bytes, with any scaled immediate expanded to its byte value.
+     */
+    virtual size_t GetInstructionOffset(LIR* lir);
+
     int32_t s4FromSwitchData(const void* switch_data) {
       return *reinterpret_cast<const int32_t*>(switch_data);
     }
@@ -641,7 +663,10 @@
     void SetMemRefType(LIR* lir, bool is_load, int mem_type);
     void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
     void SetupRegMask(ResourceMask* mask, int reg);
+    void ClearRegMask(ResourceMask* mask, int reg);
     void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
+    void EliminateLoad(LIR* lir, int reg_id);
+    void DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type);
     void DumpPromotionMap();
     void CodegenDump();
     LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 3bc79ad..0a46f2e 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -394,7 +394,7 @@
    * @brief Generate the debug_frame CFI information.
    * @returns pointer to vector containing CFE information
    */
-  static std::vector<uint8_t>* ReturnCommonCallFrameInformation();
+  static std::vector<uint8_t>* ReturnCommonCallFrameInformation(bool is_x86_64);
 
   /*
    * @brief Generate the debug_frame FDE information.
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 3111025..1bda738 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1443,49 +1443,8 @@
 }
 
 
-std::vector<uint8_t>* X86CFIInitialization() {
-  return X86Mir2Lir::ReturnCommonCallFrameInformation();
-}
-
-std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
-  std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;
-
-  // Length of the CIE (except for this field).
-  PushWord(*cfi_info, 16);
-
-  // CIE id.
-  PushWord(*cfi_info, 0xFFFFFFFFU);
-
-  // Version: 3.
-  cfi_info->push_back(0x03);
-
-  // Augmentation: empty string.
-  cfi_info->push_back(0x0);
-
-  // Code alignment: 1.
-  cfi_info->push_back(0x01);
-
-  // Data alignment: -4.
-  cfi_info->push_back(0x7C);
-
-  // Return address register (R8).
-  cfi_info->push_back(0x08);
-
-  // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
-  cfi_info->push_back(0x0C);
-  cfi_info->push_back(0x04);
-  cfi_info->push_back(0x04);
-
-  // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);.
-  cfi_info->push_back(0x2 << 6 | 0x08);
-  cfi_info->push_back(0x01);
-
-  // And 2 Noops to align to 4 byte boundary.
-  cfi_info->push_back(0x0);
-  cfi_info->push_back(0x0);
-
-  DCHECK_EQ(cfi_info->size() & 3, 0U);
-  return cfi_info;
+std::vector<uint8_t>* X86CFIInitialization(bool is_x86_64) {
+  return X86Mir2Lir::ReturnCommonCallFrameInformation(is_x86_64);
 }
 
 static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
@@ -1496,6 +1455,114 @@
   }
 }
 
+static void EncodeSignedLeb128(std::vector<uint8_t>& buf, int32_t value) {
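+  // Append a signed LEB128 value by delegating to the pointer-based encoder, mirroring the
+  // EncodeUnsignedLeb128() helper above.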
+  uint8_t buffer[12];
+  uint8_t *ptr = EncodeSignedLeb128(buffer, value);
+  for (uint8_t *p = buffer; p < ptr; p++) {
+    buf.push_back(*p);
+  }
+}
+
+std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation(bool is_x86_64) {
+  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
+
+  // Length (will be filled in later in this routine).
+  PushWord(*cfi_info, 0);
+
+  // CIE id: always 0.
+  PushWord(*cfi_info, 0);
+
+  // Version: always 1.
+  cfi_info->push_back(0x01);
+
+  // Augmentation: 'zR\0'
+  cfi_info->push_back(0x7a);
+  cfi_info->push_back(0x52);
+  cfi_info->push_back(0x0);
+
+  // Code alignment: 1.
+  EncodeUnsignedLeb128(*cfi_info, 1);
+
+  // Data alignment.
+  if (is_x86_64) {
+    EncodeSignedLeb128(*cfi_info, -8);
+  } else {
+    EncodeSignedLeb128(*cfi_info, -4);
+  }
+
+  // Return address register.
+  if (is_x86_64) {
+    // R16(RIP)
+    cfi_info->push_back(0x10);
+  } else {
+    // R8(EIP)
+    cfi_info->push_back(0x08);
+  }
+
+  // Augmentation length: 1.
+  cfi_info->push_back(1);
+
+  // Augmentation data: 0x03 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
+  cfi_info->push_back(0x03);
+
+  // Initial instructions.
+  if (is_x86_64) {
+    // DW_CFA_def_cfa R7(RSP) 8.
+    cfi_info->push_back(0x0c);
+    cfi_info->push_back(0x07);
+    cfi_info->push_back(0x08);
+
+    // DW_CFA_offset R16(RIP) 1 (* -8).
+    cfi_info->push_back(0x90);
+    cfi_info->push_back(0x01);
+  } else {
+    // DW_CFA_def_cfa R4(ESP) 4.
+    cfi_info->push_back(0x0c);
+    cfi_info->push_back(0x04);
+    cfi_info->push_back(0x04);
+
+    // DW_CFA_offset R8(EIP) 1 (* -4).
+    cfi_info->push_back(0x88);
+    cfi_info->push_back(0x01);
+  }
+
+  // Padding to a multiple of 4
+  while ((cfi_info->size() & 3) != 0) {
+    // DW_CFA_nop is encoded as 0.
+    cfi_info->push_back(0);
+  }
+
+  // Set the length of the CIE inside the generated bytes.
+  uint32_t length = cfi_info->size() - 4;
+  (*cfi_info)[0] = length;
+  (*cfi_info)[1] = length >> 8;
+  (*cfi_info)[2] = length >> 16;
+  (*cfi_info)[3] = length >> 24;
+  return cfi_info;
+}
+
+static bool ARTRegIDToDWARFRegID(bool is_x86_64, int art_reg_id, int* dwarf_reg_id) {
+  if (is_x86_64) {
+    switch (art_reg_id) {
+    case 3 : *dwarf_reg_id =  3; return true;  // %rbx
+    // This is the only discrepancy between ART & DWARF register numbering.
+    case 5 : *dwarf_reg_id =  6; return true;  // %rbp
+    case 12: *dwarf_reg_id = 12; return true;  // %r12
+    case 13: *dwarf_reg_id = 13; return true;  // %r13
+    case 14: *dwarf_reg_id = 14; return true;  // %r14
+    case 15: *dwarf_reg_id = 15; return true;  // %r15
+    default: return false;  // Should not get here
+    }
+  } else {
+    switch (art_reg_id) {
+    case 5: *dwarf_reg_id = 5; return true;  // %ebp
+    case 6: *dwarf_reg_id = 6; return true;  // %esi
+    case 7: *dwarf_reg_id = 7; return true;  // %edi
+    default: return false;  // Should not get here
+    }
+  }
+}
+
 std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
   std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;
 
@@ -1505,8 +1572,7 @@
   // Length (will be filled in later in this routine).
   PushWord(*cfi_info, 0);
 
-  // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
-  // one CIE for the whole debug_frame section.
+  // 'CIE_pointer' (filled in by linker).
   PushWord(*cfi_info, 0);
 
   // 'initial_location' (filled in by linker).
@@ -1515,6 +1581,9 @@
   // 'address_range' (number of bytes in the method).
   PushWord(*cfi_info, data_offset_);
 
+  // Augmentation length: 0.
+  cfi_info->push_back(0);
+
   // The instructions in the FDE.
   if (stack_decrement_ != nullptr) {
     // Advance LOC to just past the stack decrement.
@@ -1525,6 +1594,30 @@
     cfi_info->push_back(0x0e);
     EncodeUnsignedLeb128(*cfi_info, frame_size_);
 
+    // Handle register spills.
+    const uint32_t kSpillInstLen = (cu_->target64) ? 5 : 4;
+    const int kDataAlignmentFactor = (cu_->target64) ? -8 : -4;
+    uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
+    int offset = -(GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
+    for (int reg = 0; mask; mask >>= 1, reg++) {
+      if (mask & 0x1) {
+        pc += kSpillInstLen;
+
+        // Advance LOC past this instruction.
+        AdvanceLoc(*cfi_info, kSpillInstLen);
+
+        int dwarf_reg_id;
+        if (ARTRegIDToDWARFRegID(cu_->target64, reg, &dwarf_reg_id)) {
+          // DW_CFA_offset_extended_sf reg_no offset
+          cfi_info->push_back(0x11);
+          EncodeUnsignedLeb128(*cfi_info, dwarf_reg_id);
+          EncodeSignedLeb128(*cfi_info, offset / kDataAlignmentFactor);
+        }
+
+        offset += GetInstructionSetPointerSize(cu_->instruction_set);
+      }
+    }
+
     // We continue with that stack until the epilogue.
     if (stack_increment_ != nullptr) {
       uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
@@ -1534,10 +1627,10 @@
       // current state: DW_CFA_remember_state.
       cfi_info->push_back(0x0a);
 
-      // We have now popped the stack: DW_CFA_def_cfa_offset 4.  There is only the return
-      // PC on the stack now.
+      // We have now popped the stack: DW_CFA_def_cfa_offset 4/8.
+      // There is only the return PC on the stack now.
       cfi_info->push_back(0x0e);
-      EncodeUnsignedLeb128(*cfi_info, 4);
+      EncodeUnsignedLeb128(*cfi_info, GetInstructionSetPointerSize(cu_->instruction_set));
 
       // Everything after that is the same as before the epilogue.
       // Stack bump was followed by RET instruction.
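
The data-alignment factors above are emitted as signed LEB128, which is why the old hard-coded CIE byte 0x7c reappears here as EncodeSignedLeb128(-4). For reference, a standalone sketch of the standard DWARF algorithm (an illustration only, not ART's leb128.h helper):

#include <cstdint>
#include <cstdio>
#include <vector>

// Emit 7 bits per byte, least significant group first; the high bit marks continuation.
static void EncodeSignedLeb128Sketch(std::vector<uint8_t>& buf, int32_t value) {
  bool more = true;
  while (more) {
    uint8_t byte = value & 0x7f;
    value >>= 7;  // arithmetic shift preserves the sign on mainstream compilers
    // Stop once the remaining value is pure sign extension of bit 6.
    if ((value == 0 && (byte & 0x40) == 0) || (value == -1 && (byte & 0x40) != 0)) {
      more = false;
    } else {
      byte |= 0x80;  // continuation bit
    }
    buf.push_back(byte);
  }
}

int main() {
  std::vector<uint8_t> buf;
  EncodeSignedLeb128Sketch(buf, -4);  // x86 data alignment -> 0x7c
  EncodeSignedLeb128Sketch(buf, -8);  // x86-64 data alignment -> 0x78
  for (uint8_t b : buf) printf("0x%02x ", b);  // prints: 0x7c 0x78
  printf("\n");
  return 0;
}

Both factors fit in a single byte because bit 6 of the low group already matches the sign, so no continuation byte is needed.
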
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index ccffe5b..a77d79e 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -591,6 +591,7 @@
                            kDouble, kNotVolatile);
         res->target = data_target;
         res->flags.fixup = kFixupLoad;
+        Clobber(rl_method.reg);
         store_method_addr_used_ = true;
       } else {
         if (val_lo == 0) {
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index addd628..706933a 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -192,6 +192,18 @@
     return (reg & (kFloatingPoint | k64BitMask)) == kFloatingPoint;
   }
 
+  static constexpr bool Is32Bit(uint16_t reg) {
+    return ((reg & kShapeMask) == k32BitSolo);
+  }
+
+  static constexpr bool Is64Bit(uint16_t reg) {
+    return ((reg & k64BitMask) == k64Bits);
+  }
+
+  static constexpr bool Is64BitSolo(uint16_t reg) {
+    return ((reg & kShapeMask) == k64BitSolo);
+  }
+
   // Used to retrieve either the low register of a pair, or the only register.
   int GetReg() const {
     DCHECK(!IsPair()) << "reg_ = 0x" << std::hex << reg_;
@@ -265,11 +277,11 @@
   }
 
   static constexpr bool SameRegType(RegStorage reg1, RegStorage reg2) {
-    return (reg1.IsDouble() == reg2.IsDouble()) && (reg1.IsSingle() == reg2.IsSingle());
+    return ((reg1.reg_ & kShapeTypeMask) == (reg2.reg_ & kShapeTypeMask));
   }
 
   static constexpr bool SameRegType(int reg1, int reg2) {
-    return (IsDouble(reg1) == IsDouble(reg2)) && (IsSingle(reg1) == IsSingle(reg2));
+    return ((reg1 & kShapeTypeMask) == (reg2 & kShapeTypeMask));
   }
 
   // Create a 32-bit solo.
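
A toy model of the masked compare introduced in SameRegType above, with invented mask values (ART's real kShapeMask/kShapeTypeMask constants differ): if shape and type each occupy a fixed bit field, one masked equality test covers every predicate, such as IsDouble or IsSingle, that is a function of those fields.

#include <cassert>
#include <cstdint>

// Hypothetical layout: low nibble = shape, next nibble = type.
constexpr uint16_t kShapeMaskToy     = 0x000f;
constexpr uint16_t kTypeMaskToy      = 0x00f0;
constexpr uint16_t kShapeTypeMaskToy = kShapeMaskToy | kTypeMaskToy;

constexpr bool SameRegTypeToy(uint16_t reg1, uint16_t reg2) {
  return (reg1 & kShapeTypeMaskToy) == (reg2 & kShapeTypeMaskToy);
}

int main() {
  assert(SameRegTypeToy(0x0012, 0xff12));   // bits outside the mask are ignored
  assert(!SameRegTypeToy(0x0012, 0x0022));  // different type field
  return 0;
}
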
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 01c8f80..1b3f2a1 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -216,7 +216,7 @@
     verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
     bool is_range = (inst->Opcode() ==  Instruction::INVOKE_VIRTUAL_RANGE) ||
         (inst->Opcode() ==  Instruction::INVOKE_INTERFACE_RANGE);
-    const verifier::RegType&
+    verifier::RegType&
         reg_type(line->GetRegisterType(is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
 
     if (!reg_type.HasClass()) {
@@ -284,18 +284,18 @@
       const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
       bool is_safe_cast = false;
       if (code == Instruction::CHECK_CAST) {
-        const verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
-        const verifier::RegType& cast_type =
+        verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
+        verifier::RegType& cast_type =
             method_verifier->ResolveCheckedClass(inst->VRegB_21c());
         is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type);
       } else {
-        const verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
+        verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
         // We only know it's safe to assign to an array if the array type is precise. For example,
         // an Object[] can have any type of object stored in it, but it may also be assigned a
         // String[] in which case the stores need to be of Strings.
         if (array_type.IsPreciseReference()) {
-          const verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
-          const verifier::RegType& component_type = method_verifier->GetRegTypeCache()
+          verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
+          verifier::RegType& component_type = method_verifier->GetRegTypeCache()
               ->GetComponentType(array_type, method_verifier->GetClassLoader());
           is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type);
         }
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 4274386..1fde12e 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -106,14 +106,14 @@
   // | .strtab\0               |  (Optional)
   // | .debug_str\0            |  (Optional)
   // | .debug_info\0           |  (Optional)
-  // | .debug_frame\0          |  (Optional)
+  // | .eh_frame\0             |  (Optional)
   // | .debug_abbrev\0         |  (Optional)
   // +-------------------------+  (Optional)
   // | .debug_str              |  (Optional)
   // +-------------------------+  (Optional)
   // | .debug_info             |  (Optional)
   // +-------------------------+  (Optional)
-  // | .debug_frame            |  (Optional)
+  // | .eh_frame               |  (Optional)
   // +-------------------------+  (Optional)
   // | .debug_abbrev           |  (Optional)
   // +-------------------------+
@@ -127,7 +127,7 @@
   // | Elf32_Shdr .shstrtab    |
   // | Elf32_Shdr .debug_str   |  (Optional)
   // | Elf32_Shdr .debug_info  |  (Optional)
-  // | Elf32_Shdr .debug_frame |  (Optional)
+  // | Elf32_Shdr .eh_frame    |  (Optional)
   // | Elf32_Shdr .debug_abbrev|  (Optional)
   // +-------------------------+
 
@@ -844,14 +844,14 @@
     ElfRawSectionBuilder debug_info(".debug_info",   SHT_PROGBITS, 0, nullptr, 0, 1, 0);
     ElfRawSectionBuilder debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
     ElfRawSectionBuilder debug_str(".debug_str",    SHT_PROGBITS, 0, nullptr, 0, 1, 0);
-    ElfRawSectionBuilder debug_frame(".debug_frame",  SHT_PROGBITS, 0, nullptr, 0, 4, 0);
-    debug_frame.SetBuffer(*compiler_driver_->GetCallFrameInformation());
+    ElfRawSectionBuilder eh_frame(".eh_frame",  SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
+    eh_frame.SetBuffer(*compiler_driver_->GetCallFrameInformation());
 
     FillInCFIInformation(oat_writer, debug_info.GetBuffer(),
                          debug_abbrev.GetBuffer(), debug_str.GetBuffer());
     builder.RegisterRawSection(debug_info);
     builder.RegisterRawSection(debug_abbrev);
-    builder.RegisterRawSection(debug_frame);
+    builder.RegisterRawSection(eh_frame);
     builder.RegisterRawSection(debug_str);
   }
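
The rename also changes semantics: .debug_frame is debugger-only data, while .eh_frame carries runtime unwind information and so must be mapped into the process image, hence the added SHF_ALLOC flag. A minimal sketch of the distinction using plain <elf.h> (not ART's section builder):

#include <elf.h>
#include <cstdio>

int main() {
  Elf32_Shdr debug_frame = {};    // .debug_frame: not loaded at runtime
  Elf32_Shdr eh_frame = {};
  eh_frame.sh_flags = SHF_ALLOC;  // .eh_frame: part of the loaded image
  printf(".debug_frame loaded: %d\n", (debug_frame.sh_flags & SHF_ALLOC) != 0);
  printf(".eh_frame loaded:    %d\n", (eh_frame.sh_flags & SHF_ALLOC) != 0);
  return 0;
}
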
 
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 22f36f4..9da59ab 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -410,8 +410,16 @@
             int cur_offset = cfi_info->size();
             cfi_info->insert(cfi_info->end(), fde->begin(), fde->end());
 
+            // Set the 'CIE_pointer' field to cur_offset+4.
+            uint32_t CIE_pointer = cur_offset + 4;
+            uint32_t offset_to_update = cur_offset + sizeof(uint32_t);
+            (*cfi_info)[offset_to_update+0] = CIE_pointer;
+            (*cfi_info)[offset_to_update+1] = CIE_pointer >> 8;
+            (*cfi_info)[offset_to_update+2] = CIE_pointer >> 16;
+            (*cfi_info)[offset_to_update+3] = CIE_pointer >> 24;
+
             // Set the 'initial_location' field to address the start of the method.
-            uint32_t offset_to_update = cur_offset + 2*sizeof(uint32_t);
+            offset_to_update = cur_offset + 2*sizeof(uint32_t);
             (*cfi_info)[offset_to_update+0] = quick_code_start;
             (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
             (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
diff --git a/compiler/utils/scoped_arena_allocator.h b/compiler/utils/scoped_arena_allocator.h
index 37799cb..9f33f2d 100644
--- a/compiler/utils/scoped_arena_allocator.h
+++ b/compiler/utils/scoped_arena_allocator.h
@@ -222,11 +222,11 @@
   }
 
   void construct(pointer p, const_reference val) {
-    DebugStackIndirectTopRef::CheckTop();
+    // Don't CheckTop(); allow reusing existing capacity of a vector/deque below the top.
     new (static_cast<void*>(p)) value_type(val);
   }
   void destroy(pointer p) {
-    DebugStackIndirectTopRef::CheckTop();
+    // Don't CheckTop(); allow reusing existing capacity of a vector/deque below the top.
     p->~value_type();
   }
 
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 1522129..6d74b83 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1113,7 +1113,8 @@
 
 DEFINE_FUNCTION art_quick_resolution_trampoline
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
-    PUSH esp                      // pass SP
+    movl %esp, %edi
+    PUSH edi                      // pass SP. Do not just PUSH ESP; that messes up unwinding.
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH ecx                      // pass receiver
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 5e784b1..bc13379 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3949,7 +3949,8 @@
   HeapChunkContext(bool merge, bool native)
       : buf_(16384 - 16),
         type_(0),
-        merge_(merge) {
+        merge_(merge),
+        chunk_overhead_(0) {
     Reset();
     if (native) {
       type_ = CHUNK_TYPE("NHSG");
@@ -3964,6 +3965,14 @@
     }
   }
 
+  void SetChunkOverhead(size_t chunk_overhead) {
+    chunk_overhead_ = chunk_overhead;
+  }
+
+  void ResetStartOfNextChunk() {
+    startOfNextMemoryChunk_ = nullptr;
+  }
+
   void EnsureHeader(const void* chunk_ptr) {
     if (!needHeader_) {
       return;
@@ -4008,7 +4017,7 @@
 
   void Reset() {
     p_ = &buf_[0];
-    startOfNextMemoryChunk_ = NULL;
+    ResetStartOfNextChunk();
     totalAllocationUnits_ = 0;
     needHeader_ = true;
     pieceLenField_ = NULL;
@@ -4035,6 +4044,8 @@
      */
     bool native = type_ == CHUNK_TYPE("NHSG");
 
+    // TODO: I'm not sure using the start of the next chunk works well with multiple spaces. We
+    // shouldn't count gaps in between spaces as free memory.
     if (startOfNextMemoryChunk_ != NULL) {
         // Transmit any pending free memory. Native free memory of
         // over kMaxFreeLen could be because of the use of mmaps, so
@@ -4061,11 +4072,8 @@
     // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
     // If it's the same, we should combine them.
     uint8_t state = ExamineObject(obj, native);
-    // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
-    // allocation then the first sizeof(size_t) may belong to it.
-    const size_t dlMallocOverhead = sizeof(size_t);
-    AppendChunk(state, start, used_bytes + dlMallocOverhead);
-    startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + dlMallocOverhead;
+    AppendChunk(state, start, used_bytes + chunk_overhead_);
+    startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
   }
 
   void AppendChunk(uint8_t state, void* ptr, size_t length)
@@ -4154,10 +4162,18 @@
   uint32_t type_;
   bool merge_;
   bool needHeader_;
+  size_t chunk_overhead_;
 
   DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
 };
 
+static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
+  HeapChunkContext::HeapChunkCallback(
+      obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
+}
+
 void Dbg::DdmSendHeapSegments(bool native) {
   Dbg::HpsgWhen when;
   Dbg::HpsgWhat what;
@@ -4198,14 +4214,27 @@
 #endif
   } else {
     gc::Heap* heap = Runtime::Current()->GetHeap();
-    const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
-    typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
-    for (It cur = spaces.begin(), end = spaces.end(); cur != end; ++cur) {
-      if ((*cur)->IsMallocSpace()) {
-        (*cur)->AsMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+    for (const auto& space : heap->GetContinuousSpaces()) {
+      if (space->IsDlMallocSpace()) {
+        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
+        // allocation then the first sizeof(size_t) may belong to it.
+        context.SetChunkOverhead(sizeof(size_t));
+        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+      } else if (space->IsRosAllocSpace()) {
+        context.SetChunkOverhead(0);
+        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+      } else if (space->IsBumpPointerSpace()) {
+        context.SetChunkOverhead(0);
+        ReaderMutexLock mu(self, *Locks::mutator_lock_);
+        WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
+        space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
+      } else {
+        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
       }
+      context.ResetStartOfNextChunk();
     }
     // Walk the large objects, these are not in the AllocSpace.
+    context.SetChunkOverhead(0);
     heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
   }
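
A minimal sketch (hypothetical names, not ART's API) of the per-space accounting above: each visited chunk is reported as used_bytes plus the owning allocator's per-chunk header, and that same sum decides where the next chunk is expected to start.

#include <cstddef>
#include <cstdint>

struct WalkContext {
  size_t chunk_overhead;    // sizeof(size_t) for dlmalloc, 0 for rosalloc et al.
  uintptr_t start_of_next;  // reset to 0 between spaces
};

static void VisitChunk(WalkContext* ctx, uintptr_t start, size_t used_bytes) {
  size_t reported = used_bytes + ctx->chunk_overhead;  // what AppendChunk would see
  ctx->start_of_next = start + reported;               // expected start of the next chunk
}
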
 
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index e5402e1..594c65f 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1033,18 +1033,13 @@
 }
 
 static bool IsFDE(FDE* frame) {
-  // TODO This seems to be the constant everyone uses (for the .debug_frame
-  // section at least), however we should investigate this further.
-  const uint32_t kDwarfCIE_id = 0xffffffff;
-  const uint32_t kReservedLengths[] = {0xffffffff, 0xfffffff0};
-  return frame->CIE_pointer != kDwarfCIE_id &&
-      frame->raw_length_ != kReservedLengths[0] && frame->raw_length_ != kReservedLengths[1];
+  return frame->CIE_pointer != 0;
 }
 
 // TODO This only works for 32-bit Elf Files.
-static bool FixupDebugFrame(uintptr_t text_start, byte* dbg_frame, size_t dbg_frame_size) {
-  FDE* last_frame = reinterpret_cast<FDE*>(dbg_frame + dbg_frame_size);
-  FDE* frame = NextFDE(reinterpret_cast<FDE*>(dbg_frame));
+static bool FixupEHFrame(uintptr_t text_start, byte* eh_frame, size_t eh_frame_size) {
+  FDE* last_frame = reinterpret_cast<FDE*>(eh_frame + eh_frame_size);
+  FDE* frame = NextFDE(reinterpret_cast<FDE*>(eh_frame));
   for (; frame < last_frame; frame = NextFDE(frame)) {
     if (!IsFDE(frame)) {
       return false;
@@ -1301,7 +1296,7 @@
 static bool FixupDebugSections(const byte* dbg_abbrev, size_t dbg_abbrev_size,
                                uintptr_t text_start,
                                byte* dbg_info, size_t dbg_info_size,
-                               byte* dbg_frame, size_t dbg_frame_size) {
+                               byte* eh_frame, size_t eh_frame_size) {
   std::unique_ptr<DebugAbbrev> abbrev(DebugAbbrev::Create(dbg_abbrev, dbg_abbrev_size));
   if (abbrev.get() == nullptr) {
     return false;
@@ -1313,7 +1308,7 @@
     return false;
   }
   return FixupDebugInfo(text_start, iter.get())
-      && FixupDebugFrame(text_start, dbg_frame, dbg_frame_size);
+      && FixupEHFrame(text_start, eh_frame, eh_frame_size);
 }
 
 void ElfFile::GdbJITSupport() {
@@ -1334,20 +1329,15 @@
   // Do we have interesting sections?
   const Elf32_Shdr* debug_info = all.FindSectionByName(".debug_info");
   const Elf32_Shdr* debug_abbrev = all.FindSectionByName(".debug_abbrev");
-  const Elf32_Shdr* debug_frame = all.FindSectionByName(".debug_frame");
+  const Elf32_Shdr* eh_frame = all.FindSectionByName(".eh_frame");
   const Elf32_Shdr* debug_str = all.FindSectionByName(".debug_str");
   const Elf32_Shdr* strtab_sec = all.FindSectionByName(".strtab");
   const Elf32_Shdr* symtab_sec = all.FindSectionByName(".symtab");
   Elf32_Shdr* text_sec = all.FindSectionByName(".text");
-  if (debug_info == nullptr || debug_abbrev == nullptr || debug_frame == nullptr ||
+  if (debug_info == nullptr || debug_abbrev == nullptr || eh_frame == nullptr ||
       debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr || symtab_sec == nullptr) {
     return;
   }
-#ifdef __LP64__
-  if (true) {
-    return;  // No ELF debug support in 64bit.
-  }
-#endif
   // We need to add in a strtab and symtab to the image.
   // all is MAP_PRIVATE so it can be written to freely.
   // We also already have strtab and symtab so we are fine there.
@@ -1364,7 +1354,7 @@
   if (!FixupDebugSections(
         all.Begin() + debug_abbrev->sh_offset, debug_abbrev->sh_size, text_sec->sh_addr,
         all.Begin() + debug_info->sh_offset, debug_info->sh_size,
-        all.Begin() + debug_frame->sh_offset, debug_frame->sh_size)) {
+        all.Begin() + eh_frame->sh_offset, eh_frame->sh_size)) {
     LOG(ERROR) << "Failed to load GDB data";
     return;
   }
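
The simplified IsFDE() relies on a property of .eh_frame that .debug_frame lacks: a CIE stores 0 in the word after its length, while every FDE stores a nonzero back-pointer to its CIE. A sketch assuming the same 32-bit initial-length layout as the FDE struct used above:

#include <cstdint>

// Hypothetical mirror of the first two words of a frame entry.
struct FrameEntrySketch {
  uint32_t raw_length_;   // initial length (32-bit DWARF)
  uint32_t CIE_pointer;   // 0 for a CIE in .eh_frame, nonzero for an FDE
};

static bool LooksLikeFDE(const FrameEntrySketch* e) {
  return e->CIE_pointer != 0;
}
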
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1d80833..bf8cca7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1623,12 +1623,12 @@
         std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
         RemoveSpace(temp_space_);
         temp_space_ = nullptr;
+        mem_map->Protect(PROT_READ | PROT_WRITE);
         CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize, mem_map->Size(),
                               mem_map->Size());
         mem_map.release();
         // Compact to the main space from the bump pointer space, don't need to swap semispaces.
         AddSpace(main_space_);
-        main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
         Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
         mem_map.reset(bump_pointer_space_->ReleaseMemMap());
         RemoveSpace(bump_pointer_space_);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 5177cb9..fc6d2ef 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -334,9 +334,11 @@
           relocated_version_used = true;
         } else {
           image_filename = &system_filename;
+          is_system = true;
         }
       } else if (has_system) {
         image_filename = &system_filename;
+        is_system = true;
       } else {
         CHECK(has_cache);
         image_filename = &cache_filename;
@@ -351,8 +353,12 @@
       image_lock.Init(image_filename->c_str(), &error_msg);
       LOG(INFO) << "Using image file " << image_filename->c_str() << " for image location "
                 << image_location;
+      // If we are in /system we can assume the image is good. We can also
+      // assume this if we are using a relocated image (i.e. the image checksum
+      // matches), since it differs from the original only by the offset. We need
+      // this to make sure that host tests continue to work.
       space = ImageSpace::Init(image_filename->c_str(), image_location,
-                               false, &error_msg);
+                               !(is_system || relocated_version_used), &error_msg);
     }
     if (space != nullptr) {
       return space;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index d5b90f2..43b9912 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -677,13 +677,15 @@
     return soa.AddLocalReference<jclass>(c->GetSuperClass());
   }
 
+  // Note: java_class1 should be safely castable to java_class2, and
+  // not the other way around.
   static jboolean IsAssignableFrom(JNIEnv* env, jclass java_class1, jclass java_class2) {
     CHECK_NON_NULL_ARGUMENT_RETURN(java_class1, JNI_FALSE);
     CHECK_NON_NULL_ARGUMENT_RETURN(java_class2, JNI_FALSE);
     ScopedObjectAccess soa(env);
     mirror::Class* c1 = soa.Decode<mirror::Class*>(java_class1);
     mirror::Class* c2 = soa.Decode<mirror::Class*>(java_class2);
-    return c1->IsAssignableFrom(c2) ? JNI_TRUE : JNI_FALSE;
+    return c2->IsAssignableFrom(c1) ? JNI_TRUE : JNI_FALSE;
   }
 
   static jboolean IsInstanceOf(JNIEnv* env, jobject jobj, jclass java_class) {
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 7c7e60c..da3080f 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -950,8 +950,28 @@
   jclass string_class = env_->FindClass("java/lang/String");
   ASSERT_NE(string_class, nullptr);
 
-  ASSERT_TRUE(env_->IsAssignableFrom(object_class, string_class));
-  ASSERT_FALSE(env_->IsAssignableFrom(string_class, object_class));
+  // A superclass is assignable from an instance of its
+  // subclass but not vice versa.
+  ASSERT_TRUE(env_->IsAssignableFrom(string_class, object_class));
+  ASSERT_FALSE(env_->IsAssignableFrom(object_class, string_class));
+
+  jclass charsequence_interface = env_->FindClass("java/lang/CharSequence");
+  ASSERT_NE(charsequence_interface, nullptr);
+
+  // An interface is assignable from an instance of an implementing
+  // class but not vice versa.
+  ASSERT_TRUE(env_->IsAssignableFrom(string_class, charsequence_interface));
+  ASSERT_FALSE(env_->IsAssignableFrom(charsequence_interface, string_class));
+
+  // Check that arrays are covariant.
+  jclass string_array_class = env_->FindClass("[Ljava/lang/String;");
+  ASSERT_NE(string_array_class, nullptr);
+  jclass object_array_class = env_->FindClass("[Ljava/lang/Object;");
+  ASSERT_NE(object_array_class, nullptr);
+  ASSERT_TRUE(env_->IsAssignableFrom(string_array_class, object_array_class));
+  ASSERT_FALSE(env_->IsAssignableFrom(object_array_class, string_array_class));
+
+  // Primitive types are tested in 004-JniTest.
 
   // Null as either class should fail.
   CheckJniAbortCatcher jni_abort_catcher;
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 08cff99..d3fcb55 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -19,6 +19,8 @@
 
 #include "dex_cache.h"
 
+#include "base/logging.h"
+#include "mirror/class.h"
 #include "runtime.h"
 
 namespace art {
@@ -41,6 +43,12 @@
   }
 }
 
+inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) {
+  // TODO default transaction support.
+  DCHECK(resolved == nullptr || !resolved->IsErroneous());
+  GetResolvedTypes()->Set(type_idx, resolved);
+}
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index bfd603a..2c5fbcd 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -103,11 +103,8 @@
     return GetResolvedTypes()->Get(type_idx);
   }
 
-  void SetResolvedType(uint32_t type_idx, Class* resolved) ALWAYS_INLINE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    // TODO default transaction support.
-    GetResolvedTypes()->Set(type_idx, resolved);
-  }
+  void SetResolvedType(uint32_t type_idx, Class* resolved)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   ArtMethod* GetResolvedMethod(uint32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index 496a1b2..fb708a2 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -38,6 +38,7 @@
 }
 
 static void Runtime_nativeExit(JNIEnv*, jclass, jint status) {
+  LOG(INFO) << "System.exit called, status: " << status;
   Runtime::Current()->CallExitHook(status);
   exit(status);
 }
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index 941fd0e..f7e238c 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -52,6 +52,7 @@
     return *this;
   }
 
+  allocator_type get_allocator() const { return map_.get_allocator(); }
   key_compare key_comp() const { return map_.key_comp(); }
   value_compare value_comp() const { return map_.value_comp(); }
 
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 62ecf4b..d4fe106 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -66,9 +66,9 @@
   return !failure_messages_.empty();
 }
 
-inline const RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
+inline RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
   DCHECK(!HasFailures());
-  const RegType& result = ResolveClassAndCheckAccess(class_idx);
+  RegType& result = ResolveClassAndCheckAccess(class_idx);
   DCHECK(!HasFailures());
   return result;
 }
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 2571cf1..18f7626 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1175,7 +1175,7 @@
     // If this is a constructor for a class other than java.lang.Object, mark the first ("this")
     // argument as uninitialized. This restricts field access until the superclass constructor is
     // called.
-    const RegType& declaring_class = GetDeclaringClass();
+    RegType& declaring_class = GetDeclaringClass();
     if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
       reg_line->SetRegisterType(arg_start + cur_arg,
                                 reg_types_.UninitializedThisArgument(declaring_class));
@@ -1207,7 +1207,7 @@
         // it's effectively considered initialized the instant we reach here (in the sense that we
         // can return without doing anything or call virtual methods).
         {
-          const RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
+          RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
           if (!reg_type.IsNonZeroReferenceTypes()) {
             DCHECK(HasFailures());
             return false;
@@ -1241,8 +1241,8 @@
           return false;
         }
 
-        const RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
-        const RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
+        RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
+        RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
         reg_line->SetRegisterTypeWide(arg_start + cur_arg, lo_half, hi_half);
         cur_arg++;
         break;
@@ -1536,7 +1536,7 @@
        * This statement can only appear as the first instruction in an exception handler. We verify
        * that as part of extracting the exception type from the catch block list.
        */
-      const RegType& res_type = GetCaughtExceptionType();
+      RegType& res_type = GetCaughtExceptionType();
       work_line_->SetRegisterType(inst->VRegA_11x(), res_type);
       break;
     }
@@ -1550,7 +1550,7 @@
     case Instruction::RETURN:
       if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
         /* check the method signature */
-        const RegType& return_type = GetMethodReturnType();
+        RegType& return_type = GetMethodReturnType();
         if (!return_type.IsCategory1Types()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type "
                                             << return_type;
@@ -1558,7 +1558,7 @@
           // Compilers may generate synthetic functions that write byte values into boolean fields.
           // Also, it may use integer values for boolean, byte, short, and character return types.
           const uint32_t vregA = inst->VRegA_11x();
-          const RegType& src_type = work_line_->GetRegisterType(vregA);
+          RegType& src_type = work_line_->GetRegisterType(vregA);
           bool use_src = ((return_type.IsBoolean() && src_type.IsByte()) ||
                           ((return_type.IsBoolean() || return_type.IsByte() ||
                            return_type.IsShort() || return_type.IsChar()) &&
@@ -1575,7 +1575,7 @@
     case Instruction::RETURN_WIDE:
       if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
         /* check the method signature */
-        const RegType& return_type = GetMethodReturnType();
+        RegType& return_type = GetMethodReturnType();
         if (!return_type.IsCategory2Types()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-wide not expected";
         } else {
@@ -1590,7 +1590,7 @@
       break;
     case Instruction::RETURN_OBJECT:
       if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
-        const RegType& return_type = GetMethodReturnType();
+        RegType& return_type = GetMethodReturnType();
         if (!return_type.IsReferenceTypes()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
         } else {
@@ -1598,7 +1598,7 @@
           DCHECK(!return_type.IsZero());
           DCHECK(!return_type.IsUninitializedReference());
           const uint32_t vregA = inst->VRegA_11x();
-          const RegType& reg_type = work_line_->GetRegisterType(vregA);
+          RegType& reg_type = work_line_->GetRegisterType(vregA);
           // Disallow returning uninitialized values and verify that the reference in vAA is an
           // instance of the "return_type"
           if (reg_type.IsUninitializedTypes()) {
@@ -1645,29 +1645,29 @@
       /* could be long or double; resolved upon use */
     case Instruction::CONST_WIDE_16: {
       int64_t val = static_cast<int16_t>(inst->VRegB_21s());
-      const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
-      const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+      RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+      RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
       work_line_->SetRegisterTypeWide(inst->VRegA_21s(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE_32: {
       int64_t val = static_cast<int32_t>(inst->VRegB_31i());
-      const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
-      const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+      RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+      RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
       work_line_->SetRegisterTypeWide(inst->VRegA_31i(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE: {
       int64_t val = inst->VRegB_51l();
-      const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
-      const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+      RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+      RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
       work_line_->SetRegisterTypeWide(inst->VRegA_51l(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE_HIGH16: {
       int64_t val = static_cast<uint64_t>(inst->VRegB_21h()) << 48;
-      const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
-      const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+      RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+      RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
       work_line_->SetRegisterTypeWide(inst->VRegA_21h(), lo, hi);
       break;
     }
@@ -1680,7 +1680,7 @@
     case Instruction::CONST_CLASS: {
       // Get type from instruction if unresolved then we need an access check
       // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
-      const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+      RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
       // Register holds class, ie its type is class, on error it will hold Conflict.
       work_line_->SetRegisterType(inst->VRegA_21c(),
                                   res_type.IsConflict() ? res_type
@@ -1726,7 +1726,7 @@
        */
       const bool is_checkcast = (inst->Opcode() == Instruction::CHECK_CAST);
       const uint32_t type_idx = (is_checkcast) ? inst->VRegB_21c() : inst->VRegC_22c();
-      const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+      RegType& res_type = ResolveClassAndCheckAccess(type_idx);
       if (res_type.IsConflict()) {
         // If this is a primitive type, fail HARD.
         mirror::Class* klass = (*dex_cache_)->GetResolvedType(type_idx);
@@ -1745,7 +1745,7 @@
       }
       // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
       uint32_t orig_type_reg = (is_checkcast) ? inst->VRegA_21c() : inst->VRegB_22c();
-      const RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
+      RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
       if (!res_type.IsNonZeroReferenceTypes()) {
         if (is_checkcast) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
@@ -1768,7 +1768,7 @@
       break;
     }
     case Instruction::ARRAY_LENGTH: {
-      const RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
+      RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
       if (res_type.IsReferenceTypes()) {
         if (!res_type.IsArrayTypes() && !res_type.IsZero()) {  // ie not an array or null
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
@@ -1781,7 +1781,7 @@
       break;
     }
     case Instruction::NEW_INSTANCE: {
-      const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+      RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
       if (res_type.IsConflict()) {
         DCHECK_NE(failures_.size(), 0U);
         break;  // bad class
@@ -1793,7 +1793,7 @@
             << "new-instance on primitive, interface or abstract class" << res_type;
         // Soft failure so carry on to set register type.
       }
-      const RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
+      RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
       // Any registers holding previous allocations from this address that have not yet been
       // initialized must be marked invalid.
       work_line_->MarkUninitRefsAsInvalid(uninit_type);
@@ -1846,7 +1846,7 @@
       work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
       break;
     case Instruction::THROW: {
-      const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
+      RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
       if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
         Fail(res_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS : VERIFY_ERROR_BAD_CLASS_SOFT)
             << "thrown class " << res_type << " not instanceof Throwable";
@@ -1867,14 +1867,14 @@
 
     case Instruction::FILL_ARRAY_DATA: {
       /* Similar to the verification done for APUT */
-      const RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
+      RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
       /* array_type can be null if the reg type is Zero */
       if (!array_type.IsZero()) {
         if (!array_type.IsArrayTypes()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type "
                                             << array_type;
         } else {
-          const RegType& component_type = reg_types_.GetComponentType(array_type,
+          RegType& component_type = reg_types_.GetComponentType(array_type,
                                                                       class_loader_->Get());
           DCHECK(!component_type.IsConflict());
           if (component_type.IsNonZeroReferenceTypes()) {
@@ -1902,8 +1902,8 @@
     }
     case Instruction::IF_EQ:
     case Instruction::IF_NE: {
-      const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
-      const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+      RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+      RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
       bool mismatch = false;
       if (reg_type1.IsZero()) {  // zero then integral or reference expected
         mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
@@ -1922,8 +1922,8 @@
     case Instruction::IF_GE:
     case Instruction::IF_GT:
     case Instruction::IF_LE: {
-      const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
-      const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+      RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+      RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
       if (!reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to 'if' (" << reg_type1 << ","
                                           << reg_type2 << ") must be integral";
@@ -1932,7 +1932,7 @@
     }
     case Instruction::IF_EQZ:
     case Instruction::IF_NEZ: {
-      const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+      RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
       if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
                                           << " unexpected as arg to if-eqz/if-nez";
@@ -1978,8 +1978,8 @@
         // type is assignable to the original then allow optimization. This check is performed to
         // ensure that subsequent merges don't lose type information - such as becoming an
         // interface from a class that would lose information relevant to field checks.
-        const RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
-        const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
+        RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
+        RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
 
         if (!orig_type.Equals(cast_type) &&
             !cast_type.IsUnresolvedTypes() && !orig_type.IsUnresolvedTypes() &&
@@ -2034,7 +2034,7 @@
     case Instruction::IF_GEZ:
     case Instruction::IF_GTZ:
     case Instruction::IF_LEZ: {
-      const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+      RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
       if (!reg_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
                                           << " unexpected as arg to if-ltz/if-gez/if-gtz/if-lez";
@@ -2183,7 +2183,7 @@
                        inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
       mirror::ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL, is_range,
                                                               is_super);
-      const RegType* return_type = nullptr;
+      RegType* return_type = nullptr;
       if (called_method != nullptr) {
         Thread* self = Thread::Current();
         StackHandleScope<1> hs(self);
@@ -2239,7 +2239,7 @@
          * allowing the latter only if the "this" argument is the same as the "this" argument to
          * this method (which implies that we're in a constructor ourselves).
          */
-        const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+        RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
         if (this_type.IsConflict())  // failure.
           break;
 
@@ -2250,7 +2250,7 @@
         }
 
         /* must be in same class or in superclass */
-        // const RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
+        // RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
         // TODO: re-enable constructor type verification
         // if (this_super_klass.IsConflict()) {
           // Unknown super class, fail so we re-check at runtime.
@@ -2271,7 +2271,7 @@
          */
         work_line_->MarkRefsAsInitialized(this_type);
       }
-      const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
+      RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
                                                              return_type_descriptor, false);
       if (!return_type.IsLowHalf()) {
         work_line_->SetResultRegisterType(return_type);
@@ -2297,7 +2297,7 @@
         } else {
           descriptor = called_method->GetReturnTypeDescriptor();
         }
-        const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+        RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
                                                                false);
         if (!return_type.IsLowHalf()) {
           work_line_->SetResultRegisterType(return_type);
@@ -2325,7 +2325,7 @@
       /* Get the type of the "this" arg, which should either be a sub-interface of called
        * interface or Object (see comments in RegType::JoinClass).
        */
-      const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+      RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
       if (this_type.IsZero()) {
         /* null pointer always passes (and always fails at runtime) */
       } else {
@@ -2355,7 +2355,7 @@
       } else {
         descriptor = abs_method->GetReturnTypeDescriptor();
       }
-      const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+      RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
                                                              false);
       if (!return_type.IsLowHalf()) {
         work_line_->SetResultRegisterType(return_type);
@@ -2621,7 +2621,7 @@
       mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
       if (called_method != NULL) {
         const char* descriptor = called_method->GetReturnTypeDescriptor();
-        const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+        RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
                                                                false);
         if (!return_type.IsLowHalf()) {
           work_line_->SetResultRegisterType(return_type);
@@ -2905,11 +2905,11 @@
   return true;
 }  // NOLINT(readability/fn_size)
 
-const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
+RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
   const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
-  const RegType& referrer = GetDeclaringClass();
+  RegType& referrer = GetDeclaringClass();
   mirror::Class* klass = (*dex_cache_)->GetResolvedType(class_idx);
-  const RegType& result =
+  RegType& result =
       klass != NULL ? reg_types_.FromClass(descriptor, klass,
                                            klass->CannotBeAssignedFromOtherTypes())
                     : reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
@@ -2932,8 +2932,8 @@
   return result;
 }
 
-const RegType& MethodVerifier::GetCaughtExceptionType() {
-  const RegType* common_super = NULL;
+RegType& MethodVerifier::GetCaughtExceptionType() {
+  RegType* common_super = NULL;
   if (code_item_->tries_size_ != 0) {
     const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
     uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
@@ -2944,7 +2944,7 @@
           if (iterator.GetHandlerTypeIndex() == DexFile::kDexNoIndex16) {
             common_super = &reg_types_.JavaLangThrowable(false);
           } else {
-            const RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
+            RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
             if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception)) {
               if (exception.IsUnresolvedTypes()) {
                 // We don't know enough about the type. Fail here and let runtime handle it.
@@ -2979,7 +2979,7 @@
 mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx,
                                                                MethodType method_type) {
   const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
-  const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
+  RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
   if (klass_type.IsConflict()) {
     std::string append(" in attempt to access method ");
     append += dex_file_->GetMethodName(method_id);
@@ -2990,7 +2990,7 @@
     return NULL;  // Can't resolve Class so no more to do here
   }
   mirror::Class* klass = klass_type.GetClass();
-  const RegType& referrer = GetDeclaringClass();
+  RegType& referrer = GetDeclaringClass();
   mirror::ArtMethod* res_method = (*dex_cache_)->GetResolvedMethod(dex_method_idx);
   if (res_method == NULL) {
     const char* name = dex_file_->GetMethodName(method_id);
@@ -3097,7 +3097,7 @@
    * rigorous check here (which is okay since we have to do it at runtime).
    */
   if (method_type != METHOD_STATIC) {
-    const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+    RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
     if (actual_arg_type.IsConflict()) {  // GetInvocationThis failed.
       CHECK(have_pending_hard_failure_);
       return nullptr;
@@ -3118,7 +3118,7 @@
       }
     }
     if (method_type != METHOD_INTERFACE && !actual_arg_type.IsZero()) {
-      const RegType* res_method_class;
+      RegType* res_method_class;
       if (res_method != nullptr) {
         mirror::Class* klass = res_method->GetDeclaringClass();
         res_method_class = &reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
@@ -3159,12 +3159,12 @@
       return nullptr;
     }
 
-    const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), param_descriptor,
+    RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), param_descriptor,
                                                         false);
     uint32_t get_reg = is_range ? inst->VRegC_3rc() + static_cast<uint32_t>(sig_registers) :
         arg[sig_registers];
     if (reg_type.IsIntegralTypes()) {
-      const RegType& src_type = work_line_->GetRegisterType(get_reg);
+      RegType& src_type = work_line_->GetRegisterType(get_reg);
       if (!src_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << get_reg << " has type " << src_type
             << " but expected " << reg_type;
@@ -3247,7 +3247,7 @@
   // has a vtable entry for the target method.
   if (is_super) {
     DCHECK(method_type == METHOD_VIRTUAL);
-    const RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
+    RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
     if (super.IsUnresolvedTypes()) {
       Fail(VERIFY_ERROR_NO_METHOD) << "unknown super class in invoke-super from "
                                    << PrettyMethod(dex_method_idx_, *dex_file_)
@@ -3275,7 +3275,7 @@
                                                          RegisterLine* reg_line, bool is_range) {
   DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
          inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
-  const RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
+  RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
   if (!actual_arg_type.HasClass()) {
     VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
     return nullptr;
@@ -3313,7 +3313,7 @@
   // We use vAA as our expected arg count, rather than res_method->insSize, because we need to
   // match the call to the signature. Also, we might be calling through an abstract method
   // definition (which doesn't have register count values).
-  const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+  RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
   if (actual_arg_type.IsConflict()) {  // GetInvocationThis failed.
     return NULL;
   }
@@ -3337,7 +3337,7 @@
   }
   if (!actual_arg_type.IsZero()) {
     mirror::Class* klass = res_method->GetDeclaringClass();
-    const RegType& res_method_class =
+    RegType& res_method_class =
         reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
                              klass->CannotBeAssignedFromOtherTypes());
     if (!res_method_class.IsAssignableFrom(actual_arg_type)) {
@@ -3373,7 +3373,7 @@
                                         << " missing signature component";
       return NULL;
     }
-    const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+    RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
     uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
     if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
       return res_method;
@@ -3401,7 +3401,7 @@
     DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
     type_idx = inst->VRegB_3rc();
   }
-  const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+  RegType& res_type = ResolveClassAndCheckAccess(type_idx);
   if (res_type.IsConflict()) {  // bad class
     DCHECK_NE(failures_.size(), 0U);
   } else {
@@ -3412,12 +3412,12 @@
       /* make sure "size" register is valid type */
       work_line_->VerifyRegisterType(inst->VRegB_22c(), reg_types_.Integer());
       /* set register type to array class */
-      const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+      RegType& precise_type = reg_types_.FromUninitialized(res_type);
       work_line_->SetRegisterType(inst->VRegA_22c(), precise_type);
     } else {
       // Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
       // the list and fail. It's legal, if silly, for arg_count to be zero.
-      const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
+      RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
       uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
       uint32_t arg[5];
       if (!is_range) {
@@ -3431,19 +3431,19 @@
         }
       }
       // filled-array result goes into "result" register
-      const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+      RegType& precise_type = reg_types_.FromUninitialized(res_type);
       work_line_->SetResultRegisterType(precise_type);
     }
   }
 }
 
 void MethodVerifier::VerifyAGet(const Instruction* inst,
-                                const RegType& insn_type, bool is_primitive) {
-  const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+                                RegType& insn_type, bool is_primitive) {
+  RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
   if (!index_type.IsArrayIndexTypes()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
   } else {
-    const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+    RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
     if (array_type.IsZero()) {
       // Null array class; this code path will fail at runtime. Infer a merge-able type from the
       // instruction type. TODO: have a proper notion of bottom here.
@@ -3459,7 +3459,7 @@
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aget";
     } else {
       /* verify the class */
-      const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+      RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
       if (!component_type.IsReferenceTypes() && !is_primitive) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
             << " source for aget-object";
@@ -3486,12 +3486,12 @@
   }
 }
 
-void MethodVerifier::VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+void MethodVerifier::VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
                                         const uint32_t vregA) {
   // Primitive assignability rules are weaker than regular assignability rules.
   bool instruction_compatible;
   bool value_compatible;
-  const RegType& value_type = work_line_->GetRegisterType(vregA);
+  RegType& value_type = work_line_->GetRegisterType(vregA);
   if (target_type.IsIntegralTypes()) {
     instruction_compatible = target_type.Equals(insn_type);
     value_compatible = value_type.IsIntegralTypes();
@@ -3500,11 +3500,11 @@
     value_compatible = value_type.IsFloatTypes();
   } else if (target_type.IsLong()) {
     instruction_compatible = insn_type.IsLong();
-    const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+    RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
     value_compatible = value_type.IsLongTypes() && value_type.CheckWidePair(value_type_hi);
   } else if (target_type.IsDouble()) {
     instruction_compatible = insn_type.IsLong();  // no put-double, so expect put-long
-    const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+    RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
     value_compatible = value_type.IsDoubleTypes() && value_type.CheckWidePair(value_type_hi);
   } else {
     instruction_compatible = false;  // reference with primitive store
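
The long and double branches above read vregA + 1 and validate the lo/hi pair with CheckWidePair(). A compact sketch of what that pairing enforces, using a simplified two-slot register model and an invented enum (not ART's RegType hierarchy):

    #include <cassert>

    enum class Slot { kInt, kLongLo, kLongHi, kDoubleLo, kDoubleHi };

    // A wide value occupies two consecutive registers; the low half is
    // only valid when the next register holds its matching high half.
    bool CheckWidePair(Slot lo, Slot hi) {
      return (lo == Slot::kLongLo && hi == Slot::kLongHi) ||
             (lo == Slot::kDoubleLo && hi == Slot::kDoubleHi);
    }

    int main() {
      assert(CheckWidePair(Slot::kLongLo, Slot::kLongHi));
      assert(!CheckWidePair(Slot::kLongLo, Slot::kDoubleHi));  // mismatched halves
      return 0;
    }
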
@@ -3526,19 +3526,19 @@
 }
 
 void MethodVerifier::VerifyAPut(const Instruction* inst,
-                                const RegType& insn_type, bool is_primitive) {
-  const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+                                RegType& insn_type, bool is_primitive) {
+  RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
   if (!index_type.IsArrayIndexTypes()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
   } else {
-    const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+    RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
     if (array_type.IsZero()) {
       // Null array type; this code path will fail at runtime. Infer a merge-able type from the
       // instruction type.
     } else if (!array_type.IsArrayTypes()) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
     } else {
-      const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+      RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
       const uint32_t vregA = inst->VRegA_23x();
       if (is_primitive) {
         VerifyPrimitivePut(component_type, insn_type, vregA);
@@ -3560,7 +3560,7 @@
 mirror::ArtField* MethodVerifier::GetStaticField(int field_idx) {
   const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
   // Check access to class
-  const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+  RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
   if (klass_type.IsConflict()) {  // bad class
     AppendToLastFailMessage(StringPrintf(" in attempt to access static field %d (%s) in %s",
                                          field_idx, dex_file_->GetFieldName(field_id),
@@ -3592,10 +3592,10 @@
   return field;
 }
 
-mirror::ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
+mirror::ArtField* MethodVerifier::GetInstanceField(RegType& obj_type, int field_idx) {
   const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
   // Check access to class
-  const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+  RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
   if (klass_type.IsConflict()) {
     AppendToLastFailMessage(StringPrintf(" in attempt to access instance field %d (%s) in %s",
                                          field_idx, dex_file_->GetFieldName(field_id),
@@ -3629,7 +3629,7 @@
     return field;
   } else {
     mirror::Class* klass = field->GetDeclaringClass();
-    const RegType& field_klass =
+    RegType& field_klass =
         reg_types_.FromClass(dex_file_->GetFieldDeclaringClassDescriptor(field_id),
                              klass, klass->CannotBeAssignedFromOtherTypes());
     if (obj_type.IsUninitializedTypes() &&
@@ -3654,17 +3654,17 @@
   }
 }
 
-void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyISGet(const Instruction* inst, RegType& insn_type,
                                  bool is_primitive, bool is_static) {
   uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
   mirror::ArtField* field;
   if (is_static) {
     field = GetStaticField(field_idx);
   } else {
-    const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+    RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
     field = GetInstanceField(object_type, field_idx);
   }
-  const RegType* field_type = nullptr;
+  RegType* field_type = nullptr;
   if (field != NULL) {
     Thread* self = Thread::Current();
     mirror::Class* field_type_class;
@@ -3720,17 +3720,17 @@
   }
 }
 
-void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyISPut(const Instruction* inst, RegType& insn_type,
                                  bool is_primitive, bool is_static) {
   uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
   mirror::ArtField* field;
   if (is_static) {
     field = GetStaticField(field_idx);
   } else {
-    const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+    RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
     field = GetInstanceField(object_type, field_idx);
   }
-  const RegType* field_type = nullptr;
+  RegType* field_type = nullptr;
   if (field != NULL) {
     if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
       Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3782,7 +3782,7 @@
          inst->Opcode() == Instruction::IPUT_QUICK ||
          inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
          inst->Opcode() == Instruction::IPUT_OBJECT_QUICK);
-  const RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
+  RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
   if (!object_type.HasClass()) {
     VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
     return nullptr;
@@ -3797,7 +3797,7 @@
   return f;
 }
 
-void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
                                      bool is_primitive) {
   DCHECK(Runtime::Current()->IsStarted());
   mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3812,7 +3812,7 @@
     FieldHelper fh(h_field);
     field_type_class = fh.GetType(can_load_classes_);
   }
-  const RegType* field_type;
+  RegType* field_type;
   if (field_type_class != nullptr) {
     field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
                                        field_type_class->CannotBeAssignedFromOtherTypes());
@@ -3857,7 +3857,7 @@
   }
 }
 
-void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
                                      bool is_primitive) {
   DCHECK(Runtime::Current()->IsStarted());
   mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3867,7 +3867,7 @@
   }
   const char* descriptor = field->GetTypeDescriptor();
   mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
-  const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+  RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
   if (field != NULL) {
     if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
       Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3880,7 +3880,7 @@
     // Primitive field assignability rules are weaker than regular assignability rules
     bool instruction_compatible;
     bool value_compatible;
-    const RegType& value_type = work_line_->GetRegisterType(vregA);
+    RegType& value_type = work_line_->GetRegisterType(vregA);
     if (field_type.IsIntegralTypes()) {
       instruction_compatible = insn_type.IsIntegralTypes();
       value_compatible = value_type.IsIntegralTypes();
@@ -3998,7 +3998,7 @@
   return &insn_flags_[work_insn_idx_];
 }
 
-const RegType& MethodVerifier::GetMethodReturnType() {
+RegType& MethodVerifier::GetMethodReturnType() {
   if (return_type_ == nullptr) {
     if (mirror_method_ != NULL) {
       Thread* self = Thread::Current();
@@ -4028,7 +4028,7 @@
   return *return_type_;
 }
 
-const RegType& MethodVerifier::GetDeclaringClass() {
+RegType& MethodVerifier::GetDeclaringClass() {
   if (declaring_class_ == NULL) {
     const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
     const char* descriptor
@@ -4049,7 +4049,7 @@
   DCHECK(line != nullptr) << "No register line at DEX pc " << StringPrintf("0x%x", dex_pc);
   std::vector<int32_t> result;
   for (size_t i = 0; i < line->NumRegs(); ++i) {
-    const RegType& type = line->GetRegisterType(i);
+    RegType& type = line->GetRegisterType(i);
     if (type.IsConstant()) {
       result.push_back(type.IsPreciseConstant() ? kConstant : kImpreciseConstant);
       result.push_back(type.ConstantValue());
@@ -4089,7 +4089,7 @@
   return result;
 }
 
-const RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
+RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
   if (precise) {
     // Precise constant type.
     return reg_types_.FromCat1Const(value, true);
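
The pattern running through method_verifier.cc above is mechanical: every `const RegType&` becomes `RegType&`. The root cause sits further down in this diff, where `RegType::klass_` turns into a `GcRoot<mirror::Class>`; reading through such a root may need to update the stored reference (for example, a moving collector installing a forwarded address), which is the likely reason GetClass() and Dump() stop being const, and the non-const requirement then propagates to every caller. A minimal sketch of that cascade, with illustrative names rather than ART's real GcRoot API:

    #include <string>

    // Hypothetical root wrapper: Read() may rewrite the stored pointer
    // (e.g. a read barrier installing a to-space address), so it is
    // deliberately non-const.
    template <typename T>
    class Root {
     public:
      explicit Root(T* ref) : ref_(ref) {}
      T* Read() { return ref_; }  // a real runtime may mutate ref_ here
     private:
      T* ref_;
    };

    struct Klass { std::string name{"LFoo;"}; };

    class Type {
     public:
      explicit Type(Klass* k) : klass_(k) {}
      Klass* GetClass() { return klass_.Read(); }      // non-const: calls Read()
      std::string Dump() { return GetClass()->name; }  // non-const: calls GetClass()
     private:
      Root<Klass> klass_;
    };

    // Any function taking `const Type&` can no longer call Dump(), which is
    // exactly why the hunks above rewrite `const RegType&` parameters and
    // return types to `RegType&`.
    int main() {
      Klass k;
      Type t(&k);
      return t.Dump() == "LFoo;" ? 0 : 1;
    }
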
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 757c419..e63a90c 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -230,7 +230,7 @@
   bool HasCheckCasts() const;
   bool HasVirtualOrInterfaceInvokes() const;
   bool HasFailures() const;
-  const RegType& ResolveCheckedClass(uint32_t class_idx)
+  RegType& ResolveCheckedClass(uint32_t class_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
@@ -471,34 +471,34 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Helper to perform verification on puts of primitive type.
-  void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+  void VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
                           const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an aget instruction. The destination register's type will be set to
   // be that of component type of the array unless the array type is unknown, in which case a
   // bottom type inferred from the type of instruction is used. is_primitive is false for an
   // aget-object.
-  void VerifyAGet(const Instruction* inst, const RegType& insn_type,
+  void VerifyAGet(const Instruction* inst, RegType& insn_type,
                   bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an aput instruction.
-  void VerifyAPut(const Instruction* inst, const RegType& insn_type,
+  void VerifyAPut(const Instruction* inst, RegType& insn_type,
                   bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Lookup instance field and fail for resolution violations
-  mirror::ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
+  mirror::ArtField* GetInstanceField(RegType& obj_type, int field_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Lookup static field and fail for resolution violations
   mirror::ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an iget or sget instruction.
-  void VerifyISGet(const Instruction* inst, const RegType& insn_type,
+  void VerifyISGet(const Instruction* inst, RegType& insn_type,
                    bool is_primitive, bool is_static)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an iput or sput instruction.
-  void VerifyISPut(const Instruction* inst, const RegType& insn_type,
+  void VerifyISPut(const Instruction* inst, RegType& insn_type,
                    bool is_primitive, bool is_static)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -508,18 +508,18 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an iget-quick instruction.
-  void VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+  void VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
                        bool is_primitive)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an iput-quick instruction.
-  void VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+  void VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
                        bool is_primitive)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Resolves a class based on an index and performs access checks to ensure the referrer can
   // access the resolved class.
-  const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
+  RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -527,7 +527,7 @@
    * address, determine the Join of all exceptions that can land here. Fails if no matching
    * exception handler can be found or if the Join of exception types fails.
    */
-  const RegType& GetCaughtExceptionType()
+  RegType& GetCaughtExceptionType()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -613,14 +613,14 @@
   }
 
   // Return the register type for the method.
-  const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get a type representing the declaring class of the method.
-  const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   InstructionFlags* CurrentInsnFlags();
 
-  const RegType& DetermineCat1Constant(int32_t value, bool precise)
+  RegType& DetermineCat1Constant(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   RegTypeCache reg_types_;
@@ -641,7 +641,7 @@
   // Its object representation if known.
   mirror::ArtMethod* mirror_method_ GUARDED_BY(Locks::mutator_lock_);
   const uint32_t method_access_flags_;  // Method's access flags.
-  const RegType* return_type_;  // Lazily computed return type of the method.
+  RegType* return_type_;  // Lazily computed return type of the method.
   const DexFile* const dex_file_;  // The dex file containing the method.
   // The dex_cache for the declaring class of the method.
   Handle<mirror::DexCache>* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
@@ -649,7 +649,7 @@
   Handle<mirror::ClassLoader>* class_loader_ GUARDED_BY(Locks::mutator_lock_);
   const DexFile::ClassDef* const class_def_;  // The class def of the declaring class of the method.
   const DexFile::CodeItem* const code_item_;  // The code item containing the code for the method.
-  const RegType* declaring_class_;  // Lazily computed reg type of the method's declaring class.
+  RegType* declaring_class_;  // Lazily computed reg type of the method's declaring class.
   // Instruction widths and flags, one entry per code unit.
   std::unique_ptr<InstructionFlags[]> insn_flags_;
   // The dex PC of a FindLocksAtDexPc request, -1 otherwise.
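
The header changes mirror the definitions above; the two fields losing const at the end are the lazily computed caches behind GetMethodReturnType() and GetDeclaringClass(). A hedged sketch of that compute-once pattern, with stand-in names (LookupReturnType() is invented for illustration):

    struct Type { int id = 0; };

    class VerifierSketch {
     public:
      // Computed on first use, then served from the cached pointer; the
      // patch drops `const` from the field so the returned reference is
      // mutable, matching the rest of the const removal.
      Type& GetMethodReturnType() {
        if (return_type_ == nullptr) {
          return_type_ = LookupReturnType();  // invented stand-in helper
        }
        return *return_type_;
      }
     private:
      Type* LookupReturnType() {
        static Type computed;
        return &computed;
      }
      Type* return_type_ = nullptr;  // was `const Type*` before this patch
    };

    int main() {
      VerifierSketch v;
      return v.GetMethodReturnType().id;  // 0
    }
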
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index f0729e4..6422cdf 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -81,7 +81,7 @@
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
-std::string PreciseConstType::Dump() const {
+std::string PreciseConstType::Dump() {
   std::stringstream result;
   uint32_t val = ConstantValue();
   if (val == 0) {
@@ -98,47 +98,47 @@
   return result.str();
 }
 
-std::string BooleanType::Dump() const {
+std::string BooleanType::Dump() {
   return "Boolean";
 }
 
-std::string ConflictType::Dump() const {
+std::string ConflictType::Dump() {
     return "Conflict";
 }
 
-std::string ByteType::Dump() const {
+std::string ByteType::Dump() {
   return "Byte";
 }
 
-std::string ShortType::Dump() const {
+std::string ShortType::Dump() {
   return "Short";
 }
 
-std::string CharType::Dump() const {
+std::string CharType::Dump() {
   return "Char";
 }
 
-std::string FloatType::Dump() const {
+std::string FloatType::Dump() {
   return "Float";
 }
 
-std::string LongLoType::Dump() const {
+std::string LongLoType::Dump() {
   return "Long (Low Half)";
 }
 
-std::string LongHiType::Dump() const {
+std::string LongHiType::Dump() {
   return "Long (High Half)";
 }
 
-std::string DoubleLoType::Dump() const {
+std::string DoubleLoType::Dump() {
   return "Double (Low Half)";
 }
 
-std::string DoubleHiType::Dump() const {
+std::string DoubleHiType::Dump() {
   return "Double (High Half)";
 }
 
-std::string IntegerType::Dump() const {
+std::string IntegerType::Dump() {
     return "Integer";
 }
 
@@ -361,7 +361,7 @@
   }
 }
 
-std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+std::string UndefinedType::Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return "Undefined";
 }
 
@@ -391,7 +391,7 @@
   DCHECK(klass->IsInstantiable());
 }
 
-std::string UnresolvedMergedType::Dump() const {
+std::string UnresolvedMergedType::Dump() {
   std::stringstream result;
   std::set<uint16_t> types = GetMergedTypes();
   result << "UnresolvedMergedReferences(";
@@ -405,59 +405,59 @@
   return result.str();
 }
 
-std::string UnresolvedSuperClass::Dump() const {
+std::string UnresolvedSuperClass::Dump() {
   std::stringstream result;
   uint16_t super_type_id = GetUnresolvedSuperClassChildId();
   result << "UnresolvedSuperClass(" << reg_type_cache_->GetFromId(super_type_id).Dump() << ")";
   return result.str();
 }
 
-std::string UnresolvedReferenceType::Dump() const {
+std::string UnresolvedReferenceType::Dump() {
   std::stringstream result;
   result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor());
   return result.str();
 }
 
-std::string UnresolvedUninitializedRefType::Dump() const {
+std::string UnresolvedUninitializedRefType::Dump() {
   std::stringstream result;
   result << "Unresolved And Uninitialized Reference" << ": " << PrettyDescriptor(GetDescriptor());
   result << " Allocation PC: " << GetAllocationPc();
   return result.str();
 }
 
-std::string UnresolvedUninitializedThisRefType::Dump() const {
+std::string UnresolvedUninitializedThisRefType::Dump() {
   std::stringstream result;
   result << "Unresolved And Uninitialized This Reference" << PrettyDescriptor(GetDescriptor());
   return result.str();
 }
 
-std::string ReferenceType::Dump() const {
+std::string ReferenceType::Dump() {
   std::stringstream result;
   result << "Reference" << ": " << PrettyDescriptor(GetClass());
   return result.str();
 }
 
-std::string PreciseReferenceType::Dump() const {
+std::string PreciseReferenceType::Dump() {
   std::stringstream result;
   result << "Precise Reference" << ": "<< PrettyDescriptor(GetClass());
   return result.str();
 }
 
-std::string UninitializedReferenceType::Dump() const {
+std::string UninitializedReferenceType::Dump() {
   std::stringstream result;
   result << "Uninitialized Reference" << ": " << PrettyDescriptor(GetClass());
   result << " Allocation PC: " << GetAllocationPc();
   return result.str();
 }
 
-std::string UninitializedThisReferenceType::Dump() const {
+std::string UninitializedThisReferenceType::Dump() {
   std::stringstream result;
   result << "Uninitialized This Reference" << ": " << PrettyDescriptor(GetClass());
   result << "Allocation PC: " << GetAllocationPc();
   return result.str();
 }
 
-std::string ImpreciseConstType::Dump() const {
+std::string ImpreciseConstType::Dump() {
   std::stringstream result;
   uint32_t val = ConstantValue();
   if (val == 0) {
@@ -472,7 +472,7 @@
   }
   return result.str();
 }
-std::string PreciseConstLoType::Dump() const {
+std::string PreciseConstLoType::Dump() {
   std::stringstream result;
 
   int32_t val = ConstantValueLo();
@@ -486,7 +486,7 @@
   return result.str();
 }
 
-std::string ImpreciseConstLoType::Dump() const {
+std::string ImpreciseConstLoType::Dump() {
   std::stringstream result;
 
   int32_t val = ConstantValueLo();
@@ -500,7 +500,7 @@
   return result.str();
 }
 
-std::string PreciseConstHiType::Dump() const {
+std::string PreciseConstHiType::Dump() {
   std::stringstream result;
   int32_t val = ConstantValueHi();
   result << "Precise ";
@@ -513,7 +513,7 @@
   return result.str();
 }
 
-std::string ImpreciseConstHiType::Dump() const {
+std::string ImpreciseConstHiType::Dump() {
   std::stringstream result;
   int32_t val = ConstantValueHi();
   result << "Imprecise ";
@@ -530,7 +530,7 @@
     : RegType(NULL, "", cache_id), constant_(constant) {
 }
 
-const RegType& UndefinedType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+RegType& UndefinedType::Merge(RegType& incoming_type, RegTypeCache* reg_types)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (incoming_type.IsUndefined()) {
     return *this;  // Undefined MERGE Undefined => Undefined
@@ -538,7 +538,7 @@
   return reg_types->Conflict();
 }
 
-const RegType& RegType::HighHalf(RegTypeCache* cache) const {
+RegType& RegType::HighHalf(RegTypeCache* cache) const {
   DCHECK(IsLowHalf());
   if (IsLongLo()) {
     return cache->LongHi();
@@ -586,12 +586,10 @@
 }
 std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
   std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
-  const RegType& _left(reg_type_cache_->GetFromId(refs.first));
-  RegType& __left(const_cast<RegType&>(_left));
-  UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&__left);
+  RegType& _left(reg_type_cache_->GetFromId(refs.first));
+  UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&_left);
 
-  RegType& _right(
-      const_cast<RegType&>(reg_type_cache_->GetFromId(refs.second)));
+  RegType& _right(reg_type_cache_->GetFromId(refs.second));
   UnresolvedMergedType* right = down_cast<UnresolvedMergedType*>(&_right);
 
   std::set<uint16_t> types;
@@ -614,7 +612,7 @@
   return types;
 }
 
-const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
+RegType& RegType::GetSuperClass(RegTypeCache* cache) {
   if (!IsUnresolvedTypes()) {
     mirror::Class* super_klass = GetClass()->GetSuperClass();
     if (super_klass != NULL) {
@@ -635,7 +633,7 @@
   }
 }
 
-bool RegType::CanAccess(const RegType& other) const {
+bool RegType::CanAccess(RegType& other) {
   if (Equals(other)) {
     return true;  // Trivial accessibility.
   } else {
@@ -651,7 +649,7 @@
   }
 }
 
-bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
+bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) {
   if ((access_flags & kAccPublic) != 0) {
     return true;
   }
@@ -662,7 +660,7 @@
   }
 }
 
-bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
     // Primitive arrays will always resolve
     DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '[');
@@ -675,11 +673,11 @@
   }
 }
 
-bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return IsReference() && GetClass()->IsObjectClass();
 }
 
-bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
     return descriptor_[0] == '[';
   } else if (HasClass()) {
@@ -689,7 +687,7 @@
   }
 }
 
-bool RegType::IsJavaLangObjectArray() const {
+bool RegType::IsJavaLangObjectArray() {
   if (HasClass()) {
     mirror::Class* type = GetClass();
     return type->IsArrayClass() && type->GetComponentType()->IsObjectClass();
@@ -697,7 +695,7 @@
   return false;
 }
 
-bool RegType::IsInstantiableTypes() const {
+bool RegType::IsInstantiableTypes() {
   return IsUnresolvedTypes() || (IsNonZeroReferenceTypes() && GetClass()->IsInstantiable());
 }
 
@@ -705,7 +703,7 @@
   : ConstantType(constat, cache_id) {
 }
 
-static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
+static bool AssignableFrom(RegType& lhs, RegType& rhs, bool strict)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (lhs.Equals(rhs)) {
     return true;
@@ -753,11 +751,11 @@
   }
 }
 
-bool RegType::IsAssignableFrom(const RegType& src) const {
+bool RegType::IsAssignableFrom(RegType& src) {
   return AssignableFrom(*this, src, false);
 }
 
-bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
+bool RegType::IsStrictlyAssignableFrom(RegType& src) {
   return AssignableFrom(*this, src, true);
 }
 
@@ -775,11 +773,11 @@
   }
 }
 
-static const RegType& SelectNonConstant(const RegType& a, const RegType& b) {
+static RegType& SelectNonConstant(RegType& a, RegType& b) {
   return a.IsConstantTypes() ? b : a;
 }
 
-const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const {
+RegType& RegType::Merge(RegType& incoming_type, RegTypeCache* reg_types) {
   DCHECK(!Equals(incoming_type));  // Trivial equality handled by caller
   if (IsConflict()) {
     return *this;  // Conflict MERGE * => Conflict
@@ -958,16 +956,16 @@
 void RegType::CheckInvariants() const {
   if (IsConstant() || IsConstantLo() || IsConstantHi()) {
     CHECK(descriptor_.empty()) << *this;
-    CHECK(klass_ == NULL) << *this;
+    CHECK(klass_.IsNull()) << *this;
   }
-  if (klass_ != NULL) {
+  if (!klass_.IsNull()) {
     CHECK(!descriptor_.empty()) << *this;
   }
 }
 
 void RegType::VisitRoots(RootCallback* callback, void* arg) {
-  if (klass_ != nullptr) {
-    callback(reinterpret_cast<mirror::Object**>(&klass_), arg, 0, kRootUnknown);
+  if (!klass_.IsNull()) {
+    klass_.VisitRoot(callback, arg, 0, kRootUnknown);
   }
 }
 
@@ -978,36 +976,37 @@
 void UnresolvedUninitializedThisRefType::CheckInvariants() const {
   CHECK_EQ(GetAllocationPc(), 0U) << *this;
   CHECK(!descriptor_.empty()) << *this;
-  CHECK(klass_ == NULL) << *this;
+  CHECK(klass_.IsNull()) << *this;
 }
 
 void UnresolvedUninitializedRefType::CheckInvariants() const {
   CHECK(!descriptor_.empty()) << *this;
-  CHECK(klass_ == NULL) << *this;
+  CHECK(klass_.IsNull()) << *this;
 }
 
 void UnresolvedMergedType::CheckInvariants() const {
   // Unresolved merged types: merged types should be defined.
   CHECK(descriptor_.empty()) << *this;
-  CHECK(klass_ == NULL) << *this;
+  CHECK(klass_.IsNull()) << *this;
   CHECK_NE(merged_types_.first, 0U) << *this;
   CHECK_NE(merged_types_.second, 0U) << *this;
 }
 
 void UnresolvedReferenceType::CheckInvariants() const {
   CHECK(!descriptor_.empty()) << *this;
-  CHECK(klass_ == NULL) << *this;
+  CHECK(klass_.IsNull()) << *this;
 }
 
 void UnresolvedSuperClass::CheckInvariants() const {
   // Unresolved merged types: merged types should be defined.
   CHECK(descriptor_.empty()) << *this;
-  CHECK(klass_ == NULL) << *this;
+  CHECK(klass_.IsNull()) << *this;
   CHECK_NE(unresolved_child_id_, 0U) << *this;
 }
 
 std::ostream& operator<<(std::ostream& os, const RegType& rhs) {
-  os << rhs.Dump();
+  RegType& rhs_non_const = const_cast<RegType&>(rhs);
+  os << rhs_non_const.Dump();
   return os;
 }
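
One spot keeps its const signature: the stream inserter still takes `const RegType&`, so existing `<<` call sites keep compiling, and it bridges to the now non-const Dump() with a const_cast. That is only safe because Dump() never mutates the object. A self-contained sketch of the same bridge, illustrative types only:

    #include <iostream>
    #include <string>

    class Type {
     public:
      std::string Dump() { return "Integer"; }  // non-const after the patch
    };

    // Keep the conventional const& signature; cast inside. This would be
    // undefined behavior only if Dump() wrote to a genuinely const
    // object, which it does not.
    std::ostream& operator<<(std::ostream& os, const Type& rhs) {
      Type& rhs_non_const = const_cast<Type&>(rhs);
      os << rhs_non_const.Dump();
      return os;
    }

    int main() {
      const Type t{};
      std::cout << t << std::endl;
      return 0;
    }
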
 
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index e985f3a..d508fb5 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -25,6 +25,7 @@
 #include "jni.h"
 
 #include "base/macros.h"
+#include "gc_root.h"
 #include "globals.h"
 #include "object_callbacks.h"
 #include "primitive.h"
@@ -107,7 +108,7 @@
     return IsLowHalf();
   }
   // Check this is the low half, and that type_h is its matching high-half.
-  inline bool CheckWidePair(const RegType& type_h) const {
+  inline bool CheckWidePair(RegType& type_h) const {
     if (IsLowHalf()) {
       return ((IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
               (IsPreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
@@ -119,7 +120,7 @@
     return false;
   }
   // The high half that corresponds to this low half
-  const RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsConstantBoolean() const {
     return IsConstant() && (ConstantValue() >= 0) && (ConstantValue() <= 1);
@@ -198,55 +199,54 @@
   virtual bool HasClass() const {
     return false;
   }
-  bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   Primitive::Type GetPrimitiveType() const;
-  bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsJavaLangObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsInstantiableTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::string& GetDescriptor() const {
     DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
                           !IsUnresolvedSuperClass()));
     return descriptor_;
   }
-  mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(!IsUnresolvedReference());
-    DCHECK(klass_ != NULL) << Dump();
+    DCHECK(!klass_.IsNull()) << Dump();
     DCHECK(HasClass());
-    return klass_;
+    return klass_.Read();
   }
   uint16_t GetId() const {
     return cache_id_;
   }
-  const RegType& GetSuperClass(RegTypeCache* cache) const
+  RegType& GetSuperClass(RegTypeCache* cache)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  virtual std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  virtual std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
 
   // Can this type access other?
-  bool CanAccess(const RegType& other) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool CanAccess(RegType& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Can this type access a member with the given properties?
-  bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
+  bool CanAccessMember(mirror::Class* klass, uint32_t access_flags)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Can this type be assigned by src?
   // Note: Object and interface types may always be assigned to one another, see comment on
   // ClassJoin.
-  bool IsAssignableFrom(const RegType& src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't allow assignment to
   // an interface from an Object.
-  bool IsStrictlyAssignableFrom(const RegType& src) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsStrictlyAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Are these RegTypes the same?
-  bool Equals(const RegType& other) const {
+  bool Equals(RegType& other) const {
     return GetId() == other.GetId();
   }
 
   // Compute the merge of this register from one edge (path) with incoming_type from another.
-  virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+  virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -275,7 +275,7 @@
  protected:
   RegType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+      : descriptor_(descriptor), klass_(GcRoot<mirror::Class>(klass)), cache_id_(cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
@@ -285,7 +285,7 @@
 
 
   const std::string descriptor_;
-  mirror::Class* klass_;  // Non-const only due to moving classes.
+  GcRoot<mirror::Class> klass_;
   const uint16_t cache_id_;
 
   friend class RegTypeCache;
@@ -301,7 +301,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the singleton Conflict instance.
   static ConflictType* GetInstance();
@@ -331,7 +331,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the singleton Undefined instance.
   static UndefinedType* GetInstance();
@@ -350,7 +350,7 @@
       : RegType(klass, descriptor, cache_id) {
   }
 
-  virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+  virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static UndefinedType* instance_;
@@ -373,7 +373,7 @@
   bool IsInteger() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                      uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -392,7 +392,7 @@
   bool IsBoolean() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                      uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -412,7 +412,7 @@
   bool IsByte() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                   uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -431,7 +431,7 @@
   bool IsShort() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                    uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -450,7 +450,7 @@
   bool IsChar() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                   uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -469,7 +469,7 @@
   bool IsFloat() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                    uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -491,7 +491,7 @@
 
 class LongLoType : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsLongLo() const {
     return true;
   }
@@ -513,7 +513,7 @@
 
 class LongHiType : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsLongHi() const {
     return true;
   }
@@ -532,7 +532,7 @@
 
 class DoubleLoType : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsDoubleLo() const {
     return true;
   }
@@ -554,7 +554,7 @@
 
 class DoubleHiType : public Cat2Type {
  public:
-  std::string Dump() const;
+  std::string Dump();
   virtual bool IsDoubleHi() const {
     return true;
   }
@@ -621,7 +621,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class PreciseConstLoType : public ConstantType {
@@ -633,7 +633,7 @@
   bool IsPreciseConstantLo() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class PreciseConstHiType : public ConstantType {
@@ -645,7 +645,7 @@
   bool IsPreciseConstantHi() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class ImpreciseConstType : public ConstantType {
@@ -655,7 +655,7 @@
   bool IsImpreciseConstant() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class ImpreciseConstLoType : public ConstantType {
@@ -666,7 +666,7 @@
   bool IsImpreciseConstantLo() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class ImpreciseConstHiType : public ConstantType {
@@ -677,7 +677,7 @@
   bool IsImpreciseConstantHi() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // Common parent of all uninitialized types. Uninitialized types are created by "new" dex
@@ -718,7 +718,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // Similar to UnresolvedReferenceType but not yet having been passed to a constructor.
@@ -737,7 +737,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
@@ -762,7 +762,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -782,7 +782,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
@@ -807,7 +807,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // A type of register holding a reference to an Object of type GetClass and only an object of that
@@ -829,7 +829,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // Common parent of unresolved types.
@@ -857,7 +857,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
@@ -883,7 +883,7 @@
     return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -918,7 +918,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
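
The field change at the heart of reg_type.h is `mirror::Class* klass_` becoming `GcRoot<mirror::Class> klass_` (hence the new `#include "gc_root.h"`), with VisitRoots() in reg_type.cc delegating to `klass_.VisitRoot(...)`. The wrapper gives the GC one well-known place to find, report, and potentially relocate the reference. A simplified sketch of such a wrapper; the callback signature and names are assumptions, not ART's actual gc_root.h interface:

    #include <cstdint>

    struct Object {};
    using RootCallback = void (*)(Object** root, void* arg,
                                  uint32_t thread_id, int root_kind);

    template <typename T>
    class GcRootSketch {
     public:
      explicit GcRootSketch(T* ref = nullptr) : ref_(ref) {}
      T* Read() { return ref_; }
      bool IsNull() const { return ref_ == nullptr; }
      // Report the slot to the GC; a moving collector may overwrite *root.
      void VisitRoot(RootCallback callback, void* arg,
                     uint32_t tid, int kind) {
        if (!IsNull()) {
          callback(reinterpret_cast<Object**>(&ref_), arg, tid, kind);
        }
      }
     private:
      T* ref_;
    };

    int main() {
      struct Klass : Object {};
      Klass k;
      GcRootSketch<Klass> root(&k);
      root.VisitRoot([](Object**, void*, uint32_t, int) { /* GC would inspect or move *root */ },
                     nullptr, 0, 0);
      return root.IsNull() ? 1 : 0;
    }
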
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index fc9e5c9..fdf96a8 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -24,14 +24,14 @@
 namespace art {
 namespace verifier {
 
-inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
+inline RegType& RegTypeCache::GetFromId(uint16_t id) const {
   DCHECK_LT(id, entries_.size());
   RegType* result = entries_[id];
   DCHECK(result != NULL);
   return *result;
 }
 
-inline const ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
+inline ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
   // We only expect 0 to be a precise constant.
   DCHECK(value != 0 || precise);
   if (precise && (value >= kMinSmallConstant) && (value <= kMaxSmallConstant)) {
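
The inline FromCat1Const() is cut off at the hunk boundary, but the visible guard shows the design: small precise constants live in reserved cache slots, so the common case is an arithmetic lookup rather than a search. A sketch of that idea under assumed bounds (the real kMinSmallConstant and kMaxSmallConstant values are not shown in this diff):

    #include <array>
    #include <cassert>

    struct ConstType { int value; };

    // Assumed bounds, for illustration only.
    constexpr int kMinSmallConstant = -1;
    constexpr int kMaxSmallConstant = 4;

    class ConstCacheSketch {
     public:
      ConstCacheSketch() {
        for (int v = kMinSmallConstant; v <= kMaxSmallConstant; ++v) {
          small_[v - kMinSmallConstant] = ConstType{v};
        }
      }
      // Precise small constants: O(1) slot lookup, no allocation.
      ConstType& FromSmallConst(int value) {
        assert(value >= kMinSmallConstant && value <= kMaxSmallConstant);
        return small_[value - kMinSmallConstant];
      }
     private:
      std::array<ConstType, kMaxSmallConstant - kMinSmallConstant + 1> small_;
    };

    int main() {
      ConstCacheSketch cache;
      return cache.FromSmallConst(0).value;  // 0
    }
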
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 91fba4d..255b506 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -65,8 +65,8 @@
   DCHECK_EQ(entries_.size(), primitive_count_);
 }
 
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
-                                            bool precise) {
+RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+                                      bool precise) {
   DCHECK(RegTypeCache::primitive_initialized_);
   if (descriptor[1] == '\0') {
     switch (descriptor[0]) {
@@ -97,7 +97,7 @@
   }
 };
 
-const RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
+RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
   CHECK(RegTypeCache::primitive_initialized_);
   switch (prim_type) {
     case Primitive::kPrimBoolean:
@@ -156,8 +156,8 @@
   return klass;
 }
 
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
-                                  bool precise) {
+RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+                            bool precise) {
   // Try looking up the class in the cache first.
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
     if (MatchDescriptor(i, descriptor, precise)) {
@@ -185,7 +185,7 @@
     } else {
       entry = new ReferenceType(klass, descriptor, entries_.size());
     }
-    entries_.push_back(entry);
+    AddEntry(entry);
     return *entry;
   } else {  // Class not resolved.
     // We tried loading the class and failed, this might get an exception raised
@@ -198,7 +198,7 @@
     }
     if (IsValidDescriptor(descriptor)) {
       RegType* entry = new UnresolvedReferenceType(descriptor, entries_.size());
-      entries_.push_back(entry);
+      AddEntry(entry);
       return *entry;
     } else {
       // The descriptor is broken; return the unknown type as there's nothing sensible that
@@ -208,8 +208,8 @@
   }
 }
 
-const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
-  DCHECK(klass != nullptr);
+RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+  DCHECK(klass != nullptr && !klass->IsErroneous());
   if (klass->IsPrimitive()) {
     // Note: precise isn't used for primitive classes. A char is assignable to an int. All
     // primitive classes are final.
@@ -218,7 +218,7 @@
     // Look for the reference in the list of entries to have.
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       RegType* cur_entry = entries_[i];
-      if (cur_entry->klass_ == klass && MatchingPrecisionForClass(cur_entry, precise)) {
+      if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
         return *cur_entry;
       }
     }
@@ -229,7 +229,7 @@
     } else {
       entry = new ReferenceType(klass, descriptor, entries_.size());
     }
-    entries_.push_back(entry);
+    AddEntry(entry);
     return *entry;
   }
 }
@@ -311,17 +311,15 @@
   }
 }
 
-const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
+RegType& RegTypeCache::FromUnresolvedMerge(RegType& left, RegType& right) {
   std::set<uint16_t> types;
   if (left.IsUnresolvedMergedReference()) {
-    RegType& non_const(const_cast<RegType&>(left));
-    types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+    types = (down_cast<UnresolvedMergedType*>(&left))->GetMergedTypes();
   } else {
     types.insert(left.GetId());
   }
   if (right.IsUnresolvedMergedReference()) {
-    RegType& non_const(const_cast<RegType&>(right));
-    std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+    std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&right))->GetMergedTypes();
     types.insert(right_types.begin(), right_types.end());
   } else {
     types.insert(right.GetId());
@@ -339,7 +337,7 @@
   }
   // Create entry.
   RegType* entry = new UnresolvedMergedType(left.GetId(), right.GetId(), this, entries_.size());
-  entries_.push_back(entry);
+  AddEntry(entry);
   if (kIsDebugBuild) {
     UnresolvedMergedType* tmp_entry = down_cast<UnresolvedMergedType*>(entry);
     std::set<uint16_t> check_types = tmp_entry->GetMergedTypes();
@@ -348,7 +346,7 @@
   return *entry;
 }
 
-const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
+RegType& RegTypeCache::FromUnresolvedSuperClass(RegType& child) {
   // Check if entry already exists.
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
     RegType* cur_entry = entries_[i];
@@ -363,11 +361,11 @@
     }
   }
   RegType* entry = new UnresolvedSuperClass(child.GetId(), this, entries_.size());
-  entries_.push_back(entry);
+  AddEntry(entry);
   return *entry;
 }
 
-const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
+UninitializedType& RegTypeCache::Uninitialized(RegType& type, uint32_t allocation_pc) {
   UninitializedType* entry = NULL;
   const std::string& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
@@ -393,11 +391,11 @@
     }
     entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
   }
-  entries_.push_back(entry);
+  AddEntry(entry);
   return *entry;
 }
 
-const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
+RegType& RegTypeCache::FromUninitialized(RegType& uninit_type) {
   RegType* entry;
 
   if (uninit_type.IsUnresolvedTypes()) {
@@ -435,48 +433,48 @@
       return Conflict();
     }
   }
-  entries_.push_back(entry);
+  AddEntry(entry);
   return *entry;
 }
 
-const ImpreciseConstType& RegTypeCache::ByteConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+ImpreciseConstType& RegTypeCache::ByteConstant() {
+  ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const ImpreciseConstType& RegTypeCache::CharConstant() {
+ImpreciseConstType& RegTypeCache::CharConstant() {
   int32_t jchar_max = static_cast<int32_t>(std::numeric_limits<jchar>::max());
-  const ConstantType& result =  FromCat1Const(jchar_max, false);
+  ConstantType& result = FromCat1Const(jchar_max, false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const ImpreciseConstType& RegTypeCache::ShortConstant() {
-  const ConstantType& result =  FromCat1Const(std::numeric_limits<jshort>::min(), false);
+ImpreciseConstType& RegTypeCache::ShortConstant() {
+  ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const ImpreciseConstType& RegTypeCache::IntConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
+ImpreciseConstType& RegTypeCache::IntConstant() {
+  ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const ImpreciseConstType& RegTypeCache::PosByteConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
+ImpreciseConstType& RegTypeCache::PosByteConstant() {
+  ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const ImpreciseConstType& RegTypeCache::PosShortConstant() {
-  const ConstantType& result =  FromCat1Const(std::numeric_limits<jshort>::max(), false);
+ImpreciseConstType& RegTypeCache::PosShortConstant() {
+  ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
+UninitializedType& RegTypeCache::UninitializedThisArgument(RegType& type) {
   UninitializedType* entry;
   const std::string& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
@@ -498,14 +496,14 @@
     }
     entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
   }
-  entries_.push_back(entry);
+  AddEntry(entry);
   return *entry;
 }
 
-const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
     RegType* cur_entry = entries_[i];
-    if (cur_entry->klass_ == NULL && cur_entry->IsConstant() &&
+    if (cur_entry->klass_.IsNull() && cur_entry->IsConstant() &&
         cur_entry->IsPreciseConstant() == precise &&
         (down_cast<ConstantType*>(cur_entry))->ConstantValue() == value) {
       return *down_cast<ConstantType*>(cur_entry);
@@ -517,11 +515,11 @@
   } else {
     entry = new ImpreciseConstType(value, entries_.size());
   }
-  entries_.push_back(entry);
+  AddEntry(entry);
   return *entry;
 }
 
-const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
     RegType* cur_entry = entries_[i];
     if (cur_entry->IsConstantLo() && (cur_entry->IsPrecise() == precise) &&
@@ -535,11 +533,11 @@
   } else {
     entry = new ImpreciseConstLoType(value, entries_.size());
   }
-  entries_.push_back(entry);
+  AddEntry(entry);
   return *entry;
 }
 
-const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
     RegType* cur_entry = entries_[i];
     if (cur_entry->IsConstantHi() && (cur_entry->IsPrecise() == precise) &&
@@ -553,11 +551,11 @@
   } else {
     entry = new ImpreciseConstHiType(value, entries_.size());
   }
-  entries_.push_back(entry);
+  AddEntry(entry);
   return *entry;
 }
 
-const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
+RegType& RegTypeCache::GetComponentType(RegType& array, mirror::ClassLoader* loader) {
   if (!array.IsArrayTypes()) {
     return Conflict();
   } else if (array.IsUnresolvedTypes()) {
@@ -566,8 +564,15 @@
     return FromDescriptor(loader, component.c_str(), false);
   } else {
     mirror::Class* klass = array.GetClass()->GetComponentType();
-    return FromClass(klass->GetDescriptor().c_str(), klass,
-                     klass->CannotBeAssignedFromOtherTypes());
+    if (klass->IsErroneous()) {
+      // Arrays may have erroneous component types; use unresolved in that case.
+      // We assume that the primitive classes are not erroneous, so we know it is a
+      // reference type.
+      return FromDescriptor(loader, klass->GetDescriptor().c_str(), false);
+    } else {
+      return FromClass(klass->GetDescriptor().c_str(), klass,
+                       klass->CannotBeAssignedFromOtherTypes());
+    }
   }
 }
 
@@ -586,5 +591,9 @@
   }
 }
 
+void RegTypeCache::AddEntry(RegType* new_entry) {
+  entries_.push_back(new_entry);
+}
+
 }  // namespace verifier
 }  // namespace art
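
Beyond the const changes, this file makes two behavioral tweaks: GetComponentType() now falls back to an unresolved type when an array's component class is erroneous, and every `entries_.push_back(entry)` is funneled through the new AddEntry() helper. Today AddEntry() is a plain push_back, but the single choke point is where later bookkeeping (checks, instrumentation, root registration) can be attached without touching each caller. A sketch of the funnel; the assertion is illustrative and not part of the patch:

    #include <cassert>
    #include <vector>

    struct Entry { explicit Entry(int id) : id(id) {} int id; };

    class CacheSketch {
     public:
      Entry& Intern(int id) {
        Entry* entry = new Entry(id);
        AddEntry(entry);  // every insertion goes through one place
        return *entry;
      }
      ~CacheSketch() { for (Entry* e : entries_) delete e; }
     private:
      void AddEntry(Entry* new_entry) {
        assert(new_entry != nullptr);  // illustrative hook; the patch just push_backs
        entries_.push_back(new_entry);
      }
      std::vector<Entry*> entries_;
    };

    int main() {
      CacheSketch cache;
      return cache.Intern(7).id == 7 ? 0 : 1;
    }
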
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 70d5f07..d46cf2c 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -49,99 +49,99 @@
     }
   }
   static void ShutDown();
-  const art::verifier::RegType& GetFromId(uint16_t id) const;
-  const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+  RegType& GetFromId(uint16_t id) const;
+  RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
+  RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ConstantType& FromCat1Const(int32_t value, bool precise)
+  ConstantType& FromCat1Const(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
+  ConstantType& FromCat2ConstLo(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
+  ConstantType& FromCat2ConstHi(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+  RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right)
+  RegType& FromUnresolvedMerge(RegType& left, RegType& right)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& FromUnresolvedSuperClass(const RegType& child)
+  RegType& FromUnresolvedSuperClass(RegType& child)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // String is final and therefore always precise.
     return From(NULL, "Ljava/lang/String;", true);
   }
-  const RegType& JavaLangThrowable(bool precise)
+  RegType& JavaLangThrowable(bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return From(NULL, "Ljava/lang/Throwable;", precise);
   }
-  const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromCat1Const(0, true);
   }
-  const ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromCat1Const(1, true);
   }
   size_t GetCacheSize() {
     return entries_.size();
   }
-  const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *BooleanType::GetInstance();
   }
-  const RegType& Byte() {
+  RegType& Byte() {
     return *ByteType::GetInstance();
   }
-  const RegType& Char()  {
+  RegType& Char()  {
     return *CharType::GetInstance();
   }
-  const RegType& Short()  {
+  RegType& Short()  {
     return *ShortType::GetInstance();
   }
-  const RegType& Integer() {
+  RegType& Integer() {
     return *IntegerType::GetInstance();
   }
-  const RegType& Float() {
+  RegType& Float() {
     return *FloatType::GetInstance();
   }
-  const RegType& LongLo() {
+  RegType& LongLo() {
     return *LongLoType::GetInstance();
   }
-  const RegType& LongHi() {
+  RegType& LongHi() {
     return *LongHiType::GetInstance();
   }
-  const RegType& DoubleLo() {
+  RegType& DoubleLo() {
     return *DoubleLoType::GetInstance();
   }
-  const RegType& DoubleHi() {
+  RegType& DoubleHi() {
     return *DoubleHiType::GetInstance();
   }
-  const RegType& Undefined() {
+  RegType& Undefined() {
     return *UndefinedType::GetInstance();
   }
-  const RegType& Conflict() {
+  RegType& Conflict() {
     return *ConflictType::GetInstance();
   }
-  const RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return From(NULL, "Ljava/lang/Class;", precise);
   }
-  const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return From(NULL, "Ljava/lang/Object;", precise);
   }
-  const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
+  UninitializedType& Uninitialized(RegType& type, uint32_t allocation_pc)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Create an uninitialized 'this' argument for the given type.
-  const UninitializedType& UninitializedThisArgument(const RegType& type)
+  UninitializedType& UninitializedThisArgument(RegType& type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& FromUninitialized(const RegType& uninit_type)
+  RegType& FromUninitialized(RegType& uninit_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
+  ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  RegType& GetComponentType(RegType& array, mirror::ClassLoader* loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
+  RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
 
   void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -151,9 +151,11 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool MatchDescriptor(size_t idx, const char* descriptor, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
+  ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  void AddEntry(RegType* new_entry);
+
   template <class Type>
   static Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
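
The header changes above are one mechanical sweep: every lookup that used to return const RegType& now returns RegType&, since cache entries are handed out for in-place use (see AddEntry and the Merge machinery). The payoff shows up in the test diff below, where a const_cast plus down_cast pair collapses into a single cast. A compilable sketch of the before/after call-site shape, using static_cast in place of ART's checked down_cast and invented stand-in types:

    #include <iostream>

    struct RegType { virtual ~RegType() {} };
    struct UnresolvedMergedType : RegType {
      int merged_count = 2;  // Stand-in for GetMergedTypes().
    };

    UnresolvedMergedType storage;

    const RegType& MergeConst() { return storage; }  // Old-style accessor.
    RegType& Merge() { return storage; }             // New-style accessor.

    int main() {
      // Before: a const result forces a const_cast before the downcast.
      const RegType& merged_const = MergeConst();
      RegType& merged_nonconst = const_cast<RegType&>(merged_const);
      auto* before = static_cast<UnresolvedMergedType*>(&merged_nonconst);

      // After: the cache hands out RegType&, so the downcast stands alone.
      RegType& merged = Merge();
      auto* after = static_cast<UnresolvedMergedType*>(&merged);

      std::cout << before->merged_count + after->merged_count << "\n";
    }
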
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 9dc0df1..e27558a 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -33,21 +33,21 @@
   // Tests creating primitive types.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
-  const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
-  const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
-  const RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
+  RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
+  RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
+  RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
+  RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
   EXPECT_TRUE(ref_type_const_0.Equals(ref_type_const_1));
   EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_2));
   EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_3));
 
-  const RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
-  const RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
+  RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
+  RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
   EXPECT_TRUE(ref_type_const_wide_0.Equals(ref_type_const_wide_1));
 
-  const RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
-  const RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
-  const RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
+  RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
+  RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
+  RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
   EXPECT_TRUE(ref_type_const_wide_2.Equals(ref_type_const_wide_3));
   EXPECT_FALSE(ref_type_const_wide_2.Equals(ref_type_const_wide_4));
 }
@@ -56,11 +56,11 @@
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
   int64_t val = static_cast<int32_t>(1234);
-  const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
-  const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
-  const RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
-  const RegType& long_lo = cache.LongLo();
-  const RegType& long_hi = cache.LongHi();
+  RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
+  RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+  RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
+  RegType& long_lo = cache.LongLo();
+  RegType& long_hi = cache.LongHi();
   // Check sanity of types.
   EXPECT_TRUE(precise_lo.IsLowHalf());
   EXPECT_FALSE(precise_hi.IsLowHalf());
@@ -80,7 +80,7 @@
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
 
-  const RegType& bool_reg_type = cache.Boolean();
+  RegType& bool_reg_type = cache.Boolean();
   EXPECT_FALSE(bool_reg_type.IsUndefined());
   EXPECT_FALSE(bool_reg_type.IsConflict());
   EXPECT_FALSE(bool_reg_type.IsZero());
@@ -112,7 +112,7 @@
   EXPECT_TRUE(bool_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(bool_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& byte_reg_type = cache.Byte();
+  RegType& byte_reg_type = cache.Byte();
   EXPECT_FALSE(byte_reg_type.IsUndefined());
   EXPECT_FALSE(byte_reg_type.IsConflict());
   EXPECT_FALSE(byte_reg_type.IsZero());
@@ -144,7 +144,7 @@
   EXPECT_TRUE(byte_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(byte_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& char_reg_type = cache.Char();
+  RegType& char_reg_type = cache.Char();
   EXPECT_FALSE(char_reg_type.IsUndefined());
   EXPECT_FALSE(char_reg_type.IsConflict());
   EXPECT_FALSE(char_reg_type.IsZero());
@@ -176,7 +176,7 @@
   EXPECT_TRUE(char_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(char_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& short_reg_type = cache.Short();
+  RegType& short_reg_type = cache.Short();
   EXPECT_FALSE(short_reg_type.IsUndefined());
   EXPECT_FALSE(short_reg_type.IsConflict());
   EXPECT_FALSE(short_reg_type.IsZero());
@@ -208,7 +208,7 @@
   EXPECT_TRUE(short_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(short_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& int_reg_type = cache.Integer();
+  RegType& int_reg_type = cache.Integer();
   EXPECT_FALSE(int_reg_type.IsUndefined());
   EXPECT_FALSE(int_reg_type.IsConflict());
   EXPECT_FALSE(int_reg_type.IsZero());
@@ -240,7 +240,7 @@
   EXPECT_TRUE(int_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(int_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& long_reg_type = cache.LongLo();
+  RegType& long_reg_type = cache.LongLo();
   EXPECT_FALSE(long_reg_type.IsUndefined());
   EXPECT_FALSE(long_reg_type.IsConflict());
   EXPECT_FALSE(long_reg_type.IsZero());
@@ -272,7 +272,7 @@
   EXPECT_FALSE(long_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(long_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& float_reg_type = cache.Float();
+  RegType& float_reg_type = cache.Float();
   EXPECT_FALSE(float_reg_type.IsUndefined());
   EXPECT_FALSE(float_reg_type.IsConflict());
   EXPECT_FALSE(float_reg_type.IsZero());
@@ -304,7 +304,7 @@
   EXPECT_FALSE(float_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(float_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& double_reg_type = cache.DoubleLo();
+  RegType& double_reg_type = cache.DoubleLo();
   EXPECT_FALSE(double_reg_type.IsUndefined());
   EXPECT_FALSE(double_reg_type.IsConflict());
   EXPECT_FALSE(double_reg_type.IsZero());
@@ -344,9 +344,9 @@
   // match the one that is imprecise.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& imprecise_obj = cache.JavaLangObject(false);
-  const RegType& precise_obj = cache.JavaLangObject(true);
-  const RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+  RegType& imprecise_obj = cache.JavaLangObject(false);
+  RegType& precise_obj = cache.JavaLangObject(true);
+  RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
 
   EXPECT_TRUE(precise_obj.Equals(precise_obj_2));
   EXPECT_FALSE(imprecise_obj.Equals(precise_obj));
@@ -359,14 +359,14 @@
   // a hit the second time.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
 
-  const RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.Equals(ref_type_1));
 
-  const RegType& unresolved_super_class =  cache.FromUnresolvedSuperClass(ref_type_0);
+  RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
   EXPECT_TRUE(unresolved_super_class.IsUnresolvedSuperClass());
   EXPECT_TRUE(unresolved_super_class.IsNonZeroReferenceTypes());
 }
@@ -375,21 +375,21 @@
   // Tests creating uninitialized types from unresolved types.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
-  const RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.Equals(ref_type));
   // Create an uninitialized type of this unresolved type
-  const RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
+  RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
   EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
   EXPECT_TRUE(unresolved_unintialised.IsUninitializedTypes());
   EXPECT_TRUE(unresolved_unintialised.IsNonZeroReferenceTypes());
   // Create an uninitialized type of this unresolved type with a different PC
-  const RegType& ref_type_unresolved_unintialised_1 =  cache.Uninitialized(ref_type, 1102ull);
+  RegType& ref_type_unresolved_unintialised_1 = cache.Uninitialized(ref_type, 1102ull);
   EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
   EXPECT_FALSE(unresolved_unintialised.Equals(ref_type_unresolved_unintialised_1));
   // Create an uninitialized type of this unresolved type with the same PC
-  const RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
+  RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
   EXPECT_TRUE(unresolved_unintialised.Equals(unresolved_unintialised_2));
 }
 
@@ -397,12 +397,12 @@
   // Tests types for proper Dump messages.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
-  const RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
-  const RegType& resolved_ref = cache.JavaLangString();
-  const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
-  const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
-  const RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
+  RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
+  RegType& resolved_ref = cache.JavaLangString();
+  RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
+  RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
+  RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
 
   std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
   EXPECT_EQ(expected, unresolved_ref.Dump());
@@ -422,16 +422,16 @@
   // The JavaLangObject method instead of FromDescriptor. String class is final.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type = cache.JavaLangString();
-  const RegType& ref_type_2 = cache.JavaLangString();
-  const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
+  RegType& ref_type = cache.JavaLangString();
+  RegType& ref_type_2 = cache.JavaLangString();
+  RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
 
   EXPECT_TRUE(ref_type.Equals(ref_type_2));
   EXPECT_TRUE(ref_type_2.Equals(ref_type_3));
   EXPECT_TRUE(ref_type.IsPreciseReference());
 
   // Create an uninitialized type out of this:
-  const RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
+  RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
   EXPECT_TRUE(ref_type_unintialized.IsUninitializedReference());
   EXPECT_FALSE(ref_type_unintialized.IsUnresolvedAndUninitializedReference());
 }
@@ -442,9 +442,9 @@
   // The JavaLangObject method instead of FromDescriptor. The Object class is not final.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type = cache.JavaLangObject(true);
-  const RegType& ref_type_2 = cache.JavaLangObject(true);
-  const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+  RegType& ref_type = cache.JavaLangObject(true);
+  RegType& ref_type_2 = cache.JavaLangObject(true);
+  RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
 
   EXPECT_TRUE(ref_type.Equals(ref_type_2));
   EXPECT_TRUE(ref_type_3.Equals(ref_type_2));
@@ -455,20 +455,19 @@
   // String and Object, LUB is Object.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache_new(true);
-  const RegType& string = cache_new.JavaLangString();
-  const RegType& Object = cache_new.JavaLangObject(true);
+  RegType& string = cache_new.JavaLangString();
+  RegType& Object = cache_new.JavaLangObject(true);
   EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
   // Merge two unresolved types.
-  const RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
-  const RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
+  RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
   EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
 
-  const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
+  RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
   EXPECT_TRUE(merged.IsUnresolvedMergedReference());
-  RegType& merged_nonconst = const_cast<RegType&>(merged);
 
-  std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged_nonconst))->GetMergedTypes();
+  std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged))->GetMergedTypes();
   EXPECT_EQ(ref_type_0.GetId(), *(merged_ids.begin()));
   EXPECT_EQ(ref_type_1.GetId(), *((++merged_ids.begin())));
 }
@@ -479,27 +478,27 @@
   RegTypeCache cache_new(true);
 
   constexpr int32_t kTestConstantValue = 10;
-  const RegType& float_type = cache_new.Float();
-  const RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
-  const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
+  RegType& float_type = cache_new.Float();
+  RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
+  RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
   {
     // float MERGE precise cst => float.
-    const RegType& merged = float_type.Merge(precise_cst, &cache_new);
+    RegType& merged = float_type.Merge(precise_cst, &cache_new);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // precise cst MERGE float => float.
-    const RegType& merged = precise_cst.Merge(float_type, &cache_new);
+    RegType& merged = precise_cst.Merge(float_type, &cache_new);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // float MERGE imprecise cst => float.
-    const RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
+    RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // imprecise cst MERGE float => float.
-    const RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
+    RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
     EXPECT_TRUE(merged.IsFloat());
   }
 }
@@ -510,50 +509,50 @@
   RegTypeCache cache_new(true);
 
   constexpr int32_t kTestConstantValue = 10;
-  const RegType& long_lo_type = cache_new.LongLo();
-  const RegType& long_hi_type = cache_new.LongHi();
-  const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
-  const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
-  const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
-  const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+  RegType& long_lo_type = cache_new.LongLo();
+  RegType& long_hi_type = cache_new.LongHi();
+  RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+  RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+  RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+  RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
   {
     // lo MERGE precise cst lo => lo.
-    const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
+    RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // precise cst lo MERGE lo => lo.
-    const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
+    RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // lo MERGE imprecise cst lo => lo.
-    const RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
+    RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // imprecise cst lo MERGE lo => lo.
-    const RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
+    RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // hi MERGE precise cst hi => hi.
-    const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
+    RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // precise cst hi MERGE hi => hi.
-    const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
+    RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // hi MERGE imprecise cst hi => hi.
-    const RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
+    RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // imprecise cst hi MERGE hi => hi.
-    const RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
+    RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
     EXPECT_TRUE(merged.IsLongHi());
   }
 }
@@ -564,50 +563,50 @@
   RegTypeCache cache_new(true);
 
   constexpr int32_t kTestConstantValue = 10;
-  const RegType& double_lo_type = cache_new.DoubleLo();
-  const RegType& double_hi_type = cache_new.DoubleHi();
-  const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
-  const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
-  const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
-  const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+  RegType& double_lo_type = cache_new.DoubleLo();
+  RegType& double_hi_type = cache_new.DoubleHi();
+  RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+  RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+  RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+  RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
   {
     // lo MERGE precise cst lo => lo.
-    const RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
+    RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // precise cst lo MERGE lo => lo.
-    const RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
+    RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // lo MERGE imprecise cst lo => lo.
-    const RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
+    RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // imprecise cst lo MERGE lo => lo.
-    const RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
+    RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // hi MERGE precise cst hi => hi.
-    const RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
+    RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // precise cst hi MERGE hi => hi.
-    const RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
+    RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // hi MERGE imprecise cst hi => hi.
-    const RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
+    RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // imprecise cst hi MERGE hi => hi.
-    const RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
+    RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
 }
@@ -616,8 +615,8 @@
   // Tests creating primitive types.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache_new(true);
-  const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
-  const RegType& precise_const = cache_new.FromCat1Const(10, true);
+  RegType& imprecise_const = cache_new.FromCat1Const(10, false);
+  RegType& precise_const = cache_new.FromCat1Const(10, true);
 
   EXPECT_TRUE(imprecise_const.IsImpreciseConstant());
   EXPECT_TRUE(precise_const.IsPreciseConstant());
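
Many of the tests above turn on the verifier's model of 64-bit values: a long or double occupies two adjacent registers as a lo/hi pair, and CheckWidePair accepts a pair only when the high half is the partner directly following the low half. A toy illustration of that pairing rule (ids invented; in ART the check compares RegType ids held in the cache):

    #include <cassert>
    #include <cstdint>

    // Invented ids: each wide type is modelled as consecutive lo/hi entries,
    // loosely mirroring how wide RegType ids pair up in the cache.
    enum Kind : std::uint16_t {
      kLongLo = 10, kLongHi = 11, kDoubleLo = 12, kDoubleHi = 13
    };

    bool IsLowHalf(std::uint16_t id) { return id == kLongLo || id == kDoubleLo; }

    // A wide pair is valid only when hi is the partner of the given lo half.
    bool CheckWidePair(std::uint16_t lo, std::uint16_t hi) {
      return IsLowHalf(lo) && hi == static_cast<std::uint16_t>(lo + 1);
    }

    int main() {
      assert(CheckWidePair(kLongLo, kLongHi));
      assert(!CheckWidePair(kLongLo, kDoubleHi));  // Mismatched halves.
      assert(!CheckWidePair(kLongHi, kLongLo));    // Order matters.
      return 0;
    }
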
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 0989cd0..378c6d3 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -25,7 +25,7 @@
 namespace art {
 namespace verifier {
 
-inline const RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
+inline RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
   // The register index was validated during the static pass, so we don't need to check it here.
   DCHECK_LT(vsrc, num_regs_);
   return verifier_->GetRegTypeCache()->GetFromId(line_[vsrc]);
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 556056c..4d67cfb 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -36,7 +36,7 @@
   return true;
 }
 
-bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
+bool RegisterLine::SetRegisterType(uint32_t vdst, RegType& new_type) {
   DCHECK_LT(vdst, num_regs_);
   if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
@@ -53,8 +53,8 @@
   return true;
 }
 
-bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1,
-                                       const RegType& new_type2) {
+bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, RegType& new_type1,
+                                       RegType& new_type2) {
   DCHECK_LT(vdst + 1, num_regs_);
   if (!new_type1.CheckWidePair(new_type2)) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
@@ -75,21 +75,21 @@
   result_[1] = result_[0];
 }
 
-void RegisterLine::SetResultRegisterType(const RegType& new_type) {
+void RegisterLine::SetResultRegisterType(RegType& new_type) {
   DCHECK(!new_type.IsLowHalf());
   DCHECK(!new_type.IsHighHalf());
   result_[0] = new_type.GetId();
   result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
 }
 
-void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
-                                             const RegType& new_type2) {
+void RegisterLine::SetResultRegisterTypeWide(RegType& new_type1,
+                                             RegType& new_type2) {
   DCHECK(new_type1.CheckWidePair(new_type2));
   result_[0] = new_type1.GetId();
   result_[1] = new_type2.GetId();
 }
 
-const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
+RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
   const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
   if (args_count < 1) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
@@ -97,7 +97,7 @@
   }
   /* Get the element type of the array held in vsrc */
   const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
-  const RegType& this_type = GetRegisterType(this_reg);
+  RegType& this_type = GetRegisterType(this_reg);
   if (!this_type.IsReferenceTypes()) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
                                                  << this_reg << " (type=" << this_type << ")";
@@ -107,9 +107,9 @@
 }
 
 bool RegisterLine::VerifyRegisterType(uint32_t vsrc,
-                                      const RegType& check_type) {
+                                      RegType& check_type) {
   // Verify the src register type against the check type, refining the type of the register.
-  const RegType& src_type = GetRegisterType(vsrc);
+  RegType& src_type = GetRegisterType(vsrc);
   if (!(check_type.IsAssignableFrom(src_type))) {
     enum VerifyError fail_type;
     if (!check_type.IsNonZeroReferenceTypes() || !src_type.IsNonZeroReferenceTypes()) {
@@ -125,7 +125,7 @@
     return false;
   }
   if (check_type.IsLowHalf()) {
-    const RegType& src_type_h = GetRegisterType(vsrc + 1);
+    RegType& src_type_h = GetRegisterType(vsrc + 1);
     if (!src_type.CheckWidePair(src_type_h)) {
       verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
                                                    << src_type << "/" << src_type_h;
@@ -139,17 +139,17 @@
   return true;
 }
 
-bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1,
-                                          const RegType& check_type2) {
+bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1,
+                                          RegType& check_type2) {
   DCHECK(check_type1.CheckWidePair(check_type2));
   // Verify the src register type against the check type, refining the type of the register.
-  const RegType& src_type = GetRegisterType(vsrc);
+  RegType& src_type = GetRegisterType(vsrc);
   if (!check_type1.IsAssignableFrom(src_type)) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type
                                << " but expected " << check_type1;
     return false;
   }
-  const RegType& src_type_h = GetRegisterType(vsrc + 1);
+  RegType& src_type_h = GetRegisterType(vsrc + 1);
   if (!src_type.CheckWidePair(src_type_h)) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
         << src_type << "/" << src_type_h;
@@ -162,9 +162,9 @@
   return true;
 }
 
-void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) {
+void RegisterLine::MarkRefsAsInitialized(RegType& uninit_type) {
   DCHECK(uninit_type.IsUninitializedTypes());
-  const RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
+  RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
   size_t changed = 0;
   for (uint32_t i = 0; i < num_regs_; i++) {
     if (GetRegisterType(i).Equals(uninit_type)) {
@@ -200,7 +200,7 @@
   }
 }
 
-std::string RegisterLine::Dump() const {
+std::string RegisterLine::Dump() {
   std::string result;
   for (size_t i = 0; i < num_regs_; i++) {
     result += StringPrintf("%zd:[", i);
@@ -213,7 +213,7 @@
   return result;
 }
 
-void RegisterLine::MarkUninitRefsAsInvalid(const RegType& uninit_type) {
+void RegisterLine::MarkUninitRefsAsInvalid(RegType& uninit_type) {
   for (size_t i = 0; i < num_regs_; i++) {
     if (GetRegisterType(i).Equals(uninit_type)) {
       line_[i] = verifier_->GetRegTypeCache()->Conflict().GetId();
@@ -224,7 +224,7 @@
 
 void RegisterLine::CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) {
   DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
-  const RegType& type = GetRegisterType(vsrc);
+  RegType& type = GetRegisterType(vsrc);
   if (!SetRegisterType(vdst, type)) {
     return;
   }
@@ -238,8 +238,8 @@
 }
 
 void RegisterLine::CopyRegister2(uint32_t vdst, uint32_t vsrc) {
-  const RegType& type_l = GetRegisterType(vsrc);
-  const RegType& type_h = GetRegisterType(vsrc + 1);
+  RegType& type_l = GetRegisterType(vsrc);
+  RegType& type_h = GetRegisterType(vsrc + 1);
 
   if (!type_l.CheckWidePair(type_h)) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy2 v" << vdst << "<-v" << vsrc
@@ -250,7 +250,7 @@
 }
 
 void RegisterLine::CopyResultRegister1(uint32_t vdst, bool is_reference) {
-  const RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+  RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
   if ((!is_reference && !type.IsCategory1Types()) ||
       (is_reference && !type.IsReferenceTypes())) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
@@ -267,8 +267,8 @@
  * register to another register, and reset the result register.
  */
 void RegisterLine::CopyResultRegister2(uint32_t vdst) {
-  const RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
-  const RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
+  RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+  RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
   if (!type_l.IsCategory2Types()) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
         << "copyRes2 v" << vdst << "<- result0"  << " type=" << type_l;
@@ -281,40 +281,40 @@
 }
 
 void RegisterLine::CheckUnaryOp(const Instruction* inst,
-                                const RegType& dst_type,
-                                const RegType& src_type) {
+                                RegType& dst_type,
+                                RegType& src_type) {
   if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
     SetRegisterType(inst->VRegA_12x(), dst_type);
   }
 }
 
 void RegisterLine::CheckUnaryOpWide(const Instruction* inst,
-                                    const RegType& dst_type1, const RegType& dst_type2,
-                                    const RegType& src_type1, const RegType& src_type2) {
+                                    RegType& dst_type1, RegType& dst_type2,
+                                    RegType& src_type1, RegType& src_type2) {
   if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
     SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
   }
 }
 
 void RegisterLine::CheckUnaryOpToWide(const Instruction* inst,
-                                      const RegType& dst_type1, const RegType& dst_type2,
-                                      const RegType& src_type) {
+                                      RegType& dst_type1, RegType& dst_type2,
+                                      RegType& src_type) {
   if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
     SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
   }
 }
 
 void RegisterLine::CheckUnaryOpFromWide(const Instruction* inst,
-                                        const RegType& dst_type,
-                                        const RegType& src_type1, const RegType& src_type2) {
+                                        RegType& dst_type,
+                                        RegType& src_type1, RegType& src_type2) {
   if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
     SetRegisterType(inst->VRegA_12x(), dst_type);
   }
 }
 
 void RegisterLine::CheckBinaryOp(const Instruction* inst,
-                                 const RegType& dst_type,
-                                 const RegType& src_type1, const RegType& src_type2,
+                                 RegType& dst_type,
+                                 RegType& src_type1, RegType& src_type2,
                                  bool check_boolean_op) {
   const uint32_t vregB = inst->VRegB_23x();
   const uint32_t vregC = inst->VRegC_23x();
@@ -333,9 +333,9 @@
 }
 
 void RegisterLine::CheckBinaryOpWide(const Instruction* inst,
-                                     const RegType& dst_type1, const RegType& dst_type2,
-                                     const RegType& src_type1_1, const RegType& src_type1_2,
-                                     const RegType& src_type2_1, const RegType& src_type2_2) {
+                                     RegType& dst_type1, RegType& dst_type2,
+                                     RegType& src_type1_1, RegType& src_type1_2,
+                                     RegType& src_type2_1, RegType& src_type2_2) {
   if (VerifyRegisterTypeWide(inst->VRegB_23x(), src_type1_1, src_type1_2) &&
       VerifyRegisterTypeWide(inst->VRegC_23x(), src_type2_1, src_type2_2)) {
     SetRegisterTypeWide(inst->VRegA_23x(), dst_type1, dst_type2);
@@ -343,8 +343,8 @@
 }
 
 void RegisterLine::CheckBinaryOpWideShift(const Instruction* inst,
-                                          const RegType& long_lo_type, const RegType& long_hi_type,
-                                          const RegType& int_type) {
+                                          RegType& long_lo_type, RegType& long_hi_type,
+                                          RegType& int_type) {
   if (VerifyRegisterTypeWide(inst->VRegB_23x(), long_lo_type, long_hi_type) &&
       VerifyRegisterType(inst->VRegC_23x(), int_type)) {
     SetRegisterTypeWide(inst->VRegA_23x(), long_lo_type, long_hi_type);
@@ -352,8 +352,8 @@
 }
 
 void RegisterLine::CheckBinaryOp2addr(const Instruction* inst,
-                                      const RegType& dst_type, const RegType& src_type1,
-                                      const RegType& src_type2, bool check_boolean_op) {
+                                      RegType& dst_type, RegType& src_type1,
+                                      RegType& src_type2, bool check_boolean_op) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
   if (VerifyRegisterType(vregA, src_type1) &&
@@ -371,9 +371,9 @@
 }
 
 void RegisterLine::CheckBinaryOp2addrWide(const Instruction* inst,
-                                          const RegType& dst_type1, const RegType& dst_type2,
-                                          const RegType& src_type1_1, const RegType& src_type1_2,
-                                          const RegType& src_type2_1, const RegType& src_type2_2) {
+                                          RegType& dst_type1, RegType& dst_type2,
+                                          RegType& src_type1_1, RegType& src_type1_2,
+                                          RegType& src_type2_1, RegType& src_type2_2) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
   if (VerifyRegisterTypeWide(vregA, src_type1_1, src_type1_2) &&
@@ -383,8 +383,8 @@
 }
 
 void RegisterLine::CheckBinaryOp2addrWideShift(const Instruction* inst,
-                                               const RegType& long_lo_type, const RegType& long_hi_type,
-                                               const RegType& int_type) {
+                                               RegType& long_lo_type, RegType& long_hi_type,
+                                               RegType& int_type) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
   if (VerifyRegisterTypeWide(vregA, long_lo_type, long_hi_type) &&
@@ -394,7 +394,7 @@
 }
 
 void RegisterLine::CheckLiteralOp(const Instruction* inst,
-                                  const RegType& dst_type, const RegType& src_type,
+                                  RegType& dst_type, RegType& src_type,
                                   bool check_boolean_op, bool is_lit16) {
   const uint32_t vregA = is_lit16 ? inst->VRegA_22s() : inst->VRegA_22b();
   const uint32_t vregB = is_lit16 ? inst->VRegB_22s() : inst->VRegB_22b();
@@ -413,7 +413,7 @@
 }
 
 void RegisterLine::PushMonitor(uint32_t reg_idx, int32_t insn_idx) {
-  const RegType& reg_type = GetRegisterType(reg_idx);
+  RegType& reg_type = GetRegisterType(reg_idx);
   if (!reg_type.IsReferenceTypes()) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object (" << reg_type << ")";
   } else if (monitors_.size() >= 32) {
@@ -425,7 +425,7 @@
 }
 
 void RegisterLine::PopMonitor(uint32_t reg_idx) {
-  const RegType& reg_type = GetRegisterType(reg_idx);
+  RegType& reg_type = GetRegisterType(reg_idx);
   if (!reg_type.IsReferenceTypes()) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
   } else if (monitors_.empty()) {
@@ -460,9 +460,9 @@
   DCHECK(incoming_line != nullptr);
   for (size_t idx = 0; idx < num_regs_; idx++) {
     if (line_[idx] != incoming_line->line_[idx]) {
-      const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
-      const RegType& cur_type = GetRegisterType(idx);
-      const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
+      RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
+      RegType& cur_type = GetRegisterType(idx);
+      RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
       changed = changed || !cur_type.Equals(new_type);
       line_[idx] = new_type.GetId();
     }
@@ -508,7 +508,8 @@
 
 std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  os << rhs.Dump();
+  RegisterLine& rhs_non_const = const_cast<RegisterLine&>(rhs);
+  os << rhs_non_const.Dump();
   return os;
 }
 
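
Because Dump() lost its const qualifier in the hunk above (it now reaches through the cache's mutable references), operator<< keeps its const RegisterLine& parameter, so const lines and temporaries still stream, and casts constness away internally. The pattern in isolation:

    #include <iostream>
    #include <ostream>
    #include <string>

    class RegisterLine {
     public:
      // Non-const only because it now pulls mutable references out of the
      // type cache; it does not actually modify the line.
      std::string Dump() { return "0:[Undefined]"; }
    };

    // Keeping const& here lets temporaries and const lines print; the cast
    // bridges to the now non-const Dump().
    std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs) {
      RegisterLine& rhs_non_const = const_cast<RegisterLine&>(rhs);
      os << rhs_non_const.Dump();
      return os;
    }

    int main() {
      std::cout << RegisterLine() << "\n";  // Binds a temporary to const&.
    }
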
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 57c7517..b0018d2 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -81,26 +81,26 @@
   // Set the type of register N, verifying that the register is valid.  If "newType" is the "Lo"
   // part of a 64-bit value, register N+1 will be set to "newType+1".
   // The register index was validated during the static pass, so we don't need to check it here.
-  bool SetRegisterType(uint32_t vdst, const RegType& new_type)
+  bool SetRegisterType(uint32_t vdst, RegType& new_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1, const RegType& new_type2)
+  bool SetRegisterTypeWide(uint32_t vdst, RegType& new_type1, RegType& new_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /* Set the type of the "result" register. */
-  void SetResultRegisterType(const RegType& new_type)
+  void SetResultRegisterType(RegType& new_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
+  void SetResultRegisterTypeWide(RegType& new_type1, RegType& new_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the type of register vsrc.
-  const RegType& GetRegisterType(uint32_t vsrc) const;
+  RegType& GetRegisterType(uint32_t vsrc) const;
 
-  bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type)
+  bool VerifyRegisterType(uint32_t vsrc, RegType& check_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1, const RegType& check_type2)
+  bool VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1, RegType& check_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CopyFromLine(const RegisterLine* src) {
@@ -110,7 +110,7 @@
     reg_to_lock_depths_ = src->reg_to_lock_depths_;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FillWithGarbage() {
     memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -126,7 +126,7 @@
    * to prevent them from being used (otherwise, MarkRefsAsInitialized would mark the old ones and
    * the new ones at the same time).
    */
-  void MarkUninitRefsAsInvalid(const RegType& uninit_type)
+  void MarkUninitRefsAsInvalid(RegType& uninit_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -134,7 +134,7 @@
    * reference type. This is called when an appropriate constructor is invoked -- all copies of
    * the reference must be marked as initialized.
    */
-  void MarkRefsAsInitialized(const RegType& uninit_type)
+  void MarkRefsAsInitialized(RegType& uninit_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -173,30 +173,30 @@
    * The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
    * versions. We just need to make sure vA is >= 1 and then return vC.
    */
-  const RegType& GetInvocationThis(const Instruction* inst, bool is_range)
+  RegType& GetInvocationThis(const Instruction* inst, bool is_range)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
    * Verify types for a simple two-register instruction (e.g. "neg-int").
    * "dst_type" is stored into vA, and "src_type" is verified against vB.
    */
-  void CheckUnaryOp(const Instruction* inst, const RegType& dst_type,
-                    const RegType& src_type)
+  void CheckUnaryOp(const Instruction* inst, RegType& dst_type,
+                    RegType& src_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckUnaryOpWide(const Instruction* inst,
-                        const RegType& dst_type1, const RegType& dst_type2,
-                        const RegType& src_type1, const RegType& src_type2)
+                        RegType& dst_type1, RegType& dst_type2,
+                        RegType& src_type1, RegType& src_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckUnaryOpToWide(const Instruction* inst,
-                          const RegType& dst_type1, const RegType& dst_type2,
-                          const RegType& src_type)
+                          RegType& dst_type1, RegType& dst_type2,
+                          RegType& src_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckUnaryOpFromWide(const Instruction* inst,
-                            const RegType& dst_type,
-                            const RegType& src_type1, const RegType& src_type2)
+                            RegType& dst_type,
+                            RegType& src_type1, RegType& src_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -205,19 +205,19 @@
    * against vB/vC.
    */
   void CheckBinaryOp(const Instruction* inst,
-                     const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
+                     RegType& dst_type, RegType& src_type1, RegType& src_type2,
                      bool check_boolean_op)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckBinaryOpWide(const Instruction* inst,
-                         const RegType& dst_type1, const RegType& dst_type2,
-                         const RegType& src_type1_1, const RegType& src_type1_2,
-                         const RegType& src_type2_1, const RegType& src_type2_2)
+                         RegType& dst_type1, RegType& dst_type2,
+                         RegType& src_type1_1, RegType& src_type1_2,
+                         RegType& src_type2_1, RegType& src_type2_2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckBinaryOpWideShift(const Instruction* inst,
-                              const RegType& long_lo_type, const RegType& long_hi_type,
-                              const RegType& int_type)
+                              RegType& long_lo_type, RegType& long_hi_type,
+                              RegType& int_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -225,20 +225,20 @@
    * are verified against vA/vB, then "dst_type" is stored into vA.
    */
   void CheckBinaryOp2addr(const Instruction* inst,
-                          const RegType& dst_type,
-                          const RegType& src_type1, const RegType& src_type2,
+                          RegType& dst_type,
+                          RegType& src_type1, RegType& src_type2,
                           bool check_boolean_op)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckBinaryOp2addrWide(const Instruction* inst,
-                              const RegType& dst_type1, const RegType& dst_type2,
-                              const RegType& src_type1_1, const RegType& src_type1_2,
-                              const RegType& src_type2_1, const RegType& src_type2_2)
+                              RegType& dst_type1, RegType& dst_type2,
+                              RegType& src_type1_1, RegType& src_type1_2,
+                              RegType& src_type2_1, RegType& src_type2_2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckBinaryOp2addrWideShift(const Instruction* inst,
-                                   const RegType& long_lo_type, const RegType& long_hi_type,
-                                   const RegType& int_type)
+                                   RegType& long_lo_type, RegType& long_hi_type,
+                                   RegType& int_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -248,7 +248,7 @@
    * If "check_boolean_op" is set, we use the constant value in vC.
    */
   void CheckLiteralOp(const Instruction* inst,
-                      const RegType& dst_type, const RegType& src_type,
+                      RegType& dst_type, RegType& src_type,
                       bool check_boolean_op, bool is_lit16)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 4909a4a..554712a 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -286,3 +286,8 @@
 
   return char_returns[c1];
 }
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_nativeIsAssignableFrom(JNIEnv* env, jclass,
+                                                                       jclass from, jclass to) {
+  return env->IsAssignableFrom(from, to);
+}
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index 11c80f5..ae133be 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -29,6 +29,7 @@
         testShortMethod();
         testBooleanMethod();
         testCharMethod();
+        testIsAssignableFromOnPrimitiveTypes();
     }
 
     private static native void testFindClassOnAttachedNativeThread();
@@ -151,4 +152,19 @@
         }
       }
     }
+
+    // http://b/16531674
+    private static void testIsAssignableFromOnPrimitiveTypes() {
+      if (!nativeIsAssignableFrom(int.class, Integer.TYPE)) {
+        System.out.println("IsAssignableFrom(int.class, Integer.TYPE) returned false, expected true");
+        throw new AssertionError();
+      }
+
+      if (!nativeIsAssignableFrom(Integer.TYPE, int.class)) {
+        System.out.println("IsAssignableFrom(Integer.TYPE, int.class) returned false, expected true");
+        throw new AssertionError();
+      }
+    }
+
+    native static boolean nativeIsAssignableFrom(Class<?> from, Class<?> to);
 }
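
Background for the test above: int.class and Integer.TYPE evaluate to the same java.lang.Class object, and a primitive class is assignable only to itself, so a conforming JNI IsAssignableFrom must return true in both directions here; b/16531674 was ART answering false for primitive classes. A hypothetical companion helper in the same style as the test's native method (the BothWays name is invented, not part of the test):

    #include <jni.h>

    // Hypothetical helper: for any class C, IsAssignableFrom(C, C) must be
    // JNI_TRUE, and primitive classes are assignable only to themselves.
    extern "C" JNIEXPORT jboolean JNICALL
    Java_Main_nativeIsAssignableFromBothWays(JNIEnv* env, jclass,
                                             jclass from, jclass to) {
      return static_cast<jboolean>(env->IsAssignableFrom(from, to) &&
                                   env->IsAssignableFrom(to, from));
    }
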
diff --git a/test/083-compiler-regressions/expected.txt b/test/083-compiler-regressions/expected.txt
index 9f57dbd..f8d92cc 100644
--- a/test/083-compiler-regressions/expected.txt
+++ b/test/083-compiler-regressions/expected.txt
@@ -37,3 +37,4 @@
 atomicLong passes
 LiveFlags passes trip 3
 LiveFlags passes trip 1
+minDoubleWith3ConstsTest passes
diff --git a/test/083-compiler-regressions/src/Main.java b/test/083-compiler-regressions/src/Main.java
index 748b0de..c089c52 100644
--- a/test/083-compiler-regressions/src/Main.java
+++ b/test/083-compiler-regressions/src/Main.java
@@ -58,6 +58,21 @@
         ManyFloatArgs();
         atomicLong();
         LiveFlags.test();
+        minDoubleWith3ConstsTest();
+    }
+
+    public static double minDouble(double a, double b, double c) {
+        return Math.min(Math.min(a, b), c);
+    }
+
+    public static void minDoubleWith3ConstsTest() {
+        double result = minDouble(1.2, 2.5, Double.NaN);
+        if (Double.isNaN(result)) {
+            System.out.println("minDoubleWith3ConstsTest passes");
+        } else {
+            System.out.println("minDoubleWith3ConstsTest fails: " + result +
+                               " (expecting NaN)");
+        }
     }
 
     public static void atomicLong() {
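
The new regression test hinges on Java semantics: Math.min must propagate NaN, which the quick compiler's constant handling broke for the three-constant case. A sketch of those semantics in C++, contrasted with std::fmin, which prefers the non-NaN operand (the -0.0/+0.0 ordering Math.min also guarantees is omitted for brevity):

    #include <cmath>
    #include <iostream>
    #include <limits>

    // Java-style min: NaN is contagious, unlike std::fmin.
    double JavaMin(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) {
        return std::numeric_limits<double>::quiet_NaN();
      }
      return a < b ? a : b;
    }

    int main() {
      const double nan = std::numeric_limits<double>::quiet_NaN();
      std::cout << JavaMin(JavaMin(1.2, 2.5), nan) << "\n";      // nan
      std::cout << std::fmin(std::fmin(1.2, 2.5), nan) << "\n";  // 1.2
    }
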
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 0f8032e..5c1bc03 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -358,6 +358,9 @@
   prereq_rule :=
   skip_test := false
   uc_reloc_type :=
+  ifeq ($(ART_TEST_RUN_TEST_ALWAYS_CLEAN),true)
+    run_test_options += --always-clean
+  endif
   ifeq ($(2),host)
     uc_host_or_target := HOST
     run_test_options += --host
diff --git a/test/run-all-tests b/test/run-all-tests
index 02f46f9..284cca0 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -95,6 +95,9 @@
     elif [ "x$1" = "x--prebuild" ]; then
         run_args="${run_args} --prebuild"
         shift;
+    elif [ "x$1" = "x--always-clean" ]; then
+        run_args="${run_args} --always-clean"
+        shift;
     elif expr "x$1" : "x--" >/dev/null 2>&1; then
         echo "unknown $0 option: $1" 1>&2
         usage="yes"
@@ -114,7 +116,7 @@
              "further documentation:"
         echo "    --debug --dev --host --interpreter --jvm --no-optimize"
         echo "    --no-verify -O --update --valgrind --zygote --64 --relocate"
-        echo "    --prebuild"
+        echo "    --prebuild --always-clean"
         echo "  Specific Runtime Options:"
         echo "    --seq                Run tests one-by-one, avoiding failures caused by busy CPU"
     ) 1>&2
diff --git a/test/run-test b/test/run-test
index ae613d9..aef7c52 100755
--- a/test/run-test
+++ b/test/run-test
@@ -73,6 +73,7 @@
 build_only="no"
 suffix64=""
 trace="false"
+always_clean="no"
 
 while true; do
     if [ "x$1" = "x--host" ]; then
@@ -179,6 +180,9 @@
     elif [ "x$1" = "x--trace" ]; then
         trace="true"
         shift
+    elif [ "x$1" = "x--always-clean" ]; then
+        always_clean="yes"
+        shift
     elif expr "x$1" : "x--" >/dev/null 2>&1; then
         echo "unknown $0 option: $1" 1>&2
         usage="yes"
@@ -325,6 +329,7 @@
              "files."
         echo "    --64                 Run the test in 64-bit mode"
         echo "    --trace              Run with method tracing"
+        echo "    --always-clean       Delete the test files even if the test fails."
     ) 1>&2
     exit 1
 fi
@@ -445,19 +450,8 @@
     fi
 fi
 
-# Clean up test files.
-if [ "$good" == "yes" ]; then
-    cd "$oldwd"
-    rm -rf "$tmp_dir"
-    if [ "$target_mode" = "yes" -a "$build_exit" = "0" ]; then
-        adb shell rm -rf $DEX_LOCATION
-    fi
-    exit 0
-fi
-
-
 (
-    if [ "$update_mode" != "yes" ]; then
+    if [ "$good" != "yes" -a "$update_mode" != "yes" ]; then
         echo "${test_dir}: FAILED!"
         echo ' '
         echo '#################### info'
@@ -467,9 +461,33 @@
         echo '####################'
         echo ' '
     fi
-    echo "${TEST_NAME} files left in ${tmp_dir} on host"
-    if [ "$target_mode" == "yes" ]; then
-        echo "and in ${DEX_LOCATION} on target"
+
+) 1>&2
+
+# Clean up test files.
+if [ "$always_clean" = "yes" -o "$good" = "yes" ]; then
+    cd "$oldwd"
+    rm -rf "$tmp_dir"
+    if [ "$target_mode" = "yes" -a "$build_exit" = "0" ]; then
+        adb shell rm -rf $DEX_LOCATION
+    fi
+    if [ "$good" = "yes" ]; then
+        exit 0
+    fi
+fi
+
+
+(
+    if [ "$always_clean" = "yes" ]; then
+        echo "${TEST_NAME} files deleted from host"
+        if [ "$target_mode" == "yes" ]; then
+            echo "and from target"
+        fi
+    else
+        echo "${TEST_NAME} files left in ${tmp_dir} on host"
+        if [ "$target_mode" == "yes" ]; then
+            echo "and in ${DEX_LOCATION} on target"
+        fi
     fi
 
 ) 1>&2
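
Taken together, the run-test restructuring reduces to one predicate: artifacts are removed when the test passed or --always-clean was given, while the exit status still tracks only the test result. A hypothetical distillation of that truth table:

    #include <cstdio>

    // Distilled from the shell logic above: cleanup and exit status are
    // decided independently of each other.
    int Finish(bool good, bool always_clean) {
      const bool clean = good || always_clean;
      std::printf("%s\n",
                  clean ? "artifacts removed" : "artifacts kept for inspection");
      return good ? 0 : 1;  // Failure still fails, even after cleanup.
    }

    int main() {
      // --always-clean on a failing test: files go away, failure is reported.
      return Finish(/*good=*/false, /*always_clean=*/true);
    }
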