Merge "Add support for relative patching to quick offsets."
diff --git a/Android.mk b/Android.mk
index a63c29b..a30c090 100644
--- a/Android.mk
+++ b/Android.mk
@@ -90,6 +90,8 @@
 include $(art_path)/dalvikvm/Android.mk
 include $(art_path)/tools/Android.mk
 include $(art_build_path)/Android.oat.mk
+include $(art_path)/sigchainlib/Android.mk
+
 
 
 
@@ -443,21 +445,21 @@
 use-art:
 	adb root && sleep 3
 	adb shell stop
-	adb shell setprop persist.sys.dalvik.vm.lib.1 libart.so
+	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
 .PHONY: use-artd
 use-artd:
 	adb root && sleep 3
 	adb shell stop
-	adb shell setprop persist.sys.dalvik.vm.lib.1 libartd.so
+	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
 	adb shell start
 
 .PHONY: use-dalvik
 use-dalvik:
 	adb root && sleep 3
 	adb shell stop
-	adb shell setprop persist.sys.dalvik.vm.lib.1 libdvm.so
+	adb shell setprop persist.sys.dalvik.vm.lib.2 libdvm.so
 	adb shell start
 
 .PHONY: use-art-full
@@ -467,7 +469,7 @@
 	adb shell rm -rf $(ART_DALVIK_CACHE_DIR)/*
 	adb shell setprop dalvik.vm.dex2oat-flags ""
 	adb shell setprop dalvik.vm.image-dex2oat-flags ""
-	adb shell setprop persist.sys.dalvik.vm.lib.1 libart.so
+	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
 .PHONY: use-artd-full
@@ -477,7 +479,7 @@
 	adb shell rm -rf $(ART_DALVIK_CACHE_DIR)/*
 	adb shell setprop dalvik.vm.dex2oat-flags ""
 	adb shell setprop dalvik.vm.image-dex2oat-flags ""
-	adb shell setprop persist.sys.dalvik.vm.lib.1 libartd.so
+	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
 	adb shell start
 
 .PHONY: use-art-smart
@@ -487,7 +489,7 @@
 	adb shell rm -rf $(ART_DALVIK_CACHE_DIR)/*
 	adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=interpret-only"
 	adb shell setprop dalvik.vm.image-dex2oat-flags ""
-	adb shell setprop persist.sys.dalvik.vm.lib.1 libart.so
+	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
 .PHONY: use-art-interpret-only
@@ -497,7 +499,7 @@
 	adb shell rm -rf $(ART_DALVIK_CACHE_DIR)/*
 	adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=interpret-only"
 	adb shell setprop dalvik.vm.image-dex2oat-flags "--compiler-filter=interpret-only"
-	adb shell setprop persist.sys.dalvik.vm.lib.1 libart.so
+	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
 .PHONY: use-art-verify-none
@@ -507,7 +509,7 @@
 	adb shell rm -rf $(ART_DALVIK_CACHE_DIR)/*
 	adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=verify-none"
 	adb shell setprop dalvik.vm.image-dex2oat-flags "--compiler-filter=verify-none"
-	adb shell setprop persist.sys.dalvik.vm.lib.1 libart.so
+	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
 ########################################################################
diff --git a/build/Android.libarttest.mk b/build/Android.libarttest.mk
index 9e5f3d6..c080928 100644
--- a/build/Android.libarttest.mk
+++ b/build/Android.libarttest.mk
@@ -16,6 +16,7 @@
 
 LIBARTTEST_COMMON_SRC_FILES := \
 	test/JniTest/jni_test.cc \
+	test/SignalTest/signaltest.cc \
 	test/ReferenceMap/stack_walk_refmap_jni.cc \
 	test/StackWalk/stack_walk_jni.cc \
 	test/UnsafeTest/unsafe_test.cc
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index 35d777e..66fb608 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -88,6 +88,7 @@
   std::unique_ptr<MIRGraph> mir_graph;   // MIR container.
   std::unique_ptr<Backend> cg;           // Target-specific codegen.
   TimingLogger timings;
+  bool print_pass;                 // Do we want to print a pass or not?
 };
 
 }  // namespace art
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index c3f694d..1d4a9bb 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -105,7 +105,8 @@
     arena_stack(pool),
     mir_graph(nullptr),
     cg(nullptr),
-    timings("QuickCompiler", true, false) {
+    timings("QuickCompiler", true, false),
+    print_pass(false) {
 }
 
 CompilationUnit::~CompilationUnit() {
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index c0068b2..6259496 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -21,8 +21,48 @@
 
 namespace art {
 
-uint16_t LocalValueNumbering::GetFieldId(const DexFile* dex_file, uint16_t field_idx) {
-  FieldReference key = { dex_file, field_idx };
+namespace {  // anonymous namespace
+
+// Operations used for value map keys instead of actual opcode.
+static constexpr uint16_t kInvokeMemoryVersionBumpOp = Instruction::INVOKE_DIRECT;
+static constexpr uint16_t kUnresolvedSFieldOp = Instruction::SPUT;
+static constexpr uint16_t kResolvedSFieldOp = Instruction::SGET;
+static constexpr uint16_t kUnresolvedIFieldOp = Instruction::IPUT;
+static constexpr uint16_t kNonAliasingIFieldOp = Instruction::IGET;
+static constexpr uint16_t kAliasingIFieldOp = Instruction::IGET_WIDE;
+static constexpr uint16_t kAliasingIFieldStartVersionOp = Instruction::IGET_WIDE;
+static constexpr uint16_t kAliasingIFieldBumpVersionOp = Instruction::IGET_OBJECT;
+static constexpr uint16_t kArrayAccessLocOp = Instruction::APUT;
+static constexpr uint16_t kNonAliasingArrayOp = Instruction::AGET;
+static constexpr uint16_t kNonAliasingArrayStartVersionOp = Instruction::AGET_WIDE;
+static constexpr uint16_t kAliasingArrayOp = Instruction::AGET_OBJECT;
+static constexpr uint16_t kAliasingArrayMemoryVersionOp = Instruction::AGET_BOOLEAN;
+static constexpr uint16_t kAliasingArrayBumpVersionOp = Instruction::AGET_BYTE;
+
+}  // anonymous namespace
+
+LocalValueNumbering::LocalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator)
+    : cu_(cu),
+      last_value_(0u),
+      sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+      sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+      value_map_(std::less<uint64_t>(), allocator->Adapter()),
+      global_memory_version_(0u),
+      aliasing_ifield_version_map_(std::less<uint16_t>(), allocator->Adapter()),
+      non_aliasing_array_version_map_(std::less<uint16_t>(), allocator->Adapter()),
+      field_index_map_(FieldReferenceComparator(), allocator->Adapter()),
+      non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
+      non_aliasing_ifields_(NonAliasingIFieldKeyComparator(), allocator->Adapter()),
+      escaped_array_refs_(EscapedArrayKeyComparator(), allocator->Adapter()),
+      range_checked_(RangeCheckKeyComparator(), allocator->Adapter()),
+      null_checked_(std::less<uint16_t>(), allocator->Adapter()) {
+  std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
+  std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
+  std::fill_n(aliasing_array_version_, kFieldTypeCount, 0u);
+}
+
+uint16_t LocalValueNumbering::GetFieldId(const MirFieldInfo& field_info) {
+  FieldReference key = { field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex() };
   auto it = field_index_map_.find(key);
   if (it != field_index_map_.end()) {
     return it->second;
@@ -32,62 +72,6 @@
   return id;
 }
 
-void LocalValueNumbering::AdvanceGlobalMemory() {
-  // See AdvanceMemoryVersion() for explanation.
-  global_memory_version_ = next_memory_version_;
-  ++next_memory_version_;
-}
-
-uint16_t LocalValueNumbering::GetMemoryVersion(uint16_t base, uint16_t field, uint16_t type) {
-  // See AdvanceMemoryVersion() for explanation.
-  MemoryVersionKey key = { base, field, type };
-  MemoryVersionMap::iterator it = memory_version_map_.find(key);
-  uint16_t memory_version = (it != memory_version_map_.end()) ? it->second : 0u;
-  if (base != NO_VALUE && non_aliasing_refs_.find(base) == non_aliasing_refs_.end()) {
-    // Check modifications by potentially aliased access.
-    MemoryVersionKey aliased_access_key = { NO_VALUE, field, type };
-    auto aa_it = memory_version_map_.find(aliased_access_key);
-    if (aa_it != memory_version_map_.end() && aa_it->second > memory_version) {
-      memory_version = aa_it->second;
-    }
-    memory_version = std::max(memory_version, global_memory_version_);
-  } else if (base != NO_VALUE) {
-    // Ignore global_memory_version_ for access via unique references.
-  } else {
-    memory_version = std::max(memory_version, global_memory_version_);
-  }
-  return memory_version;
-};
-
-uint16_t LocalValueNumbering::AdvanceMemoryVersion(uint16_t base, uint16_t field, uint16_t type) {
-  // When we read the same value from memory, we want to assign the same value name to it.
-  // However, we need to be careful not to assign the same value name if the memory location
-  // may have been written to between the reads. To avoid that we do "memory versioning".
-  //
-  // For each write to a memory location (instance field, static field, array element) we assign
-  // a new memory version number to the location identified by the value name of the base register,
-  // the field id and type, or "{ base, field, type }". For static fields the "base" is NO_VALUE
-  // since they are not accessed via a reference. For arrays the "field" is NO_VALUE since they
-  // don't have a field id.
-  //
-  // To account for the possibility of aliased access to the same memory location via different
-  // "base", we also store the memory version number with the key "{ NO_VALUE, field, type }"
-  // if "base" is an aliasing reference and check it in GetMemoryVersion() on reads via
-  // aliasing references. A global memory version is set for method calls as a method can
-  // potentially write to any memory location accessed via an aliasing reference.
-
-  uint16_t result = next_memory_version_;
-  ++next_memory_version_;
-  MemoryVersionKey key = { base, field, type };
-  memory_version_map_.Overwrite(key, result);
-  if (base != NO_VALUE && non_aliasing_refs_.find(base) == non_aliasing_refs_.end()) {
-    // Advance memory version for aliased access.
-    MemoryVersionKey aliased_access_key = { NO_VALUE, field, type };
-    memory_version_map_.Overwrite(aliased_access_key, result);
-  }
-  return result;
-};
-
 uint16_t LocalValueNumbering::MarkNonAliasingNonNull(MIR* mir) {
   uint16_t res = GetOperandValue(mir->ssa_rep->defs[0]);
   SetOperandValue(mir->ssa_rep->defs[0], res);
@@ -97,43 +81,332 @@
   return res;
 }
 
-void LocalValueNumbering::MakeArgsAliasing(MIR* mir) {
-  for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
-    uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
-    non_aliasing_refs_.erase(reg);
-  }
+bool LocalValueNumbering::IsNonAliasing(uint16_t reg) {
+  return non_aliasing_refs_.find(reg) != non_aliasing_refs_.end();
 }
 
+bool LocalValueNumbering::IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type) {
+  if (IsNonAliasing(reg)) {
+    return true;
+  }
+  NonAliasingIFieldKey key = { reg, field_id, type };
+  return non_aliasing_ifields_.count(key) != 0u;
+}
+
+bool LocalValueNumbering::IsNonAliasingArray(uint16_t reg, uint16_t type) {
+  if (IsNonAliasing(reg)) {
+    return true;
+  }
+  EscapedArrayKey key = { reg, type };
+  return escaped_array_refs_.count(key) != 0u;
+}
+
+
 void LocalValueNumbering::HandleNullCheck(MIR* mir, uint16_t reg) {
-  if (null_checked_.find(reg) != null_checked_.end()) {
-    if (cu_->verbose) {
-      LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+  auto lb = null_checked_.lower_bound(reg);
+  if (lb != null_checked_.end() && *lb == reg) {
+    if (LIKELY(Good())) {
+      if (cu_->verbose) {
+        LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+      }
+      mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
     }
-    mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
   } else {
-    null_checked_.insert(reg);
+    null_checked_.insert(lb, reg);
   }
 }
 
 void LocalValueNumbering::HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index) {
-  if (ValueExists(ARRAY_REF, array, index, NO_VALUE)) {
-    if (cu_->verbose) {
-      LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+  RangeCheckKey key = { array, index };
+  auto lb = range_checked_.lower_bound(key);
+  if (lb != range_checked_.end() && !RangeCheckKeyComparator()(key, *lb)) {
+    if (LIKELY(Good())) {
+      if (cu_->verbose) {
+        LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+      }
+      mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
     }
-    mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
+  } else {
+    // Mark range check completed.
+    range_checked_.insert(lb, key);
   }
-  // Use side effect to note range check completed.
-  (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
 }
 
 void LocalValueNumbering::HandlePutObject(MIR* mir) {
   // If we're storing a non-aliasing reference, stop tracking it as non-aliasing now.
   uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
-  non_aliasing_refs_.erase(base);
+  HandleEscapingRef(base);
+}
+
+void LocalValueNumbering::HandleEscapingRef(uint16_t base) {
+  auto it = non_aliasing_refs_.find(base);
+  if (it != non_aliasing_refs_.end()) {
+    uint64_t iget_key = BuildKey(Instruction::IGET, base, 0u, 0u);
+    for (auto iget_it = value_map_.lower_bound(iget_key), iget_end = value_map_.end();
+        iget_it != iget_end && EqualOpAndOperand1(iget_it->first, iget_key); ++iget_it) {
+      uint16_t field_id = ExtractOperand2(iget_it->first);
+      uint16_t type = ExtractModifier(iget_it->first);
+      NonAliasingIFieldKey key = { base, field_id, type };
+      non_aliasing_ifields_.insert(key);
+    }
+    uint64_t aget_key = BuildKey(kNonAliasingArrayStartVersionOp, base, 0u, 0u);
+    auto aget_it = value_map_.lower_bound(aget_key);
+    if (aget_it != value_map_.end() && EqualOpAndOperand1(aget_key, aget_it->first)) {
+      DCHECK_EQ(ExtractOperand2(aget_it->first), kNoValue);
+      uint16_t type = ExtractModifier(aget_it->first);
+      EscapedArrayKey key = { base, type };
+      escaped_array_refs_.insert(key);
+    }
+    non_aliasing_refs_.erase(it);
+  }
+}
+
+uint16_t LocalValueNumbering::HandleAGet(MIR* mir, uint16_t opcode) {
+  // uint16_t type = opcode - Instruction::AGET;
+  uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
+  HandleNullCheck(mir, array);
+  uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
+  HandleRangeCheck(mir, array, index);
+  uint16_t type = opcode - Instruction::AGET;
+  // Establish value number for loaded register.
+  uint16_t res;
+  if (IsNonAliasingArray(array, type)) {
+    // Get the start version that accounts for aliasing within the array (different index names).
+    uint16_t start_version = LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, type);
+    // Find the current version from the non_aliasing_array_version_map_.
+    uint16_t memory_version = start_version;
+    auto it = non_aliasing_array_version_map_.find(start_version);
+    if (it != non_aliasing_array_version_map_.end()) {
+      memory_version = it->second;
+    } else {
+      // Just use the start_version.
+    }
+    res = LookupValue(kNonAliasingArrayOp, array, index, memory_version);
+  } else {
+    // Get the memory version of aliased array accesses of this type.
+    uint16_t memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
+                                          aliasing_array_version_[type], kNoValue);
+    res = LookupValue(kAliasingArrayOp, array, index, memory_version);
+  }
+  if (opcode == Instruction::AGET_WIDE) {
+    SetOperandValueWide(mir->ssa_rep->defs[0], res);
+  } else {
+    SetOperandValue(mir->ssa_rep->defs[0], res);
+  }
+  return res;
+}
+
+void LocalValueNumbering::HandleAPut(MIR* mir, uint16_t opcode) {
+  int array_idx = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
+  int index_idx = array_idx + 1;
+  uint16_t array = GetOperandValue(mir->ssa_rep->uses[array_idx]);
+  HandleNullCheck(mir, array);
+  uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
+  HandleRangeCheck(mir, array, index);
+
+  uint16_t type = opcode - Instruction::APUT;
+  uint16_t value = (opcode == Instruction::APUT_WIDE)
+                   ? GetOperandValueWide(mir->ssa_rep->uses[0])
+                   : GetOperandValue(mir->ssa_rep->uses[0]);
+  if (IsNonAliasing(array)) {
+    // Get the start version that accounts for aliasing within the array (different index values).
+    uint16_t start_version = LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, type);
+    auto it = non_aliasing_array_version_map_.find(start_version);
+    uint16_t memory_version = start_version;
+    if (it != non_aliasing_array_version_map_.end()) {
+      memory_version = it->second;
+    }
+    // We need to take 4 values (array, index, memory_version, value) into account for bumping
+    // the memory version but the key can take only 3. Merge array and index into a location.
+    uint16_t array_access_location = LookupValue(kArrayAccessLocOp, array, index, kNoValue);
+    // Bump the version, adding to the chain.
+    memory_version = LookupValue(kAliasingArrayBumpVersionOp, memory_version,
+                                 array_access_location, value);
+    non_aliasing_array_version_map_.Overwrite(start_version, memory_version);
+    StoreValue(kNonAliasingArrayOp, array, index, memory_version, value);
+  } else {
+    // Get the memory version based on global_memory_version_ and aliasing_array_version_[type].
+    uint16_t memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
+                                          aliasing_array_version_[type], kNoValue);
+    if (HasValue(kAliasingArrayOp, array, index, memory_version, value)) {
+      // This APUT can be eliminated, it stores the same value that's already in the field.
+      // TODO: Eliminate the APUT.
+      return;
+    }
+    // We need to take 4 values (array, index, memory_version, value) into account for bumping
+    // the memory version but the key can take only 3. Merge array and index into a location.
+    uint16_t array_access_location = LookupValue(kArrayAccessLocOp, array, index, kNoValue);
+    // Bump the version, adding to the chain.
+    uint16_t bumped_version = LookupValue(kAliasingArrayBumpVersionOp, memory_version,
+                                          array_access_location, value);
+    aliasing_array_version_[type] = bumped_version;
+    memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
+                                 bumped_version, kNoValue);
+    StoreValue(kAliasingArrayOp, array, index, memory_version, value);
+
+    // Clear escaped array refs for this type.
+    EscapedArrayKey array_key = { 0u, type };  // Lowest possible key of this type.
+    auto it = escaped_array_refs_.lower_bound(array_key), end = escaped_array_refs_.end();
+    while (it != end && it->type == type) {
+      it = escaped_array_refs_.erase(it);
+    }
+  }
+}
+
+uint16_t LocalValueNumbering::HandleIGet(MIR* mir, uint16_t opcode) {
+  uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
+  HandleNullCheck(mir, base);
+  const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+  uint16_t res;
+  if (!field_info.IsResolved() || field_info.IsVolatile()) {
+    // Volatile fields always get a new memory version; field id is irrelevant.
+    // Unresolved fields may be volatile, so handle them as such to be safe.
+    // Use result s_reg - will be unique.
+    res = LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
+  } else {
+    uint16_t type = opcode - Instruction::IGET;
+    uint16_t field_id = GetFieldId(field_info);
+    if (IsNonAliasingIField(base, field_id, type)) {
+      res = LookupValue(kNonAliasingIFieldOp, base, field_id, type);
+    } else {
+      // Get the start version that accounts for aliasing with unresolved fields of the same type
+      // and make it unique for the field by including the field_id.
+      uint16_t start_version = LookupValue(kAliasingIFieldStartVersionOp, global_memory_version_,
+                                           unresolved_ifield_version_[type], field_id);
+      // Find the current version from the aliasing_ifield_version_map_.
+      uint16_t memory_version = start_version;
+      auto version_it = aliasing_ifield_version_map_.find(start_version);
+      if (version_it != aliasing_ifield_version_map_.end()) {
+        memory_version = version_it->second;
+      } else {
+        // Just use the start_version.
+      }
+      res = LookupValue(kAliasingIFieldOp, base, field_id, memory_version);
+    }
+  }
+  if (opcode == Instruction::IGET_WIDE) {
+    SetOperandValueWide(mir->ssa_rep->defs[0], res);
+  } else {
+    SetOperandValue(mir->ssa_rep->defs[0], res);
+  }
+  return res;
+}
+
+void LocalValueNumbering::HandleIPut(MIR* mir, uint16_t opcode) {
+  uint16_t type = opcode - Instruction::IPUT;
+  int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
+  uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
+  HandleNullCheck(mir, base);
+  const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+  if (!field_info.IsResolved()) {
+    // Unresolved fields always alias with everything of the same type.
+    // Use mir->offset as modifier; without elaborate inlining, it will be unique.
+    unresolved_ifield_version_[type] =
+        LookupValue(kUnresolvedIFieldOp, kNoValue, kNoValue, mir->offset);
+
+    // Treat fields of escaped references of the same type as potentially modified.
+    NonAliasingIFieldKey key = { 0u, 0u, type };  // lowest possible key of this type.
+    auto it = non_aliasing_ifields_.lower_bound(key), end = non_aliasing_ifields_.end();
+    while (it != end && it->type == type) {
+      it = non_aliasing_ifields_.erase(it);
+    }
+  } else if (field_info.IsVolatile()) {
+    // Nothing to do, resolved volatile fields always get a new memory version anyway and
+    // can't alias with resolved non-volatile fields.
+  } else {
+    uint16_t field_id = GetFieldId(field_info);
+    uint16_t value = (opcode == Instruction::IPUT_WIDE)
+                     ? GetOperandValueWide(mir->ssa_rep->uses[0])
+                     : GetOperandValue(mir->ssa_rep->uses[0]);
+    if (IsNonAliasing(base)) {
+      StoreValue(kNonAliasingIFieldOp, base, field_id, type, value);
+    } else {
+      // Get the start version that accounts for aliasing with unresolved fields of the same type
+      // and make it unique for the field by including the field_id.
+      uint16_t start_version = LookupValue(kAliasingIFieldStartVersionOp, global_memory_version_,
+                                           unresolved_ifield_version_[type], field_id);
+      // Find the old version from the aliasing_ifield_version_map_.
+      uint16_t old_version = start_version;
+      auto version_it = aliasing_ifield_version_map_.find(start_version);
+      if (version_it != aliasing_ifield_version_map_.end()) {
+        old_version = version_it->second;
+      }
+      // Check if the field currently contains the value, making this a NOP.
+      if (HasValue(kAliasingIFieldOp, base, field_id, old_version, value)) {
+        // This IPUT can be eliminated, it stores the same value that's already in the field.
+        // TODO: Eliminate the IPUT.
+        return;
+      }
+      // Bump the version, adding to the chain started by start_version.
+      uint16_t memory_version = LookupValue(kAliasingIFieldBumpVersionOp, old_version, base, value);
+      // Update the aliasing_ifield_version_map_ so that HandleIGet() can get the memory_version
+      // without knowing the values used to build the chain.
+      aliasing_ifield_version_map_.Overwrite(start_version, memory_version);
+      StoreValue(kAliasingIFieldOp, base, field_id, memory_version, value);
+
+      // Clear non-aliasing fields for this field_id.
+      NonAliasingIFieldKey field_key = { 0u, field_id, type };
+      auto it = non_aliasing_ifields_.lower_bound(field_key), end = non_aliasing_ifields_.end();
+      while (it != end && it->field_id == field_id) {
+        DCHECK_EQ(type, it->type);
+        it = non_aliasing_ifields_.erase(it);
+      }
+    }
+  }
+}
+
+uint16_t LocalValueNumbering::HandleSGet(MIR* mir, uint16_t opcode) {
+  const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+  uint16_t res;
+  if (!field_info.IsResolved() || field_info.IsVolatile()) {
+    // Volatile fields always get a new memory version; field id is irrelevant.
+    // Unresolved fields may be volatile, so handle them as such to be safe.
+    // Use result s_reg - will be unique.
+    res = LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
+  } else {
+    uint16_t field_id = GetFieldId(field_info);
+    // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
+    // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
+    // to determine the version of the field.
+    uint16_t type = opcode - Instruction::SGET;
+    res = LookupValue(kResolvedSFieldOp, field_id,
+                      unresolved_sfield_version_[type], global_memory_version_);
+  }
+  if (opcode == Instruction::SGET_WIDE) {
+    SetOperandValueWide(mir->ssa_rep->defs[0], res);
+  } else {
+    SetOperandValue(mir->ssa_rep->defs[0], res);
+  }
+  return res;
+}
+
+void LocalValueNumbering::HandleSPut(MIR* mir, uint16_t opcode) {
+  uint16_t type = opcode - Instruction::SPUT;
+  const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+  if (!field_info.IsResolved()) {
+    // Unresolved fields always alias with everything of the same type.
+    // Use mir->offset as modifier; without elaborate inlining, it will be unique.
+    unresolved_sfield_version_[type] =
+        LookupValue(kUnresolvedSFieldOp, kNoValue, kNoValue, mir->offset);
+  } else if (field_info.IsVolatile()) {
+    // Nothing to do, resolved volatile fields always get a new memory version anyway and
+    // can't alias with resolved non-volatile fields.
+  } else {
+    uint16_t field_id = GetFieldId(field_info);
+    uint16_t value = (opcode == Instruction::SPUT_WIDE)
+                     ? GetOperandValueWide(mir->ssa_rep->uses[0])
+                     : GetOperandValue(mir->ssa_rep->uses[0]);
+    // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
+    // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
+    // to determine the version of the field.
+    StoreValue(kResolvedSFieldOp, field_id,
+               unresolved_sfield_version_[type], global_memory_version_, value);
+  }
 }
 
 uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
-  uint16_t res = NO_VALUE;
+  uint16_t res = kNoValue;
   uint16_t opcode = mir->dalvikInsn.opcode;
   switch (opcode) {
     case Instruction::NOP:
@@ -176,9 +449,14 @@
       // Nothing defined but the result will be unique and non-null.
       if (mir->next != nullptr && mir->next->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
         MarkNonAliasingNonNull(mir->next);
+        // TUNING: We could track value names stored in the array.
         // The MOVE_RESULT_OBJECT will be processed next and we'll return the value name then.
       }
-      MakeArgsAliasing(mir);
+      // All args escaped (if references).
+      for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
+        uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
+        HandleEscapingRef(reg);
+      }
       break;
 
     case Instruction::INVOKE_DIRECT:
@@ -197,8 +475,17 @@
     case Instruction::INVOKE_STATIC:
     case Instruction::INVOKE_STATIC_RANGE:
       if ((mir->optimization_flags & MIR_INLINED) == 0) {
-        AdvanceGlobalMemory();
-        MakeArgsAliasing(mir);
+        // Use mir->offset as modifier; without elaborate inlining, it will be unique.
+        global_memory_version_ = LookupValue(kInvokeMemoryVersionBumpOp, 0u, 0u, mir->offset);
+        // Make ref args aliasing.
+        for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
+          uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
+          non_aliasing_refs_.erase(reg);
+        }
+        // All fields of escaped references need to be treated as potentially modified.
+        non_aliasing_ifields_.clear();
+        // Array elements may also have been modified via escaped array refs.
+        escaped_array_refs_.clear();
       }
       break;
 
@@ -211,13 +498,24 @@
       break;
     case Instruction::MOVE_EXCEPTION:
     case Instruction::NEW_INSTANCE:
-    case Instruction::CONST_STRING:
-    case Instruction::CONST_STRING_JUMBO:
     case Instruction::CONST_CLASS:
     case Instruction::NEW_ARRAY:
       // 1 result, treat as unique each time, use result s_reg - will be unique.
       res = MarkNonAliasingNonNull(mir);
       break;
+    case Instruction::CONST_STRING:
+    case Instruction::CONST_STRING_JUMBO:
+      // These strings are internalized, so assign value based on the string pool index.
+      res = LookupValue(Instruction::CONST_STRING, Low16Bits(mir->dalvikInsn.vB),
+                        High16Bits(mir->dalvikInsn.vB), 0);
+      SetOperandValue(mir->ssa_rep->defs[0], res);
+      null_checked_.insert(res);  // May already be there.
+      // NOTE: Hacking the contents of an internalized string via reflection is possible
+      // but the behavior is undefined. Therefore, we consider the string constant and
+      // the reference non-aliasing.
+      // TUNING: We could keep this property even if the reference "escapes".
+      non_aliasing_refs_.insert(res);  // May already be there.
+      break;
     case Instruction::MOVE_RESULT_WIDE:
       // 1 wide result, treat as unique each time, use result s_reg - will be unique.
       res = GetOperandValueWide(mir->ssa_rep->defs[0]);
@@ -255,7 +553,7 @@
     case Instruction::CONST_4:
     case Instruction::CONST_16:
       res = LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
-                        High16Bits(mir->dalvikInsn.vB >> 16), 0);
+                        High16Bits(mir->dalvikInsn.vB), 0);
       SetOperandValue(mir->ssa_rep->defs[0], res);
       break;
 
@@ -310,7 +608,7 @@
     case Instruction::FLOAT_TO_INT: {
         // res = op + 1 operand
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -320,8 +618,8 @@
     case Instruction::DOUBLE_TO_FLOAT:
     case Instruction::DOUBLE_TO_INT: {
         // res = op + 1 wide operand
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -334,7 +632,7 @@
     case Instruction::NEG_DOUBLE: {
         // wide res = op + 1 wide operand
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -344,8 +642,8 @@
     case Instruction::INT_TO_DOUBLE:
     case Instruction::INT_TO_LONG: {
         // wide res = op + 1 operand
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+        res = LookupValue(opcode, operand1, kNoValue, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -356,7 +654,7 @@
         // res = op + 2 wide operands
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -388,7 +686,7 @@
         // res = op + 2 operands
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -422,7 +720,7 @@
         // wide res = op + 2 wide operands
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -435,8 +733,8 @@
     case Instruction::USHR_LONG_2ADDR: {
         // wide res = op + 1 wide operand + 1 operand
         uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[2]);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValueWide(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -454,7 +752,7 @@
         // res = op + 2 operands
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
         uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -481,7 +779,7 @@
         // Same as res = op + 2 operands, except use vC as operand 2
         uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
         uint16_t operand2 = LookupValue(Instruction::CONST, mir->dalvikInsn.vC, 0, 0);
-        res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+        res = LookupValue(opcode, operand1, operand2, kNoValue);
         SetOperandValue(mir->ssa_rep->defs[0], res);
       }
       break;
@@ -492,21 +790,8 @@
     case Instruction::AGET_BOOLEAN:
     case Instruction::AGET_BYTE:
     case Instruction::AGET_CHAR:
-    case Instruction::AGET_SHORT: {
-        uint16_t type = opcode - Instruction::AGET;
-        uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
-        HandleNullCheck(mir, array);
-        uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
-        HandleRangeCheck(mir, array, index);
-        // Establish value number for loaded register. Note use of memory version.
-        uint16_t memory_version = GetMemoryVersion(array, NO_VALUE, type);
-        uint16_t res = LookupValue(ARRAY_REF, array, index, memory_version);
-        if (opcode == Instruction::AGET_WIDE) {
-          SetOperandValueWide(mir->ssa_rep->defs[0], res);
-        } else {
-          SetOperandValue(mir->ssa_rep->defs[0], res);
-        }
-      }
+    case Instruction::AGET_SHORT:
+      res = HandleAGet(mir, opcode);
       break;
 
     case Instruction::APUT_OBJECT:
@@ -517,17 +802,8 @@
     case Instruction::APUT_BYTE:
     case Instruction::APUT_BOOLEAN:
     case Instruction::APUT_SHORT:
-    case Instruction::APUT_CHAR: {
-        uint16_t type = opcode - Instruction::APUT;
-        int array_idx = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
-        int index_idx = array_idx + 1;
-        uint16_t array = GetOperandValue(mir->ssa_rep->uses[array_idx]);
-        HandleNullCheck(mir, array);
-        uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
-        HandleRangeCheck(mir, array, index);
-        // Rev the memory version
-        AdvanceMemoryVersion(array, NO_VALUE, type);
-      }
+    case Instruction::APUT_CHAR:
+      HandleAPut(mir, opcode);
       break;
 
     case Instruction::IGET_OBJECT:
@@ -536,33 +812,8 @@
     case Instruction::IGET_BOOLEAN:
     case Instruction::IGET_BYTE:
     case Instruction::IGET_CHAR:
-    case Instruction::IGET_SHORT: {
-        uint16_t type = opcode - Instruction::IGET;
-        uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
-        HandleNullCheck(mir, base);
-        const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
-        uint16_t memory_version;
-        uint16_t field_id;
-        if (!field_info.IsResolved() || field_info.IsVolatile()) {
-          // Volatile fields always get a new memory version; field id is irrelevant.
-          // Unresolved fields may be volatile, so handle them as such to be safe.
-          field_id = 0u;
-          memory_version = next_memory_version_;
-          ++next_memory_version_;
-        } else {
-          DCHECK(field_info.IsResolved());
-          field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
-          memory_version = std::max(unresolved_ifield_version_[type],
-                                    GetMemoryVersion(base, field_id, type));
-        }
-        if (opcode == Instruction::IGET_WIDE) {
-          res = LookupValue(Instruction::IGET_WIDE, base, field_id, memory_version);
-          SetOperandValueWide(mir->ssa_rep->defs[0], res);
-        } else {
-          res = LookupValue(Instruction::IGET, base, field_id, memory_version);
-          SetOperandValue(mir->ssa_rep->defs[0], res);
-        }
-      }
+    case Instruction::IGET_SHORT:
+      res = HandleIGet(mir, opcode);
       break;
 
     case Instruction::IPUT_OBJECT:
@@ -573,24 +824,8 @@
     case Instruction::IPUT_BOOLEAN:
     case Instruction::IPUT_BYTE:
     case Instruction::IPUT_CHAR:
-    case Instruction::IPUT_SHORT: {
-        uint16_t type = opcode - Instruction::IPUT;
-        int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
-        uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
-        HandleNullCheck(mir, base);
-        const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
-        if (!field_info.IsResolved()) {
-          // Unresolved fields always alias with everything of the same type.
-          unresolved_ifield_version_[type] = next_memory_version_;
-          ++next_memory_version_;
-        } else if (field_info.IsVolatile()) {
-          // Nothing to do, resolved volatile fields always get a new memory version anyway and
-          // can't alias with resolved non-volatile fields.
-        } else {
-          AdvanceMemoryVersion(base, GetFieldId(field_info.DeclaringDexFile(),
-                                                field_info.DeclaringFieldIndex()), type);
-        }
-      }
+    case Instruction::IPUT_SHORT:
+      HandleIPut(mir, opcode);
       break;
 
     case Instruction::SGET_OBJECT:
@@ -599,31 +834,8 @@
     case Instruction::SGET_BOOLEAN:
     case Instruction::SGET_BYTE:
     case Instruction::SGET_CHAR:
-    case Instruction::SGET_SHORT: {
-        uint16_t type = opcode - Instruction::SGET;
-        const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
-        uint16_t memory_version;
-        uint16_t field_id;
-        if (!field_info.IsResolved() || field_info.IsVolatile()) {
-          // Volatile fields always get a new memory version; field id is irrelevant.
-          // Unresolved fields may be volatile, so handle them as such to be safe.
-          field_id = 0u;
-          memory_version = next_memory_version_;
-          ++next_memory_version_;
-        } else {
-          DCHECK(field_info.IsResolved());
-          field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
-          memory_version = std::max(unresolved_sfield_version_[type],
-                                    GetMemoryVersion(NO_VALUE, field_id, type));
-        }
-        if (opcode == Instruction::SGET_WIDE) {
-          res = LookupValue(Instruction::SGET_WIDE, NO_VALUE, field_id, memory_version);
-          SetOperandValueWide(mir->ssa_rep->defs[0], res);
-        } else {
-          res = LookupValue(Instruction::SGET, NO_VALUE, field_id, memory_version);
-          SetOperandValue(mir->ssa_rep->defs[0], res);
-        }
-      }
+    case Instruction::SGET_SHORT:
+      res = HandleSGet(mir, opcode);
       break;
 
     case Instruction::SPUT_OBJECT:
@@ -634,21 +846,8 @@
     case Instruction::SPUT_BOOLEAN:
     case Instruction::SPUT_BYTE:
     case Instruction::SPUT_CHAR:
-    case Instruction::SPUT_SHORT: {
-        uint16_t type = opcode - Instruction::SPUT;
-        const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
-        if (!field_info.IsResolved()) {
-          // Unresolved fields always alias with everything of the same type.
-          unresolved_sfield_version_[type] = next_memory_version_;
-          ++next_memory_version_;
-        } else if (field_info.IsVolatile()) {
-          // Nothing to do, resolved volatile fields always get a new memory version anyway and
-          // can't alias with resolved non-volatile fields.
-        } else {
-          AdvanceMemoryVersion(NO_VALUE, GetFieldId(field_info.DeclaringDexFile(),
-                                                    field_info.DeclaringFieldIndex()), type);
-        }
-      }
+    case Instruction::SPUT_SHORT:
+      HandleSPut(mir, opcode);
       break;
   }
   return res;
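
Aside (illustrative, not part of the patch): the chained-version scheme above replaces the old monotonically increasing memory versions from AdvanceMemoryVersion(). Below is a minimal, self-contained sketch of the idea behind HandleIGet()/HandleIPut() for aliasing instance fields, assuming a simplified interning LookupValue(); the names SimpleLvn, kStartOp, kBumpOp and kLoadOp are invented for the sketch and do not appear in the patch.

// Illustrative only: a simplified model of the chained memory versions used by
// HandleIGet()/HandleIPut() for aliasing instance fields.
#include <cstdint>
#include <map>
#include <tuple>

class SimpleLvn {
 public:
  // Interns an (op, a, b, modifier) tuple: equal tuples always yield the same value name.
  uint16_t LookupValue(uint16_t op, uint16_t a, uint16_t b, uint16_t mod) {
    auto key = std::make_tuple(op, a, b, mod);
    auto it = value_map_.find(key);
    if (it != value_map_.end()) {
      return it->second;
    }
    uint16_t name = ++last_value_;
    value_map_.emplace(key, name);
    return name;
  }

  // Aliasing IPUT: chain the old version with the base and stored value into a new
  // version name (the role played by kAliasingIFieldBumpVersionOp in the patch).
  void PutAliasingField(uint16_t field_id, uint16_t base, uint16_t value) {
    uint16_t start = LookupValue(kStartOp, global_memory_version_, 0u, field_id);
    uint16_t old_version = CurrentVersion(start);
    aliasing_version_[start] = LookupValue(kBumpOp, old_version, base, value);
  }

  // Aliasing IGET: loads that observe the same version chain get the same value name.
  uint16_t GetAliasingField(uint16_t field_id, uint16_t base) {
    uint16_t start = LookupValue(kStartOp, global_memory_version_, 0u, field_id);
    return LookupValue(kLoadOp, base, field_id, CurrentVersion(start));
  }

 private:
  static constexpr uint16_t kStartOp = 1u;
  static constexpr uint16_t kBumpOp = 2u;
  static constexpr uint16_t kLoadOp = 3u;

  uint16_t CurrentVersion(uint16_t start) const {
    auto it = aliasing_version_.find(start);
    return it != aliasing_version_.end() ? it->second : start;
  }

  uint16_t last_value_ = 0u;
  uint16_t global_memory_version_ = 0u;
  std::map<std::tuple<uint16_t, uint16_t, uint16_t, uint16_t>, uint16_t> value_map_;
  std::map<uint16_t, uint16_t> aliasing_version_;
};

Two loads of the same field with no clobbering store in between observe the same chained version and therefore intern to the same value name, which is what lets the pass reuse the earlier load; a store or call that bumps the chain (or global_memory_version_) gives subsequent loads a fresh name.
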
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 0c2b6a7..2a815be 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -23,15 +23,33 @@
 #include "utils/scoped_arena_allocator.h"
 #include "utils/scoped_arena_containers.h"
 
-#define NO_VALUE 0xffff
-#define ARRAY_REF 0xfffe
-
 namespace art {
 
 class DexFile;
+class MirFieldInfo;
 
 class LocalValueNumbering {
+ public:
+  LocalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator);
+
+  uint16_t GetValueNumber(MIR* mir);
+
+  // LocalValueNumbering should be allocated on the ArenaStack (or the native stack).
+  static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
+    return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMIR);
+  }
+
+  // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
+  static void operator delete(void* ptr) { UNUSED(ptr); }
+
+  // Checks that the value names didn't overflow.
+  bool Good() const {
+    return last_value_ < kNoValue;
+  }
+
  private:
+  static constexpr uint16_t kNoValue = 0xffffu;
+
   // Field types correspond to the ordering of GET/PUT instructions; this order is the same
   // for IGET, IPUT, SGET, SPUT, AGET and APUT:
   // op         0
@@ -43,7 +61,7 @@
   // op_SHORT   6
   static constexpr size_t kFieldTypeCount = 7;
 
-  // FieldReference represents either a unique resolved field or all unresolved fields together.
+  // FieldReference represents a unique resolved field.
   struct FieldReference {
     const DexFile* dex_file;
     uint16_t field_idx;
@@ -58,48 +76,107 @@
     }
   };
 
-  struct MemoryVersionKey {
+  // Maps field key to field id for resolved fields.
+  typedef ScopedArenaSafeMap<FieldReference, uint32_t, FieldReferenceComparator> FieldIndexMap;
+
+  struct RangeCheckKey {
+    uint16_t array;
+    uint16_t index;
+  };
+
+  struct RangeCheckKeyComparator {
+    bool operator()(const RangeCheckKey& lhs, const RangeCheckKey& rhs) const {
+      if (lhs.array != rhs.array) {
+        return lhs.array < rhs.array;
+      }
+      return lhs.index < rhs.index;
+    }
+  };
+
+  typedef ScopedArenaSet<RangeCheckKey, RangeCheckKeyComparator> RangeCheckSet;
+
+  typedef ScopedArenaSafeMap<uint16_t, uint16_t> AliasingIFieldVersionMap;
+  typedef ScopedArenaSafeMap<uint16_t, uint16_t> NonAliasingArrayVersionMap;
+
+  struct NonAliasingIFieldKey {
     uint16_t base;
     uint16_t field_id;
     uint16_t type;
   };
 
-  struct MemoryVersionKeyComparator {
-    bool operator()(const MemoryVersionKey& lhs, const MemoryVersionKey& rhs) const {
-      if (lhs.base != rhs.base) {
-        return lhs.base < rhs.base;
+  struct NonAliasingIFieldKeyComparator {
+    bool operator()(const NonAliasingIFieldKey& lhs, const NonAliasingIFieldKey& rhs) const {
+      // Compare the type first. This allows iterating across all the entries for a certain type
+      // as needed when we need to purge them for an unresolved field IPUT.
+      if (lhs.type != rhs.type) {
+        return lhs.type < rhs.type;
       }
+      // Compare the field second. This allows iterating across all the entries for a certain
+      // field as needed when we need to purge them for an aliasing field IPUT.
       if (lhs.field_id != rhs.field_id) {
         return lhs.field_id < rhs.field_id;
       }
-      return lhs.type < rhs.type;
+      // Compare the base last.
+      return lhs.base < rhs.base;
     }
   };
 
+  // Set of instance fields still holding non-aliased values after the base has been stored.
+  typedef ScopedArenaSet<NonAliasingIFieldKey, NonAliasingIFieldKeyComparator> NonAliasingFieldSet;
+
+  struct EscapedArrayKey {
+    uint16_t base;
+    uint16_t type;
+  };
+
+  struct EscapedArrayKeyComparator {
+    bool operator()(const EscapedArrayKey& lhs, const EscapedArrayKey& rhs) const {
+      // Compare the type first. This allows iterating across all the entries for a certain type
+      // as needed when we need to purge them for an aliasing APUT.
+      if (lhs.type != rhs.type) {
+        return lhs.type < rhs.type;
+      }
+      // Compare the base last.
+      return lhs.base < rhs.base;
+    }
+  };
+
+  // Set of previously non-aliasing array refs that escaped.
+  typedef ScopedArenaSet<EscapedArrayKey, EscapedArrayKeyComparator> EscapedArraySet;
+
   // Key is s_reg, value is value name.
   typedef ScopedArenaSafeMap<uint16_t, uint16_t> SregValueMap;
   // Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
   typedef ScopedArenaSafeMap<uint64_t, uint16_t> ValueMap;
-  // Key represents a memory address, value is generation.
-  typedef ScopedArenaSafeMap<MemoryVersionKey, uint16_t, MemoryVersionKeyComparator
-      > MemoryVersionMap;
-  // Maps field key to field id for resolved fields.
-  typedef ScopedArenaSafeMap<FieldReference, uint32_t, FieldReferenceComparator> FieldIndexMap;
   // A set of value names.
   typedef ScopedArenaSet<uint16_t> ValueNameSet;
 
- public:
-  static LocalValueNumbering* Create(CompilationUnit* cu) {
-    std::unique_ptr<ScopedArenaAllocator> allocator(ScopedArenaAllocator::Create(&cu->arena_stack));
-    void* addr = allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
-    return new(addr) LocalValueNumbering(cu, allocator.release());
-  }
-
   static uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
     return (static_cast<uint64_t>(op) << 48 | static_cast<uint64_t>(operand1) << 32 |
             static_cast<uint64_t>(operand2) << 16 | static_cast<uint64_t>(modifier));
   };
 
+  static uint16_t ExtractOp(uint64_t key) {
+    return static_cast<uint16_t>(key >> 48);
+  }
+
+  static uint16_t ExtractOperand1(uint64_t key) {
+    return static_cast<uint16_t>(key >> 32);
+  }
+
+  static uint16_t ExtractOperand2(uint64_t key) {
+    return static_cast<uint16_t>(key >> 16);
+  }
+
+  static uint16_t ExtractModifier(uint64_t key) {
+    return static_cast<uint16_t>(key);
+  }
+
+  static bool EqualOpAndOperand1(uint64_t key1, uint64_t key2) {
+    return static_cast<uint32_t>(key1 >> 32) == static_cast<uint32_t>(key2 >> 32);
+  }
+
   uint16_t LookupValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
     uint16_t res;
     uint64_t key = BuildKey(op, operand1, operand2, modifier);
@@ -107,12 +184,26 @@
     if (it != value_map_.end()) {
       res = it->second;
     } else {
-      res = value_map_.size() + 1;
+      ++last_value_;
+      res = last_value_;
       value_map_.Put(key, res);
     }
     return res;
   };
 
+  void StoreValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier,
+                  uint16_t value) {
+    uint64_t key = BuildKey(op, operand1, operand2, modifier);
+    value_map_.Overwrite(key, value);
+  }
+
+  bool HasValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier,
+                uint16_t value) const {
+    uint64_t key = BuildKey(op, operand1, operand2, modifier);
+    ValueMap::const_iterator it = value_map_.find(key);
+    return (it != value_map_.end() && it->second == value);
+  };
+
   bool ValueExists(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) const {
     uint64_t key = BuildKey(op, operand1, operand2, modifier);
     ValueMap::const_iterator it = value_map_.find(key);
@@ -129,13 +220,13 @@
   };
 
   uint16_t GetOperandValue(int s_reg) {
-    uint16_t res = NO_VALUE;
+    uint16_t res = kNoValue;
     SregValueMap::iterator it = sreg_value_map_.find(s_reg);
     if (it != sreg_value_map_.end()) {
       res = it->second;
     } else {
       // First use
-      res = LookupValue(NO_VALUE, s_reg, NO_VALUE, NO_VALUE);
+      res = LookupValue(kNoValue, s_reg, kNoValue, kNoValue);
       sreg_value_map_.Put(s_reg, res);
     }
     return res;
@@ -151,63 +242,61 @@
   };
 
   uint16_t GetOperandValueWide(int s_reg) {
-    uint16_t res = NO_VALUE;
+    uint16_t res = kNoValue;
     SregValueMap::iterator it = sreg_wide_value_map_.find(s_reg);
     if (it != sreg_wide_value_map_.end()) {
       res = it->second;
     } else {
       // First use
-      res = LookupValue(NO_VALUE, s_reg, NO_VALUE, NO_VALUE);
+      res = LookupValue(kNoValue, s_reg, kNoValue, kNoValue);
       sreg_wide_value_map_.Put(s_reg, res);
     }
     return res;
   };
 
-  uint16_t GetValueNumber(MIR* mir);
-
-  // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
-  static void operator delete(void* ptr) { UNUSED(ptr); }
-
- private:
-  LocalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator)
-      : cu_(cu),
-        allocator_(allocator),
-        sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-        sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-        value_map_(std::less<uint64_t>(), allocator->Adapter()),
-        next_memory_version_(1u),
-        global_memory_version_(0u),
-        memory_version_map_(MemoryVersionKeyComparator(), allocator->Adapter()),
-        field_index_map_(FieldReferenceComparator(), allocator->Adapter()),
-        non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
-        null_checked_(std::less<uint16_t>(), allocator->Adapter()) {
-    std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
-    std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
-  }
-
-  uint16_t GetFieldId(const DexFile* dex_file, uint16_t field_idx);
-  void AdvanceGlobalMemory();
-  uint16_t GetMemoryVersion(uint16_t base, uint16_t field, uint16_t type);
-  uint16_t AdvanceMemoryVersion(uint16_t base, uint16_t field, uint16_t type);
+  uint16_t GetFieldId(const MirFieldInfo& field_info);
   uint16_t MarkNonAliasingNonNull(MIR* mir);
-  void MakeArgsAliasing(MIR* mir);
+  bool IsNonAliasing(uint16_t reg);
+  bool IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type);
+  bool IsNonAliasingArray(uint16_t reg, uint16_t type);
   void HandleNullCheck(MIR* mir, uint16_t reg);
   void HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index);
   void HandlePutObject(MIR* mir);
+  void HandleEscapingRef(uint16_t base);
+  uint16_t HandleAGet(MIR* mir, uint16_t opcode);
+  void HandleAPut(MIR* mir, uint16_t opcode);
+  uint16_t HandleIGet(MIR* mir, uint16_t opcode);
+  void HandleIPut(MIR* mir, uint16_t opcode);
+  uint16_t HandleSGet(MIR* mir, uint16_t opcode);
+  void HandleSPut(MIR* mir, uint16_t opcode);
 
   CompilationUnit* const cu_;
-  std::unique_ptr<ScopedArenaAllocator> allocator_;
+
+  // We have 32-bit last_value_ so that we can detect when we run out of value names, see Good().
+  // We usually don't check Good() until the end of LVN unless we're about to modify code.
+  uint32_t last_value_;
+
   SregValueMap sreg_value_map_;
   SregValueMap sreg_wide_value_map_;
   ValueMap value_map_;
-  uint16_t next_memory_version_;
+
+  // Data for dealing with memory clobbering and store/load aliasing.
   uint16_t global_memory_version_;
   uint16_t unresolved_sfield_version_[kFieldTypeCount];
   uint16_t unresolved_ifield_version_[kFieldTypeCount];
-  MemoryVersionMap memory_version_map_;
+  uint16_t aliasing_array_version_[kFieldTypeCount];
+  AliasingIFieldVersionMap aliasing_ifield_version_map_;
+  NonAliasingArrayVersionMap non_aliasing_array_version_map_;
   FieldIndexMap field_index_map_;
   // Value names of references to objects that cannot be reached through a different value name.
   ValueNameSet non_aliasing_refs_;
+  // Instance fields still holding non-aliased values after the base has escaped.
+  NonAliasingFieldSet non_aliasing_ifields_;
+  // Previously non-aliasing array refs that escaped but can still be used for non-aliasing AGET.
+  EscapedArraySet escaped_array_refs_;
+
+  // Range check and null check elimination.
+  RangeCheckSet range_checked_;
   ValueNameSet null_checked_;
 
   DISALLOW_COPY_AND_ASSIGN(LocalValueNumbering);
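
Aside (illustrative, not part of the patch): the value map keys above pack op, operand1, operand2 and modifier into a single uint64_t. A small sketch of the packing and of what EqualOpAndOperand1() matches, using arbitrary example values:

// Illustrative only: how the 64-bit value_map_ keys pack their four 16-bit fields.
#include <cassert>
#include <cstdint>

static uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
  return (static_cast<uint64_t>(op) << 48) | (static_cast<uint64_t>(operand1) << 32) |
         (static_cast<uint64_t>(operand2) << 16) | static_cast<uint64_t>(modifier);
}

int main() {
  // Arbitrary example values: op tag, base value name, field_id, memory version.
  uint64_t key = BuildKey(0x52u, 0x03u, 0x07u, 0x01u);
  assert(static_cast<uint16_t>(key >> 48) == 0x52u);  // ExtractOp
  assert(static_cast<uint16_t>(key >> 32) == 0x03u);  // ExtractOperand1
  assert(static_cast<uint16_t>(key >> 16) == 0x07u);  // ExtractOperand2
  assert(static_cast<uint16_t>(key) == 0x01u);        // ExtractModifier
  // EqualOpAndOperand1() compares only the top 32 bits, so keys differing only in
  // operand2/modifier match; HandleEscapingRef() relies on this when scanning value_map_
  // for all IGET entries with a given base.
  uint64_t key2 = BuildKey(0x52u, 0x03u, 0x09u, 0x02u);
  assert(static_cast<uint32_t>(key >> 32) == static_cast<uint32_t>(key2 >> 32));
  return 0;
}
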
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index e56e016..efc4fc8 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -40,7 +40,7 @@
 
   struct MIRDef {
     static constexpr size_t kMaxSsaDefs = 2;
-    static constexpr size_t kMaxSsaUses = 3;
+    static constexpr size_t kMaxSsaUses = 4;
 
     Instruction::Code opcode;
     int64_t value;
@@ -55,6 +55,8 @@
     { opcode, value, 0u, 0, { }, 1, { reg } }
 #define DEF_CONST_WIDE(opcode, reg, value) \
     { opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_CONST_STRING(opcode, reg, index) \
+    { opcode, index, 0u, 0, { }, 1, { reg } }
 #define DEF_IGET(opcode, reg, obj, field_info) \
     { opcode, 0u, field_info, 1, { obj }, 1, { reg } }
 #define DEF_IGET_WIDE(opcode, reg, obj, field_info) \
@@ -71,6 +73,14 @@
     { opcode, 0u, field_info, 1, { reg }, 0, { } }
 #define DEF_SPUT_WIDE(opcode, reg, field_info) \
     { opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_AGET(opcode, reg, obj, idx) \
+    { opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
+#define DEF_AGET_WIDE(opcode, reg, obj, idx) \
+    { opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
+#define DEF_APUT(opcode, reg, obj, idx) \
+    { opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
+#define DEF_APUT_WIDE(opcode, reg, obj, idx) \
+    { opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
 #define DEF_INVOKE1(opcode, reg) \
     { opcode, 0u, 0u, 1, { reg }, 0, { } }
 #define DEF_UNIQUE_REF(opcode, reg) \
@@ -163,6 +173,7 @@
     for (size_t i = 0; i != mir_count_; ++i) {
       value_names_[i] =  lvn_->GetValueNumber(&mirs_[i]);
     }
+    EXPECT_TRUE(lvn_->Good());
   }
 
   LocalValueNumberingTest()
@@ -170,8 +181,11 @@
         cu_(&pool_),
         mir_count_(0u),
         mirs_(nullptr),
-        lvn_(LocalValueNumbering::Create(&cu_)) {
+        allocator_(),
+        lvn_() {
     cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+    lvn_.reset(new (allocator_.get()) LocalValueNumbering(&cu_, allocator_.get()));
   }
 
   ArenaPool pool_;
@@ -180,12 +194,13 @@
   MIR* mirs_;
   std::vector<SSARepresentation> ssa_reps_;
   std::vector<uint16_t> value_names_;
+  std::unique_ptr<ScopedArenaAllocator> allocator_;
   std::unique_ptr<LocalValueNumbering> lvn_;
 };
 
-TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
+TEST_F(LocalValueNumberingTest, IGetIGetInvokeIGet) {
   static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false }
+      { 1u, 1u, 1u, false },
   };
   static const MIRDef mirs[] = {
       DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
@@ -206,15 +221,15 @@
   EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
 
-TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
+TEST_F(LocalValueNumberingTest, IGetIPutIGetIGetIGet) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
       { 2u, 1u, 2u, false },
   };
   static const MIRDef mirs[] = {
-      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
-      DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u),  // May alias.
-      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
+      DEF_IGET(Instruction::IGET_OBJECT, 0u, 10u, 0u),
+      DEF_IPUT(Instruction::IPUT_OBJECT, 1u, 11u, 0u),  // May alias.
+      DEF_IGET(Instruction::IGET_OBJECT, 2u, 10u, 0u),
       DEF_IGET(Instruction::IGET, 3u,  0u, 1u),
       DEF_IGET(Instruction::IGET, 4u,  2u, 1u),
   };
@@ -232,7 +247,7 @@
   EXPECT_EQ(mirs_[4].optimization_flags, 0u);
 }
 
-TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
+TEST_F(LocalValueNumberingTest, UniquePreserve1) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
   };
@@ -253,7 +268,7 @@
   EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
 
-TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
+TEST_F(LocalValueNumberingTest, UniquePreserve2) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
   };
@@ -274,7 +289,7 @@
   EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
 
-TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
+TEST_F(LocalValueNumberingTest, UniquePreserveAndEscape) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
   };
@@ -298,7 +313,7 @@
   EXPECT_EQ(mirs_[5].optimization_flags, MIR_IGNORE_NULL_CHECK);
 }
 
-TEST_F(LocalValueNumberingTest, TestVolatile) {
+TEST_F(LocalValueNumberingTest, Volatile) {
   static const IFieldDef ifields[] = {
       { 1u, 1u, 1u, false },
       { 2u, 1u, 2u, true },
@@ -322,4 +337,264 @@
   EXPECT_EQ(mirs_[3].optimization_flags, 0u);
 }
 
+TEST_F(LocalValueNumberingTest, UnresolvedIField) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },  // Resolved field #1.
+      { 2u, 1u, 2u, false },  // Resolved field #2.
+      { 3u, 0u, 0u, false },  // Unresolved field.
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 20u),
+      DEF_IGET(Instruction::IGET, 1u, 20u, 0u),             // Resolved field #1, unique object.
+      DEF_IGET(Instruction::IGET, 2u, 21u, 0u),             // Resolved field #1.
+      DEF_IGET_WIDE(Instruction::IGET_WIDE, 3u, 21u, 1u),   // Resolved field #2.
+      DEF_IGET(Instruction::IGET, 4u, 22u, 2u),             // IGET doesn't clobber anything.
+      DEF_IGET(Instruction::IGET, 5u, 20u, 0u),             // Resolved field #1, unique object.
+      DEF_IGET(Instruction::IGET, 6u, 21u, 0u),             // Resolved field #1.
+      DEF_IGET_WIDE(Instruction::IGET_WIDE, 7u, 21u, 1u),   // Resolved field #2.
+      DEF_IPUT(Instruction::IPUT, 8u, 22u, 2u),             // IPUT clobbers field #1 (#2 is wide).
+      DEF_IGET(Instruction::IGET, 9u, 20u, 0u),             // Resolved field #1, unique object.
+      DEF_IGET(Instruction::IGET, 10u, 21u, 0u),            // Resolved field #1, new value name.
+      DEF_IGET_WIDE(Instruction::IGET_WIDE, 11u, 21u, 1u),  // Resolved field #2.
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 12u);
+  EXPECT_EQ(value_names_[1], value_names_[5]);
+  EXPECT_EQ(value_names_[2], value_names_[6]);
+  EXPECT_EQ(value_names_[3], value_names_[7]);
+  EXPECT_EQ(value_names_[1], value_names_[9]);
+  EXPECT_NE(value_names_[2], value_names_[10]);  // This aliased with unresolved IPUT.
+  EXPECT_EQ(value_names_[3], value_names_[11]);
+  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[2].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[4].optimization_flags, 0u);
+  for (size_t i = 5u; i != mir_count_; ++i) {
+    EXPECT_EQ(mirs_[i].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  }
+}
+
+TEST_F(LocalValueNumberingTest, UnresolvedSField) {
+  static const SFieldDef sfields[] = {
+      { 1u, 1u, 1u, false },  // Resolved field #1.
+      { 2u, 1u, 2u, false },  // Resolved field #2.
+      { 3u, 0u, 0u, false },  // Unresolved field.
+  };
+  static const MIRDef mirs[] = {
+      DEF_SGET(Instruction::SGET, 0u, 0u),            // Resolved field #1.
+      DEF_SGET_WIDE(Instruction::SGET_WIDE, 1u, 1u),  // Resolved field #2.
+      DEF_SGET(Instruction::SGET, 2u, 2u),            // SGET doesn't clobber anything.
+      DEF_SGET(Instruction::SGET, 3u, 0u),            // Resolved field #1.
+      DEF_SGET_WIDE(Instruction::SGET_WIDE, 4u, 1u),  // Resolved field #2.
+      DEF_SPUT(Instruction::SPUT, 5u, 2u),            // SPUT clobbers field #1 (#2 is wide).
+      DEF_SGET(Instruction::SGET, 6u, 0u),            // Resolved field #1.
+      DEF_SGET_WIDE(Instruction::SGET_WIDE, 7u, 1u),  // Resolved field #2.
+  };
+
+  PrepareSFields(sfields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 8u);
+  EXPECT_EQ(value_names_[0], value_names_[3]);
+  EXPECT_EQ(value_names_[1], value_names_[4]);
+  EXPECT_NE(value_names_[0], value_names_[6]);  // This aliased with the unresolved SPUT.
+  EXPECT_EQ(value_names_[1], value_names_[7]);
+  for (size_t i = 0u; i != mir_count_; ++i) {
+    EXPECT_EQ(mirs_[i].optimization_flags, 0u) << i;
+  }
+}
+
+TEST_F(LocalValueNumberingTest, ConstString) {
+  static const MIRDef mirs[] = {
+      DEF_CONST_STRING(Instruction::CONST_STRING, 0u, 0u),
+      DEF_CONST_STRING(Instruction::CONST_STRING, 1u, 0u),
+      DEF_CONST_STRING(Instruction::CONST_STRING, 2u, 2u),
+      DEF_CONST_STRING(Instruction::CONST_STRING, 3u, 0u),
+      DEF_INVOKE1(Instruction::INVOKE_DIRECT, 2u),
+      DEF_CONST_STRING(Instruction::CONST_STRING, 4u, 2u),
+  };
+
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 6u);
+  EXPECT_EQ(value_names_[1], value_names_[0]);
+  EXPECT_NE(value_names_[2], value_names_[0]);
+  EXPECT_EQ(value_names_[3], value_names_[0]);
+  EXPECT_EQ(value_names_[5], value_names_[2]);
+}
+
+TEST_F(LocalValueNumberingTest, SameValueInDifferentMemoryLocations) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },
+      { 2u, 1u, 2u, false },
+  };
+  static const SFieldDef sfields[] = {
+      { 3u, 1u, 3u, false },
+  };
+  static const MIRDef mirs[] = {
+      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
+      DEF_IPUT(Instruction::IPUT, 0u, 10u, 1u),
+      DEF_SPUT(Instruction::SPUT, 0u, 0u),
+      DEF_APUT(Instruction::APUT, 0u, 11u, 12u),
+      DEF_IGET(Instruction::IGET, 1u, 10u, 0u),
+      DEF_IGET(Instruction::IGET, 2u, 10u, 1u),
+      DEF_AGET(Instruction::AGET, 3u, 11u, 12u),
+      DEF_SGET(Instruction::SGET, 4u, 0u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareSFields(sfields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 8u);
+  EXPECT_EQ(value_names_[4], value_names_[0]);
+  EXPECT_EQ(value_names_[5], value_names_[0]);
+  EXPECT_EQ(value_names_[6], value_names_[0]);
+  EXPECT_EQ(value_names_[7], value_names_[0]);
+  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[2].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[3].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[4].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[5].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[6].optimization_flags, MIR_IGNORE_NULL_CHECK | MIR_IGNORE_RANGE_CHECK);
+  EXPECT_EQ(mirs_[7].optimization_flags, 0u);
+}
+
+TEST_F(LocalValueNumberingTest, UniqueArrayAliasing) {
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 20u),
+      DEF_AGET(Instruction::AGET, 1u, 20u, 40u),
+      DEF_APUT(Instruction::APUT, 2u, 20u, 41u),  // May alias with index for sreg 40u.
+      DEF_AGET(Instruction::AGET, 3u, 20u, 40u),
+  };
+
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 4u);
+  EXPECT_NE(value_names_[1], value_names_[3]);
+  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[2].optimization_flags, MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK | MIR_IGNORE_RANGE_CHECK);
+}
+
+TEST_F(LocalValueNumberingTest, EscapingRefs) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },  // Field #1.
+      { 2u, 1u, 2u, false },  // Field #2.
+      { 3u, 1u, 3u, false },  // Reference field for storing escaping refs.
+      { 4u, 1u, 4u, false },  // Wide.
+      { 5u, 0u, 0u, false },  // Unresolved field, int.
+      { 6u, 0u, 0u, false },  // Unresolved field, wide.
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 20u),
+      DEF_IGET(Instruction::IGET, 1u, 20u, 0u),
+      DEF_IGET(Instruction::IGET, 2u, 20u, 1u),
+      DEF_IPUT(Instruction::IPUT_OBJECT, 20u, 30u, 2u),      // Ref escapes.
+      DEF_IGET(Instruction::IGET, 4u, 20u, 0u),
+      DEF_IGET(Instruction::IGET, 5u, 20u, 1u),
+      DEF_IPUT(Instruction::IPUT, 6u, 31u, 0u),              // May alias with field #1.
+      DEF_IGET(Instruction::IGET, 7u, 20u, 0u),              // New value.
+      DEF_IGET(Instruction::IGET, 8u, 20u, 1u),              // Still the same.
+      DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 9u, 31u, 3u),    // No aliasing, different type.
+      DEF_IGET(Instruction::IGET, 10u, 20u, 0u),
+      DEF_IGET(Instruction::IGET, 11u, 20u, 1u),
+      DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 12u, 31u, 5u),   // No aliasing, different type.
+      DEF_IGET(Instruction::IGET, 13u, 20u, 0u),
+      DEF_IGET(Instruction::IGET, 14u, 20u, 1u),
+      DEF_IPUT(Instruction::IPUT, 15u, 31u, 4u),             // Aliasing, same type.
+      DEF_IGET(Instruction::IGET, 16u, 20u, 0u),
+      DEF_IGET(Instruction::IGET, 17u, 20u, 1u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 18u);
+  EXPECT_EQ(value_names_[1], value_names_[4]);
+  EXPECT_EQ(value_names_[2], value_names_[5]);
+  EXPECT_NE(value_names_[4], value_names_[7]);  // New value.
+  EXPECT_EQ(value_names_[5], value_names_[8]);
+  EXPECT_EQ(value_names_[7], value_names_[10]);
+  EXPECT_EQ(value_names_[8], value_names_[11]);
+  EXPECT_EQ(value_names_[10], value_names_[13]);
+  EXPECT_EQ(value_names_[11], value_names_[14]);
+  EXPECT_NE(value_names_[13], value_names_[16]);  // New value.
+  EXPECT_NE(value_names_[14], value_names_[17]);  // New value.
+  for (size_t i = 0u; i != mir_count_; ++i) {
+    int expected = (i != 0u && i != 3u && i != 6u) ? MIR_IGNORE_NULL_CHECK : 0u;
+    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
+  }
+}
+
+TEST_F(LocalValueNumberingTest, EscapingArrayRefs) {
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 20u),
+      DEF_AGET(Instruction::AGET, 1u, 20u, 40u),
+      DEF_AGET(Instruction::AGET, 2u, 20u, 41u),
+      DEF_APUT(Instruction::APUT_OBJECT, 20u, 30u, 42u),    // Array ref escapes.
+      DEF_AGET(Instruction::AGET, 4u, 20u, 40u),
+      DEF_AGET(Instruction::AGET, 5u, 20u, 41u),
+      DEF_APUT_WIDE(Instruction::APUT_WIDE, 6u, 31u, 43u),  // No aliasing, different type.
+      DEF_AGET(Instruction::AGET, 7u, 20u, 40u),
+      DEF_AGET(Instruction::AGET, 8u, 20u, 41u),
+      DEF_APUT(Instruction::APUT, 9u, 32u, 40u),            // May alias with all elements.
+      DEF_AGET(Instruction::AGET, 10u, 20u, 40u),           // New value (same index name).
+      DEF_AGET(Instruction::AGET, 11u, 20u, 41u),           // New value (different index name).
+  };
+
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 12u);
+  EXPECT_EQ(value_names_[1], value_names_[4]);
+  EXPECT_EQ(value_names_[2], value_names_[5]);
+  EXPECT_EQ(value_names_[4], value_names_[7]);
+  EXPECT_EQ(value_names_[5], value_names_[8]);
+  EXPECT_NE(value_names_[7], value_names_[10]);  // New value.
+  EXPECT_NE(value_names_[8], value_names_[11]);  // New value.
+  for (size_t i = 0u; i != mir_count_; ++i) {
+    int expected =
+        ((i != 0u && i != 3u && i != 6u && i != 9u) ? MIR_IGNORE_NULL_CHECK : 0u) |
+        ((i >= 4 && i != 6u && i != 9u) ? MIR_IGNORE_RANGE_CHECK : 0u);
+    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
+  }
+}
+
+TEST_F(LocalValueNumberingTest, StoringSameValueKeepsMemoryVersion) {
+  static const IFieldDef ifields[] = {
+      { 1u, 1u, 1u, false },
+  };
+  static const MIRDef mirs[] = {
+      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
+      DEF_IGET(Instruction::IGET, 1u, 11u, 0u),
+      DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u),   // Store the same value.
+      DEF_IGET(Instruction::IGET, 3u, 10u, 0u),
+      DEF_AGET(Instruction::AGET, 4u, 12u, 40u),
+      DEF_AGET(Instruction::AGET, 5u, 13u, 40u),
+      DEF_APUT(Instruction::APUT, 5u, 13u, 40u),  // Store the same value.
+      DEF_AGET(Instruction::AGET, 7u, 12u, 40u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformLVN();
+  ASSERT_EQ(value_names_.size(), 8u);
+  EXPECT_NE(value_names_[0], value_names_[1]);
+  EXPECT_EQ(value_names_[0], value_names_[3]);
+  EXPECT_NE(value_names_[4], value_names_[5]);
+  EXPECT_EQ(value_names_[4], value_names_[7]);
+  for (size_t i = 0u; i != mir_count_; ++i) {
+    int expected =
+        ((i == 2u || i == 3u || i == 6u || i == 7u) ? MIR_IGNORE_NULL_CHECK : 0u) |
+        ((i == 6u || i == 7u) ? MIR_IGNORE_RANGE_CHECK : 0u);
+    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
+  }
+}
+
 }  // namespace art
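
For readers decoding the DEF_* initializers above, the layout they rely on is the test's MIRDef aggregate: opcode, literal value, field-info index, then the use and def SSA register lists. A hedged sketch of that layout, reconstructed from the initializers rather than copied from the header:

#include <cstddef>
#include <cstdint>

// Sketch only: field order assumed by the DEF_* macros in this test.
struct MIRDefSketch {
  uint16_t opcode;      // Instruction::Code, e.g. Instruction::AGET.
  int64_t value;        // Literal for CONST*, string index for CONST_STRING.
  uint32_t field_info;  // Index into the local ifields/sfields arrays.
  size_t num_uses;
  int32_t uses[4];      // kMaxSsaUses, raised from 3 to 4 for APUT_WIDE.
  size_t num_defs;
  int32_t defs[2];      // kMaxSsaDefs.
};
// Under this layout, DEF_APUT_WIDE(opcode, reg, obj, idx) expands to
// { opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }: four uses, no defs.
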
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 1d4aef2..256686e 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -320,9 +320,11 @@
     return true;
   }
   bool use_lvn = bb->use_lvn;
+  std::unique_ptr<ScopedArenaAllocator> allocator;
   std::unique_ptr<LocalValueNumbering> local_valnum;
   if (use_lvn) {
-    local_valnum.reset(LocalValueNumbering::Create(cu_));
+    allocator.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
+    local_valnum.reset(new (allocator.get()) LocalValueNumbering(cu_, allocator.get()));
   }
   while (bb != NULL) {
     for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
@@ -550,6 +552,9 @@
     }
     bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
   }
+  if (use_lvn && UNLIKELY(!local_valnum->Good())) {
+    LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  }
 
   return true;
 }
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index 4ce040e..b4906d6 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -89,6 +89,21 @@
     return false;
   }
 
+  static void BasePrintMessage(CompilationUnit* c_unit, const char* pass_name, const char* message, ...) {
+    // Check if we want to log something or not.
+    if (c_unit->print_pass) {
+      // Stringify the message.
+      va_list args;
+      va_start(args, message);
+      std::string stringified_message;
+      StringAppendV(&stringified_message, message, args);
+      va_end(args);
+
+      // Log the message and ensure to include pass name.
+      LOG(INFO) << pass_name << ": " << stringified_message;
+    }
+  }
+
  protected:
   /** @brief The pass name: used for searching for a pass when running a particular pass or debugging. */
   const char* const pass_name_;
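
A minimal sketch of how a pass might call the new BasePrintMessage() helper; the free function and the block-id argument here are illustrative only, while the gating on c_unit->print_pass is what the helper actually provides:

// Illustrative only: a log line that stays silent unless the driver enabled
// printing for this pass (via --print-passes= / --print-all-passes).
static void LogBlockVisit(CompilationUnit* c_unit, const char* pass_name, int block_id) {
  Pass::BasePrintMessage(c_unit, pass_name, "visiting basic block #%d", block_id);
}
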
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index aa0d1ae..bd8f53c 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -141,7 +141,6 @@
     }
   }
 
- protected:
   /**
    * @brief Gets the list of passes currently schedule to execute.
    * @return pass_list_
@@ -150,14 +149,27 @@
     return pass_list_;
   }
 
-  virtual void InitializePasses() {
-    SetDefaultPasses();
+  static void SetPrintAllPasses() {
+    default_print_passes_ = true;
+  }
+
+  static void SetDumpPassList(const std::string& list) {
+    dump_pass_list_ = list;
+  }
+
+  static void SetPrintPassList(const std::string& list) {
+    print_pass_list_ = list;
   }
 
   void SetDefaultPasses() {
     pass_list_ = PassDriver<PassDriverType>::g_default_pass_list;
   }
 
+ protected:
+  virtual void InitializePasses() {
+    SetDefaultPasses();
+  }
+
   /**
   * @brief Apply a pass: perform start/work/end functions.
    */
@@ -185,6 +197,15 @@
 
   /** @brief The default pass list is used to initialize pass_list_. */
   static std::vector<const Pass*> g_default_pass_list;
+
+  /** @brief Should passes print their log messages by default? */
+  static bool default_print_passes_;
+
+  /** @brief Which passes should print their log messages? */
+  static std::string print_pass_list_;
+
+  /** @brief Which passes should dump the CFG? */
+  static std::string dump_pass_list_;
 };
 
 }  // namespace art
diff --git a/compiler/dex/pass_driver_me.cc b/compiler/dex/pass_driver_me.cc
index d054500..e6d90e0 100644
--- a/compiler/dex/pass_driver_me.cc
+++ b/compiler/dex/pass_driver_me.cc
@@ -77,6 +77,19 @@
 template<>
 std::vector<const Pass*> PassDriver<PassDriverME>::g_default_pass_list(PassDriver<PassDriverME>::g_passes, PassDriver<PassDriverME>::g_passes + PassDriver<PassDriverME>::g_passes_size);
 
+// By default, do not have a dump pass list.
+template<>
+std::string PassDriver<PassDriverME>::dump_pass_list_ = std::string();
+
+// By default, do not have a print pass list.
+template<>
+std::string PassDriver<PassDriverME>::print_pass_list_ = std::string();
+
+// By default, we do not print pass information.
+template<>
+bool PassDriver<PassDriverME>::default_print_passes_ = false;
+
+
 PassDriverME::PassDriverME(CompilationUnit* cu)
     : PassDriver(), pass_me_data_holder_(), dump_cfg_folder_("/sdcard/") {
   pass_me_data_holder_.bb = nullptr;
@@ -136,26 +149,51 @@
 
   // Check the pass gate first.
   bool should_apply_pass = pass->Gate(&pass_me_data_holder_);
+
   if (should_apply_pass) {
+    bool old_print_pass = c_unit->print_pass;
+
+    c_unit->print_pass = default_print_passes_;
+
+    const char* print_pass_list = print_pass_list_.c_str();
+
+    if (print_pass_list != nullptr && strstr(print_pass_list, pass->GetName()) != nullptr) {
+      c_unit->print_pass = true;
+    }
+
     // Applying the pass: first start, doWork, and end calls.
     ApplyPass(&pass_me_data_holder_, pass);
 
     // Do we want to log it?
-    if ((c_unit->enable_debug&  (1 << kDebugDumpCFG)) != 0) {
-      // Do we have a pass folder?
-      const PassME* me_pass = (down_cast<const PassME*>(pass));
-      const char* passFolder = me_pass->GetDumpCFGFolder();
-      DCHECK(passFolder != nullptr);
+    bool should_dump = ((c_unit->enable_debug & (1 << kDebugDumpCFG)) != 0);
 
-      if (passFolder[0] != 0) {
-        // Create directory prefix.
-        std::string prefix = GetDumpCFGFolder();
-        prefix += passFolder;
-        prefix += "/";
+    const char* dump_pass_list = dump_pass_list_.c_str();
 
-        c_unit->mir_graph->DumpCFG(prefix.c_str(), false);
+    if (dump_pass_list != nullptr) {
+      bool found = (strstr(dump_pass_list, pass->GetName()) != nullptr);
+      should_dump = (should_dump || found);
+    }
+
+    if (should_dump) {
+      // Is the CFG-dump debug flag enabled?
+      if ((c_unit->enable_debug & (1 << kDebugDumpCFG)) != 0) {
+        // Do we have a pass folder?
+        const PassME* me_pass = (down_cast<const PassME*>(pass));
+        const char* passFolder = me_pass->GetDumpCFGFolder();
+        DCHECK(passFolder != nullptr);
+
+        if (passFolder[0] != 0) {
+          // Create directory prefix.
+          std::string prefix = GetDumpCFGFolder();
+          prefix += passFolder;
+          prefix += "/";
+
+          c_unit->mir_graph->DumpCFG(prefix.c_str(), false);
+        }
       }
     }
+
+    c_unit->print_pass = old_print_pass;
   }
 
   // If the pass gate passed, we can declare success.
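
For completeness, a hedged sketch of configuring the new hooks programmatically (dex2oat wires them to --print-passes=, --print-all-passes and --dump-cfg-passes= further down); the pass names are examples only, and matching is a plain strstr() against each pass name:

// Illustrative only: log messages for two passes, dump the CFG for one.
PassDriverME::SetPrintPassList("BBCombine,BBOptimizations");
PassDriverME::SetDumpPassList("BBOptimizations");
// Or turn on logging for every pass:
PassDriverME::SetPrintAllPasses();
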
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 0222447..598d05b 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -258,7 +258,6 @@
   unsigned i;
   unsigned reg_size = (is_wide) ? 64 : 32;
   uint64_t result = value & BIT_MASK(width);
-  DCHECK_NE(width, reg_size);
   for (i = width; i < reg_size; i *= 2) {
     result |= (result << i);
   }
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index eca0d2f..d0ab4f6 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -504,7 +504,7 @@
   CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
   CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit());
   if (EncodingMap[opcode].flags & IS_QUAD_OP) {
-    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
+    DCHECK(!IsExtendEncoding(shift));
     return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
   } else {
     DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
@@ -706,40 +706,46 @@
 LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                    int scale, OpSize size) {
   LIR* load;
+  int expected_scale = 0;
   ArmOpcode opcode = kA64Brk1d;
-  ArmOpcode wide = kA64NotWide;
-
-  DCHECK(scale == 0 || scale == 1);
 
   if (r_dest.IsFloat()) {
-    bool is_double = r_dest.IsDouble();
-    bool is_single = !is_double;
-    DCHECK_EQ(is_single, r_dest.IsSingle());
+    if (r_dest.IsDouble()) {
+      DCHECK(size == k64 || size == kDouble);
+      expected_scale = 3;
+      opcode = FWIDE(kA64Ldr4fXxG);
+    } else {
+      DCHECK(r_dest.IsSingle());
+      DCHECK(size == k32 || size == kSingle);
+      expected_scale = 2;
+      opcode = kA64Ldr4fXxG;
+    }
 
-    // If r_dest is a single, then size must be either k32 or kSingle.
-    // If r_dest is a double, then size must be either k64 or kDouble.
-    DCHECK(!is_single || size == k32 || size == kSingle);
-    DCHECK(!is_double || size == k64 || size == kDouble);
-    return NewLIR4((is_double) ? FWIDE(kA64Ldr4fXxG) : kA64Ldr4fXxG,
-                   r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
+    DCHECK(scale == 0 || scale == expected_scale);
+    return NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
+                   (scale != 0) ? 1 : 0);
   }
 
   switch (size) {
     case kDouble:
     case kWord:
     case k64:
-      wide = kA64Wide;
-      // Intentional fall-trough.
+      opcode = WIDE(kA64Ldr4rXxG);
+      expected_scale = 3;
+      break;
     case kSingle:
     case k32:
     case kReference:
       opcode = kA64Ldr4rXxG;
+      expected_scale = 2;
       break;
     case kUnsignedHalf:
       opcode = kA64Ldrh4wXxd;
+      expected_scale = 1;
       break;
     case kSignedHalf:
       opcode = kA64Ldrsh4rXxd;
+      expected_scale = 1;
       break;
     case kUnsignedByte:
       opcode = kA64Ldrb3wXx;
@@ -751,13 +757,14 @@
       LOG(FATAL) << "Bad size: " << size;
   }
 
-  if (UNLIKELY((EncodingMap[opcode].flags & IS_TERTIARY_OP) != 0)) {
-    // Tertiary ops (e.g. ldrb, ldrsb) do not support scale.
+  if (UNLIKELY(expected_scale == 0)) {
+    // This is a tertiary op (e.g. ldrb, ldrsb); it does not support scale.
+    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
     DCHECK_EQ(scale, 0);
-    load = NewLIR3(opcode | wide, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
+    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
   } else {
-    DCHECK(scale == 0 || scale == ((wide == kA64Wide) ? 3 : 2));
-    load = NewLIR4(opcode | wide, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
+    DCHECK(scale == 0 || scale == expected_scale);
+    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                    (scale != 0) ? 1 : 0);
   }
 
@@ -767,39 +774,43 @@
 LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                     int scale, OpSize size) {
   LIR* store;
+  int expected_scale = 0;
   ArmOpcode opcode = kA64Brk1d;
-  ArmOpcode wide = kA64NotWide;
-
-  DCHECK(scale == 0 || scale == 1);
 
   if (r_src.IsFloat()) {
-    bool is_double = r_src.IsDouble();
-    bool is_single = !is_double;
-    DCHECK_EQ(is_single, r_src.IsSingle());
+    if (r_src.IsDouble()) {
+      DCHECK(size == k64 || size == kDouble);
+      expected_scale = 3;
+      opcode = FWIDE(kA64Str4fXxG);
+    } else {
+      DCHECK(r_src.IsSingle());
+      DCHECK(size == k32 || size == kSingle);
+      expected_scale = 2;
+      opcode = kA64Str4fXxG;
+    }
 
-    // If r_src is a single, then size must be either k32 or kSingle.
-    // If r_src is a double, then size must be either k64 or kDouble.
-    DCHECK(!is_single || size == k32 || size == kSingle);
-    DCHECK(!is_double || size == k64 || size == kDouble);
-    return NewLIR4((is_double) ? FWIDE(kA64Str4fXxG) : kA64Str4fXxG,
-                   r_src.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
+    DCHECK(scale == 0 || scale == expected_scale);
+    return NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
+                   (scale != 0) ? 1 : 0);
   }
 
   switch (size) {
     case kDouble:     // Intentional fall-through.
     case kWord:       // Intentional fall-through.
     case k64:
-      opcode = kA64Str4rXxG;
-      wide = kA64Wide;
+      opcode = WIDE(kA64Str4rXxG);
+      expected_scale = 3;
       break;
     case kSingle:     // Intentional fall-through.
     case k32:         // Intentional fall-through.
     case kReference:
       opcode = kA64Str4rXxG;
+      expected_scale = 2;
       break;
     case kUnsignedHalf:
     case kSignedHalf:
       opcode = kA64Strh4wXxd;
+      expected_scale = 1;
       break;
     case kUnsignedByte:
     case kSignedByte:
@@ -809,12 +820,14 @@
       LOG(FATAL) << "Bad size: " << size;
   }
 
-  if (UNLIKELY((EncodingMap[opcode].flags & IS_TERTIARY_OP) != 0)) {
-    // Tertiary ops (e.g. strb) do not support scale.
+  if (UNLIKELY(expected_scale == 0)) {
+    // This is a tertiary op (e.g. strb); it does not support scale.
+    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
     DCHECK_EQ(scale, 0);
-    store = NewLIR3(opcode | wide, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
+    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
   } else {
-    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
+    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
+                    (scale != 0) ? 1 : 0);
   }
 
   return store;
@@ -842,8 +855,8 @@
         opcode = FWIDE(kA64Ldr3fXD);
         alt_opcode = FWIDE(kA64Ldur3fXd);
       } else {
-        opcode = FWIDE(kA64Ldr3rXD);
-        alt_opcode = FWIDE(kA64Ldur3rXd);
+        opcode = WIDE(kA64Ldr3rXD);
+        alt_opcode = WIDE(kA64Ldur3rXd);
       }
       break;
     case kSingle:     // Intentional fall-through.
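
The rule the rewritten LoadBaseIndexed/StoreBaseIndexed enforce is that the A64 register-offset form can only be unscaled or scaled by the access size, so the caller's scale must be 0 or log2(size in bytes) and the emitted operand records only whether scaling is applied. A hedged restatement of the expected_scale values used above:

// Illustrative helper, not part of the patch: expected scale per access size.
static int ExpectedScaleForAccess(int size_in_bytes) {
  switch (size_in_bytes) {
    case 8: return 3;   // 64-bit GPR, double.
    case 4: return 2;   // 32-bit GPR, single, reference.
    case 2: return 1;   // Half-word.
    default: return 0;  // Byte accesses have no scaled register-offset form.
  }
}
// The LIR operand is then (scale != 0) ? 1 : 0, guarded by
// DCHECK(scale == 0 || scale == expected_scale).
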
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 7e3c8ce..4f2a876 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -562,8 +562,8 @@
     RegStorage r_base;
     if (field_info.IsReferrersClass()) {
       // Fast path, static storage base is this method's class
-      RegLocation rl_method  = LoadCurrMethod();
-      r_base = AllocTemp();
+      RegLocation rl_method = LoadCurrMethod();
+      r_base = AllocTempWord();
       LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
       if (IsTemp(rl_method.reg)) {
         FreeTemp(rl_method.reg);
@@ -658,7 +658,7 @@
     if (field_info.IsReferrersClass()) {
       // Fast path, static storage base is this method's class
       RegLocation rl_method  = LoadCurrMethod();
-      r_base = AllocTemp();
+      r_base = AllocTempWord();
       LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
     } else {
       // Medium path, static storage base in a different class which requires checks that the other
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 361aba8..8d572ca 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -699,6 +699,7 @@
     virtual RegStorage AllocFreeTemp();
     virtual RegStorage AllocTemp();
     virtual RegStorage AllocTempWide();
+    virtual RegStorage AllocTempWord();
     virtual RegStorage AllocTempSingle();
     virtual RegStorage AllocTempDouble();
     virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index e5ca460..59ae16e 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -406,6 +406,10 @@
   return res;
 }
 
+RegStorage Mir2Lir::AllocTempWord() {
+  return (Is64BitInstructionSet(cu_->instruction_set)) ? AllocTempWide() : AllocTemp();
+}
+
 RegStorage Mir2Lir::AllocTempSingle() {
   RegStorage res = AllocTempBody(reg_pool_->sp_regs_, &reg_pool_->next_sp_reg_, true);
   DCHECK(res.IsSingle()) << "Reg: 0x" << std::hex << res.GetRawBits();
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 2126625..4d3d664 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -923,6 +923,14 @@
     } else if (option.starts_with("--disable-passes=")) {
       std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
       PassDriverME::CreateDefaultPassList(disable_passes);
+    } else if (option.starts_with("--print-passes=")) {
+      std::string print_passes = option.substr(strlen("--print-passes=")).data();
+      PassDriverME::SetPrintPassList(print_passes);
+    } else if (option == "--print-all-passes") {
+      PassDriverME::SetPrintAllPasses();
+    } else if (option.starts_with("--dump-cfg-passes=")) {
+      std::string dump_passes = option.substr(strlen("--dump-cfg-passes=")).data();
+      PassDriverME::SetDumpPassList(dump_passes);
     } else {
       Usage("Unknown argument %s", option.data());
     }
diff --git a/runtime/Android.mk b/runtime/Android.mk
index c2507b1..a0648b0 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -402,11 +402,13 @@
     endif
   endif
   LOCAL_C_INCLUDES += $(ART_C_INCLUDES)
+  LOCAL_C_INCLUDES += art/sigchainlib
+
   LOCAL_SHARED_LIBRARIES += liblog libnativehelper
   include external/libcxx/libcxx.mk
   LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
   ifeq ($$(art_target_or_host),target)
-    LOCAL_SHARED_LIBRARIES += libcutils libdl libselinux libutils
+    LOCAL_SHARED_LIBRARIES += libcutils libdl libselinux libutils libsigchain
     LOCAL_STATIC_LIBRARIES := libziparchive libz
   else # host
     LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils
@@ -459,3 +461,4 @@
 ifeq ($(ART_BUILD_TARGET_DEBUG),true)
   $(eval $(call build-libart,target,debug,$(ART_TARGET_CLANG)))
 endif
+
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 8d750c5..15c38c1 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -29,12 +29,22 @@
 #include "mirror/object-inl.h"
 #include "object_utils.h"
 #include "scoped_thread_state_change.h"
+#ifdef HAVE_ANDROID_OS
+#include "sigchain.h"
+#endif
 #include "verify_object-inl.h"
 
 namespace art {
 // Static fault manager object accessed by signal handler.
 FaultManager fault_manager;
 
+extern "C" {
+void art_sigsegv_fault() {
+  // Set a breakpoint here to be informed when a SIGSEGV is unhandled by ART.
+  VLOG(signals) << "Caught unknown SIGSEGV in ART fault handler - chaining to next handler.";
+}
+}
+
 // Signal handler called on SIGSEGV.
 static void art_fault_handler(int sig, siginfo_t* info, void* context) {
   fault_manager.HandleFault(sig, info, context);
@@ -45,9 +55,13 @@
 }
 
 FaultManager::~FaultManager() {
+#ifdef HAVE_ANDROID_OS
+  UnclaimSignalChain(SIGSEGV);
+#endif
   sigaction(SIGSEGV, &oldaction_, nullptr);   // Restore old handler.
 }
 
+
 void FaultManager::Init() {
   struct sigaction action;
   action.sa_sigaction = art_fault_handler;
@@ -56,7 +70,13 @@
 #if !defined(__mips__)
   action.sa_restorer = nullptr;
 #endif
+
+  // Set our signal handler now.
   sigaction(SIGSEGV, &action, &oldaction_);
+#ifdef HAVE_ANDROID_OS
+  // Make sure our signal handler is called before any user handlers.
+  ClaimSignalChain(SIGSEGV, &oldaction_);
+#endif
 }
 
 void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
@@ -79,8 +99,13 @@
       return;
     }
   }
-  VLOG(signals)<< "Caught unknown SIGSEGV in ART fault handler";
+  art_sigsegv_fault();
+
+#ifdef HAVE_ANDROID_OS
+  InvokeUserSignalHandler(sig, info, context);
+#else
   oldaction_.sa_sigaction(sig, info, context);
+#endif
 }
 
 void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) {
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 43331c3..c062706 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -103,6 +103,13 @@
       gc_barrier_(new Barrier(0)),
       mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
       is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
+  std::string error_msg;
+  MemMap* mem_map = MemMap::MapAnonymous(
+      "mark sweep sweep array free buffer", nullptr,
+      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
+      PROT_READ | PROT_WRITE, false, &error_msg);
+  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
+  sweep_array_free_buffer_mem_map_.reset(mem_map);
 }
 
 void MarkSweep::InitializePhase() {
@@ -1022,7 +1029,8 @@
 void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
   timings_.StartSplit("SweepArray");
   Thread* self = Thread::Current();
-  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
+  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
+      sweep_array_free_buffer_mem_map_->BaseBegin());
   size_t chunk_free_pos = 0;
   size_t freed_bytes = 0;
   size_t freed_large_object_bytes = 0;
@@ -1121,6 +1129,10 @@
   timings_.StartSplit("ResetStack");
   allocations->Reset();
   timings_.EndSplit();
+
+  int success = madvise(sweep_array_free_buffer_mem_map_->BaseBegin(),
+                        sweep_array_free_buffer_mem_map_->BaseSize(), MADV_DONTNEED);
+  DCHECK_EQ(success, 0) << "Failed to madvise the sweep array free buffer pages.";
 }
 
 void MarkSweep::Sweep(bool swap_bitmaps) {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index d73bf3f..a0a0dd8 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -313,6 +313,8 @@
   // Verification.
   size_t live_stack_freeze_size_;
 
+  std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;
+
  private:
   friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
   friend class CardScanTask;
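
The point of moving SweepArray's chunk-free buffer off the stack is to keep a multi-page scratch area without enlarging thread stacks, while madvise(MADV_DONTNEED) hands the physical pages back to the kernel between collections. A self-contained sketch of the same pattern with raw mmap instead of ART's MemMap:

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

// Illustrative only: page-backed scratch buffer released between uses,
// mirroring sweep_array_free_buffer_mem_map_ plus the madvise() call.
static void* AllocScratchPages(size_t bytes) {
  void* p = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(p != MAP_FAILED);
  return p;
}

static void ReleaseScratchPages(void* p, size_t bytes) {
  // Keeps the mapping valid but lets the kernel reclaim the physical pages.
  int rc = madvise(p, bytes, MADV_DONTNEED);
  assert(rc == 0);
  (void)rc;  // Avoid an unused-variable warning in NDEBUG builds.
}
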
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index 42a9757..790f4d0 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -71,12 +71,16 @@
   return true;
 }
 
+template<ReadBarrierOption kReadBarrierOption>
 inline mirror::Object* IndirectReferenceTable::Get(IndirectRef iref) const {
   if (!GetChecked(iref)) {
     return kInvalidIndirectRefObject;
   }
-  mirror::Object* obj = table_[ExtractIndex(iref)];
+  mirror::Object** root = &table_[ExtractIndex(iref)];
+  mirror::Object* obj = *root;
   if (LIKELY(obj != kClearedJniWeakGlobal)) {
+    // The read barrier or VerifyObject won't handle kClearedJniWeakGlobal.
+    obj = ReadBarrier::BarrierForWeakRoot<mirror::Object, kReadBarrierOption>(root);
     VerifyObject(obj);
   }
   return obj;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 432481b..756ac96 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -266,11 +266,22 @@
 
 void IndirectReferenceTable::Dump(std::ostream& os) const {
   os << kind_ << " table dump:\n";
-  ReferenceTable::Table entries(table_, table_ + Capacity());
-  // Remove NULLs.
-  for (int i = entries.size() - 1; i >= 0; --i) {
-    if (entries[i] == NULL) {
-      entries.erase(entries.begin() + i);
+  ReferenceTable::Table entries;
+  for (size_t i = 0; i < Capacity(); ++i) {
+    mirror::Object** root = &table_[i];
+    mirror::Object* obj = *root;
+    if (UNLIKELY(obj == nullptr)) {
+      // Remove NULLs.
+    } else if (UNLIKELY(obj == kClearedJniWeakGlobal)) {
+      // ReferenceTable::Dump() will handle kClearedJniWeakGlobal
+      // while the read barrier won't.
+      entries.push_back(obj);
+    } else {
+      // We need a read barrier if this table holds weak globals. Since this
+      // is for debugging, where performance isn't top priority, we
+      // unconditionally enable the read barrier, which is conservative.
+      obj = ReadBarrier::BarrierForWeakRoot<mirror::Object, kWithReadBarrier>(root);
+      entries.push_back(obj);
     }
   }
   ReferenceTable::Dump(os, entries);
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 5015410..5b3ed68 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -263,14 +263,16 @@
    *
    * Returns kInvalidIndirectRefObject if iref is invalid.
    */
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       ALWAYS_INLINE;
 
   // Synchronized get which reads a reference, acquiring a lock if necessary.
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
                                   IndirectRef iref) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return Get(iref);
+    return Get<kReadBarrierOption>(iref);
   }
 
   /*
@@ -366,7 +368,9 @@
   std::unique_ptr<MemMap> table_mem_map_;
   // Mem map where we store the extended debugging info.
   std::unique_ptr<MemMap> slot_mem_map_;
-  /* bottom of the stack */
+  // Bottom of the stack. If this is a JNI weak global table, do not directly
+  // access the object references in it, as they are weak roots. Use
+  // Get(), which performs a read barrier.
   mirror::Object** table_;
   /* bit mask, ORed into all irefs */
   IndirectRefKind kind_;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index b51e1d5..a660183 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2441,7 +2441,9 @@
     switch (kind) {
     case kLocal: {
       ScopedObjectAccess soa(env);
-      if (static_cast<JNIEnvExt*>(env)->locals.Get(ref) != kInvalidIndirectRefObject) {
+      // The local refs don't need a read barrier.
+      if (static_cast<JNIEnvExt*>(env)->locals.Get<kWithoutReadBarrier>(ref) !=
+          kInvalidIndirectRefObject) {
         return JNILocalRefType;
       }
       return JNIInvalidRefType;
@@ -3118,7 +3120,9 @@
   while (UNLIKELY(!allow_new_weak_globals_)) {
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
-  return weak_globals_.Get(ref);
+  // The weak globals do need a read barrier as they are weak roots.
+  mirror::Object* obj = weak_globals_.Get<kWithReadBarrier>(ref);
+  return obj;
 }
 
 void JavaVMExt::DumpReferenceTables(std::ostream& os) {
@@ -3298,6 +3302,7 @@
 void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) {
   MutexLock mu(Thread::Current(), weak_globals_lock_);
   for (mirror::Object** entry : weak_globals_) {
+    // Since this is called by the GC, we don't need a read barrier.
     mirror::Object* obj = *entry;
     mirror::Object* new_obj = callback(obj, arg);
     if (new_obj == nullptr) {
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index 7e76e11..4072da4 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -129,6 +129,9 @@
   // TODO: Make the other members of this class also private.
   // JNI weak global references.
   Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // Since weak_globals_ contains weak roots, be careful not to
+  // access the object references in it directly. Use Get() with the
+  // read barrier enabled.
   IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
   bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
   ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 8f74dd7..a0a294a 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -206,8 +206,8 @@
     if (result != nullptr) {
       for (size_t i = 0; i < dex_file->NumClassDefs(); ++i) {
         const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
-        const char* descriptor = dex_file->GetClassDescriptor(class_def);
-        ScopedLocalRef<jstring> jdescriptor(env, env->NewStringUTF(descriptor));
+        std::string descriptor(DescriptorToDot(dex_file->GetClassDescriptor(class_def)));
+        ScopedLocalRef<jstring> jdescriptor(env, env->NewStringUTF(descriptor.c_str()));
         if (jdescriptor.get() == nullptr) {
           return nullptr;
         }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 23a49cb..361070c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -562,9 +562,20 @@
     GetInstrumentation()->ForceInterpretOnly();
   }
 
-  if (options->explicit_checks_ != (ParsedOptions::kExplicitSuspendCheck |
-        ParsedOptions::kExplicitNullCheck |
-        ParsedOptions::kExplicitStackOverflowCheck) || kEnableJavaStackTraceHandler) {
+  bool implicit_checks_supported = false;
+  switch (kRuntimeISA) {
+    case kArm:
+    case kThumb2:
+      implicit_checks_supported = true;
+      break;
+    default:
+      break;
+  }
+
+  if (implicit_checks_supported &&
+      (options->explicit_checks_ != (ParsedOptions::kExplicitSuspendCheck |
+          ParsedOptions::kExplicitNullCheck |
+          ParsedOptions::kExplicitStackOverflowCheck) || kEnableJavaStackTraceHandler)) {
     fault_manager.Init();
 
     // These need to be in a specific order.  The null pointer check handler must be
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index 190db60..bf3a15e 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -65,6 +65,9 @@
   iterator find(const K& k) { return map_.find(k); }
   const_iterator find(const K& k) const { return map_.find(k); }
 
+  iterator lower_bound(const K& k) { return map_.lower_bound(k); }
+  const_iterator lower_bound(const K& k) const { return map_.lower_bound(k); }
+
   size_type count(const K& k) const { return map_.count(k); }
 
   // Note that unlike std::map's operator[], this doesn't return a reference to the value.
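
The lower_bound() forwarders let callers combine lookup and insertion into one tree walk instead of a find() followed by a Put(). A hedged sketch of that idiom with a plain std::map, to which SafeMap forwards:

#include <cstdint>
#include <map>
#include <utility>

// Illustrative only: look up a key, or insert a new value with a position hint.
static uint16_t LookupOrAssign(std::map<uint64_t, uint16_t>* map, uint64_t key,
                               uint16_t next_value_name) {
  auto it = map->lower_bound(key);
  if (it != map->end() && it->first == key) {
    return it->second;  // Already present.
  }
  map->insert(it, std::make_pair(key, next_value_name));  // Hinted insert.
  return next_value_name;
}
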
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 55bec1e..1355aa1 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1253,7 +1253,8 @@
   // The "kinds" below are sorted by the frequency we expect to encounter them.
   if (kind == kLocal) {
     IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
-    result = locals.Get(ref);
+    // Local references do not need a read barrier.
+    result = locals.Get<kWithoutReadBarrier>(ref);
   } else if (kind == kHandleScopeOrInvalid) {
     // TODO: make stack indirect reference table lookup more efficient.
     // Check if this is a local reference in the handle scope.
@@ -1266,7 +1267,9 @@
     }
   } else if (kind == kGlobal) {
     JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
-    result = vm->globals.SynchronizedGet(const_cast<Thread*>(this), &vm->globals_lock, ref);
+    // Strong global references do not need a read barrier.
+    result = vm->globals.SynchronizedGet<kWithoutReadBarrier>(
+        const_cast<Thread*>(this), &vm->globals_lock, ref);
   } else {
     DCHECK_EQ(kind, kWeakGlobal);
     result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
diff --git a/runtime/thread.h b/runtime/thread.h
index 9a7cb48..08bbcae 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -121,7 +121,7 @@
   // of the stack (lowest memory).  The higher portion of the memory
   // is protected against reads and the lower is available for use while
   // throwing the StackOverflow exception.
-  static constexpr size_t kStackOverflowProtectedSize = 32 * KB;
+  static constexpr size_t kStackOverflowProtectedSize = 16 * KB;
   static constexpr size_t kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
     kStackOverflowReservedBytes;
 
diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk
new file mode 100644
index 0000000..cb1778d
--- /dev/null
+++ b/sigchainlib/Android.mk
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+include art/build/Android.common.mk
+
+include $(CLEAR_VARS)
+LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
+LOCAL_SRC_FILES := sigchain.cc
+LOCAL_MODULE:= libsigchain
+LOCAL_SHARED_LIBRARIES += liblog libdl
+LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+include $(BUILD_SHARED_LIBRARY)
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
new file mode 100644
index 0000000..26e7d31
--- /dev/null
+++ b/sigchainlib/sigchain.cc
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android/log.h>
+#include <dlfcn.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+namespace art {
+
+class SignalAction {
+ public:
+  SignalAction() : claimed_(false) {
+  }
+
+  // Claim the signal and keep the action specified.
+  void Claim(const struct sigaction& action) {
+    action_ = action;
+    claimed_ = true;
+  }
+
+  // Unclaim the signal and restore the old action.
+  void Unclaim(int signal) {
+    claimed_ = false;
+    sigaction(signal, &action_, NULL);        // Restore old action.
+  }
+
+  // Get the action associated with this signal.
+  const struct sigaction& GetAction() const {
+    return action_;
+  }
+
+  // Is the signal claimed?
+  bool IsClaimed() const {
+    return claimed_;
+  }
+
+  // Change the recorded action to that specified.
+  void SetAction(const struct sigaction& action) {
+    action_ = action;
+  }
+
+ private:
+  struct sigaction action_;     // Action to be performed.
+  bool claimed_;                // Whether signal is claimed or not.
+};
+
+// User's signal handlers
+static SignalAction user_sigactions[_NSIG];
+
+static void log(const char* format, ...) {
+  char buf[256];
+  va_list ap;
+  va_start(ap, format);
+  vsnprintf(buf, sizeof(buf), format, ap);
+  __android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf);
+  va_end(ap);
+}
+
+static void CheckSignalValid(int signal) {
+  if (signal <= 0 || signal >= _NSIG) {
+    log("Invalid signal %d", signal);
+    abort();
+  }
+}
+
+// Claim a signal chain for a particular signal.
+void ClaimSignalChain(int signal, struct sigaction* oldaction) {
+  CheckSignalValid(signal);
+  user_sigactions[signal].Claim(*oldaction);
+}
+
+void UnclaimSignalChain(int signal) {
+  CheckSignalValid(signal);
+
+  user_sigactions[signal].Unclaim(signal);
+}
+
+// Invoke the user's signal handler.
+void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
+  // Check the arguments.
+  CheckSignalValid(sig);
+
+  // The signal must have been claimed in order to get here.  Check it.
+  if (!user_sigactions[sig].IsClaimed()) {
+    abort();
+  }
+
+  const struct sigaction& action = user_sigactions[sig].GetAction();
+
+  // Only deliver the signal if the signal was not masked out.
+  if (sigismember(&action.sa_mask, sig)) {
+    return;
+  }
+  if ((action.sa_flags & SA_SIGINFO) == 0) {
+    if (action.sa_handler != NULL) {
+      action.sa_handler(sig);
+    }
+  } else {
+    if (action.sa_sigaction != NULL) {
+      action.sa_sigaction(sig, info, context);
+    }
+  }
+}
+
+extern "C" {
+// These functions are C linkage since they replace the functions in libc.
+
+int sigaction(int signal, const struct sigaction* new_action, struct sigaction* old_action) {
+  // If this signal has been claimed as a signal chain, record the user's
+  // action but don't pass it on to the kernel.
+  // Note that we check that the signal number is in range here.  An out of range signal
+  // number should behave exactly as the libc sigaction.
+  if (signal > 0 && signal < _NSIG && user_sigactions[signal].IsClaimed()) {
+    if (old_action != NULL) {
+      *old_action = user_sigactions[signal].GetAction();
+    }
+    if (new_action != NULL) {
+      user_sigactions[signal].SetAction(*new_action);
+    }
+    return 0;
+  }
+
+  // Will only get here if the signal chain has not been claimed.  We want
+  // to pass the sigaction on to the kernel via the real sigaction in libc.
+
+  void* linked_sigaction_sym = dlsym(RTLD_NEXT, "sigaction");
+  if (linked_sigaction_sym == nullptr) {
+    log("Unable to find next sigaction in signal chain");
+    abort();
+  }
+
+  typedef int (*SigAction)(int, const struct sigaction*, struct sigaction*);
+  SigAction linked_sigaction = reinterpret_cast<SigAction>(linked_sigaction_sym);
+  return linked_sigaction(signal, new_action, old_action);
+}
+
+
+int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
+  const sigset_t* new_set_ptr = bionic_new_set;
+  sigset_t tmpset;
+  if (bionic_new_set != NULL) {
+    tmpset = *bionic_new_set;
+
+    if (how == SIG_BLOCK) {
+      // Don't allow claimed signals in the mask.  If a signal chain has been claimed
+      // we can't allow the user to block that signal.
+      for (int i = 0; i < _NSIG; ++i) {
+        if (user_sigactions[i].IsClaimed() && sigismember(&tmpset, i)) {
+          sigdelset(&tmpset, i);
+        }
+      }
+    }
+    new_set_ptr = &tmpset;
+  }
+
+  void* linked_sigprocmask_sym = dlsym(RTLD_NEXT, "sigprocmask");
+  if (linked_sigprocmask_sym == nullptr) {
+    log("Unable to find next sigprocmask in signal chain");
+    abort();
+  }
+
+  typedef int (*SigProcMask)(int how, const sigset_t*, sigset_t*);
+  SigProcMask linked_sigprocmask = reinterpret_cast<SigProcMask>(linked_sigprocmask_sym);
+  return linked_sigprocmask(how, new_set_ptr, bionic_old_set);
+}
+}   // extern "C"
+}   // namespace art
+
diff --git a/sigchainlib/sigchain.h b/sigchainlib/sigchain.h
new file mode 100644
index 0000000..f6f2253
--- /dev/null
+++ b/sigchainlib/sigchain.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SIGCHAINLIB_SIGCHAIN_H_
+#define ART_SIGCHAINLIB_SIGCHAIN_H_
+
+#include <signal.h>
+namespace art {
+
+void ClaimSignalChain(int signal, struct sigaction* oldaction);
+void UnclaimSignalChain(int signal);
+void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context);
+
+}   // namespace art
+
+#endif  // ART_SIGCHAINLIB_SIGCHAIN_H_
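
Taken together with the fault_handler.cc changes above, the intended protocol is: install the runtime's SIGSEGV handler, claim the chain so later sigaction() calls from user code are recorded rather than installed, and forward unrecognized faults through InvokeUserSignalHandler(). A compressed, hedged sketch (TryHandleInRuntime is a hypothetical hook, error handling omitted):

#include <signal.h>
#include "sigchain.h"

// Hypothetical hook: returns true if the runtime recognized and fixed the fault.
static bool TryHandleInRuntime(int sig, siginfo_t* info, void* context);

// Illustrative only: how a runtime is expected to drive the sigchain API.
static struct sigaction old_action;

static void RuntimeFaultHandler(int sig, siginfo_t* info, void* context) {
  if (TryHandleInRuntime(sig, info, context)) {
    return;
  }
  // Not ours: deliver the fault to whatever handler user code had installed.
  art::InvokeUserSignalHandler(sig, info, context);
}

static void InstallRuntimeFaultHandler() {
  struct sigaction action = {};
  action.sa_sigaction = RuntimeFaultHandler;
  action.sa_flags = SA_SIGINFO | SA_ONSTACK;
  sigaction(SIGSEGV, &action, &old_action);
  // Record the previous handler and intercept future sigaction(SIGSEGV, ...) calls.
  art::ClaimSignalChain(SIGSEGV, &old_action);
}
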
diff --git a/test/071-dexfile/expected.txt b/test/071-dexfile/expected.txt
index b7af75e..d14c986 100644
--- a/test/071-dexfile/expected.txt
+++ b/test/071-dexfile/expected.txt
@@ -1,3 +1,4 @@
 Constructing another
 Got expected ULE
+Another
 done
diff --git a/test/071-dexfile/src/Main.java b/test/071-dexfile/src/Main.java
index 117a391..2f85790 100644
--- a/test/071-dexfile/src/Main.java
+++ b/test/071-dexfile/src/Main.java
@@ -17,6 +17,8 @@
 import java.io.File;
 import java.io.IOException;
 import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.util.Enumeration;
 
 /**
  * DexFile tests (Dalvik-specific).
@@ -24,47 +26,37 @@
 public class Main {
     private static final String CLASS_PATH = System.getenv("DEX_LOCATION") + "/071-dexfile-ex.jar";
     private static final String ODEX_DIR = System.getenv("DEX_LOCATION");
-    //private static final String ODEX_DIR = ".";
     private static final String ODEX_ALT = "/tmp";
     private static final String LIB_DIR = "/nowhere/nothing/";
 
+    private static final String getOdexDir() {
+        return new File(ODEX_DIR).isDirectory() ? ODEX_DIR : ODEX_ALT;
+    }
+
     /**
      * Prep the environment then run the test.
      */
-    public static void main(String[] args) {
-        Process p;
-        try {
-            /*
-             * Create a sub-process to see if the ProcessManager wait
-             * interferes with the dexopt invocation wait.
-             *
-             * /dev/random never hits EOF, so we're sure that we'll still
-             * be waiting for the process to complete.  On the device it
-             * stops pretty quickly (which means the child won't be
-             * spinning).
-             */
-            ProcessBuilder pb = new ProcessBuilder("cat", "/dev/random");
-            p = pb.start();
-        } catch (IOException ioe) {
-            System.err.println("cmd failed: " + ioe.getMessage());
-            p = null;
-        }
+    public static void main(String[] args) throws Exception {
+        /*
+         * Create a sub-process to see if the ProcessManager wait
+         * interferes with the dexopt invocation wait.
+         *
+         * /dev/random never hits EOF, so we're sure that we'll still
+         * be waiting for the process to complete.  On the device it
+         * stops pretty quickly (which means the child won't be
+         * spinning).
+         */
+        ProcessBuilder pb = new ProcessBuilder("cat", "/dev/random");
+        Process p = pb.start();
 
-        try {
-            testDexClassLoader();
-        } finally {
-            // shouldn't be necessary, but it's good to be tidy
-            if (p != null) {
-                p.destroy();
-            }
+        testDexClassLoader();
+        testDexFile();
 
-            // let the ProcessManager's daemon thread finish before we shut down
-            // (avoids the occasional segmentation fault)
-            try {
-                Thread.sleep(500);
-            } catch (Exception ex) {}
-        }
-
+        // shouldn't be necessary, but it's good to be tidy
+        p.destroy();
+        // let the ProcessManager's daemon thread finish before we shut down
+        // (avoids the occasional segmentation fault)
+        Thread.sleep(500);
         System.out.println("done");
     }
 
@@ -72,25 +64,10 @@
      * Create a class loader, explicitly specifying the source DEX and
      * the location for the optimized DEX.
      */
-    private static void testDexClassLoader() {
+    private static void testDexClassLoader() throws Exception {
         ClassLoader dexClassLoader = getDexClassLoader();
-
-        Class anotherClass;
-        try {
-            anotherClass = dexClassLoader.loadClass("Another");
-        } catch (ClassNotFoundException cnfe) {
-            throw new RuntimeException("Another?", cnfe);
-        }
-
-        Object another;
-        try {
-            another = anotherClass.newInstance();
-        } catch (IllegalAccessException ie) {
-            throw new RuntimeException("new another", ie);
-        } catch (InstantiationException ie) {
-            throw new RuntimeException("new another", ie);
-        }
-
+        Class Another = dexClassLoader.loadClass("Another");
+        Object another = Another.newInstance();
         // not expected to work; just exercises the call
         dexClassLoader.getResource("nonexistent");
     }
@@ -100,51 +77,30 @@
      * have visibility into dalvik.system.*, so we do this through
      * reflection.
      */
-    private static ClassLoader getDexClassLoader() {
-        String odexDir;
-
-        if (false) {
-            String androidData = System.getenv("ANDROID_DATA");
-            if (androidData == null) {
-                androidData = "";
-            }
-            odexDir = androidData + "/" + ODEX_DIR;
-        }
-
-        File test = new File(ODEX_DIR);
-        if (test.isDirectory()) {
-            odexDir = ODEX_DIR;
-        } else {
-            odexDir = ODEX_ALT;
-        }
-        if (false) {
-            System.out.println("Output dir is " + odexDir);
-        }
-
-        ClassLoader myLoader = Main.class.getClassLoader();
-        Class dclClass;
-        try {
-            dclClass = myLoader.loadClass("dalvik.system.DexClassLoader");
-        } catch (ClassNotFoundException cnfe) {
-            throw new RuntimeException("dalvik.system.DexClassLoader not found", cnfe);
-        }
-
-        Constructor ctor;
-        try {
-            ctor = dclClass.getConstructor(String.class, String.class,
-                String.class, ClassLoader.class);
-        } catch (NoSuchMethodException nsme) {
-            throw new RuntimeException("DCL ctor", nsme);
-        }
-
+    private static ClassLoader getDexClassLoader() throws Exception {
+        ClassLoader classLoader = Main.class.getClassLoader();
+        Class DexClassLoader = classLoader.loadClass("dalvik.system.DexClassLoader");
+        Constructor DexClassLoader_init = DexClassLoader.getConstructor(String.class,
+                                                                        String.class,
+                                                                        String.class,
+                                                                        ClassLoader.class);
         // create an instance, using the path we found
-        Object dclObj;
-        try {
-            dclObj = ctor.newInstance(CLASS_PATH, odexDir, LIB_DIR, myLoader);
-        } catch (Exception ex) {
-            throw new RuntimeException("DCL newInstance", ex);
-        }
+        return (ClassLoader) DexClassLoader_init.newInstance(CLASS_PATH, getOdexDir(), LIB_DIR, classLoader);
+    }
 
-        return (ClassLoader) dclObj;
+    private static void testDexFile() throws Exception {
+        ClassLoader classLoader = Main.class.getClassLoader();
+        Class DexFile = classLoader.loadClass("dalvik.system.DexFile");
+        Method DexFile_loadDex = DexFile.getMethod("loadDex",
+                                                   String.class,
+                                                   String.class,
+                                                   Integer.TYPE);
+        Method DexFile_entries = DexFile.getMethod("entries");
+        Object dexFile = DexFile_loadDex.invoke(null, CLASS_PATH, null, 0);
+        Enumeration<String> e = (Enumeration<String>) DexFile_entries.invoke(dexFile);
+        while (e.hasMoreElements()) {
+            String className = e.nextElement();
+            System.out.println(className);
+        }
     }
 }
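
The reflective testDexFile() added above is, in effect, the following direct use
of the Dalvik API: a minimal sketch assuming dalvik.system.DexFile were visible at
compile time (the test goes through reflection precisely because it is not on the
compile-time classpath); the class and helper names below are illustrative only.

import dalvik.system.DexFile;
import java.util.Enumeration;

class ListDexEntries {
    // Hypothetical helper mirroring testDexFile(): open a jar/dex file and
    // print the name of every class it contains.
    static void list(String dexPath) throws Exception {
        DexFile dexFile = DexFile.loadDex(dexPath, /* outputPathName= */ null, /* flags= */ 0);
        Enumeration<String> entries = dexFile.entries();
        while (entries.hasMoreElements()) {
            System.out.println(entries.nextElement());
        }
    }
}
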
diff --git a/test/Android.mk b/test/Android.mk
index 8caa033..c15259c 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -46,6 +46,7 @@
 	HelloWorld \
 	InterfaceTest \
 	JniTest \
+	SignalTest \
 	NativeAllocations \
 	ParallelGC \
 	ReferenceMap \
diff --git a/test/SignalTest/SignalTest.java b/test/SignalTest/SignalTest.java
new file mode 100644
index 0000000..7f15aea
--- /dev/null
+++ b/test/SignalTest/SignalTest.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class SignalTest {
+    private static native void initSignalTest();
+    private static native void terminateSignalTest();
+    private static native int testSignal();
+
+    private static void stackOverflow() {
+        stackOverflow();
+    }
+
+    public static void main(String[] args) {
+        System.loadLibrary("arttest");
+
+        System.out.println("init signal test");
+        initSignalTest();
+        try {
+            Object o = null;
+            int hash = o.hashCode();
+
+            // Should never get here.
+            System.out.println("hash: " + hash);
+            throw new AssertionError();
+        } catch (NullPointerException e) {
+            System.out.println("Caught NullPointerException");
+        }
+        try {
+            stackOverflow();
+
+            // Should never get here.
+            throw new AssertionError();
+        } catch (StackOverflowError e) {
+            System.out.println("Caught StackOverflowError");
+        }
+
+        // Test that a signal in native code works.  This will return
+        // the value 1234 if the signal is caught.
+        int x = testSignal();
+        if (x != 1234) {
+            throw new AssertionError();
+        }
+
+        terminateSignalTest();
+        System.out.println("Signal test OK");
+    }
+}
diff --git a/test/SignalTest/signaltest.cc b/test/SignalTest/signaltest.cc
new file mode 100644
index 0000000..b84e395
--- /dev/null
+++ b/test/SignalTest/signaltest.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <signal.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "jni.h"
+
+#ifdef __arm__
+#include <sys/ucontext.h>
+#endif
+
+static void signalhandler(int sig, siginfo_t* info, void* context) {
+  printf("signal caught\n");
+#ifdef __arm__
+  // On ARM we do a more exhaustive test to make sure the signal
+  // context is OK.
+  // We can do this because we know that the faulting instruction is
+  // 2 bytes long (a 16-bit Thumb instruction).  On other architectures
+  // this is more difficult.
+  // TODO: we could do this on other architectures too if necessary; it's just harder.
+  struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+  struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+  sc->arm_pc += 2;          // Skip instruction causing segv.
+#endif
+}
+
+static struct sigaction oldaction;
+
+extern "C" JNIEXPORT void JNICALL Java_SignalTest_initSignalTest(JNIEnv*, jclass) {
+  struct sigaction action;
+  action.sa_sigaction = signalhandler;
+  sigemptyset(&action.sa_mask);
+  action.sa_flags = SA_SIGINFO | SA_ONSTACK;
+#if !defined(__mips__)
+  action.sa_restorer = nullptr;
+#endif
+
+  sigaction(SIGSEGV, &action, &oldaction);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_SignalTest_terminateSignalTest(JNIEnv*, jclass) {
+  sigaction(SIGSEGV, &oldaction, nullptr);
+}
+
+// Keep 'p' as a global with external linkage so the compiler cannot prove it
+// is still null and optimize away (or fold) the faulting store in testSignal.
+char *p = nullptr;
+
+extern "C" JNIEXPORT jint JNICALL Java_SignalTest_testSignal(JNIEnv*, jclass) {
+#ifdef __arm__
+  // On ARM we cause a real SEGV.
+  *p = 'a';
+#else
+  // On other architectures we simulate SEGV.
+  kill(getpid(), SIGSEGV);
+#endif
+  return 1234;
+}
+
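
A note on the SA_ONSTACK flag used in initSignalTest above: it only takes effect
once an alternate signal stack has been registered for the thread, which the ART
runtime normally does itself; that is also what allows a SIGSEGV handler to run
when the fault happens with the normal stack nearly exhausted. A purely
illustrative sketch of registering such a stack by hand (names are hypothetical,
not part of the test):

#include <signal.h>

static char gAltStack[64 * 1024];  // Hypothetical buffer; any sufficiently large region works.

static void SetUpAlternateSignalStack() {
  stack_t ss;
  ss.ss_sp = gAltStack;
  ss.ss_size = sizeof(gAltStack);
  ss.ss_flags = 0;
  // Handlers installed with SA_ONSTACK now run on gAltStack instead of the
  // faulting thread's normal stack.
  sigaltstack(&ss, nullptr);
}
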
diff --git a/test/etc/host-run-test-jar b/test/etc/host-run-test-jar
index d95559f..5d6d16a 100755
--- a/test/etc/host-run-test-jar
+++ b/test/etc/host-run-test-jar
@@ -89,11 +89,6 @@
 
 msg "------------------------------"
 
-mkdir $DEX_LOCATION/dalvik-cache
-if [ $? -ne 0 ]; then
-    exit
-fi
-
 export ANDROID_PRINTF_LOG=brief
 if [ "$DEV_MODE" = "y" ]; then
     export ANDROID_LOG_TAGS='*:d'