Merge "Split .debug_info section to compilation units."
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index d9d09bc..7283710 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -67,6 +67,7 @@
 ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
 ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods
 ART_GTEST_oat_file_assistant_test_DEX_DEPS := Main MainStripped MultiDex Nested
+ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex
 ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY
 ART_GTEST_proxy_test_DEX_DEPS := Interfaces
 ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 33eacba..0fa4fa4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1380,7 +1380,7 @@
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
   if (instruction->HasUses()) {
     locations->SetOut(Location::SameAsFirstInput());
   }
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 966165b..53f1f3c 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -194,7 +194,8 @@
 
   int64_t value = CodeGenerator::GetInt64ValueOf(constant);
 
-  if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() || instr->IsCompare()) {
+  if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
+      instr->IsCompare() || instr->IsBoundsCheck()) {
     // Uses aliases of ADD/SUB instructions.
     return vixl::Assembler::IsImmAddSub(value);
   } else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
diff --git a/compiler/optimizing/primitive_type_propagation.cc b/compiler/optimizing/primitive_type_propagation.cc
index c20c8a1..af93438 100644
--- a/compiler/optimizing/primitive_type_propagation.cc
+++ b/compiler/optimizing/primitive_type_propagation.cc
@@ -65,6 +65,10 @@
         if (equivalent->IsPhi()) {
           equivalent->AsPhi()->SetLive();
           AddToWorklist(equivalent->AsPhi());
+        } else if (equivalent == input) {
+          // The input has changed its type. It can be an input of other phis,
+          // so we need to add its phi users to the worklist.
+          AddDependentInstructionsToWorklist(equivalent);
         }
       }
     }
@@ -117,10 +121,10 @@
   worklist_.Add(instruction);
 }
 
-void PrimitiveTypePropagation::AddDependentInstructionsToWorklist(HPhi* instruction) {
+void PrimitiveTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) {
   for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
     HPhi* phi = it.Current()->GetUser()->AsPhi();
-    if (phi != nullptr && phi->IsLive()) {
+    if (phi != nullptr && phi->IsLive() && phi->GetType() != instruction->GetType()) {
       AddToWorklist(phi);
     }
   }
diff --git a/compiler/optimizing/primitive_type_propagation.h b/compiler/optimizing/primitive_type_propagation.h
index 1374cbb..6d370ed 100644
--- a/compiler/optimizing/primitive_type_propagation.h
+++ b/compiler/optimizing/primitive_type_propagation.h
@@ -33,7 +33,7 @@
   void VisitBasicBlock(HBasicBlock* block);
   void ProcessWorklist();
   void AddToWorklist(HPhi* phi);
-  void AddDependentInstructionsToWorklist(HPhi* phi);
+  void AddDependentInstructionsToWorklist(HInstruction* instruction);
   bool UpdateType(HPhi* phi);
 
   HGraph* const graph_;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index eda7ec6..7e32b43 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1237,6 +1237,11 @@
       for (auto& class_path_file : class_path_files_) {
         class_path_files.push_back(class_path_file.get());
       }
+
+      // Store the classpath we have right now.
+      key_value_store_->Put(OatHeader::kClassPathKey,
+                            OatFile::EncodeDexFileDependencies(class_path_files));
+
       // Then the dex files we'll compile. Thus we'll resolve the class-path first.
       class_path_files.insert(class_path_files.end(), dex_files_.begin(), dex_files_.end());
 
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index 3b3e2c9..e59ff58 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -66,13 +66,12 @@
       return fix_cortex_a53_843419_;
   }
 
-  // TODO: Tune this on a per CPU basis. For now, we pessimistically assume
-  // that all ARM64 CPUs prefer explicit memory barriers over acquire-release.
-  //
-  // NOTE: This should not be the case! However we want to exercise the
-  // explicit memory barriers code paths in the Optimizing Compiler.
+  // NOTE: This flag can be tuned on a per-CPU basis. In general all ARMv8 CPUs
+  // should prefer the Acquire-Release semantics over the explicit DMBs when
+  // handling load/store-volatile. For a specific use case see the ARM64
+  // Optimizing backend.
   bool PreferAcquireRelease() const {
-    return false;
+    return true;
   }
 
   virtual ~Arm64InstructionSetFeatures() {}
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index 753107b..599f24e 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -31,7 +31,7 @@
   EXPECT_STREQ("smp,a53", arm64_features->GetFeatureString().c_str());
   EXPECT_EQ(arm64_features->AsBitmap(), 3U);
   // See the comments in instruction_set_features_arm64.h.
-  EXPECT_FALSE(arm64_features->AsArm64InstructionSetFeatures()->PreferAcquireRelease());
+  EXPECT_TRUE(arm64_features->AsArm64InstructionSetFeatures()->PreferAcquireRelease());
 }
 
 }  // namespace art
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index 812ed86..0f969b9 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -165,6 +165,23 @@
 }
 
 template <class Value>
+inline void Histogram<Value>::DumpBins(std::ostream& os) const {
+  DCHECK_GT(sample_size_, 0ull);
+  bool dumped_one = false;
+  for (size_t bin_idx = 0; bin_idx < frequency_.size(); ++bin_idx) {
+    if (frequency_[bin_idx] != 0U) {
+      if (dumped_one) {
+        // Prepend a comma if not the first bin.
+        os << ",";
+      } else {
+        dumped_one = true;
+      }
+      os << GetRange(bin_idx) << ":" << frequency_[bin_idx];
+    }
+  }
+}
+
+template <class Value>
 inline void Histogram<Value>::PrintConfidenceIntervals(std::ostream &os, double interval,
                                                        const CumulativeData& data) const {
   static constexpr size_t kFractionalDigits = 3;
@@ -249,4 +266,3 @@
 
 }  // namespace art
 #endif  // ART_RUNTIME_BASE_HISTOGRAM_INL_H_
-
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index 78f6e1c..c312fb2 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -61,6 +61,7 @@
   void PrintConfidenceIntervals(std::ostream& os, double interval,
                                 const CumulativeData& data) const;
   void PrintBins(std::ostream& os, const CumulativeData& data) const;
+  void DumpBins(std::ostream& os) const;
   Value GetRange(size_t bucket_idx) const;
   size_t GetBucketCount() const;
 
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 935c401..4e59217 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1259,94 +1259,124 @@
   return ClassPathEntry(nullptr, nullptr);
 }
 
-mirror::Class* ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
-                                                       Thread* self, const char* descriptor,
-                                                       size_t hash,
-                                                       Handle<mirror::ClassLoader> class_loader) {
-  // Can we special case for a well understood PathClassLoader with the BootClassLoader as parent?
-  if (class_loader->GetClass() !=
-      soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader) ||
-      class_loader->GetParent()->GetClass() !=
-          soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader)) {
-    return nullptr;
-  }
-  ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
-  // Check if this would be found in the parent boot class loader.
-  if (pair.second != nullptr) {
-    mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr);
-    if (klass != nullptr) {
-      // May return null if resolution on another thread fails.
-      klass = EnsureResolved(self, descriptor, klass);
+static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                              mirror::ClassLoader* class_loader)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  return class_loader == nullptr ||
+      class_loader->GetClass() ==
+          soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader);
+}
+
+bool ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                             Thread* self, const char* descriptor,
+                                             size_t hash,
+                                             Handle<mirror::ClassLoader> class_loader,
+                                             mirror::Class** result) {
+  // Termination case: boot class-loader.
+  if (IsBootClassLoader(soa, class_loader.Get())) {
+    // The boot class loader, search the boot class path.
+    ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
+    if (pair.second != nullptr) {
+      mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr);
+      if (klass != nullptr) {
+        *result = EnsureResolved(self, descriptor, klass);
+      } else {
+        *result = DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(),
+                              *pair.first, *pair.second);
+      }
+      if (*result == nullptr) {
+        CHECK(self->IsExceptionPending()) << descriptor;
+        self->ClearException();
+      }
     } else {
-      // May OOME.
-      klass = DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(), *pair.first,
-                          *pair.second);
+      *result = nullptr;
     }
-    if (klass == nullptr) {
-      CHECK(self->IsExceptionPending()) << descriptor;
-      self->ClearException();
-    }
-    return klass;
-  } else {
-    // Handle as if this is the child PathClassLoader.
-    // Handles as RegisterDexFile may allocate dex caches (and cause thread suspension).
-    StackHandleScope<3> hs(self);
-    // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
-    // We need to get the DexPathList and loop through it.
-    ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
-    ArtField* const dex_file_field =
-        soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
-    mirror::Object* dex_path_list =
-        soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
-        GetObject(class_loader.Get());
-    if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
-      // DexPathList has an array dexElements of Elements[] which each contain a dex file.
-      mirror::Object* dex_elements_obj =
-          soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
-          GetObject(dex_path_list);
-      // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
-      // at the mCookie which is a DexFile vector.
-      if (dex_elements_obj != nullptr) {
-        Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
-            hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
-        for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
-          mirror::Object* element = dex_elements->GetWithoutChecks(i);
-          if (element == nullptr) {
-            // Should never happen, fall back to java code to throw a NPE.
+    return true;
+  }
+
+  // Unsupported class-loader?
+  if (class_loader->GetClass() !=
+      soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader)) {
+    *result = nullptr;
+    return false;
+  }
+
+  // Handles as RegisterDexFile may allocate dex caches (and cause thread suspension).
+  StackHandleScope<4> hs(self);
+  Handle<mirror::ClassLoader> h_parent(hs.NewHandle(class_loader->GetParent()));
+  bool recursive_result = FindClassInPathClassLoader(soa, self, descriptor, hash, h_parent, result);
+
+  if (!recursive_result) {
+    // Something went wrong up the chain.
+    return false;
+  }
+
+  if (*result != nullptr) {
+    // Found the class up the chain.
+    return true;
+  }
+
+  // Handle this step.
+  // Handle as if this is the child PathClassLoader.
+  // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
+  // We need to get the DexPathList and loop through it.
+  ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+  ArtField* const dex_file_field =
+      soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+  mirror::Object* dex_path_list =
+      soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
+      GetObject(class_loader.Get());
+  if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
+    // DexPathList has an array dexElements of Elements[] which each contain a dex file.
+    mirror::Object* dex_elements_obj =
+        soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+        GetObject(dex_path_list);
+    // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
+    // at the mCookie which is a DexFile vector.
+    if (dex_elements_obj != nullptr) {
+      Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
+          hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
+      for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
+        mirror::Object* element = dex_elements->GetWithoutChecks(i);
+        if (element == nullptr) {
+          // Should never happen, fall back to java code to throw a NPE.
+          break;
+        }
+        mirror::Object* dex_file = dex_file_field->GetObject(element);
+        if (dex_file != nullptr) {
+          mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
+          if (long_array == nullptr) {
+            // This should never happen so log a warning.
+            LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
             break;
           }
-          mirror::Object* dex_file = dex_file_field->GetObject(element);
-          if (dex_file != nullptr) {
-            mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
-            if (long_array == nullptr) {
-              // This should never happen so log a warning.
-              LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
-              break;
-            }
-            int32_t long_array_size = long_array->GetLength();
-            for (int32_t j = 0; j < long_array_size; ++j) {
-              const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
-                  long_array->GetWithoutChecks(j)));
-              const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
-              if (dex_class_def != nullptr) {
-                RegisterDexFile(*cp_dex_file);
-                mirror::Class* klass = DefineClass(self, descriptor, hash, class_loader,
-                                                   *cp_dex_file, *dex_class_def);
-                if (klass == nullptr) {
-                  CHECK(self->IsExceptionPending()) << descriptor;
-                  self->ClearException();
-                  return nullptr;
-                }
-                return klass;
+          int32_t long_array_size = long_array->GetLength();
+          for (int32_t j = 0; j < long_array_size; ++j) {
+            const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
+                long_array->GetWithoutChecks(j)));
+            const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
+            if (dex_class_def != nullptr) {
+              RegisterDexFile(*cp_dex_file);
+              mirror::Class* klass = DefineClass(self, descriptor, hash, class_loader,
+                                                 *cp_dex_file, *dex_class_def);
+              if (klass == nullptr) {
+                CHECK(self->IsExceptionPending()) << descriptor;
+                self->ClearException();
+                // TODO: Is it really right to return here, and not check the other dex files?
+                return true;
               }
+              *result = klass;
+              return true;
             }
           }
         }
       }
     }
     self->AssertNoPendingException();
-    return nullptr;
   }
+
+  // Result is still null from the parent call, no need to set it again...
+  return true;
 }
 
 mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
@@ -1384,10 +1414,18 @@
     }
   } else {
     ScopedObjectAccessUnchecked soa(self);
-    mirror::Class* cp_klass = FindClassInPathClassLoader(soa, self, descriptor, hash,
-                                                         class_loader);
-    if (cp_klass != nullptr) {
-      return cp_klass;
+    mirror::Class* cp_klass;
+    if (FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, &cp_klass)) {
+      // The chain was understood. So the value in cp_klass is either the class we were looking
+      // for, or not found.
+      if (cp_klass != nullptr) {
+        return cp_klass;
+      }
+      // TODO: We handle the boot classpath loader in FindClassInPathClassLoader. Try to unify this
+      //       and the branch above. TODO: throw the right exception here.
+
+      // We'll let the Java-side rediscover all this and throw the exception with the right stack
+      // trace.
     }
 
     if (Runtime::Current()->IsAotCompiler()) {
@@ -1927,7 +1965,6 @@
   ArtField* sfields = num_sfields != 0 ? AllocArtFieldArray(self, num_sfields) : nullptr;
   for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
     CHECK_LT(i, num_sfields);
-    self->AllowThreadSuspension();
     LoadField(it, klass, &sfields[i]);
   }
   klass->SetSFields(sfields);
@@ -1938,13 +1975,14 @@
   ArtField* ifields = num_ifields != 0 ? AllocArtFieldArray(self, num_ifields) : nullptr;
   for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) {
     CHECK_LT(i, num_ifields);
-    self->AllowThreadSuspension();
     LoadField(it, klass, &ifields[i]);
   }
   klass->SetIFields(ifields);
   klass->SetNumInstanceFields(num_ifields);
   DCHECK_EQ(klass->NumInstanceFields(), num_ifields);
-
+  // Note: We cannot have thread suspension until the field arrays are setup or else
+  // Class::VisitFieldRoots may miss some fields.
+  self->AllowThreadSuspension();
   // Load methods.
   if (it.NumDirectMethods() != 0) {
     // TODO: append direct methods to class object
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 2427462..68624b0 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -117,11 +117,15 @@
                            Handle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Find a class in the path class loader, loading it if necessary without using JNI. Hash
-  // function is supposed to be ComputeModifiedUtf8Hash(descriptor).
-  mirror::Class* FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
-                                            Thread* self, const char* descriptor, size_t hash,
-                                            Handle<mirror::ClassLoader> class_loader)
+  // Finds a class in the path class loader, loading it if necessary without using JNI. Hash
+  // function is supposed to be ComputeModifiedUtf8Hash(descriptor). Returns true if the
+  // class-loader chain could be handled, false otherwise, i.e., a non-supported class-loader
+  // was encountered while walking the parent chain (currently only BootClassLoader and
+  // PathClassLoader are supported).
+  bool FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                  Thread* self, const char* descriptor, size_t hash,
+                                  Handle<mirror::ClassLoader> class_loader,
+                                  mirror::Class** result)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Finds a class by its descriptor using the "system" class loader, ie by searching the
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index ed2e295..bb8d876 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -490,29 +490,21 @@
 
 class VerifyRootVisitor : public SingleRootVisitor {
  public:
-  explicit VerifyRootVisitor(MarkSweep* collector) : collector_(collector) { }
-
   void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    collector_->VerifyRoot(root, info);
-  }
-
- private:
-  MarkSweep* const collector_;
-};
-
-void MarkSweep::VerifyRoot(const Object* root, const RootInfo& root_info) {
-  // See if the root is on any space bitmap.
-  if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
-    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-    if (large_object_space != nullptr && !large_object_space->Contains(root)) {
-      LOG(ERROR) << "Found invalid root: " << root << " " << root_info;
+    // See if the root is on any space bitmap.
+    auto* heap = Runtime::Current()->GetHeap();
+    if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
+      space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
+      if (large_object_space != nullptr && !large_object_space->Contains(root)) {
+        LOG(ERROR) << "Found invalid root: " << root << " " << info;
+      }
     }
   }
-}
+};
 
 void MarkSweep::VerifyRoots() {
-  VerifyRootVisitor visitor(this);
+  VerifyRootVisitor visitor;
   Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
 }
 
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 31cea17..fad3403 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -248,9 +248,6 @@
   // whether or not we care about pauses.
   size_t GetThreadCount(bool paused) const;
 
-  void VerifyRoot(const mirror::Object* root, const RootInfo& root_info)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
   // Push a single reference on a mark stack.
   void PushOnMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 83da5a8..beaf067 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -195,7 +195,17 @@
       last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
       pending_collector_transition_(nullptr),
       pending_heap_trim_(nullptr),
-      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
+      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
+      running_collection_is_blocking_(false),
+      blocking_gc_count_(0U),
+      blocking_gc_time_(0U),
+      last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
+          (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
+      gc_count_last_window_(0U),
+      blocking_gc_count_last_window_(0U),
+      gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
+      blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
+                                        kGcCountRateMaxBucketCount) {
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
@@ -926,7 +936,6 @@
     total_duration += collector->GetCumulativeTimings().GetTotalNs();
     total_paused_time += collector->GetTotalPausedTimeNs();
     collector->DumpPerformanceInfo(os);
-    collector->ResetMeasurements();
   }
   uint64_t allocation_time =
       static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
@@ -940,8 +949,8 @@
   }
   uint64_t total_objects_allocated = GetObjectsAllocatedEver();
   os << "Total number of allocations " << total_objects_allocated << "\n";
-  uint64_t total_bytes_allocated = GetBytesAllocatedEver();
-  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
+  os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
+  os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
   os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
   os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
   os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
@@ -956,10 +965,68 @@
     os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
   }
   os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
-  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_);
+  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
+  os << "Total GC count: " << GetGcCount() << "\n";
+  os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
+  os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
+  os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
+
+  {
+    MutexLock mu(Thread::Current(), *gc_complete_lock_);
+    if (gc_count_rate_histogram_.SampleSize() > 0U) {
+      os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
+      gc_count_rate_histogram_.DumpBins(os);
+      os << "\n";
+    }
+    if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
+      os << "Histogram of blocking GC count per "
+         << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
+      blocking_gc_count_rate_histogram_.DumpBins(os);
+      os << "\n";
+    }
+  }
+
   BaseMutex::DumpAll(os);
 }
 
+uint64_t Heap::GetGcCount() const {
+  uint64_t gc_count = 0U;
+  for (auto& collector : garbage_collectors_) {
+    gc_count += collector->GetCumulativeTimings().GetIterations();
+  }
+  return gc_count;
+}
+
+uint64_t Heap::GetGcTime() const {
+  uint64_t gc_time = 0U;
+  for (auto& collector : garbage_collectors_) {
+    gc_time += collector->GetCumulativeTimings().GetTotalNs();
+  }
+  return gc_time;
+}
+
+uint64_t Heap::GetBlockingGcCount() const {
+  return blocking_gc_count_;
+}
+
+uint64_t Heap::GetBlockingGcTime() const {
+  return blocking_gc_time_;
+}
+
+void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
+  MutexLock mu(Thread::Current(), *gc_complete_lock_);
+  if (gc_count_rate_histogram_.SampleSize() > 0U) {
+    gc_count_rate_histogram_.DumpBins(os);
+  }
+}
+
+void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
+  MutexLock mu(Thread::Current(), *gc_complete_lock_);
+  if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
+    blocking_gc_count_rate_histogram_.DumpBins(os);
+  }
+}
+
 Heap::~Heap() {
   VLOG(heap) << "Starting ~Heap()";
   STLDeleteElements(&garbage_collectors_);
@@ -2274,7 +2341,6 @@
     }
     collector_type_running_ = collector_type_;
   }
-
   if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
     ++runtime->GetStats()->gc_for_alloc_count;
     ++self->GetStats()->gc_for_alloc_count;
@@ -2389,11 +2455,55 @@
   collector_type_running_ = kCollectorTypeNone;
   if (gc_type != collector::kGcTypeNone) {
     last_gc_type_ = gc_type;
+
+    // Update stats.
+    ++gc_count_last_window_;
+    if (running_collection_is_blocking_) {
+      // If the currently running collection was a blocking one,
+      // increment the counters and reset the flag.
+      ++blocking_gc_count_;
+      blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
+      ++blocking_gc_count_last_window_;
+    }
+    // Update the gc count rate histograms if due.
+    UpdateGcCountRateHistograms();
   }
+  // Reset.
+  running_collection_is_blocking_ = false;
   // Wake anyone who may have been waiting for the GC to complete.
   gc_complete_cond_->Broadcast(self);
 }
 
+void Heap::UpdateGcCountRateHistograms() {
+  // Invariant: if the time since the last update spans more than one
+  // window, all the GC runs (if > 0) must have happened in the first
+  // window because otherwise the update must have already taken place
+  // at an earlier GC run. So, we report the non-first windows with
+  // zero counts to the histograms.
+  DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
+  uint64_t now = NanoTime();
+  DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
+  uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
+  uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
+  if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
+    // Record the first window.
+    gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1);  // Exclude the current run.
+    blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
+        blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
+    // Record the other windows (with zero counts).
+    for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
+      gc_count_rate_histogram_.AddValue(0);
+      blocking_gc_count_rate_histogram_.AddValue(0);
+    }
+    // Update the last update time and reset the counters.
+    last_update_time_gc_count_rate_histograms_ =
+        (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
+    gc_count_last_window_ = 1;  // Include the current run.
+    blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
+  }
+  DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
+}
+
 class RootMatchesObjectVisitor : public SingleRootVisitor {
  public:
   explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
@@ -3003,6 +3113,14 @@
   collector::GcType last_gc_type = collector::kGcTypeNone;
   uint64_t wait_start = NanoTime();
   while (collector_type_running_ != kCollectorTypeNone) {
+    if (self != task_processor_->GetRunningThread()) {
+      // The current thread is about to wait for a currently running
+      // collection to finish. If the waiting thread is not the heap
+      // task daemon thread, the currently running collection is
+      // considered as a blocking GC.
+      running_collection_is_blocking_ = true;
+      VLOG(gc) << "Waiting for a blocking GC " << cause;
+    }
     ATRACE_BEGIN("GC: Wait For Completion");
     // We must wait, change thread state then sleep on gc_complete_cond_;
     gc_complete_cond_->Wait(self);
@@ -3015,6 +3133,13 @@
     LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
         << " for cause " << cause;
   }
+  if (self != task_processor_->GetRunningThread()) {
+    // The current thread is about to run a collection. If the thread
+    // is not the heap task daemon thread, it's considered as a
+    // blocking GC (i.e., blocking itself).
+    running_collection_is_blocking_ = true;
+    VLOG(gc) << "Starting a blocking GC " << cause;
+  }
   return last_gc_type;
 }
 
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 603cbfd..2f62798 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -672,6 +672,14 @@
     min_interval_homogeneous_space_compaction_by_oom_ = interval;
   }
 
+  // Helpers for android.os.Debug.getRuntimeStat().
+  uint64_t GetGcCount() const;
+  uint64_t GetGcTime() const;
+  uint64_t GetBlockingGcCount() const;
+  uint64_t GetBlockingGcTime() const;
+  void DumpGcCountRateHistogram(std::ostream& os) const;
+  void DumpBlockingGcCountRateHistogram(std::ostream& os) const;
+
  private:
   class ConcurrentGCTask;
   class CollectorTransitionTask;
@@ -873,6 +881,8 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
 
+  void UpdateGcCountRateHistograms() EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
+
   // All-known continuous spaces, where objects lie within fixed bounds.
   std::vector<space::ContinuousSpace*> continuous_spaces_;
 
@@ -1156,6 +1166,28 @@
   // Whether or not we use homogeneous space compaction to avoid OOM errors.
   bool use_homogeneous_space_compaction_for_oom_;
 
+  // True if the currently running collection has made some thread wait.
+  bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
+  // The number of blocking GC runs.
+  uint64_t blocking_gc_count_;
+  // The total duration of blocking GC runs.
+  uint64_t blocking_gc_time_;
+  // The duration of the window for the GC count rate histograms.
+  static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
+  // The last time when the GC count rate histograms were updated.
+  // This is rounded by kGcCountRateHistogramWindowDuration (a multiple of 10s).
+  uint64_t last_update_time_gc_count_rate_histograms_;
+  // The running count of GC runs in the last window.
+  uint64_t gc_count_last_window_;
+  // The running count of blocking GC runs in the last window.
+  uint64_t blocking_gc_count_last_window_;
+  // The maximum number of buckets in the GC count rate histograms.
+  static constexpr size_t kGcCountRateMaxBucketCount = 200;
+  // The histogram of the number of GC invocations per window duration.
+  Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
+  // The histogram of the number of blocking GC invocations per window duration.
+  Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
+
   friend class CollectorTransitionTask;
   friend class collector::GarbageCollector;
   friend class collector::MarkCompact;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 5c8e4b9..a4a9d80 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -37,6 +37,15 @@
   explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
   }
 
+  ~ValgrindLargeObjectMapSpace() OVERRIDE {
+    // Keep valgrind happy if there are any large objects such as dex cache arrays which aren't
+    // freed since they are held live by the class linker.
+    MutexLock mu(Thread::Current(), lock_);
+    for (auto& m : mem_maps_) {
+      delete m.second;
+    }
+  }
+
   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated)
       OVERRIDE {
diff --git a/runtime/gc/task_processor.cc b/runtime/gc/task_processor.cc
index 2ca4b3f..ef34c68 100644
--- a/runtime/gc/task_processor.cc
+++ b/runtime/gc/task_processor.cc
@@ -22,7 +22,8 @@
 namespace gc {
 
 TaskProcessor::TaskProcessor()
-    : lock_(new Mutex("Task processor lock", kReferenceProcessorLock)), is_running_(false) {
+    : lock_(new Mutex("Task processor lock", kReferenceProcessorLock)), is_running_(false),
+      running_thread_(nullptr) {
   // Piggyback off the reference processor lock level.
   cond_.reset(new ConditionVariable("Task processor condition", *lock_));
 }
@@ -96,15 +97,22 @@
   return is_running_;
 }
 
+Thread* TaskProcessor::GetRunningThread() const {
+  MutexLock mu(Thread::Current(), *lock_);
+  return running_thread_;
+}
+
 void TaskProcessor::Stop(Thread* self) {
   MutexLock mu(self, *lock_);
   is_running_ = false;
+  running_thread_ = nullptr;
   cond_->Broadcast(self);
 }
 
 void TaskProcessor::Start(Thread* self) {
   MutexLock mu(self, *lock_);
   is_running_ = true;
+  running_thread_ = self;
 }
 
 void TaskProcessor::RunAllTasks(Thread* self) {
diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h
index 765f035..67e3a54 100644
--- a/runtime/gc/task_processor.h
+++ b/runtime/gc/task_processor.h
@@ -63,6 +63,7 @@
   bool IsRunning() const LOCKS_EXCLUDED(lock_);
   void UpdateTargetRunTime(Thread* self, HeapTask* target_time, uint64_t new_target_time)
       LOCKS_EXCLUDED(lock_);
+  Thread* GetRunningThread() const LOCKS_EXCLUDED(lock_);
 
  private:
   class CompareByTargetRunTime {
@@ -76,6 +77,7 @@
   bool is_running_ GUARDED_BY(lock_);
   std::unique_ptr<ConditionVariable> cond_ GUARDED_BY(lock_);
   std::multiset<HeapTask*, CompareByTargetRunTime> tasks_ GUARDED_BY(lock_);
+  Thread* running_thread_ GUARDED_BY(lock_);
 };
 
 }  // namespace gc
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 0d3c93b..b67e9c2 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -50,7 +50,7 @@
 };
 std::ostream& operator<<(std::ostream& os, const RootType& root_type);
 
-// Only used by hprof. tid and root_type are only used by hprof.
+// Only used by hprof. thread_id_ and type_ are only used by hprof.
 class RootInfo {
  public:
   // Thread id 0 is for non thread roots.
@@ -85,12 +85,13 @@
  public:
   virtual ~RootVisitor() { }
 
-  // Single root versions, not overridable.
+  // Single root version, not overridable.
   ALWAYS_INLINE void VisitRoot(mirror::Object** roots, const RootInfo& info)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     VisitRoots(&roots, 1, info);
   }
 
+  // Single root version, not overridable.
   ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** roots, const RootInfo& info)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (*roots != nullptr) {
@@ -161,6 +162,9 @@
   ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
+  // Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
+  // CompressedReference<mirror::Object> here since it violates strict aliasing requirements to
+  // cast CompressedReference<MirrorType>* to CompressedReference<mirror::Object>*.
   mutable mirror::CompressedReference<mirror::Object> root_;
 
   template <size_t kBufferSize> friend class BufferedRootVisitor;
diff --git a/runtime/handle.h b/runtime/handle.h
index 3ebb2d5..d94d875 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -70,8 +70,8 @@
     return reinterpret_cast<jobject>(reference_);
   }
 
-  StackReference<mirror::Object>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      ALWAYS_INLINE {
+  ALWAYS_INLINE StackReference<mirror::Object>* GetReference()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return reference_;
   }
 
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 5012965..d6f9682 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -258,7 +258,10 @@
 void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
   BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
   for (auto ref : *this) {
-    root_visitor.VisitRootIfNonNull(*ref);
+    if (!ref->IsNull()) {
+      root_visitor.VisitRoot(*ref);
+      DCHECK(!ref->IsNull());
+    }
   }
 }
 
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 4fb634b..fbb07e8 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -39,6 +39,7 @@
 #include "thread.h"
 #include "transaction.h"
 #include "well_known_classes.h"
+#include "zip_archive.h"
 
 namespace art {
 namespace interpreter {
@@ -123,7 +124,12 @@
 static void UnstartedClassForNameLong(
     Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+  mirror::Object* param = shadow_frame->GetVRegReference(arg_offset);
+  if (param == nullptr) {
+    AbortTransactionOrFail(self, "Null-pointer in Class.forName.");
+    return;
+  }
+  mirror::String* class_name = param->AsString();
   bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
   mirror::ClassLoader* class_loader =
       down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
@@ -636,6 +642,100 @@
   }
 }
 
+// This allows reading security.properties in an unstarted runtime and initialize Security.
+static void UnstartedSecurityGetSecurityPropertiesReader(
+    Thread* self,
+    ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
+    JValue* result,
+    size_t arg_offset ATTRIBUTE_UNUSED)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  Runtime* runtime = Runtime::Current();
+  const std::vector<const DexFile*>& path = runtime->GetClassLinker()->GetBootClassPath();
+  std::string canonical(DexFile::GetDexCanonicalLocation(path[0]->GetLocation().c_str()));
+  mirror::String* string_data;
+
+  // Use a block to enclose the I/O and MemMap code so buffers are released early.
+  {
+    std::string error_msg;
+    std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(canonical.c_str(), &error_msg));
+    if (zip_archive.get() == nullptr) {
+      AbortTransactionOrFail(self, "Could not open zip file %s: %s", canonical.c_str(),
+                             error_msg.c_str());
+      return;
+    }
+    std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find("java/security/security.properties",
+                                                          &error_msg));
+    if (zip_entry.get() == nullptr) {
+      AbortTransactionOrFail(self, "Could not find security.properties file in %s: %s",
+                             canonical.c_str(), error_msg.c_str());
+      return;
+    }
+    std::unique_ptr<MemMap> map(zip_entry->ExtractToMemMap(canonical.c_str(),
+                                                           "java/security/security.properties",
+                                                           &error_msg));
+    if (map.get() == nullptr) {
+      AbortTransactionOrFail(self, "Could not unzip security.properties file in %s: %s",
+                             canonical.c_str(), error_msg.c_str());
+      return;
+    }
+
+    uint32_t length = zip_entry->GetUncompressedLength();
+    std::unique_ptr<char[]> tmp(new char[length + 1]);
+    memcpy(tmp.get(), map->Begin(), length);
+    tmp.get()[length] = 0;  // null terminator
+
+    string_data = mirror::String::AllocFromModifiedUtf8(self, tmp.get());
+  }
+
+  if (string_data == nullptr) {
+    AbortTransactionOrFail(self, "Could not create string from file content of %s",
+                           canonical.c_str());
+    return;
+  }
+
+  // Create a StringReader.
+  StackHandleScope<3> hs(self);
+  Handle<mirror::String> h_string(hs.NewHandle(string_data));
+
+  Handle<mirror::Class> h_class(hs.NewHandle(
+      runtime->GetClassLinker()->FindClass(self,
+                                           "Ljava/io/StringReader;",
+                                           NullHandle<mirror::ClassLoader>())));
+  if (h_class.Get() == nullptr) {
+    AbortTransactionOrFail(self, "Could not find StringReader class");
+    return;
+  }
+
+  if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+    AbortTransactionOrFail(self, "Could not initialize StringReader class");
+    return;
+  }
+
+  Handle<mirror::Object> h_obj(hs.NewHandle(h_class->AllocObject(self)));
+  if (h_obj.Get() == nullptr) {
+    AbortTransactionOrFail(self, "Could not allocate StringReader object");
+    return;
+  }
+
+  mirror::ArtMethod* constructor = h_class->FindDeclaredDirectMethod("<init>",
+                                                                     "(Ljava/lang/String;)V");
+  if (constructor == nullptr) {
+    AbortTransactionOrFail(self, "Could not find StringReader constructor");
+    return;
+  }
+
+  uint32_t args[1];
+  args[0] = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(h_string.Get()));
+  EnterInterpreterFromInvoke(self, constructor, h_obj.Get(), args, nullptr);
+
+  if (self->IsExceptionPending()) {
+    AbortTransactionOrFail(self, "Could not run StringReader constructor");
+    return;
+  }
+
+  result->SetL(h_obj.Get());
+}
+
 static void UnstartedJNIVMRuntimeNewUnpaddedArray(Thread* self,
                                                   mirror::ArtMethod* method ATTRIBUTE_UNUSED,
                                                   mirror::Object* receiver ATTRIBUTE_UNUSED,
@@ -958,6 +1058,8 @@
           &UnstartedMemoryPeekEntry },
       { "void libcore.io.Memory.peekByteArray(long, byte[], int, int)",
           &UnstartedMemoryPeekArrayEntry },
+      { "java.io.Reader java.security.Security.getSecurityPropertiesReader()",
+          &UnstartedSecurityGetSecurityPropertiesReader },
   };
 
   for (auto& def : defs) {
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index a503b17..8dffee6 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -51,22 +51,24 @@
  * Fundamental types.
  *
  * ObjectId and RefTypeId must be the same size.
+ * It's OK to change MethodId and FieldId sizes as long as the size is <= 8 bytes.
+ * Note that ArtFields are 64 bit pointers on 64 bit targets. So this one must remain 8 bytes.
  */
-typedef uint32_t FieldId;     /* static or instance field */
-typedef uint32_t MethodId;    /* any kind of method, including constructors */
+typedef uint64_t FieldId;     /* static or instance field */
+typedef uint64_t MethodId;    /* any kind of method, including constructors */
 typedef uint64_t ObjectId;    /* any object (threadID, stringID, arrayID, etc) */
 typedef uint64_t RefTypeId;   /* like ObjectID, but unique for Class objects */
 typedef uint64_t FrameId;     /* short-lived stack frame ID */
 
 ObjectId ReadObjectId(const uint8_t** pBuf);
 
-static inline void SetFieldId(uint8_t* buf, FieldId val) { return Set4BE(buf, val); }
-static inline void SetMethodId(uint8_t* buf, MethodId val) { return Set4BE(buf, val); }
+static inline void SetFieldId(uint8_t* buf, FieldId val) { return Set8BE(buf, val); }
+static inline void SetMethodId(uint8_t* buf, MethodId val) { return Set8BE(buf, val); }
 static inline void SetObjectId(uint8_t* buf, ObjectId val) { return Set8BE(buf, val); }
 static inline void SetRefTypeId(uint8_t* buf, RefTypeId val) { return Set8BE(buf, val); }
 static inline void SetFrameId(uint8_t* buf, FrameId val) { return Set8BE(buf, val); }
-static inline void expandBufAddFieldId(ExpandBuf* pReply, FieldId id) { expandBufAdd4BE(pReply, id); }
-static inline void expandBufAddMethodId(ExpandBuf* pReply, MethodId id) { expandBufAdd4BE(pReply, id); }
+static inline void expandBufAddFieldId(ExpandBuf* pReply, FieldId id) { expandBufAdd8BE(pReply, id); }
+static inline void expandBufAddMethodId(ExpandBuf* pReply, MethodId id) { expandBufAdd8BE(pReply, id); }
 static inline void expandBufAddObjectId(ExpandBuf* pReply, ObjectId id) { expandBufAdd8BE(pReply, id); }
 static inline void expandBufAddRefTypeId(ExpandBuf* pReply, RefTypeId id) { expandBufAdd8BE(pReply, id); }
 static inline void expandBufAddFrameId(ExpandBuf* pReply, FrameId id) { expandBufAdd8BE(pReply, id); }
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index ccf8bff..1ec800f 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -957,7 +957,7 @@
     VLOG(jdwp) << StringPrintf("  this=%#" PRIx64, instance_id);
     VLOG(jdwp) << StringPrintf("  type=%#" PRIx64, field_type_id) << " "
         << Dbg::GetClassName(field_id);
-    VLOG(jdwp) << StringPrintf("  field=%#" PRIx32, field_id) << " "
+    VLOG(jdwp) << StringPrintf("  field=%#" PRIx64, field_id) << " "
         << Dbg::GetFieldName(field_id);
     VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
   }
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index d0ca214..2457f14 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -38,11 +38,11 @@
 namespace JDWP {
 
 std::string DescribeField(const FieldId& field_id) {
-  return StringPrintf("%#x (%s)", field_id, Dbg::GetFieldName(field_id).c_str());
+  return StringPrintf("%#" PRIx64 " (%s)", field_id, Dbg::GetFieldName(field_id).c_str());
 }
 
 std::string DescribeMethod(const MethodId& method_id) {
-  return StringPrintf("%#x (%s)", method_id, Dbg::GetMethodName(method_id).c_str());
+  return StringPrintf("%#" PRIx64 " (%s)", method_id, Dbg::GetMethodName(method_id).c_str());
 }
 
 std::string DescribeRefTypeId(const RefTypeId& ref_type_id) {
@@ -101,8 +101,8 @@
 
   VLOG(jdwp) << StringPrintf("    --> thread_id=%#" PRIx64 " object_id=%#" PRIx64,
                              thread_id, object_id);
-  VLOG(jdwp) << StringPrintf("        class_id=%#" PRIx64 " method_id=%x %s.%s", class_id,
-                             method_id, Dbg::GetClassName(class_id).c_str(),
+  VLOG(jdwp) << StringPrintf("        class_id=%#" PRIx64 " method_id=%#" PRIx64 " %s.%s",
+                             class_id, method_id, Dbg::GetClassName(class_id).c_str(),
                              Dbg::GetMethodName(method_id).c_str());
   VLOG(jdwp) << StringPrintf("        %d args:", arg_count);
 
@@ -256,8 +256,6 @@
 
 /*
  * Respond with the sizes of the basic debugger types.
- *
- * All IDs are 8 bytes.
  */
 static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/jdwp/jdwp_request.cc b/runtime/jdwp/jdwp_request.cc
index 7b15d6d..18f40a1 100644
--- a/runtime/jdwp/jdwp_request.cc
+++ b/runtime/jdwp/jdwp_request.cc
@@ -87,13 +87,13 @@
 }
 
 FieldId Request::ReadFieldId() {
-  FieldId id = Read4BE();
+  FieldId id = Read8BE();
   VLOG(jdwp) << "    field id " << DescribeField(id);
   return id;
 }
 
 MethodId Request::ReadMethodId() {
-  MethodId id = Read4BE();
+  MethodId id = Read8BE();
   VLOG(jdwp) << "    method id " << DescribeMethod(id);
   return id;
 }
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index f4656ec..aaa66f9 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -809,18 +809,24 @@
 template<class Visitor>
 void mirror::Class::VisitFieldRoots(Visitor& visitor) {
   ArtField* const sfields = GetSFieldsUnchecked();
-  for (size_t i = 0, count = NumStaticFields(); i < count; ++i) {
-    if (kIsDebugBuild && GetStatus() != kStatusRetired) {
-      CHECK_EQ(sfields[i].GetDeclaringClass(), this);
+  // Since we visit class roots while we may be writing these fields, check against null.
+  // TODO: Is this safe for concurrent compaction?
+  if (sfields != nullptr) {
+    for (size_t i = 0, count = NumStaticFields(); i < count; ++i) {
+      if (kIsDebugBuild && IsResolved()) {
+        CHECK_EQ(sfields[i].GetDeclaringClass(), this) << GetStatus();
+      }
+      visitor.VisitRoot(sfields[i].DeclaringClassRoot().AddressWithoutBarrier());
     }
-    visitor.VisitRoot(sfields[i].DeclaringClassRoot().AddressWithoutBarrier());
   }
   ArtField* const ifields = GetIFieldsUnchecked();
-  for (size_t i = 0, count = NumInstanceFields(); i < count; ++i) {
-    if (kIsDebugBuild && GetStatus() != kStatusRetired) {
-      CHECK_EQ(ifields[i].GetDeclaringClass(), this);
+  if (ifields != nullptr) {
+    for (size_t i = 0, count = NumInstanceFields(); i < count; ++i) {
+      if (kIsDebugBuild && IsResolved()) {
+        CHECK_EQ(ifields[i].GetDeclaringClass(), this) << GetStatus();
+      }
+      visitor.VisitRoot(ifields[i].DeclaringClassRoot().AddressWithoutBarrier());
     }
-    visitor.VisitRoot(ifields[i].DeclaringClassRoot().AddressWithoutBarrier());
   }
 }
 
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 5edda8b..055be85 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -91,7 +91,7 @@
       : ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
 };
 
-// Standard compressed reference used in the runtime. Used for StackRefernce and GC roots.
+// Standard compressed reference used in the runtime. Used for StackReference and GC roots.
 template<class MirrorType>
 class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {
  public:
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 2724d91..876e29a 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -19,6 +19,9 @@
 #include <string.h>
 #include <unistd.h>
 
+#include <sstream>
+
+#include "base/histogram-inl.h"
 #include "class_linker.h"
 #include "common_throws.h"
 #include "debugger.h"
@@ -329,6 +332,123 @@
   env->ReleasePrimitiveArrayCritical(data, arr, 0);
 }
 
+// The runtime stat names for VMDebug.getRuntimeStat().
+enum class VMDebugRuntimeStatId {
+  kArtGcGcCount = 0,
+  kArtGcGcTime,
+  kArtGcBytesAllocated,
+  kArtGcBytesFreed,
+  kArtGcBlockingGcCount,
+  kArtGcBlockingGcTime,
+  kArtGcGcCountRateHistogram,
+  kArtGcBlockingGcCountRateHistogram,
+  kNumRuntimeStats,
+};
+
+static jobject VMDebug_getRuntimeStatInternal(JNIEnv* env, jclass, jint statId) {
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  switch (static_cast<VMDebugRuntimeStatId>(statId)) {
+    case VMDebugRuntimeStatId::kArtGcGcCount: {
+      std::string output = std::to_string(heap->GetGcCount());
+      return env->NewStringUTF(output.c_str());
+    }
+    case VMDebugRuntimeStatId::kArtGcGcTime: {
+      std::string output = std::to_string(NsToMs(heap->GetGcTime()));
+      return env->NewStringUTF(output.c_str());
+    }
+    case VMDebugRuntimeStatId::kArtGcBytesAllocated: {
+      std::string output = std::to_string(heap->GetBytesAllocatedEver());
+      return env->NewStringUTF(output.c_str());
+    }
+    case VMDebugRuntimeStatId::kArtGcBytesFreed: {
+      std::string output = std::to_string(heap->GetBytesFreedEver());
+      return env->NewStringUTF(output.c_str());
+    }
+    case VMDebugRuntimeStatId::kArtGcBlockingGcCount: {
+      std::string output = std::to_string(heap->GetBlockingGcCount());
+      return env->NewStringUTF(output.c_str());
+    }
+    case VMDebugRuntimeStatId::kArtGcBlockingGcTime: {
+      std::string output = std::to_string(NsToMs(heap->GetBlockingGcTime()));
+      return env->NewStringUTF(output.c_str());
+    }
+    case VMDebugRuntimeStatId::kArtGcGcCountRateHistogram: {
+      std::ostringstream output;
+      heap->DumpGcCountRateHistogram(output);
+      return env->NewStringUTF(output.str().c_str());
+    }
+    case VMDebugRuntimeStatId::kArtGcBlockingGcCountRateHistogram: {
+      std::ostringstream output;
+      heap->DumpBlockingGcCountRateHistogram(output);
+      return env->NewStringUTF(output.str().c_str());
+    }
+    default:
+      return nullptr;
+  }
+}
+
+static bool SetRuntimeStatValue(JNIEnv* env, jobjectArray result, VMDebugRuntimeStatId id,
+                                std::string value) {
+  ScopedLocalRef<jstring> jvalue(env, env->NewStringUTF(value.c_str()));
+  if (jvalue.get() == nullptr) {
+    return false;
+  }
+  env->SetObjectArrayElement(result, static_cast<jint>(id), jvalue.get());
+  return true;
+}
+
+static jobjectArray VMDebug_getRuntimeStatsInternal(JNIEnv* env, jclass) {
+  jobjectArray result = env->NewObjectArray(
+      static_cast<jint>(VMDebugRuntimeStatId::kNumRuntimeStats),
+      WellKnownClasses::java_lang_String,
+      nullptr);
+  if (result == nullptr) {
+    return nullptr;
+  }
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcCount,
+                           std::to_string(heap->GetGcCount()))) {
+    return nullptr;
+  }
+  if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcTime,
+                           std::to_string(NsToMs(heap->GetGcTime())))) {
+    return nullptr;
+  }
+  if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBytesAllocated,
+                           std::to_string(heap->GetBytesAllocatedEver()))) {
+    return nullptr;
+  }
+  if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBytesFreed,
+                           std::to_string(heap->GetBytesFreedEver()))) {
+    return nullptr;
+  }
+  if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcCount,
+                           std::to_string(heap->GetBlockingGcCount()))) {
+    return nullptr;
+  }
+  if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcTime,
+                           std::to_string(NsToMs(heap->GetBlockingGcTime())))) {
+    return nullptr;
+  }
+  {
+    std::ostringstream output;
+    heap->DumpGcCountRateHistogram(output);
+    if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcCountRateHistogram,
+                             output.str())) {
+      return nullptr;
+    }
+  }
+  {
+    std::ostringstream output;
+    heap->DumpBlockingGcCountRateHistogram(output);
+    if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcCountRateHistogram,
+                             output.str())) {
+      return nullptr;
+    }
+  }
+  return result;
+}
+
 static JNINativeMethod gMethods[] = {
   NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"),
   NATIVE_METHOD(VMDebug, crash, "()V"),
@@ -359,6 +479,8 @@
   NATIVE_METHOD(VMDebug, stopInstructionCounting, "()V"),
   NATIVE_METHOD(VMDebug, stopMethodTracing, "()V"),
   NATIVE_METHOD(VMDebug, threadCpuTimeNanos, "!()J"),
+  NATIVE_METHOD(VMDebug, getRuntimeStatInternal, "(I)Ljava/lang/String;"),
+  NATIVE_METHOD(VMDebug, getRuntimeStatsInternal, "()[Ljava/lang/String;")
 };
 
 void register_dalvik_system_VMDebug(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 1a6adf8..196a231 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -250,8 +250,7 @@
 
 class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
  public:
-  explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) {
-  }
+  explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
 
   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
       OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 35932e0..0c39f2b 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -44,8 +44,8 @@
   if (loader != nullptr) {
     // Try the common case.
     StackHandleScope<1> hs(soa.Self());
-    c = cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), descriptor_hash,
-                                       hs.NewHandle(loader));
+    cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), descriptor_hash,
+                                   hs.NewHandle(loader), &c);
     if (c != nullptr) {
       return soa.AddLocalReference<jclass>(c);
     }
diff --git a/runtime/oat.h b/runtime/oat.h
index de95fef..a31e09a 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -38,6 +38,7 @@
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
   static constexpr const char* kDex2OatHostKey = "dex2oat-host";
   static constexpr const char* kPicKey = "pic";
+  static constexpr const char* kClassPathKey = "classpath";
 
   static OatHeader* Create(InstructionSet instruction_set,
                            const InstructionSetFeatures* instruction_set_features,
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 81703b1..d3c4b49 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -20,6 +20,7 @@
 #include <string.h>
 #include <unistd.h>
 
+#include <cstdlib>
 #include <sstream>
 
 #include "base/bit_vector.h"
@@ -592,4 +593,90 @@
   // TODO: Check against oat_patches. b/18144996
 }
 
+static constexpr char kDexClassPathEncodingSeparator = '*';
+
+std::string OatFile::EncodeDexFileDependencies(const std::vector<const DexFile*>& dex_files) {
+  std::ostringstream out;
+
+  for (const DexFile* dex_file : dex_files) {
+    out << dex_file->GetLocation().c_str();
+    out << kDexClassPathEncodingSeparator;
+    out << dex_file->GetLocationChecksum();
+    out << kDexClassPathEncodingSeparator;
+  }
+
+  return out.str();
+}
+
+bool OatFile::CheckStaticDexFileDependencies(const char* dex_dependencies, std::string* msg) {
+  if (dex_dependencies == nullptr || dex_dependencies[0] == 0) {
+    // No dependencies.
+    return true;
+  }
+
+  // Assumption: this is not performance-critical. So it's OK to do this with a std::string and
+  //             Split() instead of manual parsing of the combined char*.
+  std::vector<std::string> split;
+  Split(dex_dependencies, kDexClassPathEncodingSeparator, &split);
+  if (split.size() % 2 != 0) {
+    // Expected pairs of location and checksum.
+    *msg = StringPrintf("Odd number of elements in dependency list %s", dex_dependencies);
+    return false;
+  }
+
+  for (auto it = split.begin(), end = split.end(); it != end; it += 2) {
+    std::string& location = *it;
+    std::string& checksum = *(it + 1);
+    int64_t converted = strtoll(checksum.c_str(), nullptr, 10);
+    if (converted == 0) {
+      // Conversion error.
+      *msg = StringPrintf("Conversion error for %s", checksum.c_str());
+      return false;
+    }
+
+    uint32_t dex_checksum;
+    std::string error_msg;
+    if (DexFile::GetChecksum(DexFile::GetDexCanonicalLocation(location.c_str()).c_str(),
+                             &dex_checksum,
+                             &error_msg)) {
+      if (converted != dex_checksum) {
+        *msg = StringPrintf("Checksums don't match for %s: %" PRId64 " vs %u",
+                            location.c_str(), converted, dex_checksum);
+        return false;
+      }
+    } else {
+      // Problem retrieving checksum.
+      // TODO: odex files?
+      *msg = StringPrintf("Could not retrieve checksum for %s: %s", location.c_str(),
+                          error_msg.c_str());
+      return false;
+    }
+  }
+
+  return true;
+}
+
+bool OatFile::GetDexLocationsFromDependencies(const char* dex_dependencies,
+                                              std::vector<std::string>* locations) {
+  DCHECK(locations != nullptr);
+  if (dex_dependencies == nullptr || dex_dependencies[0] == 0) {
+    return true;
+  }
+
+  // Assumption: this is not performance-critical. So it's OK to do this with a std::string and
+  //             Split() instead of manual parsing of the combined char*.
+  std::vector<std::string> split;
+  Split(dex_dependencies, kDexClassPathEncodingSeparator, &split);
+  if (split.size() % 2 != 0) {
+    // Expected pairs of location and checksum.
+    return false;
+  }
+
+  for (auto it = split.begin(), end = split.end(); it != end; it += 2) {
+    locations->push_back(*it);
+  }
+
+  return true;
+}
+
 }  // namespace art
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 73a8c8e..a5d5ae8 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -248,6 +248,18 @@
   static std::string ResolveRelativeEncodedDexLocation(
       const char* abs_dex_location, const std::string& rel_dex_location);
 
+  // Create a dependency list (dex locations and checksums) for the given dex files.
+  static std::string EncodeDexFileDependencies(const std::vector<const DexFile*>& dex_files);
+
+  // Check the given dependency list against the corresponding dex files on disk - hence the name
+  // "Static": this only detects file updates, not class-loader environment changes.
+  static bool CheckStaticDexFileDependencies(const char* dex_dependencies, std::string* msg);
+
+  // Get the dex locations of a dependency list. Note: this is *not* cleaned for synthetic
+  // locations of multidex files.
+  static bool GetDexLocationsFromDependencies(const char* dex_dependencies,
+                                              std::vector<std::string>* locations);
+
  private:
   static void CheckLocation(const std::string& location);
 
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index f2213e9..a88553c 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -20,9 +20,15 @@
 
 #include <gtest/gtest.h>
 
+#include "common_runtime_test.h"
+#include "scoped_thread_state_change.h"
+
 namespace art {
 
-TEST(OatFileTest, ResolveRelativeEncodedDexLocation) {
+class OatFileTest : public CommonRuntimeTest {
+};
+
+TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation) {
   EXPECT_EQ(std::string("/data/app/foo/base.apk"),
       OatFile::ResolveRelativeEncodedDexLocation(
         nullptr, "/data/app/foo/base.apk"));
@@ -56,4 +62,54 @@
         "/data/app/foo/base.apk", "o/base.apk"));
 }
 
+static std::vector<const DexFile*> ToConstDexFiles(
+    const std::vector<std::unique_ptr<const DexFile>>& in) {
+  std::vector<const DexFile*> ret;
+  for (auto& d : in) {
+    ret.push_back(d.get());
+  }
+  return ret;
+}
+
+TEST_F(OatFileTest, DexFileDependencies) {
+  std::string error_msg;
+
+  // No dependencies.
+  EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(nullptr, &error_msg)) << error_msg;
+  EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies("", &error_msg)) << error_msg;
+
+  // Ill-formed dependencies.
+  EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc", &error_msg));
+  EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*123*def", &error_msg));
+  EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*def*", &error_msg));
+
+  // Unsatisfiable dependency.
+  EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*123*", &error_msg));
+
+  // Load some dex files to be able to do a real test.
+  ScopedObjectAccess soa(Thread::Current());
+
+  std::vector<std::unique_ptr<const DexFile>> dex_files1 = OpenTestDexFiles("Main");
+  std::vector<const DexFile*> dex_files_const1 = ToConstDexFiles(dex_files1);
+  std::string encoding1 = OatFile::EncodeDexFileDependencies(dex_files_const1);
+  EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(encoding1.c_str(), &error_msg))
+      << error_msg << " " << encoding1;
+  std::vector<std::string> split1;
+  EXPECT_TRUE(OatFile::GetDexLocationsFromDependencies(encoding1.c_str(), &split1));
+  ASSERT_EQ(split1.size(), 1U);
+  EXPECT_EQ(split1[0], dex_files_const1[0]->GetLocation());
+
+  std::vector<std::unique_ptr<const DexFile>> dex_files2 = OpenTestDexFiles("MultiDex");
+  EXPECT_GT(dex_files2.size(), 1U);
+  std::vector<const DexFile*> dex_files_const2 = ToConstDexFiles(dex_files2);
+  std::string encoding2 = OatFile::EncodeDexFileDependencies(dex_files_const2);
+  EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(encoding2.c_str(), &error_msg))
+      << error_msg << " " << encoding2;
+  std::vector<std::string> split2;
+  EXPECT_TRUE(OatFile::GetDexLocationsFromDependencies(encoding2.c_str(), &split2));
+  ASSERT_EQ(split2.size(), 2U);
+  EXPECT_EQ(split2[0], dex_files_const2[0]->GetLocation());
+  EXPECT_EQ(split2[1], dex_files_const2[1]->GetLocation());
+}
+
 }  // namespace art
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 58b272b..5ca51fb 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1070,12 +1070,7 @@
     // If we're currently in native code, dump that stack before dumping the managed stack.
     if (dump_for_abort || ShouldShowNativeStack(this)) {
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
-      // b/20040863. Temporary workaround for x86 libunwind issue.
-#if defined(__i386__) && defined(HAVE_ANDROID_OS)
-      os << "Cannot dump native stack. b/20040863.\n";
-#else
       DumpNativeStack(os, GetTid(), "  native: ", GetCurrentMethod(nullptr, !dump_for_abort));
-#endif
     }
     DumpJavaStack(os);
   } else {
@@ -1391,6 +1386,8 @@
       visitor, RootInfo(kRootNativeStack, thread_id));
   for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
     for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
+      // GetReference returns a pointer to the stack reference within the handle scope. If this
+      // needs to be updated, it will be done by the root visitor.
       buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
     }
   }
@@ -2317,6 +2314,7 @@
   ReleaseLongJumpContext(context);
   for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
     visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
+    DCHECK(frame.method_ != nullptr);
     visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&frame.method_),
                        RootInfo(kRootVMInternal, thread_id));
   }
diff --git a/test/099-vmdebug/src/Main.java b/test/099-vmdebug/src/Main.java
index 4d781c3..a8db069 100644
--- a/test/099-vmdebug/src/Main.java
+++ b/test/099-vmdebug/src/Main.java
@@ -17,6 +17,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.lang.reflect.Method;
+import java.util.Map;
 
 public class Main {
     public static void main(String[] args) throws Exception {
@@ -26,6 +27,7 @@
             return;
         }
         testMethodTracing();
+        testRuntimeStat();
     }
 
     private static File createTempFile() throws Exception {
@@ -109,10 +111,108 @@
         tempFile.delete();
     }
 
+    private static void checkNumber(String s) throws Exception {
+        if (s == null) {
+            System.out.println("Got null string");
+            return;
+        }
+        long n = Long.valueOf(s);
+        if (n < 0) {
+            System.out.println("Got negative number " + n);
+        }
+    }
+
+    private static void checkHistogram(String s) throws Exception {
+        if (s == null || s.length() == 0) {
+            System.out.println("Got null or empty string");
+            return;
+        }
+        String[] buckets = s.split(",");
+        long last_key = 0;
+        for (int i = 0; i < buckets.length; ++i) {
+            String bucket = buckets[i];
+            if (bucket.length() == 0) {
+                System.out.println("Got empty bucket");
+                continue;
+            }
+            String[] kv = bucket.split(":");
+            if (kv.length != 2 || kv[0].length() == 0 || kv[1].length() == 0) {
+                System.out.println("Got bad bucket " + bucket);
+                continue;
+            }
+            long key = Long.valueOf(kv[0]);
+            long value = Long.valueOf(kv[1]);
+            if (key < 0 || value < 0) {
+                System.out.println("Got negative key or value " + bucket);
+                continue;
+            }
+            if (key < last_key) {
+                System.out.println("Got decreasing key " + bucket);
+                continue;
+            }
+            last_key = key;
+        }
+    }
+
+    private static void testRuntimeStat() throws Exception {
+        // Invoke at least one GC and wait for 20 seconds or so, so that we
+        // get at least one bucket in the histograms.
+        for (int i = 0; i < 20; ++i) {
+          Runtime.getRuntime().gc();
+          Thread.sleep(1000L);
+        }
+        String gc_count = VMDebug.getRuntimeStat("art.gc.gc-count");
+        String gc_time = VMDebug.getRuntimeStat("art.gc.gc-time");
+        String bytes_allocated = VMDebug.getRuntimeStat("art.gc.bytes-allocated");
+        String bytes_freed = VMDebug.getRuntimeStat("art.gc.bytes-freed");
+        String blocking_gc_count = VMDebug.getRuntimeStat("art.gc.blocking-gc-count");
+        String blocking_gc_time = VMDebug.getRuntimeStat("art.gc.blocking-gc-time");
+        String gc_count_rate_histogram = VMDebug.getRuntimeStat("art.gc.gc-count-rate-histogram");
+        String blocking_gc_count_rate_histogram =
+            VMDebug.getRuntimeStat("art.gc.blocking-gc-count-rate-histogram");
+        checkNumber(gc_count);
+        checkNumber(gc_time);
+        checkNumber(bytes_allocated);
+        checkNumber(bytes_freed);
+        checkNumber(blocking_gc_count);
+        checkNumber(blocking_gc_time);
+        checkHistogram(gc_count_rate_histogram);
+        checkHistogram(blocking_gc_count_rate_histogram);
+    }
+
+    private static void testRuntimeStats() throws Exception {
+        // Invoke at least one GC and wait for 20 seconds or so, so that we
+        // get at least one bucket in the histograms.
+        for (int i = 0; i < 20; ++i) {
+          Runtime.getRuntime().gc();
+          Thread.sleep(1000L);
+        }
+        Map<String, String> map = VMDebug.getRuntimeStats();
+        String gc_count = map.get("art.gc.gc-count");
+        String gc_time = map.get("art.gc.gc-time");
+        String bytes_allocated = map.get("art.gc.bytes-allocated");
+        String bytes_freed = map.get("art.gc.bytes-freed");
+        String blocking_gc_count = map.get("art.gc.blocking-gc-count");
+        String blocking_gc_time = map.get("art.gc.blocking-gc-time");
+        String gc_count_rate_histogram = map.get("art.gc.gc-count-rate-histogram");
+        String blocking_gc_count_rate_histogram =
+            map.get("art.gc.blocking-gc-count-rate-histogram");
+        checkNumber(gc_count);
+        checkNumber(gc_time);
+        checkNumber(bytes_allocated);
+        checkNumber(bytes_freed);
+        checkNumber(blocking_gc_count);
+        checkNumber(blocking_gc_time);
+        checkHistogram(gc_count_rate_histogram);
+        checkHistogram(blocking_gc_count_rate_histogram);
+    }
+
     private static class VMDebug {
         private static final Method startMethodTracingMethod;
         private static final Method stopMethodTracingMethod;
         private static final Method getMethodTracingModeMethod;
+        private static final Method getRuntimeStatMethod;
+        private static final Method getRuntimeStatsMethod;
         static {
             try {
                 Class c = Class.forName("dalvik.system.VMDebug");
@@ -120,6 +220,8 @@
                         Integer.TYPE, Integer.TYPE, Boolean.TYPE, Integer.TYPE);
                 stopMethodTracingMethod = c.getDeclaredMethod("stopMethodTracing");
                 getMethodTracingModeMethod = c.getDeclaredMethod("getMethodTracingMode");
+                getRuntimeStatMethod = c.getDeclaredMethod("getRuntimeStat", String.class);
+                getRuntimeStatsMethod = c.getDeclaredMethod("getRuntimeStats");
             } catch (Exception e) {
                 throw new RuntimeException(e);
             }
@@ -136,5 +238,11 @@
         public static int getMethodTracingMode() throws Exception {
             return (int) getMethodTracingModeMethod.invoke(null);
         }
+        public static String getRuntimeStat(String statName) throws Exception {
+            return (String) getRuntimeStatMethod.invoke(null, statName);
+        }
+        public static Map<String, String> getRuntimeStats() throws Exception {
+            return (Map<String, String>) getRuntimeStatsMethod.invoke(null);
+        }
     }
 }
diff --git a/test/104-growth-limit/src/Main.java b/test/104-growth-limit/src/Main.java
index 55469db..d666377 100644
--- a/test/104-growth-limit/src/Main.java
+++ b/test/104-growth-limit/src/Main.java
@@ -21,8 +21,14 @@
 public class Main {
 
     public static void main(String[] args) throws Exception {
-
         int alloc1 = 1;
+        // Setup reflection stuff before allocating to prevent OOME caused by allocations from
+        // Class.forName or getDeclaredMethod.
+        // Reflective equivalent of: dalvik.system.VMRuntime.getRuntime().clearGrowthLimit();
+        final Class<?> vm_runtime = Class.forName("dalvik.system.VMRuntime");
+        final Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
+        final Object runtime = get_runtime.invoke(null);
+        final Method clear_growth_limit = vm_runtime.getDeclaredMethod("clearGrowthLimit");
         try {
             List<byte[]> l = new ArrayList<byte[]>();
             while (true) {
@@ -33,13 +39,7 @@
         } catch (OutOfMemoryError e) {
         }
         // Expand the heap to the maximum size.
-        // Reflective equivalent of: dalvik.system.VMRuntime.getRuntime().clearGrowthLimit();
-        Class<?> vm_runtime = Class.forName("dalvik.system.VMRuntime");
-        Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
-        Object runtime = get_runtime.invoke(null);
-        Method clear_growth_limit = vm_runtime.getDeclaredMethod("clearGrowthLimit");
         clear_growth_limit.invoke(runtime);
-
         int alloc2 = 1;
         try {
             List<byte[]> l = new ArrayList<byte[]>();
diff --git a/test/472-type-propagation/expected.txt b/test/472-type-propagation/expected.txt
new file mode 100644
index 0000000..0b29bb1
--- /dev/null
+++ b/test/472-type-propagation/expected.txt
@@ -0,0 +1,2 @@
+4.3
+1.2
diff --git a/test/472-type-propagation/info.txt b/test/472-type-propagation/info.txt
new file mode 100644
index 0000000..b86e5a2
--- /dev/null
+++ b/test/472-type-propagation/info.txt
@@ -0,0 +1,3 @@
+Regression test for optimizing's type propagation:
+If a phi requests its inputs to be of a certain type, the inputs need
+to propagate that type to their users, as those users might be phis.
diff --git a/test/472-type-propagation/src/Main.java b/test/472-type-propagation/src/Main.java
new file mode 100644
index 0000000..f9e302f
--- /dev/null
+++ b/test/472-type-propagation/src/Main.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+  public static void main(String[] args) {
+    ssaBuilderDouble(new double[] { 1.2, 4.3, 5.2 });
+    ssaBuilderDouble(new double[] { 1.2, 4.3, 5.2, 6.8 });
+  }
+
+  public static void ssaBuilderDouble(double[] array) {
+    double x;
+    if (array.length > 3) {
+      x = array[0];
+    } else {
+      x = array[1];
+    }
+    array[2] = x;
+    System.out.println(x);
+  }
+}