Move Heap parameters to ObjPtr

Convert Heap APIs that took raw mirror::Object* / mirror::Class*
parameters to take ObjPtr<> instead (or Handle<> for the instance
counting and collection paths), and update the callers accordingly.

Also deleted the now-unused object dumping helpers Heap::DumpObject,
Heap::SafeGetClassDescriptor, and Heap::SafePrettyTypeOf.
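
For reference, a minimal sketch of the conversion pattern this change
applies (illustrative only, not part of the patch; TouchClass,
DoSomethingThatMaySuspend and UseRawAddress are made-up names):

  // Hypothetical example of the pattern used in heap-inl.h: take
  // ObjPtr<> instead of a raw mirror pointer, wrap it in a
  // HandleWrapperObjPtr across code that may suspend or trigger a GC
  // (so klass is updated if the object moves), and call .Ptr() only
  // where a raw address is still required.
  void TouchClass(Thread* self, ObjPtr<mirror::Class> klass)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
    DoSomethingThatMaySuspend(self);  // made-up callee that may GC
    UseRawAddress(klass.Ptr());       // made-up raw-pointer consumer
  }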

Test: test-art-host

Bug: 31113334

Change-Id: I747220caafe6679591fd4b361d7f50383a046164
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 8a51dc2..5ec9898 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -274,7 +274,7 @@
       AbortF("field operation on NULL object: %p", java_object);
       return false;
     }
-    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
+    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o.Ptr())) {
       Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
       AbortF("field operation on invalid %s: %p",
              ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
@@ -782,7 +782,7 @@
       }
     }
 
-    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
+    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj.Ptr())) {
       Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
       AbortF("%s is an invalid %s: %p (%p)",
              what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
@@ -939,7 +939,7 @@
         ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(jc);
         if (c == nullptr) {
           *msg += "NULL";
-        } else if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
+        } else if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c.Ptr())) {
           StringAppendF(msg, "INVALID POINTER:%p", jc);
         } else if (!c->IsClass()) {
           *msg += "INVALID NON-CLASS OBJECT OF TYPE:" + PrettyTypeOf(c);
@@ -1108,7 +1108,7 @@
     }
 
     ObjPtr<mirror::Array> a = soa.Decode<mirror::Array>(java_array);
-    if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a))) {
+    if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a.Ptr()))) {
       Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
       AbortF("jarray is an invalid %s: %p (%p)",
              ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(),
@@ -1145,7 +1145,7 @@
     }
     ArtField* f = soa.DecodeField(fid);
     // TODO: Better check here.
-    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f->GetDeclaringClass())) {
+    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f->GetDeclaringClass().Ptr())) {
       Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
       AbortF("invalid jfieldID: %p", fid);
       return nullptr;
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 7006f70..d8a6ba9 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -892,15 +892,16 @@
                                        std::vector<uint64_t>* counts) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
   heap->CollectGarbage(false);
-  std::vector<mirror::Class*> classes;
+  StackHandleScopeCollection hs(Thread::Current());
+  std::vector<Handle<mirror::Class>> classes;
   counts->clear();
   for (size_t i = 0; i < class_ids.size(); ++i) {
     JDWP::JdwpError error;
-    mirror::Class* c = DecodeClass(class_ids[i], &error);
+    ObjPtr<mirror::Class> c = DecodeClass(class_ids[i], &error);
     if (c == nullptr) {
       return error;
     }
-    classes.push_back(c);
+    classes.push_back(hs.NewHandle(c));
     counts->push_back(0);
   }
   heap->CountInstances(classes, false, &(*counts)[0]);
@@ -913,12 +914,13 @@
   // We only want reachable instances, so do a GC.
   heap->CollectGarbage(false);
   JDWP::JdwpError error;
-  mirror::Class* c = DecodeClass(class_id, &error);
+  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
   if (c == nullptr) {
     return error;
   }
-  std::vector<mirror::Object*> raw_instances;
-  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
+  std::vector<ObjPtr<mirror::Object>> raw_instances;
+  StackHandleScope<1> hs(Thread::Current());
+  Runtime::Current()->GetHeap()->GetInstances(hs.NewHandle(c), max_count, raw_instances);
   for (size_t i = 0; i < raw_instances.size(); ++i) {
     instances->push_back(gRegistry->Add(raw_instances[i]));
   }
@@ -930,11 +932,11 @@
   gc::Heap* heap = Runtime::Current()->GetHeap();
   heap->CollectGarbage(false);
   JDWP::JdwpError error;
-  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
+  ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(object_id, &error);
   if (o == nullptr) {
     return JDWP::ERR_INVALID_OBJECT;
   }
-  std::vector<mirror::Object*> raw_instances;
+  std::vector<ObjPtr<mirror::Object>> raw_instances;
   heap->GetReferringObjects(o, max_count, raw_instances);
   for (size_t i = 0; i < raw_instances.size(); ++i) {
     referring_objects->push_back(gRegistry->Add(raw_instances[i]));
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 7c7e2da..20fa0d8 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -144,7 +144,7 @@
     HandleWrapperObjPtr<mirror::Object> h_obj(hs.NewHandleWrapper(&o));
     CheckReferenceResult(h_obj, self);
   }
-  VerifyObject(o.Ptr());
+  VerifyObject(o);
   return o.Ptr();
 }
 
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index 6fb74d3..f60bc0c 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -22,6 +22,7 @@
 
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "obj_ptr.h"
 #include "object_callbacks.h"
 #include "gc_root.h"
 
@@ -39,7 +40,7 @@
  public:
   virtual ~AllocationListener() {}
 
-  virtual void ObjectAllocated(Thread* self, mirror::Object** obj, size_t byte_count)
+  virtual void ObjectAllocated(Thread* self, ObjPtr<mirror::Object>* obj, size_t byte_count)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 };
 
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 13ebb27..d921900 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -19,6 +19,7 @@
 #include "art_method-inl.h"
 #include "base/enums.h"
 #include "base/stl_util.h"
+#include "obj_ptr-inl.h"
 #include "stack.h"
 
 #ifdef ART_TARGET_ANDROID
@@ -263,7 +264,7 @@
 }
 
 void AllocRecordObjectMap::RecordAllocation(Thread* self,
-                                            mirror::Object** obj,
+                                            ObjPtr<mirror::Object>* obj,
                                             size_t byte_count) {
   // Get stack trace outside of lock in case there are allocations during the stack walk.
   // b/27858645.
@@ -305,7 +306,7 @@
   trace.SetTid(self->GetTid());
 
   // Add the record.
-  Put(*obj, AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
+  Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
   DCHECK_LE(Size(), alloc_record_max_);
 }
 
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index f1f013b..c8b2b89 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -21,6 +21,7 @@
 #include <memory>
 
 #include "base/mutex.h"
+#include "obj_ptr.h"
 #include "object_callbacks.h"
 #include "gc_root.h"
 
@@ -210,7 +211,7 @@
   // Caller needs to check that it is enabled before calling since we read the stack trace before
   // checking the enabled boolean.
   void RecordAllocation(Thread* self,
-                        mirror::Object** obj,
+                        ObjPtr<mirror::Object>* obj,
                         size_t byte_count)
       REQUIRES(!Locks::alloc_tracker_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 2e97172..76a478e 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -296,7 +296,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
     if (from_space_->HasAddress(ref)) {
-      Runtime::Current()->GetHeap()->DumpObject(LOG_STREAM(INFO), obj);
       LOG(FATAL) << ref << " found in from space";
     }
   }
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 00adefb..05ce9c7 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -41,7 +41,7 @@
 
 template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
 inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
-                                                      mirror::Class* klass,
+                                                      ObjPtr<mirror::Class> klass,
                                                       size_t byte_count,
                                                       AllocatorType allocator,
                                                       const PreFenceVisitor& pre_fence_visitor) {
@@ -52,16 +52,19 @@
     CHECK_EQ(self->GetState(), kRunnable);
     self->AssertThreadSuspensionIsAllowable();
     self->AssertNoPendingException();
+    // Make sure to preserve klass.
+    StackHandleScope<1> hs(self);
+    HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
     self->PoisonObjectPointers();
   }
   // Need to check that we arent the large object allocator since the large object allocation code
   // path this function. If we didn't check we would have an infinite loop.
-  mirror::Object* obj;
+  ObjPtr<mirror::Object> obj;
   if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
     obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                            pre_fence_visitor);
     if (obj != nullptr) {
-      return obj;
+      return obj.Ptr();
     } else {
       // There should be an OOM exception, since we are retrying, clear it.
       self->ClearException();
@@ -85,7 +88,7 @@
     obj->SetClass(klass);
     if (kUseBakerOrBrooksReadBarrier) {
       if (kUseBrooksReadBarrier) {
-        obj->SetReadBarrierPointer(obj);
+        obj->SetReadBarrierPointer(obj.Ptr());
       }
       obj->AssertReadBarrierPointer();
     }
@@ -93,14 +96,15 @@
     usable_size = bytes_allocated;
     pre_fence_visitor(obj, usable_size);
     QuasiAtomic::ThreadFenceForConstructor();
-  } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
-             (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
-             LIKELY(obj != nullptr)) {
+  } else if (
+      !kInstrumented && allocator == kAllocatorTypeRosAlloc &&
+      (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) != nullptr &&
+      LIKELY(obj != nullptr)) {
     DCHECK(!is_running_on_memory_tool_);
     obj->SetClass(klass);
     if (kUseBakerOrBrooksReadBarrier) {
       if (kUseBrooksReadBarrier) {
-        obj->SetReadBarrierPointer(obj);
+        obj->SetReadBarrierPointer(obj.Ptr());
       }
       obj->AssertReadBarrierPointer();
     }
@@ -141,7 +145,7 @@
     obj->SetClass(klass);
     if (kUseBakerOrBrooksReadBarrier) {
       if (kUseBrooksReadBarrier) {
-        obj->SetReadBarrierPointer(obj);
+        obj->SetReadBarrierPointer(obj.Ptr());
       }
       obj->AssertReadBarrierPointer();
     }
@@ -213,25 +217,25 @@
   }
   VerifyObject(obj);
   self->VerifyStack();
-  return obj;
+  return obj.Ptr();
 }
 
 // The size of a thread-local allocation stack in the number of references.
 static constexpr size_t kThreadLocalAllocationStackSize = 128;
 
-inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
+inline void Heap::PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj) {
   if (kUseThreadLocalAllocationStack) {
-    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(*obj))) {
+    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(obj->Ptr()))) {
       PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
     }
-  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(*obj))) {
+  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(obj->Ptr()))) {
     PushOnAllocationStackWithInternalGC(self, obj);
   }
 }
 
 template <bool kInstrumented, typename PreFenceVisitor>
 inline mirror::Object* Heap::AllocLargeObject(Thread* self,
-                                              mirror::Class** klass,
+                                              ObjPtr<mirror::Class>* klass,
                                               size_t byte_count,
                                               const PreFenceVisitor& pre_fence_visitor) {
   // Save and restore the class in case it moves.
@@ -405,7 +409,7 @@
   return ret;
 }
 
-inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
+inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const {
   // We need to have a zygote space or else our newly allocated large object can end up in the
   // Zygote resulting in it being prematurely freed.
   // We can only do this for primitive objects since large objects will not be within the card table
@@ -435,7 +439,7 @@
 
 inline void Heap::CheckConcurrentGC(Thread* self,
                                     size_t new_num_bytes_allocated,
-                                    mirror::Object** obj) {
+                                    ObjPtr<mirror::Object>* obj) {
   if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
     RequestConcurrentGCAndSaveObject(self, false, obj);
   }
@@ -447,6 +451,16 @@
   card_table_->MarkCard(dst.Ptr());
 }
 
+inline void Heap::WriteBarrierArray(ObjPtr<mirror::Object> dst,
+                                    int start_offset ATTRIBUTE_UNUSED,
+                                    size_t length ATTRIBUTE_UNUSED) {
+  card_table_->MarkCard(dst.Ptr());
+}
+
+inline void Heap::WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj) {
+  card_table_->MarkCard(obj.Ptr());
+}
+
 }  // namespace gc
 }  // namespace art
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 33f849a..640787c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -760,83 +760,6 @@
   }
 }
 
-std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
-  if (!IsValidContinuousSpaceObjectAddress(klass)) {
-    return StringPrintf("<non heap address klass %p>", klass);
-  }
-  mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
-  if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
-    std::string result("[");
-    result += SafeGetClassDescriptor(component_type);
-    return result;
-  } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
-    return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
-  } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
-    return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
-  } else {
-    mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
-    if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
-      return StringPrintf("<non heap address dex_cache %p>", dex_cache);
-    }
-    const DexFile* dex_file = dex_cache->GetDexFile();
-    uint16_t class_def_idx = klass->GetDexClassDefIndex();
-    if (class_def_idx == DexFile::kDexNoIndex16) {
-      return "<class def not found>";
-    }
-    const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
-    const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
-    return dex_file->GetTypeDescriptor(type_id);
-  }
-}
-
-std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
-  if (obj == nullptr) {
-    return "null";
-  }
-  mirror::Class* klass = obj->GetClass<kVerifyNone>();
-  if (klass == nullptr) {
-    return "(class=null)";
-  }
-  std::string result(SafeGetClassDescriptor(klass));
-  if (obj->IsClass()) {
-    result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
-  }
-  return result;
-}
-
-void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
-  if (obj == nullptr) {
-    stream << "(obj=null)";
-    return;
-  }
-  if (IsAligned<kObjectAlignment>(obj)) {
-    space::Space* space = nullptr;
-    // Don't use find space since it only finds spaces which actually contain objects instead of
-    // spaces which may contain objects (e.g. cleared bump pointer spaces).
-    for (const auto& cur_space : continuous_spaces_) {
-      if (cur_space->HasAddress(obj)) {
-        space = cur_space;
-        break;
-      }
-    }
-    // Unprotect all the spaces.
-    for (const auto& con_space : continuous_spaces_) {
-      mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE);
-    }
-    stream << "Object " << obj;
-    if (space != nullptr) {
-      stream << " in space " << *space;
-    }
-    mirror::Class* klass = obj->GetClass<kVerifyNone>();
-    stream << "\nclass=" << klass;
-    if (klass != nullptr) {
-      stream << " type= " << SafePrettyTypeOf(obj);
-    }
-    // Re-protect the address we faulted on.
-    mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
-  }
-}
-
 bool Heap::IsCompilingBoot() const {
   if (!Runtime::Current()->IsAotCompiler()) {
     return false;
@@ -1325,33 +1248,42 @@
   VLOG(heap) << "Finished ~Heap()";
 }
 
-space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
-                                                            bool fail_ok) const {
+
+space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
   for (const auto& space : continuous_spaces_) {
-    if (space->Contains(obj)) {
+    if (space->Contains(addr)) {
       return space;
     }
   }
-  if (!fail_ok) {
-    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
-  }
   return nullptr;
 }
 
-space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
+space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
+                                                            bool fail_ok) const {
+  space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
+  if (space != nullptr) {
+    return space;
+  }
+  if (!fail_ok) {
+    LOG(FATAL) << "object " << obj << " not inside any spaces!";
+  }
+  return nullptr;
+}
+
+space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
                                                                   bool fail_ok) const {
   for (const auto& space : discontinuous_spaces_) {
-    if (space->Contains(obj)) {
+    if (space->Contains(obj.Ptr())) {
       return space;
     }
   }
   if (!fail_ok) {
-    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
+    LOG(FATAL) << "object " << obj << " not inside any spaces!";
   }
   return nullptr;
 }
 
-space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
+space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
   space::Space* result = FindContinuousSpaceFromObject(obj, true);
   if (result != nullptr) {
     return result;
@@ -1359,6 +1291,21 @@
   return FindDiscontinuousSpaceFromObject(obj, fail_ok);
 }
 
+space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
+  for (const auto& space : continuous_spaces_) {
+    if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
+      return space;
+    }
+  }
+  for (const auto& space : discontinuous_spaces_) {
+    if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
+      return space;
+    }
+  }
+  return nullptr;
+}
+
+
 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
   // If we're in a stack overflow, do not create a new exception. It would require running the
   // constructor, which will of course still be in a stack overflow.
@@ -1523,62 +1470,49 @@
       << static_cast<int>(100 * managed_utilization) << "%.";
 }
 
-bool Heap::IsValidObjectAddress(ObjPtr<mirror::Object> obj) const {
-  // Note: we deliberately don't take the lock here, and mustn't test anything that would require
-  // taking the lock.
-  if (obj == nullptr) {
+bool Heap::IsValidObjectAddress(const void* addr) const {
+  if (addr == nullptr) {
     return true;
   }
-  return IsAligned<kObjectAlignment>(obj.Ptr()) &&
-      FindSpaceFromObject(obj.Ptr(), true) != nullptr;
+  return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
 }
 
-bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
-  return FindContinuousSpaceFromObject(obj, true) != nullptr;
+bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
+  return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
 }
 
-bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
-  if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
+bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
+                              bool search_allocation_stack,
+                              bool search_live_stack,
+                              bool sorted) {
+  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
     return false;
   }
-  for (const auto& space : continuous_spaces_) {
-    if (space->HasAddress(obj)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
-                              bool search_live_stack, bool sorted) {
-  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
-    return false;
-  }
-  if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
+  if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
     mirror::Class* klass = obj->GetClass<kVerifyNone>();
     if (obj == klass) {
       // This case happens for java.lang.Class.
       return true;
     }
     return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
-  } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
+  } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
     // If we are in the allocated region of the temp space, then we are probably live (e.g. during
     // a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained.
-    return temp_space_->Contains(obj);
+    return temp_space_->Contains(obj.Ptr());
   }
-  if (region_space_ != nullptr && region_space_->HasAddress(obj)) {
+  if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
     return true;
   }
   space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
   space::DiscontinuousSpace* d_space = nullptr;
   if (c_space != nullptr) {
-    if (c_space->GetLiveBitmap()->Test(obj)) {
+    if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
       return true;
     }
   } else {
     d_space = FindDiscontinuousSpaceFromObject(obj, true);
     if (d_space != nullptr) {
-      if (d_space->GetLiveBitmap()->Test(obj)) {
+      if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
         return true;
       }
     }
@@ -1590,20 +1524,20 @@
     }
     if (search_allocation_stack) {
       if (sorted) {
-        if (allocation_stack_->ContainsSorted(obj)) {
+        if (allocation_stack_->ContainsSorted(obj.Ptr())) {
           return true;
         }
-      } else if (allocation_stack_->Contains(obj)) {
+      } else if (allocation_stack_->Contains(obj.Ptr())) {
         return true;
       }
     }
 
     if (search_live_stack) {
       if (sorted) {
-        if (live_stack_->ContainsSorted(obj)) {
+        if (live_stack_->ContainsSorted(obj.Ptr())) {
           return true;
         }
-      } else if (live_stack_->Contains(obj)) {
+      } else if (live_stack_->Contains(obj.Ptr())) {
         return true;
       }
     }
@@ -1611,12 +1545,12 @@
   // We need to check the bitmaps again since there is a race where we mark something as live and
   // then clear the stack containing it.
   if (c_space != nullptr) {
-    if (c_space->GetLiveBitmap()->Test(obj)) {
+    if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
       return true;
     }
   } else {
     d_space = FindDiscontinuousSpaceFromObject(obj, true);
-    if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
+    if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
       return true;
     }
   }
@@ -1646,7 +1580,7 @@
   }
 }
 
-void Heap::VerifyObjectBody(mirror::Object* obj) {
+void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
   if (verify_object_mode_ == kVerifyObjectModeDisabled) {
     return;
   }
@@ -1655,7 +1589,7 @@
   if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
     return;
   }
-  CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned";
+  CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
   mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
   CHECK(c != nullptr) << "Null class in object " << obj;
   CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
@@ -1734,14 +1668,13 @@
                                              size_t* bytes_allocated,
                                              size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated,
-                                             mirror::Class** klass) {
+                                             ObjPtr<mirror::Class>* klass) {
   bool was_default_allocator = allocator == GetCurrentAllocator();
   // Make sure there is no pending exception since we may need to throw an OOME.
   self->AssertNoPendingException();
   DCHECK(klass != nullptr);
   StackHandleScope<1> hs(self);
-  HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
-  klass = nullptr;  // Invalidate for safety.
+  HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
   // The allocation failed. If the GC is running, block until it completes, and then retry the
   // allocation.
   collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
@@ -1944,7 +1877,7 @@
 
 class InstanceCounter {
  public:
-  InstanceCounter(const std::vector<mirror::Class*>& classes,
+  InstanceCounter(const std::vector<Handle<mirror::Class>>& classes,
                   bool use_is_assignable_from,
                   uint64_t* counts)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -1956,7 +1889,7 @@
     mirror::Class* instance_class = obj->GetClass();
     CHECK(instance_class != nullptr);
     for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
-      mirror::Class* klass = instance_counter->classes_[i];
+      ObjPtr<mirror::Class> klass = instance_counter->classes_[i].Get();
       if (instance_counter->use_is_assignable_from_) {
         if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
           ++instance_counter->counts_[i];
@@ -1968,13 +1901,14 @@
   }
 
  private:
-  const std::vector<mirror::Class*>& classes_;
+  const std::vector<Handle<mirror::Class>>& classes_;
   bool use_is_assignable_from_;
   uint64_t* const counts_;
   DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
 };
 
-void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
+void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
+                          bool use_is_assignable_from,
                           uint64_t* counts) {
   InstanceCounter counter(classes, use_is_assignable_from, counts);
   VisitObjects(InstanceCounter::Callback, &counter);
@@ -1982,15 +1916,17 @@
 
 class InstanceCollector {
  public:
-  InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
+  InstanceCollector(Handle<mirror::Class> c,
+                    int32_t max_count,
+                    std::vector<ObjPtr<mirror::Object>>& instances)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      : class_(c), max_count_(max_count), instances_(instances) {
-  }
+      : class_(c), max_count_(max_count), instances_(instances) {}
+
   static void Callback(mirror::Object* obj, void* arg)
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     DCHECK(arg != nullptr);
     InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
-    if (obj->GetClass() == instance_collector->class_) {
+    if (obj->GetClass() == instance_collector->class_.Get()) {
       if (instance_collector->max_count_ == 0 ||
           instance_collector->instances_.size() < instance_collector->max_count_) {
         instance_collector->instances_.push_back(obj);
@@ -1999,27 +1935,28 @@
   }
 
  private:
-  const mirror::Class* const class_;
+  Handle<mirror::Class> const class_;
   const uint32_t max_count_;
-  std::vector<mirror::Object*>& instances_;
+  std::vector<ObjPtr<mirror::Object>>& instances_;
   DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
 };
 
-void Heap::GetInstances(mirror::Class* c,
+void Heap::GetInstances(Handle<mirror::Class> c,
                         int32_t max_count,
-                        std::vector<mirror::Object*>& instances) {
+                        std::vector<ObjPtr<mirror::Object>>& instances) {
   InstanceCollector collector(c, max_count, instances);
   VisitObjects(&InstanceCollector::Callback, &collector);
 }
 
 class ReferringObjectsFinder {
  public:
-  ReferringObjectsFinder(mirror::Object* object,
+  ReferringObjectsFinder(ObjPtr<mirror::Object> object,
                          int32_t max_count,
-                         std::vector<mirror::Object*>& referring_objects)
+                         std::vector<ObjPtr<mirror::Object>>& referring_objects)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
-  }
+      : object_(object),
+        max_count_(max_count),
+        referring_objects_(referring_objects) {}
 
   static void Callback(mirror::Object* obj, void* arg)
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -2029,12 +1966,14 @@
   // For bitmap Visit.
   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
   // annotalysis on visitors.
-  void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
+  void operator()(ObjPtr<mirror::Object> o) const NO_THREAD_SAFETY_ANALYSIS {
     o->VisitReferences(*this, VoidFunctor());
   }
 
   // For Object::VisitReferences.
-  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<mirror::Object> obj,
+                  MemberOffset offset,
+                  bool is_static ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
     if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
@@ -2047,14 +1986,15 @@
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
 
  private:
-  const mirror::Object* const object_;
+  ObjPtr<mirror::Object> const object_;
   const uint32_t max_count_;
-  std::vector<mirror::Object*>& referring_objects_;
+  std::vector<ObjPtr<mirror::Object>>& referring_objects_;
   DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
 };
 
-void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
-                               std::vector<mirror::Object*>& referring_objects) {
+void Heap::GetReferringObjects(ObjPtr<mirror::Object> o,
+                               int32_t max_count,
+                               std::vector<ObjPtr<mirror::Object>>& referring_objects) {
   ReferringObjectsFinder finder(o, max_count, referring_objects);
   VisitObjects(&ReferringObjectsFinder::Callback, &finder);
 }
@@ -3113,41 +3053,42 @@
   const bool verify_referent_;
 };
 
-void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
+void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
   // Slow path, the allocation stack push back must have already failed.
-  DCHECK(!allocation_stack_->AtomicPushBack(*obj));
+  DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
   do {
     // TODO: Add handle VerifyObject.
     StackHandleScope<1> hs(self);
-    HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+    HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
     // Push our object into the reserve region of the allocaiton stack. This is only required due
     // to heap verification requiring that roots are live (either in the live bitmap or in the
     // allocation stack).
-    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
+    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
-  } while (!allocation_stack_->AtomicPushBack(*obj));
+  } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
 }
 
-void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
+void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
+                                                          ObjPtr<mirror::Object>* obj) {
   // Slow path, the allocation stack push back must have already failed.
-  DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
+  DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
   StackReference<mirror::Object>* start_address;
   StackReference<mirror::Object>* end_address;
   while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
                                             &end_address)) {
     // TODO: Add handle VerifyObject.
     StackHandleScope<1> hs(self);
-    HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+    HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
     // Push our object into the reserve region of the allocaiton stack. This is only required due
     // to heap verification requiring that roots are live (either in the live bitmap or in the
     // allocation stack).
-    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
+    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
     // Push into the reserve allocation stack.
     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
   }
   self->SetThreadLocalAllocationStack(start_address, end_address);
   // Retry on the new thread-local allocation stack.
-  CHECK(self->PushOnThreadLocalAllocationStack(*obj));  // Must succeed.
+  CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr()));  // Must succeed.
 }
 
 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
@@ -3737,7 +3678,7 @@
   }
 }
 
-void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
+void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
   ScopedObjectAccess soa(self);
   ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
   jvalue args[1];
@@ -3747,9 +3688,11 @@
   *object = soa.Decode<mirror::Object>(arg.get()).Ptr();
 }
 
-void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) {
+void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
+                                            bool force_full,
+                                            ObjPtr<mirror::Object>* obj) {
   StackHandleScope<1> hs(self);
-  HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+  HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
   RequestConcurrentGC(self, force_full);
 }
 
@@ -4026,7 +3969,7 @@
   mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
 }
 
-void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
+void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
   CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count)) << c->GetClassFlags();
   CHECK_GE(byte_count, sizeof(mirror::Object));
@@ -4152,7 +4095,7 @@
   return state.GetFrameCount();
 }
 
-void Heap::CheckGcStressMode(Thread* self, mirror::Object** obj) {
+void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
   auto* const runtime = Runtime::Current();
   if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
       !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
@@ -4191,9 +4134,9 @@
   gc_disabled_for_shutdown_ = true;
 }
 
-bool Heap::ObjectIsInBootImageSpace(mirror::Object* obj) const {
+bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
   for (gc::space::ImageSpace* space : boot_image_spaces_) {
-    if (space->HasAddress(obj)) {
+    if (space->HasAddress(obj.Ptr())) {
       return true;
     }
   }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5e17a52..95db4dd 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -34,6 +34,7 @@
 #include "gc/collector_type.h"
 #include "gc/space/large_object_space.h"
 #include "globals.h"
+#include "handle.h"
 #include "obj_ptr.h"
 #include "object_callbacks.h"
 #include "offsets.h"
@@ -194,36 +195,48 @@
   // Allocates and initializes storage for an object instance.
   template <bool kInstrumented, typename PreFenceVisitor>
   mirror::Object* AllocObject(Thread* self,
-                              mirror::Class* klass,
+                              ObjPtr<mirror::Class> klass,
                               size_t num_bytes,
                               const PreFenceVisitor& pre_fence_visitor)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+      REQUIRES(!*gc_complete_lock_,
+               !*pending_task_lock_,
+               !*backtrace_lock_,
                !Roles::uninterruptible_) {
-    return AllocObjectWithAllocator<kInstrumented, true>(
-        self, klass, num_bytes, GetCurrentAllocator(), pre_fence_visitor);
+    return AllocObjectWithAllocator<kInstrumented, true>(self,
+                                                         klass,
+                                                         num_bytes,
+                                                         GetCurrentAllocator(),
+                                                         pre_fence_visitor);
   }
 
   template <bool kInstrumented, typename PreFenceVisitor>
   mirror::Object* AllocNonMovableObject(Thread* self,
-                                        mirror::Class* klass,
+                                        ObjPtr<mirror::Class> klass,
                                         size_t num_bytes,
                                         const PreFenceVisitor& pre_fence_visitor)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+      REQUIRES(!*gc_complete_lock_,
+               !*pending_task_lock_,
+               !*backtrace_lock_,
                !Roles::uninterruptible_) {
-    return AllocObjectWithAllocator<kInstrumented, true>(
-        self, klass, num_bytes, GetCurrentNonMovingAllocator(), pre_fence_visitor);
+    return AllocObjectWithAllocator<kInstrumented, true>(self,
+                                                         klass,
+                                                         num_bytes,
+                                                         GetCurrentNonMovingAllocator(),
+                                                         pre_fence_visitor);
   }
 
   template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
   ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
-                                                         mirror::Class* klass,
+                                                         ObjPtr<mirror::Class> klass,
                                                          size_t byte_count,
                                                          AllocatorType allocator,
                                                          const PreFenceVisitor& pre_fence_visitor)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+      REQUIRES(!*gc_complete_lock_,
+               !*pending_task_lock_,
+               !*backtrace_lock_,
                !Roles::uninterruptible_);
 
   AllocatorType GetCurrentAllocator() const {
@@ -241,7 +254,7 @@
   void VisitObjectsPaused(ObjectCallback callback, void* arg)
       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
 
-  void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
+  void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
@@ -263,7 +276,7 @@
   // The given reference is believed to be to an object in the Java heap, check the soundness of it.
   // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
   // proper lock ordering for it.
-  void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;
+  void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;
 
   // Check sanity of all live references.
   void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
@@ -276,16 +289,16 @@
   // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
   // and doesn't abort on error, allowing the caller to report more
   // meaningful diagnostics.
-  bool IsValidObjectAddress(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
   // very slow.
-  bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
+  bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
   // Requires the heap lock to be held.
-  bool IsLiveObjectLocked(mirror::Object* obj,
+  bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
                           bool search_allocation_stack = true,
                           bool search_live_stack = true,
                           bool sorted = false)
@@ -321,19 +334,23 @@
 
   // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
   // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
-  void CountInstances(const std::vector<mirror::Class*>& classes,
+  void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
                       bool use_is_assignable_from,
                       uint64_t* counts)
       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Implements JDWP RT_Instances.
-  void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
+  void GetInstances(Handle<mirror::Class> c,
+                    int32_t max_count,
+                    std::vector<ObjPtr<mirror::Object>>& instances)
       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Implements JDWP OR_ReferringObjects.
-  void GetReferringObjects(mirror::Object* o,
+  void GetReferringObjects(ObjPtr<mirror::Object> o,
                            int32_t max_count,
-                           std::vector<mirror::Object*>& referring_objects)
+                           std::vector<ObjPtr<mirror::Object>>& referring_objects)
       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -445,16 +462,14 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Write barrier for array operations that update many field positions
-  ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst,
-                                       int start_offset ATTRIBUTE_UNUSED,
+  ALWAYS_INLINE void WriteBarrierArray(ObjPtr<mirror::Object> dst,
+                                       int start_offset,
                                        // TODO: element_count or byte_count?
-                                       size_t length ATTRIBUTE_UNUSED) {
-    card_table_->MarkCard(dst);
-  }
+                                       size_t length)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
-    card_table_->MarkCard(obj);
-  }
+  ALWAYS_INLINE void WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   accounting::CardTable* GetCardTable() const {
     return card_table_.get();
@@ -464,7 +479,7 @@
     return rb_table_.get();
   }
 
-  void AddFinalizerReference(Thread* self, mirror::Object** object);
+  void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
 
   // Returns the number of bytes currently allocated.
   size_t GetBytesAllocated() const {
@@ -527,12 +542,20 @@
   // get the space that corresponds to an object's address. Current implementation searches all
   // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
   // TODO: consider using faster data structure like binary tree.
-  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const
+  space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
       REQUIRES_SHARED(Locks::mutator_lock_);
-  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
+
+  space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
                                                               bool fail_ok) const
       REQUIRES_SHARED(Locks::mutator_lock_);
-  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const
+
+  space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  space::Space* FindSpaceFromAddress(const void* ptr) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
@@ -598,7 +621,7 @@
     return boot_image_spaces_;
   }
 
-  bool ObjectIsInBootImageSpace(mirror::Object* obj) const
+  bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool IsInBootImageOatFile(const void* p) const
@@ -650,12 +673,6 @@
   void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
   std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Dump object should only be used by the signal handler.
-  void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
-  // Safe version of pretty type of which check to make sure objects are heap addresses.
-  std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
-  std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
-
   // GC performance measuring
   void DumpGcPerformanceInfo(std::ostream& os)
       REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
@@ -837,11 +854,11 @@
         collector_type == kCollectorTypeMC ||
         collector_type == kCollectorTypeHomogeneousSpaceCompact;
   }
-  bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
+  bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
       REQUIRES_SHARED(Locks::mutator_lock_);
   ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
                                        size_t new_num_bytes_allocated,
-                                       mirror::Object** obj)
+                                       ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
 
@@ -852,7 +869,7 @@
   // We don't force this to be inlined since it is a slow path.
   template <bool kInstrumented, typename PreFenceVisitor>
   mirror::Object* AllocLargeObject(Thread* self,
-                                   mirror::Class** klass,
+                                   ObjPtr<mirror::Class>* klass,
                                    size_t byte_count,
                                    const PreFenceVisitor& pre_fence_visitor)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -867,14 +884,14 @@
                                          size_t* bytes_allocated,
                                          size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated,
-                                         mirror::Class** klass)
+                                         ObjPtr<mirror::Class>* klass)
       REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Allocate into a specific space.
   mirror::Object* AllocateInto(Thread* self,
                                space::AllocSpace* space,
-                               mirror::Class* c,
+                               ObjPtr<mirror::Class> c,
                                size_t bytes)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -899,10 +916,6 @@
   template <bool kGrow>
   ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
 
-  // Returns true if the address passed in is within the address range of a continuous space.
-  bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Run the finalizers. If timeout is non zero, then we use the VMRuntime version.
   void RunFinalization(JNIEnv* env, uint64_t timeout);
 
@@ -914,7 +927,7 @@
   void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
       REQUIRES(!*pending_task_lock_);
 
-  void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
+  void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*pending_task_lock_);
   bool IsGCRequestPending() const;
@@ -986,13 +999,13 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Push an object onto the allocation stack.
-  void PushOnAllocationStack(Thread* self, mirror::Object** obj)
+  void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
-  void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
+  void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
-  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
+  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
 
@@ -1023,7 +1036,7 @@
   void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
 
   // GC stress mode attempts to do one GC per unique backtrace.
-  void CheckGcStressMode(Thread* self, mirror::Object** obj)
+  void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
 
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index e05f8f3..e357fa6 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -90,7 +90,7 @@
   }
   uint32_t idx = ExtractIndex(iref);
   ObjPtr<mirror::Object> obj = table_[idx].GetReference()->Read<kReadBarrierOption>();
-  VerifyObject(obj.Ptr());
+  VerifyObject(obj);
   return obj;
 }
 
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index d59bb39..6109ec6 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -99,7 +99,7 @@
   size_t topIndex = segment_state_.parts.topIndex;
 
   CHECK(obj != nullptr);
-  VerifyObject(obj.Ptr());
+  VerifyObject(obj);
   DCHECK(table_ != nullptr);
   DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
 
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index f0a7c16..273c67d 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2481,7 +2481,7 @@
       // Sanity check: If elements is not the same as the java array's data, it better not be a
       // heap address. TODO: This might be slow to check, may be worth keeping track of which
       // copies we make?
-      if (heap->IsNonDiscontinuousSpaceHeapAddress(reinterpret_cast<mirror::Object*>(elements))) {
+      if (heap->IsNonDiscontinuousSpaceHeapAddress(elements)) {
         soa.Vm()->JniAbortF("ReleaseArrayElements",
                             "invalid element pointer %p, array elements are %p",
                             reinterpret_cast<void*>(elements), array_data);
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 9d7f98f..7cbcac8 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -100,10 +100,10 @@
   explicit SetLengthVisitor(int32_t length) : length_(length) {
   }
 
-  void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsArray as object is not yet in live bitmap or allocation stack.
-    Array* array = down_cast<Array*>(obj);
+    ObjPtr<Array> array = ObjPtr<Array>::DownCast(obj);
     // DCHECK(array->IsArrayInstance());
     array->SetLength(length_);
   }
@@ -124,10 +124,10 @@
       component_size_shift_(component_size_shift) {
   }
 
-  void operator()(Object* obj, size_t usable_size) const
+  void operator()(ObjPtr<Object> obj, size_t usable_size) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsArray as object is not yet in live bitmap or allocation stack.
-    Array* array = down_cast<Array*>(obj);
+    ObjPtr<Array> array = ObjPtr<Array>::DownCast(obj);
     // DCHECK(array->IsArrayInstance());
     int32_t length = (usable_size - header_size_) >> component_size_shift_;
     DCHECK_GE(length, minimum_length_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index cc088b8..98d383d 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -707,9 +707,13 @@
   if (!kCheckAddFinalizer) {
     DCHECK(!IsFinalizable());
   }
-  mirror::Object* obj =
-      heap->AllocObjectWithAllocator<kIsInstrumented, false>(self, this, this->object_size_,
-                                                             allocator_type, VoidFunctor());
+  // Note that the this pointer may be invalidated after the allocation.
+  ObjPtr<mirror::Object> obj =
+      heap->AllocObjectWithAllocator<kIsInstrumented, false>(self,
+                                                             this,
+                                                             this->object_size_,
+                                                             allocator_type,
+                                                             VoidFunctor());
   if (add_finalizer && LIKELY(obj != nullptr)) {
     heap->AddFinalizerReference(self, &obj);
     if (UNLIKELY(self->IsExceptionPending())) {
@@ -717,7 +721,7 @@
       obj = nullptr;
     }
   }
-  return obj;
+  return obj.Ptr();
 }
 
 inline Object* Class::AllocObject(Thread* self) {
@@ -879,11 +883,11 @@
   SetFieldBoolean<false, false>(GetSlowPathFlagOffset(), enabled);
 }
 
-inline void Class::InitializeClassVisitor::operator()(
-    mirror::Object* obj, size_t usable_size) const {
+inline void Class::InitializeClassVisitor::operator()(ObjPtr<mirror::Object> obj,
+                                                      size_t usable_size) const {
   DCHECK_LE(class_size_, usable_size);
   // Avoid AsClass as object is not yet in live bitmap or allocation stack.
-  mirror::Class* klass = down_cast<mirror::Class*>(obj);
+  ObjPtr<mirror::Class> klass = ObjPtr<mirror::Class>::DownCast(obj);
   // DCHECK(klass->IsClass());
   klass->SetClassSize(class_size_);
   klass->SetPrimitiveType(Primitive::kPrimNot);  // Default to not being primitive.
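
Note: the comment added above is the crux of this file's change. AllocObjectWithAllocator may trigger a moving GC, so raw mirror::Object* values (including 'this') held across it can go stale; the result is therefore kept in an ObjPtr and only converted back to a raw pointer at the still-unconverted return boundary. A minimal sketch of the rule of thumb being applied, using only types already visible in this patch (Example itself is illustrative, not an ART function):

    // Handle<T> is a GC-visited root, so it stays correct across a collection;
    // ObjPtr<T> is a checked bare pointer: cheap, but it neither keeps the
    // object alive nor gets updated -- with pointer poisoning enabled it can
    // only CHECK that it was not carried across a point where it may be stale.
    void Example(Thread* self, ObjPtr<mirror::Class> klass)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      StackHandleScope<1> hs(self);
      Handle<mirror::Class> h_klass = hs.NewHandle(klass);      // survives a GC
      ObjPtr<mirror::Object> obj = h_klass->AllocObject(self);  // may suspend
      // 'klass' must not be reused past this point; 'h_klass' and 'obj' are fine.
    }
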
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 40742d2..7606915 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -998,7 +998,7 @@
         copy_bytes_(copy_bytes), imt_(imt), pointer_size_(pointer_size) {
   }
 
-  void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<mirror::Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     StackHandleScope<1> hs(self_);
     Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index a0d6f37..725939a 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1232,7 +1232,7 @@
     explicit InitializeClassVisitor(uint32_t class_size) : class_size_(class_size) {
     }
 
-    void operator()(mirror::Object* obj, size_t usable_size) const
+    void operator()(ObjPtr<mirror::Object> obj, size_t usable_size) const
         REQUIRES_SHARED(Locks::mutator_lock_);
 
    private:
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 9d3c26e..dbfe1d9 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -43,10 +43,10 @@
 
 class CopyReferenceFieldsWithReadBarrierVisitor {
  public:
-  explicit CopyReferenceFieldsWithReadBarrierVisitor(Object* dest_obj)
+  explicit CopyReferenceFieldsWithReadBarrierVisitor(ObjPtr<Object> dest_obj)
       : dest_obj_(dest_obj) {}
 
-  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
+  void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const
       ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     // GetFieldObject() contains a RB.
     Object* ref = obj->GetFieldObject<Object>(offset);
@@ -55,7 +55,7 @@
     dest_obj_->SetFieldObjectWithoutWriteBarrier<false, false>(offset, ref);
   }
 
-  void operator()(mirror::Class* klass, mirror::Reference* ref) const
+  void operator()(ObjPtr<mirror::Class> klass, mirror::Reference* ref) const
       ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
     // Copy java.lang.ref.Reference.referent which isn't visited in
     // Object::VisitReferences().
@@ -69,18 +69,18 @@
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
 
  private:
-  Object* const dest_obj_;
+  ObjPtr<Object> const dest_obj_;
 };
 
 Object* Object::CopyObject(Thread* self,
-                           mirror::Object* dest,
-                           mirror::Object* src,
+                           ObjPtr<mirror::Object> dest,
+                           ObjPtr<mirror::Object> src,
                            size_t num_bytes) {
   // Copy instance data.  Don't assume memcpy copies by words (b/32012820).
   {
     const size_t offset = sizeof(Object);
-    uint8_t* src_bytes = reinterpret_cast<uint8_t*>(src) + offset;
-    uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest) + offset;
+    uint8_t* src_bytes = reinterpret_cast<uint8_t*>(src.Ptr()) + offset;
+    uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest.Ptr()) + offset;
     num_bytes -= offset;
     DCHECK_ALIGNED(src_bytes, sizeof(uintptr_t));
     DCHECK_ALIGNED(dst_bytes, sizeof(uintptr_t));
@@ -131,7 +131,7 @@
   if (c->IsFinalizable()) {
     heap->AddFinalizerReference(self, &dest);
   }
-  return dest;
+  return dest.Ptr();
 }
 
 // An allocation pre-fence visitor that copies the object.
@@ -141,7 +141,7 @@
       : self_(self), orig_(orig), num_bytes_(num_bytes) {
   }
 
-  void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
   }
@@ -161,14 +161,14 @@
   size_t num_bytes = SizeOf();
   StackHandleScope<1> hs(self);
   Handle<Object> this_object(hs.NewHandle(this));
-  Object* copy;
+  ObjPtr<Object> copy;
   CopyObjectVisitor visitor(self, &this_object, num_bytes);
   if (heap->IsMovableObject(this)) {
     copy = heap->AllocObject<true>(self, GetClass(), num_bytes, visitor);
   } else {
     copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes, visitor);
   }
-  return copy;
+  return copy.Ptr();
 }
 
 uint32_t Object::GenerateIdentityHashCode() {
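
Note: both this file and class-inl.h above now pass &dest / &obj, i.e. an ObjPtr<mirror::Object>*, to Heap::AddFinalizerReference. The extra level of indirection is what lets the heap fix up the caller's reference if allocating the finalizer reference itself triggers a moving GC. The heap-side declaration is not shown in this excerpt, but presumably reads roughly:

    // runtime/gc/heap.h -- assumed shape after this change:
    void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
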
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 9ddf995..175b0c3 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -612,7 +612,9 @@
   // A utility function that copies an object in a read barrier and
   // write barrier-aware way. This is internally used by Clone() and
   // Class::CopyOf().
-  static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
+  static Object* CopyObject(Thread* self,
+                            ObjPtr<mirror::Object> dest,
+                            ObjPtr<mirror::Object> src,
                             size_t num_bytes)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index aea6ff1..cf902af 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -43,10 +43,10 @@
   explicit SetStringCountVisitor(int32_t count) : count_(count) {
   }
 
-  void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
-    String* string = down_cast<String*>(obj);
+    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
     string->SetCount(count_);
     DCHECK(!string->IsCompressed() || kUseStringCompression);
   }
@@ -63,10 +63,10 @@
       : count_(count), src_array_(src_array), offset_(offset), high_byte_(high_byte) {
   }
 
-  void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
-    String* string = down_cast<String*>(obj);
+    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
     string->SetCount(count_);
     DCHECK(!string->IsCompressed() || kUseStringCompression);
     int32_t length = String::GetLengthFromCount(count_);
@@ -99,10 +99,10 @@
     count_(count), src_array_(src_array), offset_(offset) {
   }
 
-  void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
-    String* string = down_cast<String*>(obj);
+    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
     string->SetCount(count_);
     const uint16_t* const src = src_array_->GetData() + offset_;
     const int32_t length = String::GetLengthFromCount(count_);
@@ -131,10 +131,10 @@
     count_(count), src_string_(src_string), offset_(offset) {
   }
 
-  void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
-    String* string = down_cast<String*>(obj);
+    ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
     string->SetCount(count_);
     const int32_t length = String::GetLengthFromCount(count_);
     bool compressible = kUseStringCompression && String::GetCompressionFlagFromCount(count_);
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 8e81bc9..31ce4c1 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -254,7 +254,9 @@
   LOG(INFO) << "VMDebug infopoint " << id << " hit";
 }
 
-static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass,
+static jlong VMDebug_countInstancesOfClass(JNIEnv* env,
+                                           jclass,
+                                           jclass javaClass,
                                            jboolean countAssignable) {
   ScopedObjectAccess soa(env);
   gc::Heap* const heap = Runtime::Current()->GetHeap();
@@ -263,13 +265,16 @@
   if (c == nullptr) {
     return 0;
   }
-  std::vector<mirror::Class*> classes {c.Ptr()};
+  StackHandleScopeCollection hs(soa.Self());
+  std::vector<Handle<mirror::Class>> classes {hs.NewHandle(c)};
   uint64_t count = 0;
   heap->CountInstances(classes, countAssignable, &count);
   return count;
 }
 
-static jlongArray VMDebug_countInstancesOfClasses(JNIEnv* env, jclass, jobjectArray javaClasses,
+static jlongArray VMDebug_countInstancesOfClasses(JNIEnv* env,
+                                                  jclass,
+                                                  jobjectArray javaClasses,
                                                   jboolean countAssignable) {
   ScopedObjectAccess soa(env);
   gc::Heap* const heap = Runtime::Current()->GetHeap();
@@ -279,14 +284,15 @@
   if (decoded_classes == nullptr) {
     return nullptr;
   }
-  std::vector<mirror::Class*> classes;
+  StackHandleScopeCollection hs(soa.Self());
+  std::vector<Handle<mirror::Class>> classes;
   for (size_t i = 0, count = decoded_classes->GetLength(); i < count; ++i) {
-    classes.push_back(decoded_classes->Get(i));
+    classes.push_back(hs.NewHandle(decoded_classes->Get(i)));
   }
   std::vector<uint64_t> counts(classes.size(), 0u);
   // Heap::CountInstances can handle null and will put 0 for these classes.
   heap->CountInstances(classes, countAssignable, &counts[0]);
-  auto* long_counts = mirror::LongArray::Alloc(soa.Self(), counts.size());
+  ObjPtr<mirror::LongArray> long_counts = mirror::LongArray::Alloc(soa.Self(), counts.size());
   if (long_counts == nullptr) {
     soa.Self()->AssertPendingOOMException();
     return nullptr;
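
Note: the raw std::vector<mirror::Class*> that used to be handed to Heap::CountInstances becomes a vector of handles rooted in a StackHandleScopeCollection, presumably so the classes remain valid if anything inside the instance walk suspends the thread. Based on these call sites, the updated Heap API should read roughly as follows (a sketch; the parameter names are guesses):

    // runtime/gc/heap.h -- assumed shape after this change:
    void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
                        bool use_is_assignable_from,
                        uint64_t* counts)
        REQUIRES_SHARED(Locks::mutator_lock_);
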
diff --git a/runtime/obj_ptr-inl.h b/runtime/obj_ptr-inl.h
index f0a5f6f..d0be6dc 100644
--- a/runtime/obj_ptr-inl.h
+++ b/runtime/obj_ptr-inl.h
@@ -33,7 +33,7 @@
 template<class MirrorType, bool kPoison>
 inline void ObjPtr<MirrorType, kPoison>::AssertValid() const {
   if (kPoison) {
-    CHECK(IsValid()) << "Stale object pointer " << Ptr() << " , expected cookie "
+    CHECK(IsValid()) << "Stale object pointer " << PtrUnchecked() << ", expected cookie "
         << TrimCookie(Thread::Current()->GetPoisonObjectCookie()) << " but got " << GetCookie();
   }
 }
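
Note: this small change is not cosmetic. Judging from the call sites elsewhere in this patch, Ptr() asserts validity before decoding, so formatting the failure message with Ptr() would re-enter the very assertion that is in the middle of failing; PtrUnchecked() decodes the stored address without the check, which is what a "stale pointer" message needs. The assumed relationship (a sketch, not copied from obj_ptr.h):

    // Assumed accessor relationship:
    MirrorType* Ptr() const {
      AssertValid();          // would fire again while the message is built
      return PtrUnchecked();  // raw decode, no validity check
    }
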
diff --git a/runtime/openjdkjvmti/events.cc b/runtime/openjdkjvmti/events.cc
index 4d5b7e0..7e4ee84 100644
--- a/runtime/openjdkjvmti/events.cc
+++ b/runtime/openjdkjvmti/events.cc
@@ -128,7 +128,7 @@
  public:
   explicit JvmtiAllocationListener(EventHandler* handler) : handler_(handler) {}
 
-  void ObjectAllocated(art::Thread* self, art::mirror::Object** obj, size_t byte_count)
+  void ObjectAllocated(art::Thread* self, art::ObjPtr<art::mirror::Object>* obj, size_t byte_count)
       REQUIRES_SHARED(art::Locks::mutator_lock_) {
     DCHECK_EQ(self, art::Thread::Current());
 
@@ -153,7 +153,7 @@
       ScopedLocalRef<jobject> object(
           jni_env, jni_env->AddLocalReference<jobject>(*obj));
       ScopedLocalRef<jclass> klass(
-          jni_env, jni_env->AddLocalReference<jclass>((*obj)->GetClass()));
+          jni_env, jni_env->AddLocalReference<jclass>(obj->Ptr()->GetClass()));
 
       handler_->DispatchEvent(self,
                               JVMTI_EVENT_VM_OBJECT_ALLOC,
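
Note: ObjectAllocated keeps its pointer-to-pointer shape, now over ObjPtr: the heap hands the listener a reference it may both read and update, since dispatching the event may suspend the thread and move the freshly allocated object. Inside the callback, *obj is the ObjPtr and obj->Ptr() the raw object, hence the obj->Ptr()->GetClass() spelling above.

    // Reading through the double indirection (sketch):
    //   *obj        -> ObjPtr<mirror::Object>  (checked reference)
    //   obj->Ptr()  -> mirror::Object*         (raw pointer)
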
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index e0ba8e7..a9f39d0 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -41,7 +41,7 @@
 
 void ReferenceTable::Add(ObjPtr<mirror::Object> obj) {
   DCHECK(obj != nullptr);
-  VerifyObject(obj.Ptr());
+  VerifyObject(obj);
   if (entries_.size() >= max_size_) {
     LOG(FATAL) << "ReferenceTable '" << name_ << "' "
                << "overflowed (" << max_size_ << " entries)";
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index aed6a2b..be97860 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -27,7 +27,6 @@
 
 namespace art {
 
-static constexpr bool kDumpHeapObjectOnSigsevg = false;
 static constexpr bool kUseSignalHandler = false;
 
 struct sigaction old_action;
@@ -48,11 +47,6 @@
   if (runtime != nullptr) {
     // Print this out first in case DumpObject faults.
     LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
-    gc::Heap* heap = runtime->GetHeap();
-    if (kDumpHeapObjectOnSigsevg && heap != nullptr && info != nullptr) {
-      LOG(FATAL_WITHOUT_ABORT) << "Dump heap object at fault address: ";
-      heap->DumpObject(LOG_STREAM(FATAL_WITHOUT_ABORT), reinterpret_cast<mirror::Object*>(info->si_addr));
-    }
   }
   // Run the old signal handler.
   old_action.sa_sigaction(signal_number, info, raw_context);
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 1401f07..93704a9 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -36,7 +36,6 @@
 
 namespace art {
 
-static constexpr bool kDumpHeapObjectOnSigsevg = false;
 static constexpr bool kUseSigRTTimeout = true;
 static constexpr bool kDumpNativeStackOnTimeout = true;
 
@@ -365,13 +364,7 @@
       runtime->GetThreadList()->Dump(std::cerr, kDumpNativeStackOnTimeout);
       std::cerr << std::endl;
     }
-    gc::Heap* heap = runtime->GetHeap();
     std::cerr << "Fault message: " << runtime->GetFaultMessage() << std::endl;
-    if (kDumpHeapObjectOnSigsevg && heap != nullptr && info != nullptr) {
-      std::cerr << "Dump heap object at fault address: " << std::endl;
-      heap->DumpObject(std::cerr, reinterpret_cast<mirror::Object*>(info->si_addr));
-      std::cerr << std::endl;
-    }
   }
   if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
     std::cerr << "********************************************************\n"
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3e2ecfe..7f88035 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1875,7 +1875,7 @@
     if (LIKELY(HandleScopeContains(obj))) {
       // Read from handle scope.
       result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
-      VerifyObject(result.Ptr());
+      VerifyObject(result);
     } else {
       tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj);
       expect_null = true;
diff --git a/runtime/verify_object-inl.h b/runtime/verify_object-inl.h
index 4892b49..43151dd 100644
--- a/runtime/verify_object-inl.h
+++ b/runtime/verify_object-inl.h
@@ -29,7 +29,7 @@
   if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
     if (kVerifyObjectSupport > kVerifyObjectModeFast) {
       // Slow object verification, try the heap right away.
-      Runtime::Current()->GetHeap()->VerifyObjectBody(obj.Ptr());
+      Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
     } else {
       // Fast object verification, only call the heap if our quick sanity tests fail. The heap will
       // print the diagnostic message.
@@ -40,7 +40,7 @@
         failed = failed || !VerifyClassClass(c);
       }
       if (UNLIKELY(failed)) {
-        Runtime::Current()->GetHeap()->VerifyObjectBody(obj.Ptr());
+        Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
       }
     }
   }
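
Note: the last two hunks complete the pattern: once the heap-side verification entry points accept ObjPtr, the .Ptr() unwrapping at call sites (here, and in reference_table.cc and thread.cc above) disappears. Based purely on these call sites, the declarations after this change presumably look like the following sketch (annotations and exact declaration sites omitted):

    // Assumed shapes after this change:
    void VerifyObject(ObjPtr<mirror::Object> obj);      // runtime/verify_object.h
    void VerifyObjectBody(ObjPtr<mirror::Object> obj);  // gc::Heap
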