Add root verification when we try to mark an invalid object.

Now, when we try to mark an object that is not contained by any space, we call
VerifyRoots. This prints the root's vreg and method when it finds an invalid root.
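
For reference, a minimal sketch of how the new plumbing is meant to be driven (the
"Example" names here are hypothetical; the real caller is MarkSweep::VerifyRoots in
this change):

  // Any callback matching the new Heap::VerifyRootVisitor typedef can be plugged in.
  static void ExampleVerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                        const AbstractMethod* method) {
    // vreg is the register / shadow frame slot that held the root; method is NULL
    // for roots that did not come from a managed stack frame.
    LOG(INFO) << "Root " << root << " vreg=" << vreg;
    if (method != NULL) {
      LOG(INFO) << "  in " << PrettyMethod(method, true);
    }
  }

  void ExampleVerifyAllRoots() {
    // Walks each thread's per-thread roots and managed stack frames, calling the
    // callback with vreg/method information where the stack walk can provide it.
    Runtime::Current()->GetThreadList()->VerifyRoots(ExampleVerifyRootCallback, NULL);
  }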

Fixed an error in the total paused time statistic.

Change-Id: Id10e4097cce56bc54ee488de32183c18ba3f3780
diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc
index e19e2c4..03bbb6a 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/mark_sweep.cc
@@ -35,6 +35,7 @@
 #include "space.h"
 #include "timing_logger.h"
 #include "thread.h"
+#include "thread_list.h"
 
 static const bool kUseMarkStackPrefetch = true;
 
@@ -111,8 +112,12 @@
       LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
       SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
       if (!large_objects->Test(obj)) {
-        CHECK(large_object_space->Contains(obj)) << "Attempting to mark object " << obj
-                                                  << " not in large object space";
+        if (!large_object_space->Contains(obj)) {
+          LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
+          LOG(ERROR) << "Attempting see if it's a bad root";
+          VerifyRoots();
+          LOG(FATAL) << "Can't mark bad root";
+        }
         large_objects->Set(obj);
         // Don't need to check finger since large objects never have any object references.
       }
@@ -165,6 +170,29 @@
   mark_sweep->MarkObject0(root, true);
 }
 
+void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
+                                   const AbstractMethod* method) {
+  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, method);
+}
+
+void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const AbstractMethod* method) {
+  // See if the root is on any space bitmap.
+  if (heap_->FindSpaceFromObject(root) == NULL) {
+    LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+    if (!large_object_space->Contains(root)) {
+      LOG(ERROR) << "Found invalid root: " << root;
+      LOG(ERROR) << "VReg / Shadow frame offset: " << vreg;
+      if (method != NULL) {
+        LOG(ERROR) << "In method " << PrettyMethod(method, true);
+      }
+    }
+  }
+}
+
+void MarkSweep::VerifyRoots() {
+  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
+}
+
 // Marks all objects in the root set.
 void MarkSweep::MarkRoots() {
   Runtime::Current()->VisitRoots(MarkObjectVisitor, this);
diff --git a/src/gc/mark_sweep.h b/src/gc/mark_sweep.h
index 74b2aa7..76c5428 100644
--- a/src/gc/mark_sweep.h
+++ b/src/gc/mark_sweep.h
@@ -234,6 +234,17 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Verify the roots of the heap and print out information related to any invalid roots.
+  // Called in MarkObject, so we may not hold the mutator lock.
+  void VerifyRoots()
+      NO_THREAD_SAFETY_ANALYSIS;
+
+  static void VerifyRootCallback(const Object* root, void* arg, size_t vreg,
+                                 const AbstractMethod* method);
+
+  void VerifyRoot(const Object* root, size_t vreg, const AbstractMethod* method)
+      NO_THREAD_SAFETY_ANALYSIS;
+
   template <typename Visitor>
   static void VisitInstanceFieldsReferences(const Object* obj, const Visitor& visitor)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
diff --git a/src/heap.cc b/src/heap.cc
index f91ce84..6b0b0e5 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1135,7 +1135,7 @@
 
   // If the GC was slow, then print timings in the log.
   uint64_t duration = (NanoTime() - start_time) / 1000 * 1000;
-  total_paused_time_ += duration / kTimeAdjust;
+  total_paused_time_ += duration;
   if (duration > MsToNs(50)) {
     const size_t percent_free = GetPercentFree();
     const size_t current_heap_size = GetUsedMemorySize();
@@ -1763,7 +1763,7 @@
   uint64_t pause_roots = (root_end - root_begin) / 1000 * 1000;
   uint64_t pause_dirty = (dirty_end - dirty_begin) / 1000 * 1000;
   uint64_t duration = (NanoTime() - root_begin) / 1000 * 1000;
-  total_paused_time_ += (pause_roots + pause_dirty) / kTimeAdjust;
+  total_paused_time_ += pause_roots + pause_dirty;
   if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5) ||
       (gc_cause == kGcCauseForAlloc && duration > MsToNs(20))) {
     const size_t percent_free = GetPercentFree();
diff --git a/src/heap.h b/src/heap.h
index 6cd19d4..14d8382 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -94,6 +94,8 @@
   static const size_t kTimeAdjust = 1024;
 
   typedef void (RootVisitor)(const Object* root, void* arg);
+  typedef void (VerifyRootVisitor)(const Object* root, void* arg, size_t vreg,
+      const AbstractMethod* method);
   typedef bool (IsMarkedTester)(const Object* object, void* arg);
 
   // Create a heap with the requested sizes. The possible empty
diff --git a/src/stack.h b/src/stack.h
index 70d6f9d..bceadc3 100644
--- a/src/stack.h
+++ b/src/stack.h
@@ -90,12 +90,13 @@
             (shadow_frame_entry <= (&references_[number_of_references_ - 1])));
   }
 
-  void VisitRoots(Heap::RootVisitor* visitor, void* arg) {
+  template <typename Visitor>
+  void VisitRoots(const Visitor& visitor) {
     size_t num_refs = NumberOfReferences();
     for (size_t j = 0; j < num_refs; j++) {
       Object* object = GetReference(j);
       if (object != NULL) {
-        visitor(object, arg);
+        visitor(object, j);
       }
     }
   }
diff --git a/src/thread.cc b/src/thread.cc
index db8c39f..485caab 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -1807,12 +1807,14 @@
   return object->GetThinLockId() == thin_lock_id_;
 }
 
+// Visitor parameters are: (const Object* obj, size_t vreg, const AbstractMethod* method).
+template <typename Visitor>
 class ReferenceMapVisitor : public StackVisitor {
  public:
   ReferenceMapVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
-                      Context* context, Heap::RootVisitor* root_visitor, void* arg)
+                      Context* context, const Visitor& visitor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : StackVisitor(stack, trace_stack, context), root_visitor_(root_visitor), arg_(arg) {}
+      : StackVisitor(stack, trace_stack, context), visitor_(visitor) {}
 
   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (false) {
@@ -1821,7 +1823,8 @@
     }
     ShadowFrame* shadow_frame = GetCurrentShadowFrame();
     if (shadow_frame != NULL) {
-      shadow_frame->VisitRoots(root_visitor_, arg_);
+      WrapperVisitor wrapperVisitor(visitor_, shadow_frame->GetMethod());
+      shadow_frame->VisitRoots(wrapperVisitor);
     } else {
       AbstractMethod* m = GetMethod();
       // Process register map (which native and runtime methods don't have)
@@ -1863,15 +1866,13 @@
                 }
                 spill_shifts--;  // wind back one as we want the last match
                 ref = reinterpret_cast<Object*>(GetGPR(spill_shifts));
-                if (ref != NULL) {
-                  root_visitor_(ref, arg_);
-                }
               } else {
                 ref = reinterpret_cast<Object*>(GetVReg(cur_quick_frame, code_item, core_spills,
                                                         fp_spills, frame_size, reg));
-                if (ref != NULL) {
-                  root_visitor_(ref, arg_);
-                }
+              }
+
+              if (ref != NULL) {
+                visitor_(ref, reg, m);
               }
             }
           }
@@ -1882,18 +1883,104 @@
   }
 
  private:
-  bool TestBitmap(int reg, const uint8_t* reg_vector) {
+
+  class WrapperVisitor {
+   public:
+    WrapperVisitor(const Visitor& visitor, AbstractMethod* method)
+        : visitor_(visitor),
+          method_(method) {
+
+    }
+
+    void operator()(const Object* obj, size_t offset) const {
+      visitor_(obj, offset, method_);
+    }
+
+   private:
+    const Visitor& visitor_;
+    AbstractMethod* method_;
+  };
+
+  static bool TestBitmap(int reg, const uint8_t* reg_vector) {
     return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
   }
 
-  // Call-back when we visit a root.
-  Heap::RootVisitor* root_visitor_;
-  // Argument to call-back.
-  void* arg_;
+  // Visitor for when we visit a root.
+  const Visitor& visitor_;
+
   // A method helper we keep around to avoid dex file/cache re-computations.
   MethodHelper mh_;
 };
 
+class RootCallbackVisitor {
+ public:
+  RootCallbackVisitor(Heap::RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {
+
+  }
+
+  void operator()(const Object* obj, size_t, const AbstractMethod*) const {
+    visitor_(obj, arg_);
+  }
+
+ private:
+  Heap::RootVisitor* visitor_;
+  void* arg_;
+};
+
+class VerifyCallbackVisitor {
+ public:
+  VerifyCallbackVisitor(Heap::VerifyRootVisitor* visitor, void* arg)
+      : visitor_(visitor),
+        arg_(arg) {
+
+  }
+
+  void operator()(const Object* obj, size_t vreg, const AbstractMethod* method) const {
+    visitor_(obj, arg_, vreg, method);
+  }
+
+ private:
+  Heap::VerifyRootVisitor* visitor_;
+  void* arg_;
+};
+
+struct VerifyRootWrapperArg {
+  Heap::VerifyRootVisitor* visitor;
+  void* arg;
+};
+
+static void VerifyRootWrapperCallback(const Object* root, void* arg) {
+  VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
+  wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
+}
+
+void Thread::VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) {
+  // The visit-roots interfaces below only take a Heap::RootVisitor, so wrap the
+  // VerifyRootVisitor and pass nulls for the vreg/method arguments we don't have.
+  VerifyRootWrapperArg wrapperArg;
+  wrapperArg.arg = arg;
+  wrapperArg.visitor = visitor;
+
+  if (exception_ != NULL) {
+    VerifyRootWrapperCallback(exception_, &wrapperArg);
+  }
+  if (class_loader_override_ != NULL) {
+    VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
+  }
+  jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
+  jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
+
+  SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);
+
+  // Visit roots on this thread's stack
+  Context* context = GetLongJumpContext();
+  VerifyCallbackVisitor visitorToCallback(visitor, arg);
+  ReferenceMapVisitor<VerifyCallbackVisitor> mapper(GetManagedStack(), GetTraceStack(), context,
+                                                  visitorToCallback);
+  mapper.WalkStack();
+  ReleaseLongJumpContext(context);
+}
+
 void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
   if (exception_ != NULL) {
     visitor(exception_, arg);
@@ -1908,7 +1995,9 @@
 
   // Visit roots on this thread's stack
   Context* context = GetLongJumpContext();
-  ReferenceMapVisitor mapper(GetManagedStack(), GetTraceStack(), context, visitor, arg);
+  RootCallbackVisitor visitorToCallback(visitor, arg);
+  ReferenceMapVisitor<RootCallbackVisitor> mapper(GetManagedStack(), GetTraceStack(), context,
+                                                  visitorToCallback);
   mapper.WalkStack();
   ReleaseLongJumpContext(context);
 }
@@ -1921,8 +2010,9 @@
 
 void Thread::VerifyStack() {
   UniquePtr<Context> context(Context::Create());
-  ReferenceMapVisitor mapper(GetManagedStack(), GetTraceStack(), context.get(), VerifyObject,
-                             Runtime::Current()->GetHeap());
+  RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
+  ReferenceMapVisitor<RootCallbackVisitor> mapper(GetManagedStack(), GetTraceStack(), context.get(),
+                                                  visitorToCallback);
   mapper.WalkStack();
 }
 #endif
diff --git a/src/thread.h b/src/thread.h
index 06798cd..3ba9f4a 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -409,6 +409,9 @@
   void VisitRoots(Heap::RootVisitor* visitor, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  void VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
 #if VERIFY_OBJECT_ENABLED
   void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 #else
diff --git a/src/thread_list.cc b/src/thread_list.cc
index 4030be6..09a8de6 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -502,6 +502,13 @@
   }
 }
 
+void ThreadList::VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) const {
+  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+  for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
+    (*it)->VerifyRoots(visitor, arg);
+  }
+}
+
 uint32_t ThreadList::AllocThreadId() {
   MutexLock mu(Thread::Current(), allocated_ids_lock_);
   for (size_t i = 0; i < allocated_ids_.size(); ++i) {
diff --git a/src/thread_list.h b/src/thread_list.h
index 94e987f..3142fd3 100644
--- a/src/thread_list.h
+++ b/src/thread_list.h
@@ -81,6 +81,9 @@
   void VisitRoots(Heap::RootVisitor* visitor, void* arg) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  void VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Return a copy of the thread list.
   std::list<Thread*> GetList() EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) {
     return list_;