Move thread_running_gc_ logic to VisitObjectsInternalRegionSpace

Cleaner to have the logic in the caller.

Test: test-art-host CC
Change-Id: I93a16f8baf327b6692cce5c6141d1c361ce53f16
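
For illustration only, a minimal self-contained C++ sketch of the pattern this change applies: compute a debug-only predicate while holding the completion lock, then assert on the combined condition, so the lock is never taken in release builds. HeapSketch and its members are hypothetical stand-ins for the ART types, and assert() stands in for DCHECK(); this is not the ART source.

#include <cassert>
#include <mutex>
#include <thread>

// ART derives this from the build type; hard-coded here for the sketch.
static constexpr bool kIsDebugBuild = true;

class HeapSketch {
 public:
  void VisitRegionSpace() {
    // Only pay for the lock in debug builds; release builds see a
    // constant-false condition and skip the block entirely.
    bool is_thread_running_gc = false;
    if (kIsDebugBuild) {
      std::lock_guard<std::mutex> lock(gc_complete_lock_);
      is_thread_running_gc = std::this_thread::get_id() == thread_running_gc_;
    }
    // Either we are the thread running the GC, or moving GC must be disabled.
    assert(is_thread_running_gc || IsMovingGCDisabled());
    // ... walk the region space ...
  }

  bool IsMovingGCDisabled() {
    std::lock_guard<std::mutex> lock(gc_complete_lock_);
    // After this change the predicate only checks the disable count; the
    // thread_running_gc_ comparison lives in the caller above.
    return disable_moving_gc_count_ > 0;
  }

  void SetThreadRunningGC(std::thread::id id) {
    std::lock_guard<std::mutex> lock(gc_complete_lock_);
    thread_running_gc_ = id;
  }

 private:
  std::mutex gc_complete_lock_;
  std::thread::id thread_running_gc_{};
  int disable_moving_gc_count_ = 0;
};

int main() {
  HeapSketch heap;
  // Mark the current thread as the one running the GC so the DCHECK-style
  // assertion in VisitRegionSpace() is satisfied.
  heap.SetThreadRunningGC(std::this_thread::get_id());
  heap.VisitRegionSpace();
  return 0;
}
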
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index c933d04..34afa2a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -937,7 +937,14 @@
       // calls VerifyHeapReferences() as part of the zygote compaction
       // which then would call here without the moving GC disabled,
       // which is fine.
-      DCHECK(IsMovingGCDisabled(self));
+      bool is_thread_running_gc = false;
+      if (kIsDebugBuild) {
+        MutexLock mu(self, *gc_complete_lock_);
+        is_thread_running_gc = self == thread_running_gc_;
+      }
+      // If we are not the thread running the GC inside a GC exclusive region, then moving GC
+      // must be disabled.
+      DCHECK(is_thread_running_gc || IsMovingGCDisabled(self));
     }
     region_space_->Walk(callback, arg);
   }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 0d56213..1a782b4 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -736,9 +736,7 @@
 
   bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
     MutexLock mu(self, *gc_complete_lock_);
-    // If we are in a GC critical section or the disable moving GC count is non zero then moving
-    // GC is guaranteed to not start.
-    return disable_moving_gc_count_ > 0 || thread_running_gc_ == self;
+    return disable_moving_gc_count_ > 0;
   }
 
   // Request an asynchronous trim.