Revert "Remove support for moving GC collector transitions"

This reverts commit 66a655029c98de9492570f4cfd06476c0fde9cd1.

Bug: 78286368
Bug: 130236304

Reason for revert: Causes tombstones in AOSP walleye

Change-Id: I41c27d119f82490b68a01f85d92d22d0961d2af6
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ecf6ac8..bbcb93c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -96,6 +96,9 @@
 
 namespace gc {
 
+static constexpr size_t kCollectorTransitionStressIterations = 0;
+static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
+
 DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);
 
 // Minimum amount of remaining bytes before a concurrent GC is triggered.
@@ -312,9 +315,6 @@
   if (kUseReadBarrier) {
     CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
     CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
-  } else {
-    CHECK_EQ(IsMovingGc(foreground_collector_type_), IsMovingGc(background_collector_type_))
-        << "Changing from moving to non-moving GC (or visa versa) is not supported.";
   }
   verification_.reset(new Verification(this));
   CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
@@ -799,6 +799,34 @@
   }
 }
 
+void Heap::DisableMovingGc() {
+  CHECK(!kUseReadBarrier);
+  if (IsMovingGc(foreground_collector_type_)) {
+    foreground_collector_type_ = kCollectorTypeCMS;
+  }
+  if (IsMovingGc(background_collector_type_)) {
+    background_collector_type_ = foreground_collector_type_;
+  }
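+  // Perform the actual transition to the (now non-moving) foreground collector type.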
+  TransitionCollector(foreground_collector_type_);
+  Thread* const self = Thread::Current();
+  ScopedThreadStateChange tsc(self, kSuspended);
+  ScopedSuspendAll ssa(__FUNCTION__);
+  // Something may have caused the transition to fail.
+  if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
+    CHECK(main_space_ != nullptr);
+    // The allocation stack may have non movable objects in it. We need to flush it since the GC
+    // can only handle marking allocation stack objects from one non moving space and one main
+    // space.
+    {
+      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+      FlushAllocStack();
+    }
+    main_space_->DisableMovingObjects();
+    non_moving_space_ = main_space_;
+    CHECK(!non_moving_space_->CanMoveObjects());
+  }
+}
+
 bool Heap::IsCompilingBoot() const {
   if (!Runtime::Current()->IsAotCompiler()) {
     return false;
@@ -919,6 +947,14 @@
 void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
   if (old_process_state != new_process_state) {
     const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
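+    // Optional stress mode: when kCollectorTransitionStressIterations is non-zero, toggle the
+    // collector between foreground and background a few times before the real transition below.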
+    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
+      // Start at index 1 to avoid "is always false" warning.
+      // Have iteration 1 always transition the collector.
+      TransitionCollector((((i & 1) == 1) == jank_perceptible)
+          ? foreground_collector_type_
+          : background_collector_type_);
+      usleep(kCollectorTransitionStressWait);
+    }
     if (jank_perceptible) {
       // Transition back to foreground right away to prevent jank.
       RequestCollectorTransition(foreground_collector_type_, 0);
@@ -1346,7 +1382,7 @@
       VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
     }
   } else {
-    LOG(FATAL) << "Unsupported";
+    TransitionCollector(desired_collector_type);
   }
 }
 
@@ -1795,6 +1831,35 @@
         }
         break;
       }
+      case kAllocatorTypeNonMoving: {
+        if (kUseReadBarrier) {
+          // DisableMovingGc() isn't compatible with CC.
+          break;
+        }
+        // Try to transition the heap if the allocation failure was due to the space being full.
+        if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow=*/ false)) {
+          // If we aren't out of memory then the OOM was probably from the non moving space being
+          // full. Attempt to disable compaction and turn the main space into a non moving space.
+          DisableMovingGc();
+          // Thread suspension could have occurred.
+          if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+              (!instrumented && EntrypointsInstrumented())) {
+            return nullptr;
+          }
+          // If the collector is still a moving GC, then something must have caused the transition
+          // to fail.
+          if (IsMovingGc(collector_type_)) {
+            MutexLock mu(self, *gc_complete_lock_);
+            // If we couldn't disable moving GC, just throw OOME and return null.
+            LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
+                         << disable_moving_gc_count_;
+          } else {
+            LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
+            ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
+                                            usable_size, bytes_tl_bulk_allocated);
+          }
+        }
+        break;
+      }
       default: {
         // Do nothing for other allocators.
       }
@@ -2016,6 +2081,163 @@
   return HomogeneousSpaceCompactResult::kSuccess;
 }
 
+void Heap::TransitionCollector(CollectorType collector_type) {
+  if (collector_type == collector_type_) {
+    return;
+  }
+  // Collector transitions must not happen with CC.
+  CHECK(!kUseReadBarrier);
+  VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
+             << " -> " << static_cast<int>(collector_type);
+  uint64_t start_time = NanoTime();
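+  // Snapshot the allocated byte count so the space saved (or expanded) can be reported below.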
+  uint32_t before_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
+  Runtime* const runtime = Runtime::Current();
+  Thread* const self = Thread::Current();
+  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
+  // TODO: The Clang prebuilt for r316199 produces a bogus thread safety analysis warning when
+  // holding both an exclusive and a shared lock in the same scope. Remove the assertion as a
+  // temporary workaround.
+  // http://b/71769596
+  // Locks::mutator_lock_->AssertNotHeld(self);
+  // Busy wait until we can GC (StartGC can fail if we have a non-zero
+  // disable_moving_gc_count_, which should rarely occur).
+  for (;;) {
+    {
+      ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
+      MutexLock mu(self, *gc_complete_lock_);
+      // Ensure there is only one GC at a time.
+      WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
+      // Currently we only need a heap transition if we switch from a moving collector to a
+      // non-moving one, or vice versa.
+      const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
+      // If someone else beat us to it and changed the collector before we could, exit.
+      // This is safe to do before the suspend all since we set the collector_type_running_ before
+      // we exit the loop. If another thread attempts to do the heap transition before we exit,
+      // then it would get blocked on WaitForGcToCompleteLocked.
+      if (collector_type == collector_type_) {
+        return;
+      }
+      // GC can be disabled if someone has used GetPrimitiveArrayCritical but not yet released it.
+      if (!copying_transition || disable_moving_gc_count_ == 0) {
+        // TODO: Avoid hard-coding the semi-space collector here?
+        collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
+        break;
+      }
+    }
+    usleep(1000);
+  }
+  if (runtime->IsShuttingDown(self)) {
+    // Don't allow heap transitions to happen if the runtime is shutting down since these can
+    // cause objects to get finalized.
+    FinishGC(self, collector::kGcTypeNone);
+    return;
+  }
+  collector::GarbageCollector* collector = nullptr;
+  {
+    ScopedSuspendAll ssa(__FUNCTION__);
+    switch (collector_type) {
+      case kCollectorTypeSS: {
+        if (!IsMovingGc(collector_type_)) {
+          // Create the bump pointer space from the backup space.
+          CHECK(main_space_backup_ != nullptr);
+          MemMap mem_map = main_space_backup_->ReleaseMemMap();
+          // We are transitioning from a non moving GC to a moving GC. Since we copied from the
+          // bump pointer space during the last transition, it will be protected.
+          CHECK(mem_map.IsValid());
+          mem_map.Protect(PROT_READ | PROT_WRITE);
+          bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
+                                                                          std::move(mem_map));
+          AddSpace(bump_pointer_space_);
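+          // Copy live objects out of the main (malloc) space into the new bump pointer space.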
+          collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
+          // Use the now empty main space mem map for the bump pointer temp space.
+          mem_map = main_space_->ReleaseMemMap();
+          // Unset the pointers just in case.
+          if (dlmalloc_space_ == main_space_) {
+            dlmalloc_space_ = nullptr;
+          } else if (rosalloc_space_ == main_space_) {
+            rosalloc_space_ = nullptr;
+          }
+          // Remove the main space so that we don't try to trim it; trimming doesn't work for
+          // debug builds since RosAlloc attempts to read the magic number from a protected page.
+          RemoveSpace(main_space_);
+          RemoveRememberedSet(main_space_);
+          delete main_space_;  // Delete the space since it has been removed.
+          main_space_ = nullptr;
+          RemoveRememberedSet(main_space_backup_.get());
+          main_space_backup_.reset(nullptr);  // Deletes the space.
+          temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
+                                                                  std::move(mem_map));
+          AddSpace(temp_space_);
+        }
+        break;
+      }
+      case kCollectorTypeMS:
+        // Fall through.
+      case kCollectorTypeCMS: {
+        if (IsMovingGc(collector_type_)) {
+          CHECK(temp_space_ != nullptr);
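+          // Reuse the temp space's memory to recreate the main malloc space.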
+          MemMap mem_map = temp_space_->ReleaseMemMap();
+          RemoveSpace(temp_space_);
+          temp_space_ = nullptr;
+          mem_map.Protect(PROT_READ | PROT_WRITE);
+          CreateMainMallocSpace(std::move(mem_map),
+                                kDefaultInitialSize,
+                                std::min(mem_map.Size(), growth_limit_),
+                                mem_map.Size());
+          // Compact to the main space from the bump pointer space, don't need to swap semispaces.
+          AddSpace(main_space_);
+          collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
+          mem_map = bump_pointer_space_->ReleaseMemMap();
+          RemoveSpace(bump_pointer_space_);
+          bump_pointer_space_ = nullptr;
+          const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+          // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
+          if (kIsDebugBuild && kUseRosAlloc) {
+            mem_map.Protect(PROT_READ | PROT_WRITE);
+          }
+          main_space_backup_.reset(CreateMallocSpaceFromMemMap(
+              std::move(mem_map),
+              kDefaultInitialSize,
+              std::min(mem_map.Size(), growth_limit_),
+              mem_map.Size(),
+              name,
+              true));
+          if (kIsDebugBuild && kUseRosAlloc) {
+            main_space_backup_->GetMemMap()->Protect(PROT_NONE);
+          }
+        }
+        break;
+      }
+      default: {
+        LOG(FATAL) << "Attempted to transition to invalid collector type "
+                   << static_cast<size_t>(collector_type);
+        UNREACHABLE();
+      }
+    }
+    ChangeCollector(collector_type);
+  }
+  // Can't call into java code with all threads suspended.
+  reference_processor_->EnqueueClearedReferences(self);
+  uint64_t duration = NanoTime() - start_time;
+  GrowForUtilization(semi_space_collector_);
+  DCHECK(collector != nullptr);
+  LogGC(kGcCauseCollectorTransition, collector);
+  FinishGC(self, collector::kGcTypeFull);
+  {
+    ScopedObjectAccess soa(self);
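+    // Unload any native libraries whose class loaders may have been unloaded by the GC above.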
+    soa.Vm()->UnloadNativeLibraries();
+  }
+  int32_t after_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
+  int32_t delta_allocated = before_allocated - after_allocated;
+  std::string saved_str;
+  if (delta_allocated >= 0) {
+    saved_str = " saved at least " + PrettySize(delta_allocated);
+  } else {
+    saved_str = " expanded " + PrettySize(-delta_allocated);
+  }
+  VLOG(heap) << "Collector transition to " << collector_type << " took "
+             << PrettyDuration(duration) << saved_str;
+}
+
 void Heap::ChangeCollector(CollectorType collector_type) {
   // TODO: Only do this with all mutators suspended to avoid races.
   if (collector_type != collector_type_) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b474050..07f6a19 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -308,6 +308,9 @@
   void ChangeAllocator(AllocatorType allocator)
       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
 
+  // Transition the garbage collector during runtime; this may copy objects from one space to
+  // another.
+  void TransitionCollector(CollectorType collector_type) REQUIRES(!*gc_complete_lock_);
+
   // Change the collector to be one of the possible options (MS, CMS, SS).
   void ChangeCollector(CollectorType collector_type)
       REQUIRES(Locks::mutator_lock_);
@@ -688,6 +691,9 @@
                          uint32_t* boot_oat_begin,
                          uint32_t* boot_oat_end);
 
+  // Permanently disable moving garbage collection.
+  void DisableMovingGc() REQUIRES(!*gc_complete_lock_);
+
   space::DlMallocSpace* GetDlMallocSpace() const {
     return dlmalloc_space_;
   }
diff --git a/test/1337-gc-coverage/gc_coverage.cc b/test/1337-gc-coverage/gc_coverage.cc
index eb8ec0e..ac959f6 100644
--- a/test/1337-gc-coverage/gc_coverage.cc
+++ b/test/1337-gc-coverage/gc_coverage.cc
@@ -46,5 +46,19 @@
   return reinterpret_cast<jlong>(soa.Decode<mirror::Object>(object).Ptr());
 }
 
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_supportCollectorTransition(JNIEnv*, jclass) {
+  // Same as supportHomogeneousSpaceCompact for now.
+  return Runtime::Current()->GetHeap()->SupportHomogeneousSpaceCompactAndCollectorTransitions() ?
+      JNI_TRUE : JNI_FALSE;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_transitionToSS(JNIEnv*, jclass) {
+  Runtime::Current()->GetHeap()->TransitionCollector(gc::kCollectorTypeSS);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_transitionToCMS(JNIEnv*, jclass) {
+  Runtime::Current()->GetHeap()->TransitionCollector(gc::kCollectorTypeCMS);
+}
+
 }  // namespace
 }  // namespace art
diff --git a/test/1337-gc-coverage/src/Main.java b/test/1337-gc-coverage/src/Main.java
index 128ad4d..7875eb1 100644
--- a/test/1337-gc-coverage/src/Main.java
+++ b/test/1337-gc-coverage/src/Main.java
@@ -22,6 +22,7 @@
   public static void main(String[] args) {
     System.loadLibrary(args[0]);
     testHomogeneousCompaction();
+    testCollectorTransitions();
     System.out.println("Done.");
   }
 
@@ -67,10 +68,40 @@
     }
   }
 
+  private static void testCollectorTransitions() {
+    if (supportCollectorTransition()) {
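+      // A moving (semi-space) collector should relocate the object on GC, while the non-moving
+      // CMS collector should leave it at the same address.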
+      Object o = new Object();
+      // Transition to semi-space collector.
+      allocateStuff();
+      transitionToSS();
+      allocateStuff();
+      long addressBefore = objectAddress(o);
+      Runtime.getRuntime().gc();
+      long addressAfter = objectAddress(o);
+      if (addressBefore == addressAfter) {
+        System.out.println("error: Expected different address " + addressBefore + " vs " +
+            addressAfter);
+      }
+      // Transition back to CMS.
+      transitionToCMS();
+      allocateStuff();
+      addressBefore = objectAddress(o);
+      Runtime.getRuntime().gc();
+      addressAfter = objectAddress(o);
+      if (addressBefore != addressAfter) {
+        System.out.println("error: Expected same address " + addressBefore + " vs " +
+            addressAfter);
+      }
+    }
+  }
+
   // Methods to get access to ART internals.
   private static native boolean supportHomogeneousSpaceCompact();
   private static native boolean performHomogeneousSpaceCompact();
   private static native void incrementDisableMovingGC();
   private static native void decrementDisableMovingGC();
   private static native long objectAddress(Object object);
+  private static native boolean supportCollectorTransition();
+  private static native void transitionToSS();
+  private static native void transitionToCMS();
 }