Implement cyclic region allocation in ART's region space.

This region allocation strategy tries to allocate new regions instead
of reusing previously freed ones. The intent is to minimize region
reuse so that some GC bugs are caught earlier.

This region allocation behavior is only enabled in debug builds.

Test: art/test.py
Test: Device boot test with libartd
Bug: 74064045
Change-Id: I76442bc65f9fa8cc2f5f67e6584f7fd73869d98e
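
For illustration, here is a minimal toy model of the policy this change
introduces (hypothetical names, not ART code): scan for a free slot
starting at a rotating marker instead of always at index 0, and advance
the marker past each allocation so that recently freed slots are reused
last.

    #include <cstddef>
    #include <vector>

    // Toy allocator over a plain free map; `cyclic` mirrors the role of
    // kCyclicRegionAllocation in the change below.
    struct ToyRegionAllocator {
      std::vector<bool> is_free;  // is_free[i]: slot i is unused or freed.
      size_t marker = 0;          // Next slot to consider (cyclic policy).
      bool cyclic = true;

      // Returns the allocated slot index, or the slot count if none is free.
      size_t Allocate() {
        const size_t n = is_free.size();
        for (size_t i = 0; i < n; ++i) {
          // Cyclic policy: start scanning at the marker; default policy: at 0.
          const size_t index = cyclic ? (marker + i) % n : i;
          if (is_free[index]) {
            is_free[index] = false;
            if (cyclic) {
              // Move the marker past the slot just allocated, so a slot freed
              // shortly afterwards is only handed out again after all others.
              marker = (index + 1) % n;
            }
            return index;
          }
        }
        return n;  // No free slot.
      }
    };

With the default policy, freeing slot 0 and allocating again immediately
returns slot 0; with the cyclic policy the allocator walks through every
other slot first, so dangling references into a just-freed region keep
pointing at unallocated memory for longer, which is what makes some GC
bugs easier to catch.
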
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 7072a7e..c6ec174 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -258,16 +258,79 @@
       return nullptr;
     }
   }
+
   // Find a large enough set of contiguous free regions.
-  size_t left = 0;
-  while (left + num_regs - 1 < num_regions_) {
+  if (kCyclicRegionAllocation) {
+    // Try to find a range of free regions within [cyclic_alloc_region_index_, num_regions_).
+    size_t next_region1 = -1;
+    mirror::Object* region1 = AllocLargeInRange<kForEvac>(cyclic_alloc_region_index_,
+                                                          num_regions_,
+                                                          num_regs,
+                                                          bytes_allocated,
+                                                          usable_size,
+                                                          bytes_tl_bulk_allocated,
+                                                          &next_region1);
+    if (region1 != nullptr) {
+      DCHECK_LT(0u, next_region1);
+      DCHECK_LE(next_region1, num_regions_);
+      // Move the cyclic allocation region marker to the region
+      // following the large region that was just allocated.
+      cyclic_alloc_region_index_ = next_region1 % num_regions_;
+      return region1;
+    }
+
+    // If the previous attempt failed, try to find a range of free regions within
+    // [0, min(cyclic_alloc_region_index_ + num_regs - 1, num_regions_)).
+    size_t next_region2 = -1;
+    mirror::Object* region2 = AllocLargeInRange<kForEvac>(
+        0,
+        std::min(cyclic_alloc_region_index_ + num_regs - 1, num_regions_),
+        num_regs,
+        bytes_allocated,
+        usable_size,
+        bytes_tl_bulk_allocated,
+        &next_region2);
+    if (region2 != nullptr) {
+      DCHECK_LT(0u, next_region2);
+      DCHECK_LE(next_region2, num_regions_);
+      // Move the cyclic allocation region marker to the region
+      // following the large region that was just allocated.
+      cyclic_alloc_region_index_ = next_region2 % num_regions_;
+      return region2;
+    }
+  } else {
+    // Try to find a range of free regions within [0, num_regions_).
+    mirror::Object* region = AllocLargeInRange<kForEvac>(0,
+                                                         num_regions_,
+                                                         num_regs,
+                                                         bytes_allocated,
+                                                         usable_size,
+                                                         bytes_tl_bulk_allocated);
+    if (region != nullptr) {
+      return region;
+    }
+  }
+  return nullptr;
+}
+
+template<bool kForEvac>
+inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
+                                                      size_t end,
+                                                      size_t num_regs,
+                                                      /* out */ size_t* bytes_allocated,
+                                                      /* out */ size_t* usable_size,
+                                                      /* out */ size_t* bytes_tl_bulk_allocated,
+                                                      /* out */ size_t* next_region) {
+  size_t left = begin;
+  while (left + num_regs - 1 < end) {
     bool found = true;
     size_t right = left;
-    DCHECK_LT(right, left + num_regs)
-        << "The inner loop Should iterate at least once";
+    DCHECK_LT(right, left + num_regs) << "The inner loop should iterate at least once";
     while (right < left + num_regs) {
       if (regions_[right].IsFree()) {
         ++right;
+        // Ensure `right` does not go beyond the past-the-end index of the region space.
+        DCHECK_LE(right, num_regions_);
       } else {
         found = false;
         break;
@@ -303,9 +366,15 @@
         *usable_size = allocated;
       }
       *bytes_tl_bulk_allocated = allocated;
-      return reinterpret_cast<mirror::Object*>(first_reg->Begin());
+      mirror::Object* large_region = reinterpret_cast<mirror::Object*>(first_reg->Begin());
+      DCHECK(large_region != nullptr);
+      if (next_region != nullptr) {
+        // Return the index of the region following the allocated large region via `next_region`.
+        *next_region = right;
+      }
+      return large_region;
     } else {
-      // right points to the non-free region. Start with the one after it.
+      // `right` points to the non-free region. Start with the one after it.
       left = right + 1;
     }
   }
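
For reference, here is a standalone sketch (hypothetical names, not ART
code) of the contiguous-run scan that AllocLargeInRange performs over
[begin, end); it returns both the start of the run and the index of the
slot just past it, which is what the caller uses to advance
cyclic_alloc_region_index_.

    #include <cstddef>
    #include <optional>
    #include <vector>

    // Result of a successful scan: the first slot of the run and the index
    // of the slot just past it (the `next_region` out-parameter above).
    struct Run {
      size_t first;
      size_t next;
    };

    // Scan [begin, end) in increasing order for `num_regs` contiguous free
    // slots; `is_free[i]` stands in for regions_[i].IsFree(). Assumes
    // end <= is_free.size() and num_regs >= 1.
    std::optional<Run> FindFreeRun(const std::vector<bool>& is_free,
                                   size_t begin,
                                   size_t end,
                                   size_t num_regs) {
      size_t left = begin;
      while (left + num_regs - 1 < end) {
        size_t right = left;
        bool found = true;
        while (right < left + num_regs) {
          if (is_free[right]) {
            ++right;
          } else {
            found = false;
            break;
          }
        }
        if (found) {
          return Run{left, right};  // Here right == left + num_regs.
        }
        // `right` points at a non-free slot; no run containing it can
        // succeed, so resume the search just past it.
        left = right + 1;
      }
      return std::nullopt;
    }

Under the cyclic strategy this scan is run twice: first over
[cyclic_alloc_region_index_, num_regions_), then, if that fails, over
[0, min(cyclic_alloc_region_index_ + num_regs - 1, num_regions_)), so a
large region may begin before the marker while the scan never reads past
the last region.
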
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 5ea434a..6a01c88 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -92,7 +92,8 @@
       max_peak_num_non_free_regions_(0U),
       non_free_region_index_limit_(0U),
       current_region_(&full_region_),
-      evac_region_(nullptr) {
+      evac_region_(nullptr),
+      cyclic_alloc_region_index_(0U) {
   CHECK_ALIGNED(mem_map->Size(), kRegionSize);
   CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
   DCHECK_GT(num_regions_, 0U);
@@ -437,6 +438,7 @@
     r->Clear(/*zero_and_release_pages*/true);
   }
   SetNonFreeRegionLimit(0);
+  DCHECK_EQ(num_non_free_regions_, 0u);
   current_region_ = &full_region_;
   evac_region_ = &full_region_;
 }
@@ -450,6 +452,9 @@
     return;
   }
   num_regions_ = new_num_regions;
+  if (kCyclicRegionAllocation && cyclic_alloc_region_index_ >= num_regions_) {
+    cyclic_alloc_region_index_ = 0u;
+  }
   SetLimit(Begin() + new_capacity);
   if (Size() > new_capacity) {
     SetEnd(Limit());
@@ -608,7 +613,14 @@
     return nullptr;
   }
   for (size_t i = 0; i < num_regions_; ++i) {
-    Region* r = &regions_[i];
+    // When using the cyclic region allocation strategy, try to
+    // allocate a region starting from the cyclic allocation region
+    // marker, i.e. the region following the last allocated one.
+    // Otherwise, start from the beginning of the region space.
+    size_t region_index = kCyclicRegionAllocation
+        ? ((cyclic_alloc_region_index_ + i) % num_regions_)
+        : i;
+    Region* r = &regions_[region_index];
     if (r->IsFree()) {
       r->Unfree(this, time_);
       if (for_evac) {
@@ -618,6 +630,11 @@
         r->SetNewlyAllocated();
         ++num_non_free_regions_;
       }
+      if (kCyclicRegionAllocation) {
+        // Move the cyclic allocation region marker to the region
+        // following the one that was just allocated.
+        cyclic_alloc_region_index_ = (region_index + 1) % num_regions_;
+      }
       return r;
     }
   }
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 6a1371a..c7e1888 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -31,6 +31,13 @@
 
 namespace space {
 
+// Cyclic region allocation strategy. If `true`, region allocation
+// will not try to allocate a new region from the beginning of the
+// region space, but will start from the region following the last
+// allocated one. This allocation strategy reduces region reuse and
+// should help catch some GC bugs earlier.
+static constexpr bool kCyclicRegionAllocation = kIsDebugBuild;
+
 // A space that consists of equal-sized regions.
 class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
  public:
@@ -571,6 +578,23 @@
 
   Region* AllocateRegion(bool for_evac) REQUIRES(region_lock_);
 
+  // Scan the region range [`begin`, `end`) in increasing order to try
+  // to allocate a large region spanning `num_regs` regions. If there
+  // is no room in the region space for such a large region, return
+  // null.
+  //
+  // If argument `next_region` is not null, use `*next_region` to
+  // return the index of the region following the allocated large
+  // region.
+  template<bool kForEvac>
+  mirror::Object* AllocLargeInRange(size_t begin,
+                                    size_t end,
+                                    size_t num_regs,
+                                    /* out */ size_t* bytes_allocated,
+                                    /* out */ size_t* usable_size,
+                                    /* out */ size_t* bytes_tl_bulk_allocated,
+                                    /* out */ size_t* next_region = nullptr) REQUIRES(region_lock_);
+
   Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
   uint32_t time_;                  // The time as the number of collections since the startup.
@@ -600,6 +624,11 @@
   Region* evac_region_;            // The region currently used for evacuation.
   Region full_region_;             // The dummy/sentinel region that looks full.
 
+  // Index into the region array pointing to the starting region when
+  // trying to allocate a new region. Only used when
+  // `kCyclicRegionAllocation` is true.
+  size_t cyclic_alloc_region_index_ GUARDED_BY(region_lock_);
+
   // Mark bitmap used by the GC.
   std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;