Handle OOM situation in java-heap more aggressively

Currently we perform several rounds of GC before throwing an
out-of-memory exception. Furthermore, we treat a GC as a success even if
it reclaims only a small amount of memory. This leads to janky app
behavior and timeouts during finalization.
In this change we perform one GC of the next scheduled type, and if that
doesn't reclaim a minimum amount of free space, we perform a final GC
cycle that is full-heap, clears soft references and, for the concurrent
copying collector, evacuates all regions to be thorough. If that also
fails, we throw OOME.
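
As a rough standalone sketch of the new escalation (HeapSketch, the
placeholder CollectGarbage/TryToAllocate bodies and the byte counts are
illustrative stand-ins, not ART internals; only the 1% threshold and the
two-step "next GC type, then exhaustive GC, then OOME" flow mirror this
change):

  #include <algorithm>
  #include <cstddef>
  #include <new>

  enum class GcType { kNone, kSticky, kPartial, kFull };

  struct HeapSketch {
    size_t growth_limit = 256u << 20;     // max heap size (illustrative)
    size_t bytes_allocated = 255u << 20;  // nearly full heap
    GcType next_gc_type = GcType::kPartial;

    // Mirrors kMinFreeHeapAfterGcForAlloc: a GC must leave at least this
    // fraction of growth_limit free, or we escalate / throw OOME.
    static constexpr double kMinFreeAfterGc = 0.01;

    bool HaveReclaimedEnough() const {
      double free_fraction =
          static_cast<double>(growth_limit - bytes_allocated) / growth_limit;
      return free_fraction >= kMinFreeAfterGc;
    }

    // Placeholder collector: pretend a full GC that also clears
    // SoftReferences frees much more than a lesser collection.
    GcType CollectGarbage(GcType type, bool clear_soft_refs) {
      size_t freed = clear_soft_refs ? (64u << 20) : (1u << 20);
      bytes_allocated -= std::min(bytes_allocated, freed);
      return type;
    }

    bool TryToAllocate(size_t alloc_size) {
      if (bytes_allocated + alloc_size > growth_limit) return false;
      bytes_allocated += alloc_size;
      return true;
    }

    // New flow: one GC of the next scheduled type (if not already tried),
    // then one exhaustive full-heap GC, then OOME.
    void AllocateWithGc(size_t alloc_size, GcType last_gc) {
      if (last_gc < next_gc_type) {
        GcType ran = CollectGarbage(next_gc_type, /*clear_soft_refs=*/false);
        if (ran != GcType::kNone && HaveReclaimedEnough() &&
            TryToAllocate(alloc_size)) {
          return;
        }
      }
      CollectGarbage(GcType::kFull, /*clear_soft_refs=*/true);
      if (HaveReclaimedEnough() && TryToAllocate(alloc_size)) {
        return;
      }
      throw std::bad_alloc();  // OOME: nothing more to try
    }
  };

  int main() {
    HeapSketch heap;
    // First GC frees too little (< 1% free), so the sketch escalates to
    // the exhaustive GC before the allocation succeeds.
    heap.AllocateWithGc(/*alloc_size=*/8u << 20, /*last_gc=*/GcType::kSticky);
  }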

Bug: 144525957
Test: art/test/testrunner/testrunner.py
Change-Id: I34af2250661f886c8c2ba32637ee37886e2021d7
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bb9ad3b..b919cdd 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1738,6 +1738,9 @@
                                              size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated,
                                              ObjPtr<mirror::Class>* klass) {
+  // After a GC (due to allocation failure) we should retrieve at least this
+  // fraction of the current max heap size. Otherwise throw OOME.
+  constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;
   bool was_default_allocator = allocator == GetCurrentAllocator();
   // Make sure there is no pending exception since we may need to throw an OOME.
   self->AssertNoPendingException();
@@ -1782,50 +1785,36 @@
     }
   }
 
+  auto have_reclaimed_enough = [&]() {
+    size_t curr_bytes_allocated = GetBytesAllocated();
+    double curr_free_heap =
+        static_cast<double>(growth_limit_ - curr_bytes_allocated) / growth_limit_;
+    return curr_free_heap >= kMinFreeHeapAfterGcForAlloc;
+  };
+  // We perform one GC as per the next_gc_type_ (chosen in GrowForUtilization),
+  // if it's not already tried. If that doesn't succeed then go for the most
+  // exhaustive option. Perform a full-heap collection including clearing
+  // SoftReferences. In case of ConcurrentCopying, it will also ensure that
+  // all regions are evacuated. If allocation doesn't succeed even after that
+  // then there is no hope, so we throw OOME.
   collector::GcType tried_type = next_gc_type_;
-  const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
-      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
+  if (last_gc < tried_type) {
+    const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
+        CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
 
-  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
-      (!instrumented && EntrypointsInstrumented())) {
-    return nullptr;
-  }
-  if (gc_ran) {
-    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
-                                                     usable_size, bytes_tl_bulk_allocated);
-    if (ptr != nullptr) {
-      return ptr;
-    }
-  }
-
-  // Loop through our different Gc types and try to Gc until we get enough free memory.
-  for (collector::GcType gc_type : gc_plan_) {
-    if (gc_type == tried_type) {
-      continue;
-    }
-    // Attempt to run the collector, if we succeed, re-try the allocation.
-    const bool plan_gc_ran = PERFORM_SUSPENDING_OPERATION(
-        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
     if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
         (!instrumented && EntrypointsInstrumented())) {
       return nullptr;
     }
-    if (plan_gc_ran) {
-      // Did we free sufficient memory for the allocation to succeed?
-      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+    if (gc_ran && have_reclaimed_enough()) {
+      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator,
+                                                       alloc_size, bytes_allocated,
                                                        usable_size, bytes_tl_bulk_allocated);
       if (ptr != nullptr) {
         return ptr;
       }
     }
   }
-  // Allocations have failed after GCs;  this is an exceptional state.
-  // Try harder, growing the heap if necessary.
-  mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
-                                                  usable_size, bytes_tl_bulk_allocated);
-  if (ptr != nullptr) {
-    return ptr;
-  }
   // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
   // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
   // VM spec requires that all SoftReferences have been collected and cleared before throwing
@@ -1840,8 +1829,12 @@
       (!instrumented && EntrypointsInstrumented())) {
     return nullptr;
   }
-  ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
-                                  bytes_tl_bulk_allocated);
+  mirror::Object* ptr = nullptr;
+  if (have_reclaimed_enough()) {
+    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
+                                    usable_size, bytes_tl_bulk_allocated);
+  }
+
   if (ptr == nullptr) {
     const uint64_t current_time = NanoTime();
     switch (allocator) {