Revert "RegisterNativeAllocation: Avoid case of double blocking gc."

This reverts commit 8df0f36b4fd5db6da67066da62eccc1e0b81e028.

The 004-NativeAllocations test fails on the art-gss-gc-tlab configuration.

Change-Id: I0fb0969c8e4af0bcd5f8481ce828ac4cf258c089
Bug: 36851903
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index ef99673..a450a75 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -299,7 +299,7 @@
   objects_moved_.StoreRelaxed(0);
   GcCause gc_cause = GetCurrentIteration()->GetGcCause();
   if (gc_cause == kGcCauseExplicit ||
-      gc_cause == kGcCauseForNativeAllocBlocking ||
+      gc_cause == kGcCauseForNativeAlloc ||
       gc_cause == kGcCauseCollectorTransition ||
       GetCurrentIteration()->GetClearSoftReferences()) {
     force_evacuate_all_ = true;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index fae8b2b..41e6051 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -192,7 +192,7 @@
   RevokeAllThreadLocalBuffers();
   if (generational_) {
     if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
-        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAllocBlocking ||
+        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
         GetCurrentIteration()->GetClearSoftReferences()) {
       // If an explicit, native allocation-triggered, or last attempt
       // collection, collect the whole heap.
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 2bbc86e..c35ec7c 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -29,7 +29,7 @@
     case kGcCauseBackground: return "Background";
     case kGcCauseExplicit: return "Explicit";
     case kGcCauseForNativeAlloc: return "NativeAlloc";
-    case kGcCauseForNativeAllocBlocking: return "NativeAllocBlocking";
+    case kGcCauseForNativeAllocBackground: return "NativeAllocBackground";
     case kGcCauseCollectorTransition: return "CollectorTransition";
     case kGcCauseDisableMovingGc: return "DisableMovingGc";
     case kGcCauseHomogeneousSpaceCompact: return "HomogeneousSpaceCompact";
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index b8cf3c4..41c8943 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -31,12 +31,10 @@
   kGcCauseBackground,
   // An explicit System.gc() call.
   kGcCauseExplicit,
-  // GC triggered for a native allocation when NativeAllocationGcWatermark is exceeded.
-  // (This may be a blocking GC depending on whether we run a non-concurrent collector).
+  // GC triggered for a native allocation.
   kGcCauseForNativeAlloc,
-  // GC triggered for a native allocation when NativeAllocationBlockingGcWatermark is exceeded.
-  // (This is always a blocking GC).
-  kGcCauseForNativeAllocBlocking,
+  // Background GC triggered for a native allocation.
+  kGcCauseForNativeAllocBackground,
   // GC triggered for a collector transition.
   kGcCauseCollectorTransition,
   // Not a real GC cause, used when we disable moving GC (currently for GetPrimitiveArrayCritical).
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 668fb4b..298336a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -563,7 +563,6 @@
   native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
   native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
                                                        *native_blocking_gc_lock_));
-  native_blocking_gc_is_assigned_ = false;
   native_blocking_gc_in_progress_ = false;
   native_blocking_gcs_finished_ = 0;
 
@@ -2696,10 +2695,6 @@
     // old_native_bytes_allocated_ now that GC has been triggered, resetting
     // new_native_bytes_allocated_ to zero in the process.
     old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
-    if (gc_cause == kGcCauseForNativeAllocBlocking) {
-      MutexLock mu(self, *native_blocking_gc_lock_);
-      native_blocking_gc_in_progress_ = true;
-    }
   }
 
   DCHECK_LT(gc_type, collector::kGcTypeMax);
@@ -3531,7 +3526,6 @@
     // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
     if (cause == kGcCauseForAlloc ||
         cause == kGcCauseForNativeAlloc ||
-        cause == kGcCauseForNativeAllocBlocking ||
         cause == kGcCauseDisableMovingGc) {
       VLOG(gc) << "Starting a blocking GC " << cause;
     }
@@ -3933,36 +3927,33 @@
         // finish before addressing the fact that we exceeded the blocking
         // watermark again.
         do {
-          ScopedTrace trace("RegisterNativeAllocation: Wait For Prior Blocking GC Completion");
           native_blocking_gc_cond_->Wait(self);
         } while (native_blocking_gcs_finished_ == initial_gcs_finished);
         initial_gcs_finished++;
       }
 
       // It's possible multiple threads have seen that we exceeded the
-      // blocking watermark. Ensure that only one of those threads is assigned
-      // to run the blocking GC. The rest of the threads should instead wait
-      // for the blocking GC to complete.
+      // blocking watermark. Ensure that only one of those threads runs the
+      // blocking GC. The rest of the threads should instead wait for the
+      // blocking GC to complete.
       if (native_blocking_gcs_finished_ == initial_gcs_finished) {
-        if (native_blocking_gc_is_assigned_) {
+        if (native_blocking_gc_in_progress_) {
           do {
-            ScopedTrace trace("RegisterNativeAllocation: Wait For Blocking GC Completion");
             native_blocking_gc_cond_->Wait(self);
           } while (native_blocking_gcs_finished_ == initial_gcs_finished);
         } else {
-          native_blocking_gc_is_assigned_ = true;
+          native_blocking_gc_in_progress_ = true;
           run_gc = true;
         }
       }
     }
 
     if (run_gc) {
-      CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAllocBlocking, false);
+      CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
       RunFinalization(env, kNativeAllocationFinalizeTimeout);
       CHECK(!env->ExceptionCheck());
 
       MutexLock mu(self, *native_blocking_gc_lock_);
-      native_blocking_gc_is_assigned_ = false;
       native_blocking_gc_in_progress_ = false;
       native_blocking_gcs_finished_++;
       native_blocking_gc_cond_->Broadcast(self);
@@ -3971,7 +3962,7 @@
     // Trigger another GC because there have been enough native bytes
     // allocated since the last GC.
     if (IsGcConcurrent()) {
-      RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
+      RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAllocBackground, /*force_full*/true);
     } else {
       CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
     }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7287178..aa123d8 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -1237,20 +1237,10 @@
   // old_native_bytes_allocated_ and new_native_bytes_allocated_.
   Atomic<size_t> old_native_bytes_allocated_;
 
-  // Used for synchronization when multiple threads call into
-  // RegisterNativeAllocation and require blocking GC.
-  // * If a previous blocking GC is in progress, all threads will wait for
-  // that GC to complete, then wait for one of the threads to complete another
-  // blocking GC.
-  // * If a blocking GC is assigned but not in progress, a thread has been
-  // assigned to run a blocking GC but has not started yet. Threads will wait
-  // for the assigned blocking GC to complete.
-  // * If a blocking GC is not assigned nor in progress, the first thread will
-  // run a blocking GC and signal to other threads that blocking GC has been
-  // assigned.
+  // Used for synchronization of blocking GCs triggered by
+  // RegisterNativeAllocation.
   Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
-  bool native_blocking_gc_is_assigned_ GUARDED_BY(native_blocking_gc_lock_);
   bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
   uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);