Fix cases where we miss instrumentation changes

Moved the allocation stack push to after we record the allocation,
since pushing on the allocation stack can cause thread suspension.
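
To make the ordering concern concrete, below is a minimal standalone
sketch (plain C++, not ART code; the names are invented stand-ins for
the heap-inl.h fast path): a tracking check made before a suspension
point is only safe to act on before that suspension point is reached.

  #include <atomic>
  #include <cstdio>

  // Stand-in for Heap::IsAllocTrackingEnabled(); another thread may
  // flip it whenever we hit a "suspension point".
  std::atomic<bool> alloc_tracking_enabled{true};

  void RecordAllocation() {
    std::printf("allocation recorded\n");
  }

  // Stand-in for PushOnAllocationStack(): a potential suspension
  // point, during which tracking can be toggled by another thread.
  void PushOnAllocationStack() {
    alloc_tracking_enabled.store(false);
  }

  int main() {
    // Fixed order: record while the tracking check is still current,
    // then perform the push that may suspend the thread.
    if (alloc_tracking_enabled.load()) {
      RecordAllocation();
    }
    PushOnAllocationStack();
    return 0;
  }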

Added handling in the entrypoint utils for the case where thread
suspension during the pre-allocation checks enables instrumentation:
after such a suspension point we now always take the instrumented
allocation path.
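
Roughly, the entrypoint pattern is: once the pre-allocation check (a
possible suspension point) has run, the kInstrumented template
argument chosen at the entrypoint can be stale, so the slow-path
branch hard-codes true. A standalone model (plain C++, invented
names, not the real entrypoints):

  #include <atomic>
  #include <cstdio>

  std::atomic<bool> instrumented{false};

  // Stand-in for CheckObjectAlloc(): can suspend, and instrumentation
  // may be enabled by another thread while we are suspended.
  bool SlowPathChecks() {
    instrumented.store(true);
    return true;  // true: the slow path was taken
  }

  template <bool kInstrumented>
  void Alloc() {
    std::printf("compile-time kInstrumented=%d, runtime state=%d\n",
                kInstrumented, instrumented.load());
  }

  int main() {
    if (SlowPathChecks()) {
      // After the possible suspension point, take the instrumented
      // path unconditionally instead of forwarding a stale flag.
      Alloc</*kInstrumented*/ true>();
    } else {
      Alloc</*kInstrumented*/ false>();
    }
    return 0;
  }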

Keep the AllocRecordObjectMap around instead of deleting it, since
there may still be threads waiting on new_record_condition_. The
condition guards adding stack traces while the GC is running.
Deleting the map while waiters had not yet resumed caused a CHECK
failure; this could happen when one thread disabled allocation
tracking while other threads were about to resume from the
condition. The map is now cleared in place and reused if tracking
is re-enabled.
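
The lifetime hazard can be reproduced with any condition variable: a
waiter touches the owning object again when it resumes, so the object
must outlive every waiter. A standalone model (plain C++11 with
std::condition_variable, not ART code) of why clearing in place is
safe where deletion was not:

  #include <condition_variable>
  #include <map>
  #include <mutex>
  #include <thread>

  struct RecordMap {
    std::mutex lock;                     // models alloc_tracker_lock_
    std::condition_variable new_record;  // models new_record_condition_
    std::map<int, int> entries;
    bool allow_new_records = false;

    void Wait() {
      std::unique_lock<std::mutex> mu(lock);
      // On wakeup the waiter re-reads members of *this; if *this were
      // deleted while it was blocked, that would be a use-after-free.
      new_record.wait(mu, [this] { return allow_new_records; });
    }

    void Clear() {
      std::lock_guard<std::mutex> mu(lock);
      entries.clear();  // drop the data but keep the object alive
      allow_new_records = true;
      new_record.notify_all();
    }
  };

  int main() {
    RecordMap map;  // lives for the whole program, like the heap's map
    std::thread waiter([&map] { map.Wait(); });
    map.Clear();    // safe: the waiter resumes against a live object
    waiter.join();
    return 0;
  }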

Bug: 27506909

(cherry picked from commit 0f394021d6abfdf9ebea6c7f405ec936a812ea62)

Change-Id: I1dc51e9f000684b4032b57beab59d317ece26f06
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 7e7d904..3e6b453 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -173,7 +173,10 @@
     if (klass == nullptr) {
       return nullptr;
     }
-    return klass->Alloc<kInstrumented>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
+    // CheckObjectAlloc can cause thread suspension which means we may now be instrumented.
+    return klass->Alloc</*kInstrumented*/true>(
+        self,
+        Runtime::Current()->GetHeap()->GetCurrentAllocator());
   }
   DCHECK(klass != nullptr);
   return klass->Alloc<kInstrumented>(self, allocator_type);
@@ -194,7 +197,9 @@
     }
     gc::Heap* heap = Runtime::Current()->GetHeap();
     // Pass in false since the object cannot be finalizable.
-    return klass->Alloc<kInstrumented, false>(self, heap->GetCurrentAllocator());
+    // CheckClassInitializedForObjectAlloc can cause thread suspension which means we may now be
+    // instrumented.
+    return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator());
   }
   // Pass in false since the object cannot be finalizable.
   return klass->Alloc<kInstrumented, false>(self, allocator_type);
@@ -265,9 +270,12 @@
       return nullptr;
     }
     gc::Heap* heap = Runtime::Current()->GetHeap();
-    return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
-                                               klass->GetComponentSizeShift(),
-                                               heap->GetCurrentAllocator());
+    // CheckArrayAlloc can cause thread suspension which means we may now be instrumented.
+    return mirror::Array::Alloc</*kInstrumented*/true>(self,
+                                                       klass,
+                                                       component_count,
+                                                       klass->GetComponentSizeShift(),
+                                                       heap->GetCurrentAllocator());
   }
   return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
                                              klass->GetComponentSizeShift(), allocator_type);
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 4672483..7319045 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -92,7 +92,7 @@
 }
 
 AllocRecordObjectMap::~AllocRecordObjectMap() {
-  STLDeleteValues(&entries_);
+  Clear();
 }
 
 void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
@@ -223,7 +223,11 @@
       if (heap->IsAllocTrackingEnabled()) {
         return;  // Already enabled, bail.
       }
-      AllocRecordObjectMap* records = new AllocRecordObjectMap();
+      AllocRecordObjectMap* records = heap->GetAllocationRecords();
+      if (records == nullptr) {
+        records = new AllocRecordObjectMap;
+        heap->SetAllocationRecords(records);
+      }
       CHECK(records != nullptr);
       records->SetProperties();
       std::string self_name;
@@ -237,7 +241,6 @@
       LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                 << records->max_stack_depth_ << " frames, taking up to "
                 << PrettySize(sz * records->alloc_record_max_) << ")";
-      heap->SetAllocationRecords(records);
     }
     Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
     {
@@ -247,7 +250,6 @@
   } else {
-    // Delete outside of the critical section to avoid possible lock violations like the runtime
-    // shutdown lock.
-    std::unique_ptr<AllocRecordObjectMap> map;
+    // Keep the map around and only clear it, so that threads still waiting
+    // on new_record_condition_ can resume against a live object.
     {
       MutexLock mu(self, *Locks::alloc_tracker_lock_);
       if (!heap->IsAllocTrackingEnabled()) {
@@ -255,7 +257,8 @@
       }
       heap->SetAllocTrackingEnabled(false);
       LOG(INFO) << "Disabling alloc tracker";
-      map = heap->ReleaseAllocationRecords();
+      AllocRecordObjectMap* records = heap->GetAllocationRecords();
+      records->Clear();
     }
     // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
     Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
@@ -303,5 +306,10 @@
   DCHECK_LE(records->Size(), records->alloc_record_max_);
 }
 
+void AllocRecordObjectMap::Clear() {
+  STLDeleteValues(&entries_);
+  entries_.clear();
+}
+
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index ffdfd31..18cce4d 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -306,6 +306,8 @@
     return entries_.rend();
   }
 
+  void Clear() REQUIRES(Locks::alloc_tracker_lock_);
+
  private:
   static constexpr size_t kDefaultNumAllocRecords = 512 * 1024;
   static constexpr size_t kDefaultNumRecentRecords = 64 * 1024 - 1;
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index d7023d8..59fd4a6 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -174,9 +174,6 @@
   } else {
     DCHECK(!Runtime::Current()->HasStatsEnabled());
   }
-  if (AllocatorHasAllocationStack(allocator)) {
-    PushOnAllocationStack(self, &obj);
-  }
   if (kInstrumented) {
     if (IsAllocTrackingEnabled()) {
       // Use obj->GetClass() instead of klass, because PushOnAllocationStack() could move klass
@@ -185,6 +182,9 @@
   } else {
     DCHECK(!IsAllocTrackingEnabled());
   }
+  if (AllocatorHasAllocationStack(allocator)) {
+    PushOnAllocationStack(self, &obj);
+  }
   if (kInstrumented) {
     if (gc_stress_mode_) {
       CheckGcStressMode(self, &obj);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2e5b599..f4fccee 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3940,10 +3940,6 @@
   allocation_records_.reset(records);
 }
 
-std::unique_ptr<AllocRecordObjectMap> Heap::ReleaseAllocationRecords() {
-  return std::move(allocation_records_);
-}
-
 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
   if (IsAllocTrackingEnabled()) {
     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index e0a53a0..9eda422 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -766,10 +766,6 @@
     return allocation_records_.get();
   }
 
-  // Release ownership of the allocation records.
-  std::unique_ptr<AllocRecordObjectMap> ReleaseAllocationRecords()
-      REQUIRES(Locks::alloc_tracker_lock_);
-
   void SetAllocationRecords(AllocRecordObjectMap* records)
       REQUIRES(Locks::alloc_tracker_lock_);