Fix allocation tracking race

Check whether the allocation entrypoints changed from uninstrumented to
instrumented during a GC for an alloc. If they changed, retry the
allocation with kInstrumented = true.

Added stress test.

Bug: 27337759

(cherry picked from commit eebc3af4453f5c1fb5fd80c710cfd49566080d28)

Change-Id: I8fa50975b558199fcf142c8555476053001ace50
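
In outline: an allocation that starts with kInstrumented = false can reach
a GC suspend point inside AllocateInternalWithGc; if another thread enables
allocation tracking (or swaps the allocator) there, the in-flight allocation
must not complete against the stale state. A minimal C++ sketch of the retry
shape in the heap-inl.h hunk below; Object, Thread, and TryAllocateWithGc are
hypothetical stand-ins, and the sketch retries only on the instrumentation
half of the check:

    #include <cstddef>

    struct Object {};
    struct Thread {
      bool IsExceptionPending() const { return false; }  // Stub for the sketch.
    };

    // Stand-in for the slow path: may suspend at a GC point, and returns null
    // with no pending exception when the world changed while suspended.
    Object* TryAllocateWithGc(Thread* self, std::size_t bytes, bool instrumented);

    Object* Alloc(Thread* self, std::size_t bytes, bool instrumented) {
      Object* obj = TryAllocateWithGc(self, bytes, instrumented);
      if (obj == nullptr && !self->IsExceptionPending() && !instrumented) {
        // Tracking was enabled at a suspend point; retry with the safe
        // default so the new object is recorded.
        return Alloc(self, bytes, /*instrumented=*/ true);
      }
      return obj;
    }
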
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index f437830..d7023d8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -109,16 +109,25 @@
     obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                               &usable_size, &bytes_tl_bulk_allocated);
     if (UNLIKELY(obj == nullptr)) {
-      bool is_current_allocator = allocator == GetCurrentAllocator();
-      obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
+      // AllocateInternalWithGc can cause thread suspension; if someone instruments the
+      // entrypoints or changes the allocator at a suspend point here, we must retry the allocation.
+      obj = AllocateInternalWithGc(self,
+                                   allocator,
+                                   kInstrumented,
+                                   byte_count,
+                                   &bytes_allocated,
+                                   &usable_size,
                                    &bytes_tl_bulk_allocated, &klass);
       if (obj == nullptr) {
-        bool after_is_current_allocator = allocator == GetCurrentAllocator();
-        // If there is a pending exception, fail the allocation right away since the next one
-        // could cause OOM and abort the runtime.
-        if (!self->IsExceptionPending() && is_current_allocator && !after_is_current_allocator) {
-          // If the allocator changed, we need to restart the allocation.
-          return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
+        // The only way we can get a null return with no pending exception is if the
+        // allocator or the instrumentation changed.
+        if (!self->IsExceptionPending()) {
+          // AllocObject will pick up the new allocator type, and instrumented as true is the safe
+          // default.
+          return AllocObject</*kInstrumented*/true>(self,
+                                                    klass,
+                                                    byte_count,
+                                                    pre_fence_visitor);
         }
         return nullptr;
       }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a656fb8..81a8831 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1650,8 +1650,15 @@
   return nullptr;
 }
 
+static inline bool EntrypointsInstrumented() SHARED_REQUIRES(Locks::mutator_lock_) {
+  instrumentation::Instrumentation* const instrumentation =
+      Runtime::Current()->GetInstrumentation();
+  return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
+}
+
 mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
                                              AllocatorType allocator,
+                                             bool instrumented,
                                              size_t alloc_size,
                                              size_t* bytes_allocated,
                                              size_t* usable_size,
@@ -1667,12 +1674,13 @@
   // The allocation failed. If the GC is running, block until it completes, and then retry the
   // allocation.
   collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
+  // If we were the default allocator but the allocator changed, or the entrypoints became
+  // instrumented while we were suspended, abort the allocation so the caller can retry.
+  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+      (!instrumented && EntrypointsInstrumented())) {
+    return nullptr;
+  }
   if (last_gc != collector::kGcTypeNone) {
-    // If we were the default allocator but the allocator changed while we were suspended,
-    // abort the allocation.
-    if (was_default_allocator && allocator != GetCurrentAllocator()) {
-      return nullptr;
-    }
     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                      usable_size, bytes_tl_bulk_allocated);
@@ -1684,7 +1692,8 @@
   collector::GcType tried_type = next_gc_type_;
   const bool gc_ran =
       CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
-  if (was_default_allocator && allocator != GetCurrentAllocator()) {
+  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+      (!instrumented && EntrypointsInstrumented())) {
     return nullptr;
   }
   if (gc_ran) {
@@ -1703,7 +1712,8 @@
     // Attempt to run the collector, if we succeed, re-try the allocation.
     const bool plan_gc_ran =
         CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
-    if (was_default_allocator && allocator != GetCurrentAllocator()) {
+    if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+        (!instrumented && EntrypointsInstrumented())) {
       return nullptr;
     }
     if (plan_gc_ran) {
@@ -1732,7 +1742,8 @@
   // We don't need a WaitForGcToComplete here either.
   DCHECK(!gc_plan_.empty());
   CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
-  if (was_default_allocator && allocator != GetCurrentAllocator()) {
+  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+      (!instrumented && EntrypointsInstrumented())) {
     return nullptr;
   }
   ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
@@ -1748,6 +1759,11 @@
             min_interval_homogeneous_space_compaction_by_oom_) {
           last_time_homogeneous_space_compaction_by_oom_ = current_time;
           HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
+          // Thread suspension could have occurred.
+          if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+              (!instrumented && EntrypointsInstrumented())) {
+            return nullptr;
+          }
           switch (result) {
             case HomogeneousSpaceCompactResult::kSuccess:
               // If the allocation succeeded, we delayed an oom.
@@ -1788,6 +1804,11 @@
           // If we aren't out of memory then the OOM was probably from the non moving space being
           // full. Attempt to disable compaction and turn the main space into a non moving space.
           DisableMovingGc();
+          // Thread suspension could have occurred.
+          if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
+              (!instrumented && EntrypointsInstrumented())) {
+            return nullptr;
+          }
           // If we are still a moving GC then something must have caused the transition to fail.
           if (IsMovingGc(collector_type_)) {
             MutexLock mu(self, *gc_complete_lock_);
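
Every call in the hunks above that can suspend the thread (WaitForGcToComplete,
CollectGarbageInternal, PerformHomogeneousSpaceCompact, DisableMovingGc) is
followed by the same two-part bail-out. Spelled out once as a hypothetical
helper (the patch inlines the expression instead; the enum values are
stand-ins for the real AllocatorType):

    enum class AllocatorType { kDefault, kOther };  // Stand-in values.
    AllocatorType GetCurrentAllocator();            // Heap member in ART.
    bool EntrypointsInstrumented();                 // Added in heap.cc above.

    // True when a suspend point invalidated the in-flight allocation, i.e. the
    // allocator was swapped or the alloc entrypoints became instrumented.
    bool SuspendInvalidatedAlloc(bool was_default_allocator,
                                 AllocatorType allocator,
                                 bool instrumented) {
      return (was_default_allocator && allocator != GetCurrentAllocator()) ||
             (!instrumented && EntrypointsInstrumented());
    }
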
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index a181e23..eec8867 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -862,6 +862,7 @@
   // an initial allocation attempt failed.
   mirror::Object* AllocateInternalWithGc(Thread* self,
                                          AllocatorType allocator,
+                                         bool instrumented,
                                          size_t num_bytes,
                                          size_t* bytes_allocated,
                                          size_t* usable_size,
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index e3cbf53..fa2f038 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -414,6 +414,12 @@
                                size_t inlined_frames_before_frame)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  // Does not hold a lock; used to check whether the entrypoints changed from uninstrumented to
+  // instrumented during a GC suspend point.
+  bool AllocEntrypointsInstrumented() const SHARED_REQUIRES(Locks::mutator_lock_) {
+    return quick_alloc_entry_points_instrumentation_counter_ > 0;
+  }
+
  private:
   InstrumentationLevel GetCurrentInstrumentationLevel() const;
 
@@ -567,9 +573,7 @@
   InterpreterHandlerTable interpreter_handler_table_ GUARDED_BY(Locks::mutator_lock_);
 
   // Greater than 0 if quick alloc entry points instrumented.
-  size_t quick_alloc_entry_points_instrumentation_counter_
-      GUARDED_BY(Locks::instrument_entrypoints_lock_);
-
+  size_t quick_alloc_entry_points_instrumentation_counter_;
   friend class InstrumentationTest;  // For GetCurrentInstrumentationLevel and ConfigureStubs.
 
   DISALLOW_COPY_AND_ASSIGN(Instrumentation);
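
Dropping GUARDED_BY means the counter is still written under
Locks::instrument_entrypoints_lock_ but is now read without it. A stale read
is tolerated: a reader that misses a concurrent enable falls into the
null-return path and retries with kInstrumented = true. The same idea with an
explicit atomic (an illustrative assumption; the field in the patch stays a
plain size_t):

    #include <atomic>
    #include <cstddef>

    // Illustrative atomic version of the counter; the patch keeps a plain
    // size_t and relies on the retry path to absorb a racy read.
    std::atomic<std::size_t> alloc_entrypoints_instrumentation_counter{0};

    bool AllocEntrypointsInstrumented() {
      // Relaxed ordering suffices: the value only gates a conservative retry,
      // it does not order any other memory accesses.
      return alloc_entrypoints_instrumentation_counter.load(
                 std::memory_order_relaxed) > 0;
    }
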
diff --git a/test/145-alloc-tracking-stress/expected.txt b/test/145-alloc-tracking-stress/expected.txt
new file mode 100644
index 0000000..134d8d0
--- /dev/null
+++ b/test/145-alloc-tracking-stress/expected.txt
@@ -0,0 +1 @@
+Finishing
diff --git a/test/145-alloc-tracking-stress/info.txt b/test/145-alloc-tracking-stress/info.txt
new file mode 100644
index 0000000..443062d
--- /dev/null
+++ b/test/145-alloc-tracking-stress/info.txt
@@ -0,0 +1 @@
+Regression test for b/18661622
diff --git a/test/145-alloc-tracking-stress/src/Main.java b/test/145-alloc-tracking-stress/src/Main.java
new file mode 100644
index 0000000..752fdd9
--- /dev/null
+++ b/test/145-alloc-tracking-stress/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.Map;
+
+public class Main implements Runnable {
+    static final int numberOfThreads = 4;
+    static final int totalOperations = 1000;
+    static Method enableAllocTrackingMethod;
+    static Object holder;
+    static volatile boolean trackingThreadDone = false;
+    int threadIndex;
+
+    Main(int index) {
+        threadIndex = index;
+    }
+
+    public static void main(String[] args) throws Exception {
+      Class klass = Class.forName("org.apache.harmony.dalvik.ddmc.DdmVmInternal");
+      if (klass == null) {
+          throw new AssertionError("Couldn't find DdmVmInternal class");
+      }
+      enableAllocTrackingMethod = klass.getDeclaredMethod("enableRecentAllocations",
+              Boolean.TYPE);
+      if (enableAllocTrackingMethod == null) {
+          throw new AssertionError("Couldn't find enableRecentAllocations method");
+      }
+
+      final Thread[] threads = new Thread[numberOfThreads];
+      for (int t = 0; t < threads.length; t++) {
+          threads[t] = new Thread(new Main(t));
+          threads[t].start();
+      }
+      for (Thread t : threads) {
+          t.join();
+      }
+      System.out.println("Finishing");
+    }
+
+    public void run() {
+        if (threadIndex == 0) {
+            for (int i = 0; i < totalOperations; ++i) {
+                try {
+                    enableAllocTrackingMethod.invoke(null, true);
+                    holder = new Object();
+                    enableAllocTrackingMethod.invoke(null, false);
+                } catch (Exception e) {
+                    System.out.println(e);
+                    return;
+                }
+            }
+            trackingThreadDone = true;
+        } else {
+            while (!trackingThreadDone) {
+                holder = new Object();
+            }
+        }
+    }
+}