Avoid suspending for alloc trace enabling when already suspended.

The trace start and stop paths enable and disable allocation stat
counting while all threads are already suspended, so the entrypoint
instrumentation must not call SuspendAll() again. Thread a 'suspended'
flag through SetStatsEnabled and (Un)InstrumentQuickAllocEntryPoints
down to SetEntrypointsInstrumented, assert that the mutator lock is
held exclusively when the flag is set, and raise kTraceLock above
kRuntimeShutdownLock in the lock hierarchy to match the new ordering.

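A minimal sketch of the intended call pattern (the helper functions
below are hypothetical and for illustration only; Runtime::SetStatsEnabled
and the 'suspended' flag are the ones this change adds):

  // Caller that has already suspended all threads (e.g. the trace start
  // path) and therefore holds the mutator lock exclusively: passing
  // suspended == true makes SetEntrypointsInstrumented() skip the nested
  // SuspendAll()/ResumeAll() and instead assert that the mutator lock is
  // already held exclusively.
  static void EnableAllocCountingWhileSuspended(Runtime* runtime) {
    runtime->SetStatsEnabled(true /* new_state */, true /* suspended */);
  }

  // Caller running as a normal mutator (e.g. VMDebug_startAllocCounting):
  // suspended == false lets the instrumentation suspend and resume the
  // threads itself around the entrypoint swap.
  static void EnableAllocCountingNotSuspended(Runtime* runtime) {
    runtime->SetStatsEnabled(true /* new_state */, false /* suspended */);
  }
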
Bug: 17499772
Change-Id: Id98c10967b28e8859e5ac46f5878c304fb85c498
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 3af6ad9..fae8ba9 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -877,6 +877,10 @@
     DCHECK(heap_bitmap_lock_ == nullptr);
     heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
+    DCHECK(trace_lock_ == nullptr);
+    trace_lock_ = new Mutex("trace lock", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
     DCHECK(runtime_shutdown_lock_ == nullptr);
     runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
@@ -885,10 +889,6 @@
     DCHECK(profiler_lock_ == nullptr);
     profiler_lock_ = new Mutex("profiler lock", current_lock_level);
 
-    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
-    DCHECK(trace_lock_ == nullptr);
-    trace_lock_ = new Mutex("trace lock", current_lock_level);
-
     UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
     DCHECK(deoptimization_lock_ == nullptr);
     deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index dfeda8d..013c078 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -92,12 +92,12 @@
   kBreakpointInvokeLock,
   kAllocTrackerLock,
   kDeoptimizationLock,
-  kTraceLock,
   kProfilerLock,
   kJdwpEventListLock,
   kJdwpAttachLock,
   kJdwpStartLock,
   kRuntimeShutdownLock,
+  kTraceLock,
   kHeapBitmapLock,
   kMutatorLock,
   kThreadListSuspendThreadLock,
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 37cabea..69da0a2 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4295,7 +4295,7 @@
       recent_allocation_records_ = new AllocRecord[alloc_record_max_];
       CHECK(recent_allocation_records_ != NULL);
     }
-    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(false);
   } else {
     {
       ScopedObjectAccess soa(self);  // For type_cache_.Clear();
@@ -4311,7 +4311,7 @@
       type_cache_.Clear();
     }
     // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
-    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
+    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints(false);
   }
 }
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 203aaff..dfe9e29 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -423,7 +423,7 @@
     }
   }
   if (running_on_valgrind_) {
-    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(false);
   }
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() exiting";
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 0f45b9e..a2e88a6 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -597,10 +597,13 @@
   thread->ResetQuickAllocEntryPointsForThread();
 }
 
-void Instrumentation::SetEntrypointsInstrumented(bool instrumented) {
+void Instrumentation::SetEntrypointsInstrumented(bool instrumented, bool suspended) {
   Runtime* runtime = Runtime::Current();
   ThreadList* tl = runtime->GetThreadList();
-  if (runtime->IsStarted()) {
+  if (suspended) {
+    Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+  }
+  if (runtime->IsStarted() && !suspended) {
     tl->SuspendAll();
   }
   {
@@ -608,30 +611,30 @@
     SetQuickAllocEntryPointsInstrumented(instrumented);
     ResetQuickAllocEntryPoints();
   }
-  if (runtime->IsStarted()) {
+  if (runtime->IsStarted() && !suspended) {
     tl->ResumeAll();
   }
 }
 
-void Instrumentation::InstrumentQuickAllocEntryPoints() {
+void Instrumentation::InstrumentQuickAllocEntryPoints(bool suspended) {
   // TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racey and this code
   //       should be guarded by a lock.
   DCHECK_GE(quick_alloc_entry_points_instrumentation_counter_.LoadSequentiallyConsistent(), 0);
   const bool enable_instrumentation =
       quick_alloc_entry_points_instrumentation_counter_.FetchAndAddSequentiallyConsistent(1) == 0;
   if (enable_instrumentation) {
-    SetEntrypointsInstrumented(true);
+    SetEntrypointsInstrumented(true, suspended);
   }
 }
 
-void Instrumentation::UninstrumentQuickAllocEntryPoints() {
+void Instrumentation::UninstrumentQuickAllocEntryPoints(bool suspended) {
   // TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racey and this code
   //       should be guarded by a lock.
   DCHECK_GT(quick_alloc_entry_points_instrumentation_counter_.LoadSequentiallyConsistent(), 0);
   const bool disable_instrumentation =
       quick_alloc_entry_points_instrumentation_counter_.FetchAndSubSequentiallyConsistent(1) == 1;
   if (disable_instrumentation) {
-    SetEntrypointsInstrumented(false);
+    SetEntrypointsInstrumented(false, suspended);
   }
 }
 
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index d05cee5..3c1c756 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -182,10 +182,10 @@
     return interpreter_handler_table_;
   }
 
-  void InstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                                                        Locks::runtime_shutdown_lock_);
-  void UninstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                                                          Locks::runtime_shutdown_lock_);
+  void InstrumentQuickAllocEntryPoints(bool suspended)
+      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
+  void UninstrumentQuickAllocEntryPoints(bool suspended)
+      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
   void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
 
   // Update the code of a method respecting any installed stubs.
@@ -350,7 +350,7 @@
 
   // No thread safety analysis to get around SetQuickAllocEntryPointsInstrumented requiring
   // exclusive access to mutator lock which you can't get if the runtime isn't started.
-  void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS;
+  void SetEntrypointsInstrumented(bool instrumented, bool suspended) NO_THREAD_SAFETY_ANALYSIS;
 
   void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
                             mirror::ArtMethod* method, uint32_t dex_pc) const
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index ceff206..d8a537f 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -60,11 +60,11 @@
 }
 
 static void VMDebug_startAllocCounting(JNIEnv*, jclass) {
-  Runtime::Current()->SetStatsEnabled(true);
+  Runtime::Current()->SetStatsEnabled(true, false);
 }
 
 static void VMDebug_stopAllocCounting(JNIEnv*, jclass) {
-  Runtime::Current()->SetStatsEnabled(false);
+  Runtime::Current()->SetStatsEnabled(false, false);
 }
 
 static jint VMDebug_getAllocCount(JNIEnv*, jclass, jint kind) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index c62603d..60d641e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -998,14 +998,14 @@
   }
 }
 
-void Runtime::SetStatsEnabled(bool new_state) {
+void Runtime::SetStatsEnabled(bool new_state, bool suspended) {
   if (new_state == true) {
     GetStats()->Clear(~0);
     // TODO: wouldn't it make more sense to clear _all_ threads' stats?
     Thread::Current()->GetStats()->Clear(~0);
-    GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+    GetInstrumentation()->InstrumentQuickAllocEntryPoints(suspended);
   } else {
-    GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
+    GetInstrumentation()->UninstrumentQuickAllocEntryPoints(suspended);
   }
   stats_enabled_ = new_state;
 }
diff --git a/runtime/runtime.h b/runtime/runtime.h
index a868e2c..96924ec 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -385,7 +385,7 @@
 
   void ResetStats(int kinds);
 
-  void SetStatsEnabled(bool new_state);
+  void SetStatsEnabled(bool new_state, bool suspended);
 
   enum class NativeBridgeAction {  // private
     kUnload,
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 6dcc5fe..b32e042 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -373,11 +373,9 @@
 
       // Enable count of allocs if specified in the flags.
       if ((flags && kTraceCountAllocs) != 0) {
-        runtime->SetStatsEnabled(true);
+        runtime->SetStatsEnabled(true, true);
       }
 
-
-
       if (sampling_enabled) {
         CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread,
                                             reinterpret_cast<void*>(interval_us)),
@@ -492,7 +490,7 @@
   size_t final_offset = cur_offset_.LoadRelaxed();
 
   if ((flags_ & kTraceCountAllocs) != 0) {
-    Runtime::Current()->SetStatsEnabled(false);
+    Runtime::Current()->SetStatsEnabled(false, true);
   }
 
   std::set<mirror::ArtMethod*> visited_methods;