Add and use ScopedSuspendAll

ScopedSuspendAll suspends all threads in its constructor and resumes them
in its destructor. Using it replaces most paired SuspendAll and ResumeAll
calls.
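
An illustrative before/after (DoWork() is a placeholder for code that
must run with all threads suspended):

  Before:
    ThreadList* tl = Runtime::Current()->GetThreadList();
    tl->SuspendAll(__FUNCTION__);
    DoWork();
    tl->ResumeAll();

  After:
    {
      ScopedSuspendAll ssa(__FUNCTION__);
      DoWork();
    }  // All threads resumed when ssa goes out of scope.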

Change-Id: I355683a5365876242cea85a656dcb58455f7a294
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 9f05e64..4c1408a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1140,23 +1140,20 @@
   if (IsImage()) {
     TimingLogger::ScopedTiming t("UpdateImageClasses", timings);
 
-    Runtime* current = Runtime::Current();
+    Runtime* runtime = Runtime::Current();
 
     // Suspend all threads.
-    current->GetThreadList()->SuspendAll(__FUNCTION__);
+    ScopedSuspendAll ssa(__FUNCTION__);
 
     std::string error_msg;
     std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(image_classes_.get(),
                                                                         Thread::Current(),
-                                                                        current->GetClassLinker(),
+                                                                        runtime->GetClassLinker(),
                                                                         &error_msg));
     CHECK(update.get() != nullptr) << error_msg;  // TODO: Soft failure?
 
     // Do the marking.
     update->Walk();
-
-    // Resume threads.
-    current->GetThreadList()->ResumeAll();
   }
 }
 
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index aa4cf55..e248604 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1606,10 +1606,8 @@
       // stack. Need to revoke the thread-local allocation stacks that
       // point into it.
       ScopedThreadSuspension sts(self, kNative);
-      ThreadList* thread_list = Runtime::Current()->GetThreadList();
-      thread_list->SuspendAll(__FUNCTION__);
+      ScopedSuspendAll ssa(__FUNCTION__);
       heap->RevokeAllThreadLocalAllocationStacks(self);
-      thread_list->ResumeAll();
     }
     {
       // Mark dex caches.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 72226af..d5691af 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -578,7 +578,7 @@
   }
 
   Runtime* runtime = Runtime::Current();
-  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
+  ScopedSuspendAll ssa(__FUNCTION__);
   Thread* self = Thread::Current();
   ThreadState old_state = self->SetStateUnsafe(kRunnable);
   CHECK_NE(old_state, kRunnable);
@@ -588,8 +588,6 @@
   instrumentation_events_ = 0;
   gDebuggerActive = true;
   CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
-  runtime->GetThreadList()->ResumeAll();
-
   LOG(INFO) << "Debugger is active";
 }
 
@@ -602,32 +600,32 @@
   // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
   // and clear the object registry.
   Runtime* runtime = Runtime::Current();
-  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
   Thread* self = Thread::Current();
-  ThreadState old_state = self->SetStateUnsafe(kRunnable);
-
-  // Debugger may not be active at this point.
-  if (IsDebuggerActive()) {
-    {
-      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
-      // This prevents us from having any pending deoptimization request when the debugger attaches
-      // to us again while no event has been requested yet.
-      MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
-      deoptimization_requests_.clear();
-      full_deoptimization_event_count_ = 0U;
+  {
+    ScopedSuspendAll ssa(__FUNCTION__);
+    ThreadState old_state = self->SetStateUnsafe(kRunnable);
+    // Debugger may not be active at this point.
+    if (IsDebuggerActive()) {
+      {
+        // Since we're going to disable deoptimization, we clear the deoptimization requests
+        // queue. This prevents us from having any pending deoptimization request when the
+        // debugger attaches to us again while no event has been requested yet.
+        MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
+        deoptimization_requests_.clear();
+        full_deoptimization_event_count_ = 0U;
+      }
+      if (instrumentation_events_ != 0) {
+        runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
+                                                      instrumentation_events_);
+        instrumentation_events_ = 0;
+      }
+      if (RequiresDeoptimization()) {
+        runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
+      }
+      gDebuggerActive = false;
     }
-    if (instrumentation_events_ != 0) {
-      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
-                                                    instrumentation_events_);
-      instrumentation_events_ = 0;
-    }
-    if (RequiresDeoptimization()) {
-      runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
-    }
-    gDebuggerActive = false;
+    CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
   }
-  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
-  runtime->GetThreadList()->ResumeAll();
 
   {
     ScopedObjectAccess soa(self);
@@ -751,9 +749,8 @@
   MonitorInfo monitor_info;
   {
     ScopedThreadSuspension sts(self, kSuspended);
-    Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
+    ScopedSuspendAll ssa(__FUNCTION__);
     monitor_info = MonitorInfo(o);
-    Runtime::Current()->GetThreadList()->ResumeAll();
   }
   if (monitor_info.owner_ != nullptr) {
     expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
@@ -3161,8 +3158,7 @@
   CHECK_EQ(self->GetState(), kRunnable);
   ScopedThreadSuspension sts(self, kWaitingForDeoptimization);
   // We need to suspend mutator threads first.
-  Runtime* const runtime = Runtime::Current();
-  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
+  ScopedSuspendAll ssa(__FUNCTION__);
   const ThreadState old_state = self->SetStateUnsafe(kRunnable);
   {
     MutexLock mu(self, *Locks::deoptimization_lock_);
@@ -3174,7 +3170,6 @@
     deoptimization_requests_.clear();
   }
   CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
-  runtime->GetThreadList()->ResumeAll();
 }
 
 static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m)
@@ -4724,13 +4719,9 @@
         // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
         // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
         ScopedThreadSuspension sts(self, kSuspended);
-        ThreadList* tl = Runtime::Current()->GetThreadList();
-        tl->SuspendAll(__FUNCTION__);
-        {
-          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-          space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
-        }
-        tl->ResumeAll();
+        ScopedSuspendAll ssa(__FUNCTION__);
+        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
       } else if (space->IsBumpPointerSpace()) {
         ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
         context.SetChunkOverhead(0);
@@ -4740,13 +4731,11 @@
         heap->IncrementDisableMovingGC(self);
         {
           ScopedThreadSuspension sts(self, kSuspended);
-          ThreadList* tl = Runtime::Current()->GetThreadList();
-          tl->SuspendAll(__FUNCTION__);
+          ScopedSuspendAll ssa(__FUNCTION__);
           ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
           context.SetChunkOverhead(0);
           space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
           HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
-          tl->ResumeAll();
         }
         heap->DecrementDisableMovingGC(self);
       } else {
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 7ddc7cc..089f453 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -421,12 +421,11 @@
           if (heap_bitmap_exclusive_locked) {
             Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
           }
-          Locks::mutator_lock_->SharedUnlock(self);
-          ThreadList* tl = Runtime::Current()->GetThreadList();
-          tl->SuspendAll(__FUNCTION__);
-          mark_sweep_->VerifyRoots();
-          tl->ResumeAll();
-          Locks::mutator_lock_->SharedLock(self);
+          {
+            ScopedThreadSuspension sts(self, kSuspended);
+            ScopedSuspendAll ssa(__FUNCTION__);
+            mark_sweep_->VerifyRoots();
+          }
           if (heap_bitmap_exclusive_locked) {
             Locks::heap_bitmap_lock_->ExclusiveLock(self);
           }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9292c7a..cfe7713 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -639,10 +639,9 @@
     background_collector_type_ = foreground_collector_type_;
   }
   TransitionCollector(foreground_collector_type_);
-  ThreadList* tl = Runtime::Current()->GetThreadList();
-  Thread* self = Thread::Current();
+  Thread* const self = Thread::Current();
   ScopedThreadStateChange tsc(self, kSuspended);
-  tl->SuspendAll(__FUNCTION__);
+  ScopedSuspendAll ssa(__FUNCTION__);
   // Something may have caused the transition to fail.
   if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
     CHECK(main_space_ != nullptr);
@@ -657,7 +656,6 @@
     non_moving_space_ = main_space_;
     CHECK(!non_moving_space_->CanMoveObjects());
   }
-  tl->ResumeAll();
 }
 
 std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
@@ -889,11 +887,9 @@
     IncrementDisableMovingGC(self);
     {
       ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
-      ThreadList* tl = Runtime::Current()->GetThreadList();
-      tl->SuspendAll(__FUNCTION__);
+      ScopedSuspendAll ssa(__FUNCTION__);
       VisitObjectsInternalRegionSpace(callback, arg);
       VisitObjectsInternal(callback, arg);
-      tl->ResumeAll();
     }
     DecrementDisableMovingGC(self);
   } else {
@@ -1267,12 +1263,13 @@
     // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
     // about pauses.
     Runtime* runtime = Runtime::Current();
-    runtime->GetThreadList()->SuspendAll(__FUNCTION__);
-    uint64_t start_time = NanoTime();
-    size_t count = runtime->GetMonitorList()->DeflateMonitors();
-    VLOG(heap) << "Deflating " << count << " monitors took "
-        << PrettyDuration(NanoTime() - start_time);
-    runtime->GetThreadList()->ResumeAll();
+    {
+      ScopedSuspendAll ssa(__FUNCTION__);
+      uint64_t start_time = NanoTime();
+      size_t count = runtime->GetMonitorList()->DeflateMonitors();
+      VLOG(heap) << "Deflating " << count << " monitors took "
+          << PrettyDuration(NanoTime() - start_time);
+    }
     ATRACE_END();
   }
   TrimIndirectReferenceTables(self);
@@ -1749,19 +1746,15 @@
 }
 
 size_t Heap::GetObjectsAllocated() const {
-  Thread* self = Thread::Current();
+  Thread* const self = Thread::Current();
   ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
-  auto* tl = Runtime::Current()->GetThreadList();
   // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
-  tl->SuspendAll(__FUNCTION__);
+  ScopedSuspendAll ssa(__FUNCTION__);
+  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
   size_t total = 0;
-  {
-    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    for (space::AllocSpace* space : alloc_spaces_) {
-      total += space->GetObjectsAllocated();
-    }
+  for (space::AllocSpace* space : alloc_spaces_) {
+    total += space->GetObjectsAllocated();
   }
-  tl->ResumeAll();
   return total;
 }
 
@@ -1911,7 +1904,6 @@
   // Inc requested homogeneous space compaction.
   count_requested_homogeneous_space_compaction_++;
   // Store performed homogeneous space compaction at a new request arrival.
-  ThreadList* tl = Runtime::Current()->GetThreadList();
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
   Locks::mutator_lock_->AssertNotHeld(self);
   {
@@ -1938,34 +1930,34 @@
     FinishGC(self, collector::kGcTypeNone);
     return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
   }
-  // Suspend all threads.
-  tl->SuspendAll(__FUNCTION__);
-  uint64_t start_time = NanoTime();
-  // Launch compaction.
-  space::MallocSpace* to_space = main_space_backup_.release();
-  space::MallocSpace* from_space = main_space_;
-  to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
-  const uint64_t space_size_before_compaction = from_space->Size();
-  AddSpace(to_space);
-  // Make sure that we will have enough room to copy.
-  CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
-  collector::GarbageCollector* collector = Compact(to_space, from_space,
-                                                   kGcCauseHomogeneousSpaceCompact);
-  const uint64_t space_size_after_compaction = to_space->Size();
-  main_space_ = to_space;
-  main_space_backup_.reset(from_space);
-  RemoveSpace(from_space);
-  SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
-  // Update performed homogeneous space compaction count.
-  count_performed_homogeneous_space_compaction_++;
-  // Print statics log and resume all threads.
-  uint64_t duration = NanoTime() - start_time;
-  VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
-             << PrettySize(space_size_before_compaction) << " -> "
-             << PrettySize(space_size_after_compaction) << " compact-ratio: "
-             << std::fixed << static_cast<double>(space_size_after_compaction) /
-             static_cast<double>(space_size_before_compaction);
-  tl->ResumeAll();
+  collector::GarbageCollector* collector;
+  {
+    ScopedSuspendAll ssa(__FUNCTION__);
+    uint64_t start_time = NanoTime();
+    // Launch compaction.
+    space::MallocSpace* to_space = main_space_backup_.release();
+    space::MallocSpace* from_space = main_space_;
+    to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+    const uint64_t space_size_before_compaction = from_space->Size();
+    AddSpace(to_space);
+    // Make sure that we will have enough room to copy.
+    CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
+    collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
+    const uint64_t space_size_after_compaction = to_space->Size();
+    main_space_ = to_space;
+    main_space_backup_.reset(from_space);
+    RemoveSpace(from_space);
+    SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
+    // Update performed homogeneous space compaction count.
+    count_performed_homogeneous_space_compaction_++;
+    // Print statistics log; threads are resumed when ssa goes out of scope.
+    uint64_t duration = NanoTime() - start_time;
+    VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
+               << PrettySize(space_size_before_compaction) << " -> "
+               << PrettySize(space_size_after_compaction) << " compact-ratio: "
+               << std::fixed << static_cast<double>(space_size_after_compaction) /
+               static_cast<double>(space_size_before_compaction);
+  }
   // Finish GC.
   reference_processor_->EnqueueClearedReferences(self);
   GrowForUtilization(semi_space_collector_);
@@ -1983,7 +1975,6 @@
   uint64_t start_time = NanoTime();
   uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
   Runtime* const runtime = Runtime::Current();
-  ThreadList* const tl = runtime->GetThreadList();
   Thread* const self = Thread::Current();
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
   Locks::mutator_lock_->AssertNotHeld(self);
@@ -2021,84 +2012,91 @@
     return;
   }
   collector::GarbageCollector* collector = nullptr;
-  tl->SuspendAll(__FUNCTION__);
-  switch (collector_type) {
-    case kCollectorTypeSS: {
-      if (!IsMovingGc(collector_type_)) {
-        // Create the bump pointer space from the backup space.
-        CHECK(main_space_backup_ != nullptr);
-        std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
-        // We are transitioning from non moving GC -> moving GC, since we copied from the bump
-        // pointer space last transition it will be protected.
-        CHECK(mem_map != nullptr);
-        mem_map->Protect(PROT_READ | PROT_WRITE);
-        bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
-                                                                        mem_map.release());
-        AddSpace(bump_pointer_space_);
-        collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
-        // Use the now empty main space mem map for the bump pointer temp space.
-        mem_map.reset(main_space_->ReleaseMemMap());
-        // Unset the pointers just in case.
-        if (dlmalloc_space_ == main_space_) {
-          dlmalloc_space_ = nullptr;
-        } else if (rosalloc_space_ == main_space_) {
-          rosalloc_space_ = nullptr;
-        }
-        // Remove the main space so that we don't try to trim it, this doens't work for debug
-        // builds since RosAlloc attempts to read the magic number from a protected page.
-        RemoveSpace(main_space_);
-        RemoveRememberedSet(main_space_);
-        delete main_space_;  // Delete the space since it has been removed.
-        main_space_ = nullptr;
-        RemoveRememberedSet(main_space_backup_.get());
-        main_space_backup_.reset(nullptr);  // Deletes the space.
-        temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
-                                                                mem_map.release());
-        AddSpace(temp_space_);
-      }
-      break;
-    }
-    case kCollectorTypeMS:
-      // Fall through.
-    case kCollectorTypeCMS: {
-      if (IsMovingGc(collector_type_)) {
-        CHECK(temp_space_ != nullptr);
-        std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
-        RemoveSpace(temp_space_);
-        temp_space_ = nullptr;
-        mem_map->Protect(PROT_READ | PROT_WRITE);
-        CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize,
-                              std::min(mem_map->Size(), growth_limit_), mem_map->Size());
-        mem_map.release();
-        // Compact to the main space from the bump pointer space, don't need to swap semispaces.
-        AddSpace(main_space_);
-        collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
-        mem_map.reset(bump_pointer_space_->ReleaseMemMap());
-        RemoveSpace(bump_pointer_space_);
-        bump_pointer_space_ = nullptr;
-        const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
-        // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
-        if (kIsDebugBuild && kUseRosAlloc) {
+  {
+    ScopedSuspendAll ssa(__FUNCTION__);
+    switch (collector_type) {
+      case kCollectorTypeSS: {
+        if (!IsMovingGc(collector_type_)) {
+          // Create the bump pointer space from the backup space.
+          CHECK(main_space_backup_ != nullptr);
+          std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
+          // We are transitioning from non moving GC -> moving GC, since we copied from the bump
+          // pointer space last transition it will be protected.
+          CHECK(mem_map != nullptr);
           mem_map->Protect(PROT_READ | PROT_WRITE);
+          bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
+                                                                          mem_map.release());
+          AddSpace(bump_pointer_space_);
+          collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
+          // Use the now empty main space mem map for the bump pointer temp space.
+          mem_map.reset(main_space_->ReleaseMemMap());
+          // Unset the pointers just in case.
+          if (dlmalloc_space_ == main_space_) {
+            dlmalloc_space_ = nullptr;
+          } else if (rosalloc_space_ == main_space_) {
+            rosalloc_space_ = nullptr;
+          }
+          // Remove the main space so that we don't try to trim it; this doesn't work for debug
+          // builds since RosAlloc attempts to read the magic number from a protected page.
+          RemoveSpace(main_space_);
+          RemoveRememberedSet(main_space_);
+          delete main_space_;  // Delete the space since it has been removed.
+          main_space_ = nullptr;
+          RemoveRememberedSet(main_space_backup_.get());
+          main_space_backup_.reset(nullptr);  // Deletes the space.
+          temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
+                                                                  mem_map.release());
+          AddSpace(temp_space_);
         }
-        main_space_backup_.reset(CreateMallocSpaceFromMemMap(
-            mem_map.get(), kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
-            mem_map->Size(), name, true));
-        if (kIsDebugBuild && kUseRosAlloc) {
-          mem_map->Protect(PROT_NONE);
-        }
-        mem_map.release();
+        break;
       }
-      break;
+      case kCollectorTypeMS:
+        // Fall through.
+      case kCollectorTypeCMS: {
+        if (IsMovingGc(collector_type_)) {
+          CHECK(temp_space_ != nullptr);
+          std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
+          RemoveSpace(temp_space_);
+          temp_space_ = nullptr;
+          mem_map->Protect(PROT_READ | PROT_WRITE);
+          CreateMainMallocSpace(mem_map.get(),
+                                kDefaultInitialSize,
+                                std::min(mem_map->Size(), growth_limit_),
+                                mem_map->Size());
+          mem_map.release();
+          // Compact to the main space from the bump pointer space, don't need to swap semispaces.
+          AddSpace(main_space_);
+          collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
+          mem_map.reset(bump_pointer_space_->ReleaseMemMap());
+          RemoveSpace(bump_pointer_space_);
+          bump_pointer_space_ = nullptr;
+          const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+          // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
+          if (kIsDebugBuild && kUseRosAlloc) {
+            mem_map->Protect(PROT_READ | PROT_WRITE);
+          }
+          main_space_backup_.reset(CreateMallocSpaceFromMemMap(
+              mem_map.get(),
+              kDefaultInitialSize,
+              std::min(mem_map->Size(), growth_limit_),
+              mem_map->Size(),
+              name,
+              true));
+          if (kIsDebugBuild && kUseRosAlloc) {
+            mem_map->Protect(PROT_NONE);
+          }
+          mem_map.release();
+        }
+        break;
+      }
+      default: {
+        LOG(FATAL) << "Attempted to transition to invalid collector type "
+                   << static_cast<size_t>(collector_type);
+        break;
+      }
     }
-    default: {
-      LOG(FATAL) << "Attempted to transition to invalid collector type "
-                 << static_cast<size_t>(collector_type);
-      break;
-    }
+    ChangeCollector(collector_type);
   }
-  ChangeCollector(collector_type);
-  tl->ResumeAll();
   // Can't call into java code with all threads suspended.
   reference_processor_->EnqueueClearedReferences(self);
   uint64_t duration = NanoTime() - start_time;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index d8072ea..49126d2 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -303,17 +303,13 @@
     void* arg, bool do_null_callback_at_end) NO_THREAD_SAFETY_ANALYSIS {
   // TODO: NO_THREAD_SAFETY_ANALYSIS.
   Thread* self = Thread::Current();
-  ThreadList* tl = Runtime::Current()->GetThreadList();
-  tl->SuspendAll(__FUNCTION__);
-  {
-    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
-    MutexLock mu2(self, *Locks::thread_list_lock_);
-    rosalloc_->InspectAll(callback, arg);
-    if (do_null_callback_at_end) {
-      callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
-    }
+  ScopedSuspendAll ssa(__FUNCTION__);
+  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
+  MutexLock mu2(self, *Locks::thread_list_lock_);
+  rosalloc_->InspectAll(callback, arg);
+  if (do_null_callback_at_end) {
+    callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
   }
-  tl->ResumeAll();
 }
 
 void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index e2094dc..dfc1f5f 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1403,10 +1403,11 @@
     // comment in Heap::VisitObjects().
     heap->IncrementDisableMovingGC(self);
   }
-  Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__, true /* long suspend */);
-  Hprof hprof(filename, fd, direct_to_ddms);
-  hprof.Dump();
-  Runtime::Current()->GetThreadList()->ResumeAll();
+  {
+    ScopedSuspendAll ssa(__FUNCTION__, true /* long suspend */);
+    Hprof hprof(filename, fd, direct_to_ddms);
+    hprof.Dump();
+  }
   if (heap->IsGcConcurrentAndMoving()) {
     heap->DecrementDisableMovingGC(self);
   }
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 973cd7d..7e2a84d 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -610,19 +610,17 @@
 void Instrumentation::SetEntrypointsInstrumented(bool instrumented) {
   Thread* self = Thread::Current();
   Runtime* runtime = Runtime::Current();
-  ThreadList* tl = runtime->GetThreadList();
   Locks::mutator_lock_->AssertNotHeld(self);
   Locks::instrument_entrypoints_lock_->AssertHeld(self);
   if (runtime->IsStarted()) {
-    tl->SuspendAll(__FUNCTION__);
-  }
-  {
+    ScopedSuspendAll ssa(__FUNCTION__);
     MutexLock mu(self, *Locks::runtime_shutdown_lock_);
     SetQuickAllocEntryPointsInstrumented(instrumented);
     ResetQuickAllocEntryPoints();
-  }
-  if (runtime->IsStarted()) {
-    tl->ResumeAll();
+  } else {
+    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
+    SetQuickAllocEntryPointsInstrumented(instrumented);
+    ResetQuickAllocEntryPoints();
   }
 }
 
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index c7cc68a..d98d246 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -150,13 +150,9 @@
   void CheckConfigureStubs(const char* key, Instrumentation::InstrumentationLevel level) {
     ScopedObjectAccess soa(Thread::Current());
     instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
-    {
-      ScopedThreadSuspension sts(soa.Self(), kSuspended);
-      Runtime* runtime = Runtime::Current();
-      runtime->GetThreadList()->SuspendAll("Instrumentation::ConfigureStubs");
-      instr->ConfigureStubs(key, level);
-      runtime->GetThreadList()->ResumeAll();
-    }
+    ScopedThreadSuspension sts(soa.Self(), kSuspended);
+    ScopedSuspendAll ssa("Instrumentation::ConfigureStubs");
+    instr->ConfigureStubs(key, level);
   }
 
   Instrumentation::InstrumentationLevel GetCurrentInstrumentationLevel() {
@@ -174,10 +170,8 @@
     TestInstrumentationListener listener;
     {
       ScopedThreadSuspension sts(soa.Self(), kSuspended);
-      Runtime* runtime = Runtime::Current();
-      runtime->GetThreadList()->SuspendAll("Add instrumentation listener");
+      ScopedSuspendAll ssa("Add instrumentation listener");
       instr->AddListener(&listener, instrumentation_event);
-      runtime->GetThreadList()->ResumeAll();
     }
 
     ArtMethod* const event_method = nullptr;
@@ -193,10 +187,8 @@
     listener.Reset();
     {
       ScopedThreadSuspension sts(soa.Self(), kSuspended);
-      Runtime* runtime = Runtime::Current();
-      runtime->GetThreadList()->SuspendAll("Remove instrumentation listener");
+      ScopedSuspendAll ssa("Remove instrumentation listener");
       instr->RemoveListener(&listener, instrumentation_event);
-      runtime->GetThreadList()->ResumeAll();
     }
 
     // Check the listener is not registered and is not notified of the event.
@@ -211,12 +203,11 @@
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     ScopedThreadSuspension sts(self, kSuspended);
-    runtime->GetThreadList()->SuspendAll("Single method deoptimization");
+    ScopedSuspendAll ssa("Single method deoptimization");
     if (enable_deoptimization) {
       instrumentation->EnableDeoptimization();
     }
     instrumentation->Deoptimize(method);
-    runtime->GetThreadList()->ResumeAll();
   }
 
   void UndeoptimizeMethod(Thread* self, ArtMethod* method,
@@ -225,12 +216,11 @@
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     ScopedThreadSuspension sts(self, kSuspended);
-    runtime->GetThreadList()->SuspendAll("Single method undeoptimization");
+    ScopedSuspendAll ssa("Single method undeoptimization");
     instrumentation->Undeoptimize(method);
     if (disable_deoptimization) {
       instrumentation->DisableDeoptimization(key);
     }
-    runtime->GetThreadList()->ResumeAll();
   }
 
   void DeoptimizeEverything(Thread* self, const char* key, bool enable_deoptimization)
@@ -238,12 +228,11 @@
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     ScopedThreadSuspension sts(self, kSuspended);
-    runtime->GetThreadList()->SuspendAll("Full deoptimization");
+    ScopedSuspendAll ssa("Full deoptimization");
     if (enable_deoptimization) {
       instrumentation->EnableDeoptimization();
     }
     instrumentation->DeoptimizeEverything(key);
-    runtime->GetThreadList()->ResumeAll();
   }
 
   void UndeoptimizeEverything(Thread* self, const char* key, bool disable_deoptimization)
@@ -251,12 +240,11 @@
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     ScopedThreadSuspension sts(self, kSuspended);
-    runtime->GetThreadList()->SuspendAll("Full undeoptimization");
+    ScopedSuspendAll ssa("Full undeoptimization");
     instrumentation->UndeoptimizeEverything(key);
     if (disable_deoptimization) {
       instrumentation->DisableDeoptimization(key);
     }
-    runtime->GetThreadList()->ResumeAll();
   }
 
   void EnableMethodTracing(Thread* self, const char* key, bool needs_interpreter)
@@ -264,9 +252,8 @@
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     ScopedThreadSuspension sts(self, kSuspended);
-    runtime->GetThreadList()->SuspendAll("EnableMethodTracing");
+    ScopedSuspendAll ssa("EnableMethodTracing");
     instrumentation->EnableMethodTracing(key, needs_interpreter);
-    runtime->GetThreadList()->ResumeAll();
   }
 
   void DisableMethodTracing(Thread* self, const char* key)
@@ -274,9 +261,8 @@
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
     ScopedThreadSuspension sts(self, kSuspended);
-    runtime->GetThreadList()->SuspendAll("EnableMethodTracing");
+    ScopedSuspendAll ssa("EnableMethodTracing");
     instrumentation->DisableMethodTracing(key);
-    runtime->GetThreadList()->ResumeAll();
   }
 
  private:
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 683b2cf..0607493 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -164,18 +164,16 @@
 
 void Jit::CreateInstrumentationCache(size_t compile_threshold, size_t warmup_threshold) {
   CHECK_GT(compile_threshold, 0U);
-  Runtime* const runtime = Runtime::Current();
-  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
+  ScopedSuspendAll ssa(__FUNCTION__);
   // Add Jit interpreter instrumentation, tells the interpreter when to notify the jit to compile
   // something.
   instrumentation_cache_.reset(
       new jit::JitInstrumentationCache(compile_threshold, warmup_threshold));
-  runtime->GetInstrumentation()->AddListener(
+  Runtime::Current()->GetInstrumentation()->AddListener(
       new jit::JitInstrumentationListener(instrumentation_cache_.get()),
       instrumentation::Instrumentation::kMethodEntered |
       instrumentation::Instrumentation::kBackwardBranch |
       instrumentation::Instrumentation::kInvokeVirtualOrInterface);
-  runtime->GetThreadList()->ResumeAll();
 }
 
 }  // namespace jit
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 7910f94..9e12806 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -35,7 +35,7 @@
     trace = soa.Self()->CreateInternalStackTrace<false>(soa);
   } else {
     // Suspend thread to build stack trace.
-    ScopedThreadSuspension sts(soa.Self(), kSuspended);
+    ScopedThreadSuspension sts(soa.Self(), kNative);
     ThreadList* thread_list = Runtime::Current()->GetThreadList();
     bool timed_out;
     Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
@@ -47,11 +47,9 @@
       }
       // Restart suspended thread.
       thread_list->Resume(thread, false);
-    } else {
-      if (timed_out) {
-        LOG(ERROR) << "Trying to get thread's stack failed as the thread failed to suspend within a "
-            "generous timeout.";
-      }
+    } else if (timed_out) {
+      LOG(ERROR) << "Trying to get thread's stack failed as the thread failed to suspend within a "
+          "generous timeout.";
     }
   }
   return trace;
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index d63781b..6176acd 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1282,4 +1282,12 @@
   allocated_ids_.reset(id);
 }
 
+ScopedSuspendAll::ScopedSuspendAll(const char* cause, bool long_suspend) {
+  Runtime::Current()->GetThreadList()->SuspendAll(cause, long_suspend);
+}
+
+ScopedSuspendAll::~ScopedSuspendAll() {
+  Runtime::Current()->GetThreadList()->ResumeAll();
+}
+
 }  // namespace art
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 4c50181..c727432 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -19,6 +19,7 @@
 
 #include "base/histogram.h"
 #include "base/mutex.h"
+#include "base/value_object.h"
 #include "gc_root.h"
 #include "jni.h"
 #include "object_callbacks.h"
@@ -60,12 +61,13 @@
       REQUIRES(!Locks::thread_suspend_count_lock_);
 
   // Suspends all threads and gets exclusive access to the mutator_lock_.
-  // If long suspend is true, then other people who try to suspend will never timeout. Long suspend
-  // is currenly used for hprof since large heaps take a long time.
+  // If long_suspend is true, then other threads that try to suspend will never time out.
+  // long_suspend is currently used for hprof since large heaps take a long time to dump.
   void SuspendAll(const char* cause, bool long_suspend = false)
       EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
-
+      REQUIRES(!Locks::thread_list_lock_,
+               !Locks::thread_suspend_count_lock_,
+               !Locks::mutator_lock_);
 
   // Suspend a thread using a peer, typically used by the debugger. Returns the thread on success,
   // else null. The peer is used to identify the thread to avoid races with the thread terminating.
@@ -188,6 +190,20 @@
   DISALLOW_COPY_AND_ASSIGN(ThreadList);
 };
 
+// Helper for suspending all threads and getting exclusive access to the mutator_lock_.
+class ScopedSuspendAll : public ValueObject {
+ public:
+  explicit ScopedSuspendAll(const char* cause, bool long_suspend = false)
+      EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
+      REQUIRES(!Locks::thread_list_lock_,
+               !Locks::thread_suspend_count_lock_,
+               !Locks::mutator_lock_);
+  // No REQUIRES(mutator_lock_) since the unlock function already asserts this.
+  ~ScopedSuspendAll()
+      UNLOCK_FUNCTION(Locks::mutator_lock_)
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+};
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_THREAD_LIST_H_
diff --git a/runtime/trace.cc b/runtime/trace.cc
index d629ce6..5a44947 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -293,13 +293,11 @@
         break;
       }
     }
-
-    runtime->GetThreadList()->SuspendAll(__FUNCTION__);
     {
+      ScopedSuspendAll ssa(__FUNCTION__);
       MutexLock mu(self, *Locks::thread_list_lock_);
       runtime->GetThreadList()->ForEach(GetSample, the_trace);
     }
-    runtime->GetThreadList()->ResumeAll();
     ATRACE_END();
   }
 
@@ -348,10 +346,9 @@
   // Enable count of allocs if specified in the flags.
   bool enable_stats = false;
 
-  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
-
   // Create Trace object.
   {
+    ScopedSuspendAll ssa(__FUNCTION__);
     MutexLock mu(self, *Locks::trace_lock_);
     if (the_trace_ != nullptr) {
       LOG(ERROR) << "Trace already in progress, ignoring this request";
@@ -375,8 +372,6 @@
     }
   }
 
-  runtime->GetThreadList()->ResumeAll();
-
   // Can't call this when holding the mutator lock.
   if (enable_stats) {
     runtime->SetStatsEnabled(true);
@@ -405,40 +400,41 @@
     CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, nullptr), "sampling thread shutdown");
     sampling_pthread_ = 0U;
   }
-  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
 
-  if (the_trace != nullptr) {
-    stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
-    if (finish_tracing) {
-      the_trace->FinishTracing();
-    }
+  {
+    ScopedSuspendAll ssa(__FUNCTION__);
+    if (the_trace != nullptr) {
+      stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
+      if (finish_tracing) {
+        the_trace->FinishTracing();
+      }
 
-    if (the_trace->trace_mode_ == TraceMode::kSampling) {
-      MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
-      runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
-    } else {
-      runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
-      runtime->GetInstrumentation()->RemoveListener(
-          the_trace, instrumentation::Instrumentation::kMethodEntered |
-          instrumentation::Instrumentation::kMethodExited |
-          instrumentation::Instrumentation::kMethodUnwind);
-    }
-    if (the_trace->trace_file_.get() != nullptr) {
-      // Do not try to erase, so flush and close explicitly.
-      if (flush_file) {
-        if (the_trace->trace_file_->Flush() != 0) {
-          PLOG(WARNING) << "Could not flush trace file.";
-        }
+      if (the_trace->trace_mode_ == TraceMode::kSampling) {
+        MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+        runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
       } else {
-        the_trace->trace_file_->MarkUnchecked();  // Do not trigger guard.
+        runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
+        runtime->GetInstrumentation()->RemoveListener(
+            the_trace, instrumentation::Instrumentation::kMethodEntered |
+            instrumentation::Instrumentation::kMethodExited |
+            instrumentation::Instrumentation::kMethodUnwind);
       }
-      if (the_trace->trace_file_->Close() != 0) {
-        PLOG(ERROR) << "Could not close trace file.";
+      if (the_trace->trace_file_.get() != nullptr) {
+        // Do not try to erase, so flush and close explicitly.
+        if (flush_file) {
+          if (the_trace->trace_file_->Flush() != 0) {
+            PLOG(WARNING) << "Could not flush trace file.";
+          }
+        } else {
+          the_trace->trace_file_->MarkUnchecked();  // Do not trigger guard.
+        }
+        if (the_trace->trace_file_->Close() != 0) {
+          PLOG(ERROR) << "Could not close trace file.";
+        }
       }
+      delete the_trace;
     }
-    delete the_trace;
   }
-  runtime->GetThreadList()->ResumeAll();
   if (stop_alloc_counting) {
     // Can be racy since SetStatsEnabled is not guarded by any locks.
     runtime->SetStatsEnabled(false);
@@ -492,7 +488,7 @@
   }
 
   if (the_trace != nullptr) {
-    runtime->GetThreadList()->SuspendAll(__FUNCTION__);
+    ScopedSuspendAll ssa(__FUNCTION__);
     stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
 
     if (the_trace->trace_mode_ == TraceMode::kSampling) {
@@ -500,12 +496,12 @@
       runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
     } else {
       runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
-      runtime->GetInstrumentation()->RemoveListener(the_trace,
-                                                    instrumentation::Instrumentation::kMethodEntered |
-                                                    instrumentation::Instrumentation::kMethodExited |
-                                                    instrumentation::Instrumentation::kMethodUnwind);
+      runtime->GetInstrumentation()->RemoveListener(
+          the_trace,
+          instrumentation::Instrumentation::kMethodEntered |
+          instrumentation::Instrumentation::kMethodExited |
+          instrumentation::Instrumentation::kMethodUnwind);
     }
-    runtime->GetThreadList()->ResumeAll();
   }
 
   if (stop_alloc_counting) {
@@ -531,23 +527,23 @@
   // Enable count of allocs if specified in the flags.
   bool enable_stats = (the_trace->flags_ && kTraceCountAllocs) != 0;
 
-  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
+  {
+    ScopedSuspendAll ssa(__FUNCTION__);
 
-  // Reenable.
-  if (the_trace->trace_mode_ == TraceMode::kSampling) {
-    CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
-        reinterpret_cast<void*>(the_trace->interval_us_)), "Sampling profiler thread");
-  } else {
-    runtime->GetInstrumentation()->AddListener(the_trace,
-                                               instrumentation::Instrumentation::kMethodEntered |
-                                               instrumentation::Instrumentation::kMethodExited |
-                                               instrumentation::Instrumentation::kMethodUnwind);
-    // TODO: In full-PIC mode, we don't need to fully deopt.
-    runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey);
+    // Reenable.
+    if (the_trace->trace_mode_ == TraceMode::kSampling) {
+      CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
+          reinterpret_cast<void*>(the_trace->interval_us_)), "Sampling profiler thread");
+    } else {
+      runtime->GetInstrumentation()->AddListener(the_trace,
+                                                 instrumentation::Instrumentation::kMethodEntered |
+                                                 instrumentation::Instrumentation::kMethodExited |
+                                                 instrumentation::Instrumentation::kMethodUnwind);
+      // TODO: In full-PIC mode, we don't need to fully deopt.
+      runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey);
+    }
   }
 
-  runtime->GetThreadList()->ResumeAll();
-
   // Can't call this when holding the mutator lock.
   if (enable_stats) {
     runtime->SetStatsEnabled(true);