Add comment for RunCheckpoint and clean up ThreadList a bit
Change-Id: Id512bfd15fee9a7359c77f094e16d64634943619
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ed859cf..dcf9601 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -60,8 +60,11 @@
static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000;
ThreadList::ThreadList()
- : suspend_all_count_(0), debug_suspend_all_count_(0), unregistering_count_(0),
- suspend_all_historam_("suspend all histogram", 16, 64), long_suspend_(false) {
+ : suspend_all_count_(0),
+ debug_suspend_all_count_(0),
+ unregistering_count_(0),
+ suspend_all_historam_("suspend all histogram", 16, 64),
+ long_suspend_(false) {
CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}
@@ -381,7 +384,8 @@
// from-space to to-space refs. Used to synchronize threads at a point
// to mark the initiation of marking while maintaining the to-space
// invariant.
-size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
+size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
+ Closure* flip_callback,
gc::collector::GarbageCollector* collector) {
TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
const uint64_t start_time = NanoTime();
@@ -509,7 +513,9 @@
// Debugger thread might be set to kRunnable for a short period of time after the
// SuspendAllInternal. This is safe because it will be set back to suspended state before
// the SuspendAll returns.
-void ThreadList::SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2,
+void ThreadList::SuspendAllInternal(Thread* self,
+ Thread* ignore1,
+ Thread* ignore2,
bool debug_suspend) {
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
@@ -698,12 +704,14 @@
VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
}
-static void ThreadSuspendByPeerWarning(Thread* self, LogSeverity severity, const char* message,
+static void ThreadSuspendByPeerWarning(Thread* self,
+ LogSeverity severity,
+ const char* message,
jobject peer) {
JNIEnvExt* env = self->GetJniEnv();
ScopedLocalRef<jstring>
- scoped_name_string(env, (jstring)env->GetObjectField(
- peer, WellKnownClasses::java_lang_Thread_name));
+ scoped_name_string(env, static_cast<jstring>(env->GetObjectField(
+ peer, WellKnownClasses::java_lang_Thread_name)));
ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
if (scoped_name_chars.c_str() == nullptr) {
LOG(severity) << message << ": " << peer;
@@ -713,8 +721,10 @@
}
}
-Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
- bool debug_suspension, bool* timed_out) {
+Thread* ThreadList::SuspendThreadByPeer(jobject peer,
+ bool request_suspension,
+ bool debug_suspension,
+ bool* timed_out) {
const uint64_t start_time = NanoTime();
useconds_t sleep_us = kThreadSuspendInitialSleepUs;
*timed_out = false;
@@ -811,12 +821,14 @@
}
}
-static void ThreadSuspendByThreadIdWarning(LogSeverity severity, const char* message,
+static void ThreadSuspendByThreadIdWarning(LogSeverity severity,
+ const char* message,
uint32_t thread_id) {
LOG(severity) << StringPrintf("%s: %d", message, thread_id);
}
-Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension,
+Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
+ bool debug_suspension,
bool* timed_out) {
const uint64_t start_time = NanoTime();
useconds_t sleep_us = kThreadSuspendInitialSleepUs;
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index f0933f7..07ea10d 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -55,8 +55,8 @@
// Thread suspension support.
void ResumeAll()
- UNLOCK_FUNCTION(Locks::mutator_lock_)
- REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+ UNLOCK_FUNCTION(Locks::mutator_lock_);
void Resume(Thread* thread, bool for_debugger = false)
REQUIRES(!Locks::thread_suspend_count_lock_);
@@ -76,7 +76,8 @@
// is set to true.
Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
bool* timed_out)
- REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
// Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
@@ -84,7 +85,8 @@
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
// thread, that may be terminating. If the suspension times out then *timeout is set to true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
- REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
// Find an already suspended thread (or self) by its id.
@@ -92,7 +94,7 @@
// Run a checkpoint on threads, running threads are not suspended but run the checkpoint inside
// of the suspend check. Returns how many checkpoints that are expected to run, including for
- // already suspended threads.
+  // already suspended threads. See b/24191051.
size_t RunCheckpoint(Closure* checkpoint_function)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
@@ -101,14 +103,17 @@
// Flip thread roots from from-space refs to to-space refs. Used by
// the concurrent copying collector.
- size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
+ size_t FlipThreadRoots(Closure* thread_flip_visitor,
+ Closure* flip_callback,
gc::collector::GarbageCollector* collector)
- REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
// Suspends all threads
void SuspendAllForDebugger()
- REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
void SuspendSelfForDebugger()
@@ -127,10 +132,14 @@
// Add/remove current thread from list.
void Register(Thread* self)
- REQUIRES(Locks::runtime_shutdown_lock_, !Locks::mutator_lock_, !Locks::thread_list_lock_,
+ REQUIRES(Locks::runtime_shutdown_lock_)
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
- void Unregister(Thread* self) REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
- !Locks::thread_suspend_count_lock_);
+ void Unregister(Thread* self)
+ REQUIRES(!Locks::mutator_lock_,
+ !Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
void VisitRoots(RootVisitor* visitor) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -160,7 +169,9 @@
void WaitForOtherNonDaemonThreadsToExit()
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
- void SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr,
+ void SuspendAllInternal(Thread* self,
+ Thread* ignore1,
+ Thread* ignore2 = nullptr,
bool debug_suspend = false)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
@@ -201,8 +212,8 @@
!Locks::mutator_lock_);
// No REQUIRES(mutator_lock_) since the unlock function already asserts this.
~ScopedSuspendAll()
- UNLOCK_FUNCTION(Locks::mutator_lock_)
- REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+ UNLOCK_FUNCTION(Locks::mutator_lock_);
};
} // namespace art