Stop the thread from reattaching during runtime shutdown while tracing.

Method tracing no longer forces Runtime::~Runtime() to re-attach the current
thread. The clock overhead is now measured when the Trace is created, while a
current thread is guaranteed to exist, and cached in clock_overhead_ns_ so the
trace summary written at shutdown can simply read the stored value. In
addition, ThreadList::SuspendAll() and ResumeAll() now tolerate
Thread::Current() being null, so they can run from a detached thread during
shutdown.
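
For illustration only, here is a minimal standalone sketch of the caching idea
behind the trace.cc change. The names (FakeTrace, MeasureOverheadNs, NowMicros)
are made up for this sketch and are not ART APIs: the cost of reading the clock
is measured once in the constructor, while a current thread necessarily exists,
and the shutdown path only reads the cached value.

#include <chrono>
#include <cstdint>
#include <iostream>

// Stand-in for the per-call clock reads done by MeasureClockOverhead().
static uint64_t NowMicros() {
  return std::chrono::duration_cast<std::chrono::microseconds>(
      std::chrono::steady_clock::now().time_since_epoch()).count();
}

// Rough analogue of Trace::GetClockOverheadNanoSeconds(): time a batch of
// clock reads and report the average cost per read in nanoseconds.
static uint32_t MeasureOverheadNs() {
  const int kIterations = 4000;
  const uint64_t start = NowMicros();
  for (int i = 0; i < kIterations; ++i) {
    (void)NowMicros();
  }
  const uint64_t elapsed_us = NowMicros() - start;
  return static_cast<uint32_t>((elapsed_us * 1000) / kIterations);
}

class FakeTrace {
 public:
  // Measure once at construction and cache the result in a const member,
  // mirroring the new clock_overhead_ns_ field initialized in Trace::Trace().
  FakeTrace() : clock_overhead_ns_(MeasureOverheadNs()) {}

  // The summary written at shutdown only reads the cached value, so it no
  // longer depends on having an attached current thread.
  void DumpSummary() const {
    std::cout << "clock-call-overhead-nsec=" << clock_overhead_ns_ << "\n";
  }

 private:
  const uint32_t clock_overhead_ns_;
};

int main() {
  FakeTrace trace;
  trace.DumpSummary();
  return 0;
}

The sketch does not model the null-tolerant SuspendAll()/ResumeAll() logging
that the actual patch adds in thread_list.cc.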
Bug: 16024763
(cherry picked from commit c5d824a20c225763761a6dff43294b229ff35469)
Change-Id: Id5dcd4c8a131b93de00023c3ae9f518ea182614a
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1fb7948..4c56fcd 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -150,13 +150,6 @@
}
Runtime::~Runtime() {
- if (method_trace_ && Thread::Current() == nullptr) {
- // We need a current thread to shutdown method tracing: re-attach it now.
- JNIEnv* unused_env;
- if (GetJavaVM()->AttachCurrentThread(&unused_env, nullptr) != JNI_OK) {
- LOG(ERROR) << "Could not attach current thread before runtime shutdown.";
- }
- }
if (dump_gc_performance_on_shutdown_) {
// This can't be called from the Heap destructor below because it
// could call RosAlloc::InspectAll() which needs the thread_list
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ff1a079..7cf26e6 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -294,16 +294,19 @@
void ThreadList::SuspendAll() {
Thread* self = Thread::Current();
- DCHECK(self != nullptr);
- VLOG(threads) << *self << " SuspendAll starting...";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " SuspendAll starting...";
+ } else {
+ VLOG(threads) << "Thread[null] SuspendAll starting...";
+ }
ATRACE_BEGIN("Suspending mutator threads");
uint64_t start_time = NanoTime();
Locks::mutator_lock_->AssertNotHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
- if (kDebugLocking) {
+ if (kDebugLocking && self != nullptr) {
CHECK_NE(self->GetState(), kRunnable);
}
{
@@ -344,14 +347,21 @@
ATRACE_END();
ATRACE_BEGIN("Mutator threads suspended");
- VLOG(threads) << *self << " SuspendAll complete";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " SuspendAll complete";
+ } else {
+ VLOG(threads) << "Thread[null] SuspendAll complete";
+ }
}
void ThreadList::ResumeAll() {
Thread* self = Thread::Current();
- DCHECK(self != nullptr);
- VLOG(threads) << *self << " ResumeAll starting";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll starting";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll starting";
+ }
ATRACE_END();
ATRACE_BEGIN("Resuming mutator threads");
@@ -377,11 +387,20 @@
// Broadcast a notification to all suspended threads, some or all of
// which may choose to wake up. No need to wait for them.
- VLOG(threads) << *self << " ResumeAll waking others";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll waking others";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll waking others";
+ }
Thread::resume_cond_->Broadcast(self);
}
ATRACE_END();
- VLOG(threads) << *self << " ResumeAll complete";
+
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll complete";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll complete";
+ }
}
void ThreadList::Resume(Thread* thread, bool for_debugger) {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 1a450c4..0f5e06a 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -179,29 +179,29 @@
(clock_source_ == kProfilerClockSourceDual);
}
-static void MeasureClockOverhead(Trace* trace) {
- if (trace->UseThreadCpuClock()) {
+void Trace::MeasureClockOverhead() {
+ if (UseThreadCpuClock()) {
Thread::Current()->GetCpuMicroTime();
}
- if (trace->UseWallClock()) {
+ if (UseWallClock()) {
MicroTime();
}
}
// Compute an average time taken to measure clocks.
-static uint32_t GetClockOverheadNanoSeconds(Trace* trace) {
+uint32_t Trace::GetClockOverheadNanoSeconds() {
Thread* self = Thread::Current();
uint64_t start = self->GetCpuMicroTime();
for (int i = 4000; i > 0; i--) {
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
}
uint64_t elapsed_us = self->GetCpuMicroTime() - start;
@@ -445,7 +445,8 @@
Trace::Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled)
: trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags),
sampling_enabled_(sampling_enabled), clock_source_(default_clock_source_),
- buffer_size_(buffer_size), start_time_(MicroTime()), cur_offset_(0), overflow_(false) {
+ buffer_size_(buffer_size), start_time_(MicroTime()),
+ clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false) {
// Set up the beginning of the trace.
uint16_t trace_version = GetTraceVersion(clock_source_);
memset(buf_.get(), 0, kTraceHeaderLength);
@@ -481,7 +482,6 @@
uint64_t elapsed = MicroTime() - start_time_;
size_t final_offset = cur_offset_.LoadRelaxed();
- uint32_t clock_overhead_ns = GetClockOverheadNanoSeconds(this);
if ((flags_ & kTraceCountAllocs) != 0) {
Runtime::Current()->SetStatsEnabled(false);
@@ -507,7 +507,7 @@
os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed);
size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
os << StringPrintf("num-method-calls=%zd\n", num_records);
- os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns);
+ os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_);
os << StringPrintf("vm=art\n");
if ((flags_ & kTraceCountAllocs) != 0) {
os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS));
diff --git a/runtime/trace.h b/runtime/trace.h
index 9c8d35b..dd80a01 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -79,6 +79,8 @@
bool UseWallClock();
bool UseThreadCpuClock();
+ void MeasureClockOverhead();
+ uint32_t GetClockOverheadNanoSeconds();
void CompareAndUpdateStackTrace(Thread* thread, std::vector<mirror::ArtMethod*>* stack_trace)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -166,6 +168,9 @@
// Time trace was created.
const uint64_t start_time_;
+ // Clock overhead.
+ const uint32_t clock_overhead_ns_;
+
// Offset into buf_.
AtomicInteger cur_offset_;