Merge in lmp-release history after reset to lmp-dev
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index be966e1..4021c95 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -359,19 +359,19 @@
RegisterInfo* info = regs.Get(next);
// Try to allocate a register that doesn't hold a live value.
if (info->IsTemp() && !info->InUse() && info->IsDead()) {
+ // If it's wide, split it up.
+ if (info->IsWide()) {
+ // If the pair was associated with a wide value, unmark the partner as well.
+ if (info->SReg() != INVALID_SREG) {
+ RegisterInfo* partner = GetRegInfo(info->Partner());
+ DCHECK_EQ(info->GetReg().GetRegNum(), partner->Partner().GetRegNum());
+ DCHECK(partner->IsWide());
+ partner->SetIsWide(false);
+ }
+ info->SetIsWide(false);
+ }
Clobber(info->GetReg());
info->MarkInUse();
- /*
- * NOTE: "wideness" is an attribute of how the container is used, not its physical size.
- * The caller will set wideness as appropriate.
- */
- if (info->IsWide()) {
- RegisterInfo* partner = GetRegInfo(info->Partner());
- DCHECK_EQ(info->GetReg().GetRegNum(), partner->Partner().GetRegNum());
- DCHECK(partner->IsWide());
- info->SetIsWide(false);
- partner->SetIsWide(false);
- }
*next_temp = next + 1;
return info->GetReg();
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a208b51..efcc07d 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1527,6 +1527,7 @@
"Ljava/lang/IllegalAccessError;",
"Ljava/lang/IncompatibleClassChangeError;",
"Ljava/lang/InstantiationError;",
+ "Ljava/lang/LinkageError;",
"Ljava/lang/NoClassDefFoundError;",
"Ljava/lang/NoSuchFieldError;",
"Ljava/lang/NoSuchMethodError;"
@@ -1538,7 +1539,7 @@
}
}
if (!found) {
- LOG(FATAL) << "Unexpected exeption " << exception->Dump();
+ LOG(FATAL) << "Unexpected exception " << exception->Dump();
}
self->ClearException();
}
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index c0a865f..80fb9ea 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -32,7 +32,7 @@
Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
-Mutex* Locks::breakpoint_lock_ = nullptr;
+ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
@@ -880,7 +880,7 @@
UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
DCHECK(breakpoint_lock_ == nullptr);
- breakpoint_lock_ = new Mutex("breakpoint lock", current_lock_level);
+ breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
DCHECK(classlinker_classes_lock_ == nullptr);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d898e49..8d89f96 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -562,7 +562,7 @@
static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);
// Guards breakpoints.
- static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
+ static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
// Guards lists of classes within the class linker.
static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 6d2f21e..6f431c3 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -209,7 +209,7 @@
bool need_full_deoptimization_;
};
-static std::ostream& operator<<(std::ostream& os, Breakpoint& rhs)
+static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
return os;
@@ -373,7 +373,7 @@
static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
+ ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
@@ -740,7 +740,7 @@
{
// TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
- MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
+ ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
CHECK_EQ(gBreakpoints.size(), 0U);
}
@@ -3043,7 +3043,7 @@
}
static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (Breakpoint& breakpoint : gBreakpoints) {
if (breakpoint.Method() == m) {
return &breakpoint;
@@ -3054,7 +3054,7 @@
// Sanity checks all existing breakpoints on the same method.
static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
if (kIsDebugBuild) {
for (const Breakpoint& breakpoint : gBreakpoints) {
CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
@@ -3079,7 +3079,7 @@
mirror::ArtMethod* m = FromMethodId(location->method_id);
DCHECK(m != nullptr) << "No method for method id " << location->method_id;
- MutexLock mu(self, *Locks::breakpoint_lock_);
+ WriterMutexLock mu(self, *Locks::breakpoint_lock_);
const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
bool need_full_deoptimization;
if (existing_breakpoint == nullptr) {
@@ -3110,7 +3110,7 @@
// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
// request if we need to undeoptimize.
void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
- MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
+ WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
mirror::ArtMethod* m = FromMethodId(location->method_id);
DCHECK(m != nullptr) << "No method for method id " << location->method_id;
bool need_full_deoptimization = false;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 73907b2..9fe0509 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1585,7 +1585,7 @@
count_performed_homogeneous_space_compaction_++;
// Print statics log and resume all threads.
uint64_t duration = NanoTime() - start_time;
- LOG(INFO) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
+ VLOG(gc) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
<< PrettySize(space_size_before_compaction) << " -> "
<< PrettySize(space_size_after_compaction) << " compact-ratio: "
<< std::fixed << static_cast<double>(space_size_after_compaction) /
@@ -1736,7 +1736,7 @@
} else {
saved_str = " expanded " + PrettySize(-delta_allocated);
}
- LOG(INFO) << "Heap transition to " << process_state_ << " took "
+ VLOG(gc) << "Heap transition to " << process_state_ << " took "
<< PrettyDuration(duration) << saved_str;
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 21d11a5..d05cee5 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -177,7 +177,8 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
- InterpreterHandlerTable GetInterpreterHandlerTable() const {
+ InterpreterHandlerTable GetInterpreterHandlerTable() const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return interpreter_handler_table_;
}
@@ -220,31 +221,31 @@
return instrumentation_stubs_installed_;
}
- bool HasMethodEntryListeners() const {
+ bool HasMethodEntryListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return have_method_entry_listeners_;
}
- bool HasMethodExitListeners() const {
+ bool HasMethodExitListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return have_method_exit_listeners_;
}
- bool HasDexPcListeners() const {
+ bool HasDexPcListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return have_dex_pc_listeners_;
}
- bool HasFieldReadListeners() const {
+ bool HasFieldReadListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return have_field_read_listeners_;
}
- bool HasFieldWriteListeners() const {
+ bool HasFieldWriteListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return have_field_write_listeners_;
}
- bool HasExceptionCaughtListeners() const {
+ bool HasExceptionCaughtListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return have_exception_caught_listeners_;
}
- bool IsActive() const {
+ bool IsActive() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
have_field_read_listeners_ || have_field_write_listeners_ ||
have_exception_caught_listeners_ || have_method_unwind_listeners_;
@@ -343,7 +344,7 @@
LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_,
deoptimized_methods_lock_);
- void UpdateInterpreterHandlerTable() {
+ void UpdateInterpreterHandlerTable() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
interpreter_handler_table_ = IsActive() ? kAlternativeHandlerTable : kMainHandlerTable;
}
@@ -404,30 +405,30 @@
// Do we have any listeners for method entry events? Short-cut to avoid taking the
// instrumentation_lock_.
- bool have_method_entry_listeners_;
+ bool have_method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);
// Do we have any listeners for method exit events? Short-cut to avoid taking the
// instrumentation_lock_.
- bool have_method_exit_listeners_;
+ bool have_method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
// Do we have any listeners for method unwind events? Short-cut to avoid taking the
// instrumentation_lock_.
- bool have_method_unwind_listeners_;
+ bool have_method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
// Do we have any listeners for dex move events? Short-cut to avoid taking the
// instrumentation_lock_.
- bool have_dex_pc_listeners_;
+ bool have_dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
// Do we have any listeners for field read events? Short-cut to avoid taking the
// instrumentation_lock_.
- bool have_field_read_listeners_;
+ bool have_field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
// Do we have any listeners for field write events? Short-cut to avoid taking the
// instrumentation_lock_.
- bool have_field_write_listeners_;
+ bool have_field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
// Do we have any exception caught listeners? Short-cut to avoid taking the instrumentation_lock_.
- bool have_exception_caught_listeners_;
+ bool have_exception_caught_listeners_ GUARDED_BY(Locks::mutator_lock_);
// The event listeners, written to with the mutator_lock_ exclusively held.
std::list<InstrumentationListener*> method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);
@@ -451,7 +452,7 @@
// Current interpreter handler table. This is updated each time the thread state flags are
// modified.
- InterpreterHandlerTable interpreter_handler_table_;
+ InterpreterHandlerTable interpreter_handler_table_ GUARDED_BY(Locks::mutator_lock_);
// Greater than 0 if quick alloc entry points instrumented.
// TODO: The access and changes to this is racy and should be guarded by a lock.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 7cf26e6..9d85ec3 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -625,7 +625,7 @@
#endif
AssertThreadsAreSuspended(self, self, debug_thread);
- VLOG(threads) << *self << " SuspendAll complete";
+ VLOG(threads) << *self << " SuspendAllForDebugger complete";
}
void ThreadList::SuspendSelfForDebugger() {