Merge "Revert^2 "Pass boot class path to ImageSpace::LoadBootImage.""
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index 88178a0..f2e12f6 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -48,6 +48,13 @@
"libadbconnectiond",
]
+// Files associated with bionic / managed core library time zone APIs.
+art_runtime_time_zone_prebuilts = [
+ "apex_tz_version",
+ "apex_tzdata",
+ "apex_tzlookup.xml",
+]
+
// Modules listed in LOCAL_REQUIRED_MODULES for module art-tools in art/Android.mk.
art_tools_common_binaries = [
"dexdiag",
@@ -109,7 +116,8 @@
binaries: [],
}
},
- prebuilts: ["com.android.runtime.ld.config.txt"],
+ prebuilts: art_runtime_time_zone_prebuilts
+ + ["com.android.runtime.ld.config.txt"],
key: "com.android.runtime.key",
}
@@ -137,7 +145,8 @@
binaries: art_tools_binaries,
}
},
- prebuilts: ["com.android.runtime.ld.config.txt"],
+ prebuilts: art_runtime_time_zone_prebuilts
+ + ["com.android.runtime.ld.config.txt"],
key: "com.android.runtime.key",
}
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 1e7d76c..df50682 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -127,14 +127,6 @@
return cards_scanned;
}
-/*
- * Visitor is expected to take in a card and return the new value. When a value is modified, the
- * modify visitor is called.
- * visitor: The visitor which modifies the cards. Returns the new value for a card given an old
- * value.
- * modified: Whenever the visitor modifies a card, this visitor is called on the card. Enables
- * us to know which cards got cleared.
- */
template <typename Visitor, typename ModifiedVisitor>
inline void CardTable::ModifyCardsAtomic(uint8_t* scan_begin,
uint8_t* scan_end,
@@ -144,6 +136,7 @@
uint8_t* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
CheckCardValid(card_cur);
CheckCardValid(card_end);
+ DCHECK(visitor(kCardClean) == kCardClean);
// Handle any unaligned cards at the start.
while (!IsAligned<sizeof(intptr_t)>(card_cur) && card_cur < card_end) {
@@ -188,7 +181,8 @@
while (word_cur < word_end) {
while (true) {
expected_word = *word_cur;
- if (LIKELY(expected_word == 0)) {
+ static_assert(kCardClean == 0);
+ if (LIKELY(expected_word == 0 /* All kCardClean */ )) {
break;
}
for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index b84f22f..c99ed4b 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -95,12 +95,10 @@
}
/*
- * Visitor is expected to take in a card and return the new value. When a value is modified, the
- * modify visitor is called.
- * visitor: The visitor which modifies the cards. Returns the new value for a card given an old
- * value.
- * modified: Whenever the visitor modifies a card, this visitor is called on the card. Enables
- * us to know which cards got cleared.
+ * Modify cards in the range from scan_begin (inclusive) to scan_end (exclusive). Each card
+ * value v is replaced by visitor(v). Visitor() should not have side-effects.
+ * Whenever a card value is changed, modified(card_address, old_value, new_value) is invoked.
+ * As a performance optimization, clean cards may be skipped entirely, so this requires that
*/
template <typename Visitor, typename ModifiedVisitor>
void ModifyCardsAtomic(uint8_t* scan_begin,
diff --git a/runtime/gc/accounting/card_table_test.cc b/runtime/gc/accounting/card_table_test.cc
index 87965ed..12baaa4 100644
--- a/runtime/gc/accounting/card_table_test.cc
+++ b/runtime/gc/accounting/card_table_test.cc
@@ -60,7 +60,7 @@
uint8_t* HeapLimit() const {
return HeapBegin() + heap_size_;
}
- // Return a pseudo random card for an address.
+ // Return a non-zero pseudo random card for an address.
uint8_t PseudoRandomCard(const uint8_t* addr) const {
size_t offset = RoundDown(addr - heap_begin_, CardTable::kCardSize);
return 1 + offset % 254;
@@ -97,7 +97,8 @@
class UpdateVisitor {
public:
uint8_t operator()(uint8_t c) const {
- return c * 93 + 123;
+ // ModifyCardsAtomic requires visitor(kCardClean /* == 0 */) == kCardClean; in practice the
+ return c == 0 ? 0 : c * 93 + 123;
}
void operator()(uint8_t* /*card*/, uint8_t /*expected_value*/, uint8_t /*new_value*/) const {
}
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 12f1522..42f940e 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -164,7 +164,8 @@
have_watched_frame_pop_listeners_(false),
have_branch_listeners_(false),
have_exception_handled_listeners_(false),
- deoptimized_methods_lock_("deoptimized methods lock", kGenericBottomLock),
+ deoptimized_methods_lock_(new ReaderWriterMutex("deoptimized methods lock",
+ kGenericBottomLock)),
deoptimization_enabled_(false),
interpreter_handler_table_(kMainHandlerTable),
quick_alloc_entry_points_instrumentation_counter_(0),
@@ -743,7 +744,7 @@
// Restore stack only if there is no method currently deoptimized.
bool empty;
{
- ReaderMutexLock mu(self, deoptimized_methods_lock_);
+ ReaderMutexLock mu(self, *GetDeoptimizedMethodsLock());
empty = IsDeoptimizedMethodsEmpty(); // Avoid lock violation.
}
if (empty) {
@@ -931,7 +932,7 @@
Thread* self = Thread::Current();
{
- WriterMutexLock mu(self, deoptimized_methods_lock_);
+ WriterMutexLock mu(self, *GetDeoptimizedMethodsLock());
bool has_not_been_deoptimized = AddDeoptimizedMethod(method);
CHECK(has_not_been_deoptimized) << "Method " << ArtMethod::PrettyMethod(method)
<< " is already deoptimized";
@@ -955,7 +956,7 @@
Thread* self = Thread::Current();
bool empty;
{
- WriterMutexLock mu(self, deoptimized_methods_lock_);
+ WriterMutexLock mu(self, *GetDeoptimizedMethodsLock());
bool found_and_erased = RemoveDeoptimizedMethod(method);
CHECK(found_and_erased) << "Method " << ArtMethod::PrettyMethod(method)
<< " is not deoptimized";
@@ -987,12 +988,12 @@
bool Instrumentation::IsDeoptimized(ArtMethod* method) {
DCHECK(method != nullptr);
- ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+ ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
return IsDeoptimizedMethod(method);
}
void Instrumentation::EnableDeoptimization() {
- ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+ ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
CHECK(IsDeoptimizedMethodsEmpty());
CHECK_EQ(deoptimization_enabled_, false);
deoptimization_enabled_ = true;
@@ -1009,7 +1010,7 @@
while (true) {
ArtMethod* method;
{
- ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+ ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
if (IsDeoptimizedMethodsEmpty()) {
break;
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 3bd4fb5..d4c3c29 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -19,12 +19,13 @@
#include <stdint.h>
#include <list>
+#include <memory>
#include <unordered_set>
#include "arch/instruction_set.h"
#include "base/enums.h"
+#include "base/locks.h"
#include "base/macros.h"
-#include "base/mutex.h"
#include "base/safe_map.h"
#include "gc_root.h"
@@ -39,6 +40,7 @@
template <typename T> class Handle;
template <typename T> class MutableHandle;
union JValue;
+class SHARED_LOCKABLE ReaderWriterMutex;
class ShadowFrame;
class Thread;
enum class DeoptimizationMethodType;
@@ -211,11 +213,11 @@
// Deoptimization.
void EnableDeoptimization()
REQUIRES(Locks::mutator_lock_)
- REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES(!GetDeoptimizedMethodsLock());
// Calls UndeoptimizeEverything which may visit class linker classes through ConfigureStubs.
void DisableDeoptimization(const char* key)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
- REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES(!GetDeoptimizedMethodsLock());
bool AreAllMethodsDeoptimized() const {
return interpreter_stubs_installed_;
@@ -231,7 +233,7 @@
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
REQUIRES(!Locks::thread_list_lock_,
!Locks::classlinker_classes_lock_,
- !deoptimized_methods_lock_);
+ !GetDeoptimizedMethodsLock());
// Executes everything with compiled code (or interpreter if there is no code). May visit class
// linker classes through ConfigureStubs.
@@ -239,23 +241,23 @@
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
REQUIRES(!Locks::thread_list_lock_,
!Locks::classlinker_classes_lock_,
- !deoptimized_methods_lock_);
+ !GetDeoptimizedMethodsLock());
// Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static
// method (except a class initializer) set to the resolution trampoline will be deoptimized only
// once its declaring class is initialized.
void Deoptimize(ArtMethod* method)
- REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !GetDeoptimizedMethodsLock());
// Undeoptimze the method by restoring its entrypoints. Nevertheless, a static method
// (except a class initializer) set to the resolution trampoline will be updated only once its
// declaring class is initialized.
void Undeoptimize(ArtMethod* method)
- REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_);
+ REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !GetDeoptimizedMethodsLock());
// Indicates whether the method has been deoptimized so it is executed with the interpreter.
bool IsDeoptimized(ArtMethod* method)
- REQUIRES(!deoptimized_methods_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!GetDeoptimizedMethodsLock()) REQUIRES_SHARED(Locks::mutator_lock_);
// Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
void EnableMethodTracing(const char* key,
@@ -263,14 +265,14 @@
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
REQUIRES(!Locks::thread_list_lock_,
!Locks::classlinker_classes_lock_,
- !deoptimized_methods_lock_);
+ !GetDeoptimizedMethodsLock());
// Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter.
void DisableMethodTracing(const char* key)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
REQUIRES(!Locks::thread_list_lock_,
!Locks::classlinker_classes_lock_,
- !deoptimized_methods_lock_);
+ !GetDeoptimizedMethodsLock());
InterpreterHandlerTable GetInterpreterHandlerTable() const
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -289,19 +291,19 @@
// Update the code of a method respecting any installed stubs.
void UpdateMethodsCode(ArtMethod* method, const void* quick_code)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
// Update the code of a native method to a JITed stub.
void UpdateNativeMethodsCodeToJitCode(ArtMethod* method, const void* quick_code)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
// Update the code of a method to the interpreter respecting any installed stubs from debugger.
void UpdateMethodsCodeToInterpreterEntryPoint(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
// Update the code of a method respecting any installed stubs from debugger.
void UpdateMethodsCodeForJavaDebuggable(ArtMethod* method, const void* quick_code)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
// Return the code that we can execute for an invoke including from the JIT.
const void* GetCodeForInvoke(ArtMethod* method) const
@@ -483,7 +485,7 @@
// being returned from.
TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc,
uint64_t* gpr_result, uint64_t* fpr_result)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
// Pops nframes instrumentation frames from the current thread. Returns the return pc for the last
// instrumentation frame that's popped.
@@ -492,10 +494,10 @@
// Call back for configure stubs.
void InstallStubsForClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES(!GetDeoptimizedMethodsLock());
void InstallStubsForMethod(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
// Install instrumentation exit stub on every method of the stack of the given thread.
// This is used by the debugger to cause a deoptimization of the thread's stack after updating
@@ -528,7 +530,7 @@
// becomes the highest instrumentation level required by a client.
void ConfigureStubs(const char* key, InstrumentationLevel desired_instrumentation_level)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
- REQUIRES(!deoptimized_methods_lock_,
+ REQUIRES(!GetDeoptimizedMethodsLock(),
!Locks::thread_list_lock_,
!Locks::classlinker_classes_lock_);
@@ -583,18 +585,21 @@
// Read barrier-aware utility functions for accessing deoptimized_methods_
bool AddDeoptimizedMethod(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(GetDeoptimizedMethodsLock());
bool IsDeoptimizedMethod(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
bool RemoveDeoptimizedMethod(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(GetDeoptimizedMethodsLock());
ArtMethod* BeginDeoptimizedMethod()
- REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
bool IsDeoptimizedMethodsEmpty() const
- REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
void UpdateMethodsCodeImpl(ArtMethod* method, const void* quick_code)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
+ ReaderWriterMutex* GetDeoptimizedMethodsLock() const {
+ return deoptimized_methods_lock_.get();
+ }
// Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
bool instrumentation_stubs_installed_;
@@ -676,8 +681,8 @@
// The set of methods being deoptimized (by the debugger) which must be executed with interpreter
// only.
- mutable ReaderWriterMutex deoptimized_methods_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
- std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(deoptimized_methods_lock_);
+ mutable std::unique_ptr<ReaderWriterMutex> deoptimized_methods_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
+ std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(GetDeoptimizedMethodsLock());
bool deoptimization_enabled_;
// Current interpreter handler table. This is updated each time the thread state flags are
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index fdb6b86..749758a 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -758,6 +758,10 @@
}
void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
+ if (IsInZygoteExecSpace(code_ptr)) {
+ // No need to free, this is shared memory.
+ return;
+ }
uintptr_t allocation = FromCodeToAllocation(code_ptr);
// Notify native debugger that we are about to remove the code.
// It does nothing if we are not using native debugger.