Merge "CW on Master: Propagate or throw exception when no class found happens in interpreter."
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index dafefea..b31e9a2 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -766,8 +766,9 @@
// Generate code for all slow paths.
void Mir2Lir::HandleSlowPaths() {
- int n = slow_paths_.Size();
- for (int i = 0; i < n; ++i) {
+ // We should check slow_paths_.Size() every time, because a new slow path
+ // may be created during slowpath->Compile().
+ for (size_t i = 0; i < slow_paths_.Size(); ++i) {
LIRSlowPath* slowpath = slow_paths_.Get(i);
slowpath->Compile();
}
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 4e973d8..8df5b6d 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -327,6 +327,13 @@
{ kX86 ## opname ## RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE1, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RM", "!0r,[!1r+!2d]" }, \
{ kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE12, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" }
+// This is a special encoding with r8_form on the second register only
+// for Movzx8 and Movsx8.
+#define EXT_0F_R8_FORM_ENCODING_MAP(opname, prefix, opcode, reg_def) \
+{ kX86 ## opname ## RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE1, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0, true }, #opname "RR", "!0r,!1r" }, \
+{ kX86 ## opname ## RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE1, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE12, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" }
+
#define EXT_0F_REX_W_ENCODING_MAP(opname, prefix, opcode, reg_def) \
{ kX86 ## opname ## RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE1, { prefix, REX_W, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RR", "!0r,!1r" }, \
{ kX86 ## opname ## RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE1, { prefix, REX_W, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RM", "!0r,[!1r+!2d]" }, \
@@ -488,9 +495,9 @@
{ kX86LockCmpxchg64A, kArray, IS_STORE | IS_QUAD_OP | REG_USE01 | REG_DEFAD_USEAD | REG_USEC | REG_USEB | SETS_CCODES, { 0xF0, 0, 0x0F, 0xC7, 0, 1, 0, 0, false }, "Lock Cmpxchg8b", "[!0r+!1r<<!2d+!3d]" },
{ kX86XchgMR, kMemReg, IS_STORE | IS_LOAD | IS_TERTIARY_OP | REG_DEF2 | REG_USE02, { 0, 0, 0x87, 0, 0, 0, 0, 0, false }, "Xchg", "[!0r+!1d],!2r" },
- EXT_0F_ENCODING_MAP(Movzx8, 0x00, 0xB6, REG_DEF0),
+ EXT_0F_R8_FORM_ENCODING_MAP(Movzx8, 0x00, 0xB6, REG_DEF0),
EXT_0F_ENCODING_MAP(Movzx16, 0x00, 0xB7, REG_DEF0),
- EXT_0F_ENCODING_MAP(Movsx8, 0x00, 0xBE, REG_DEF0),
+ EXT_0F_R8_FORM_ENCODING_MAP(Movsx8, 0x00, 0xBE, REG_DEF0),
EXT_0F_ENCODING_MAP(Movsx16, 0x00, 0xBF, REG_DEF0),
EXT_0F_ENCODING_MAP(Movzx8q, REX_W, 0xB6, REG_DEF0),
EXT_0F_ENCODING_MAP(Movzx16q, REX_W, 0xB7, REG_DEF0),
@@ -593,6 +600,10 @@
}
}
+static bool IsByteSecondOperand(const X86EncodingMap* entry) {
+ return StartsWith(entry->name, "Movzx8") || StartsWith(entry->name, "Movsx8");
+}
+
size_t X86Mir2Lir::ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
int32_t raw_base, int32_t displacement) {
bool has_modrm = HasModrm(entry);
@@ -613,7 +624,8 @@
bool registers_need_rex_prefix = NeedsRex(raw_reg) || NeedsRex(raw_index) || NeedsRex(raw_base);
if (r8_form) {
// Do we need an empty REX prefix to normalize byte registers?
- registers_need_rex_prefix = registers_need_rex_prefix || (RegStorage::RegNum(raw_reg) >= 4);
+ registers_need_rex_prefix = registers_need_rex_prefix ||
+ (RegStorage::RegNum(raw_reg) >= 4 && !IsByteSecondOperand(entry));
registers_need_rex_prefix = registers_need_rex_prefix ||
(modrm_is_reg_reg && (RegStorage::RegNum(raw_base) >= 4));
}
@@ -877,7 +889,7 @@
uint8_t rex = 0;
if (r8_form) {
// Do we need an empty REX prefix to normalize byte register addressing?
- if (RegStorage::RegNum(raw_reg_r) >= 4) {
+ if (RegStorage::RegNum(raw_reg_r) >= 4 && !IsByteSecondOperand(entry)) {
rex |= 0x40; // REX.0000
} else if (modrm_is_reg_reg && RegStorage::RegNum(raw_reg_b) >= 4) {
rex |= 0x40; // REX.0000
@@ -1167,7 +1179,9 @@
}
void X86Mir2Lir::EmitRegReg(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2) {
- CheckValidByteRegister(entry, raw_reg1);
+ if (!IsByteSecondOperand(entry)) {
+ CheckValidByteRegister(entry, raw_reg1);
+ }
CheckValidByteRegister(entry, raw_reg2);
EmitPrefixAndOpcode(entry, raw_reg1, NO_REG, raw_reg2);
uint8_t low_reg1 = LowRegisterBits(raw_reg1);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index cf29e52..f1166f6 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -761,54 +761,59 @@
}
bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
- return false;
-// Turned off until tests available in Art.
-//
-// RegLocation rl_src_address = info->args[0]; // long address
-// RegLocation rl_address;
-// if (!cu_->target64) {
-// rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[0]
-// rl_address = LoadValue(rl_src_address, kCoreReg);
-// } else {
-// rl_address = LoadValueWide(rl_src_address, kCoreReg);
-// }
-// RegLocation rl_dest = size == k64 ? InlineTargetWide(info) : InlineTarget(info);
-// RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-// // Unaligned access is allowed on x86.
-// LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
-// if (size == k64) {
-// StoreValueWide(rl_dest, rl_result);
-// } else {
-// DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
-// StoreValue(rl_dest, rl_result);
-// }
-// return true;
+ RegLocation rl_src_address = info->args[0]; // long address
+ RegLocation rl_address;
+ if (!cu_->target64) {
+ rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[0]
+ rl_address = LoadValue(rl_src_address, kCoreReg);
+ } else {
+ rl_address = LoadValueWide(rl_src_address, kCoreReg);
+ }
+ RegLocation rl_dest = size == k64 ? InlineTargetWide(info) : InlineTarget(info);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ // Unaligned access is allowed on x86.
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
+ if (size == k64) {
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
+ StoreValue(rl_dest, rl_result);
+ }
+ return true;
}
bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
- return false;
-// Turned off until tests available in Art.
-//
-// RegLocation rl_src_address = info->args[0]; // long address
-// RegLocation rl_address;
-// if (!cu_->target64) {
-// rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[0]
-// rl_address = LoadValue(rl_src_address, kCoreReg);
-// } else {
-// rl_address = LoadValueWide(rl_src_address, kCoreReg);
-// }
-// RegLocation rl_src_value = info->args[2]; // [size] value
-// if (size == k64) {
-// // Unaligned access is allowed on x86.
-// RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
-// StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
-// } else {
-// DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
-// // Unaligned access is allowed on x86.
-// RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
-// StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
-// }
-// return true;
+ RegLocation rl_src_address = info->args[0]; // long address
+ RegLocation rl_address;
+ if (!cu_->target64) {
+ rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[0]
+ rl_address = LoadValue(rl_src_address, kCoreReg);
+ } else {
+ rl_address = LoadValueWide(rl_src_address, kCoreReg);
+ }
+ RegLocation rl_src_value = info->args[2]; // [size] value
+ RegLocation rl_value;
+ if (size == k64) {
+ // Unaligned access is allowed on x86.
+ rl_value = LoadValueWide(rl_src_value, kCoreReg);
+ } else {
+ DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
+ // In 32-bit mode only the EAX..EDX registers can be used with Mov8MR.
+ if (!cu_->target64 && size == kSignedByte) {
+ rl_src_value = UpdateLocTyped(rl_src_value, kCoreReg);
+ if (rl_src_value.location == kLocPhysReg && !IsByteRegister(rl_src_value.reg)) {
+ RegStorage temp = AllocateByteRegister();
+ OpRegCopy(temp, rl_src_value.reg);
+ rl_value.reg = temp;
+ } else {
+ rl_value = LoadValue(rl_src_value, kCoreReg);
+ }
+ } else {
+ rl_value = LoadValue(rl_src_value, kCoreReg);
+ }
+ }
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
+ return true;
}
void X86Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
@@ -831,14 +836,12 @@
bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
- if (cu_->instruction_set == kX86_64) {
- return false; // TODO: Verify working on x86-64.
- }
-
// Unused - RegLocation rl_src_unsafe = info->args[0];
RegLocation rl_src_obj = info->args[1]; // Object - known non-null
RegLocation rl_src_offset = info->args[2]; // long low
- rl_src_offset = NarrowRegLoc(rl_src_offset); // ignore high half in info->args[3]
+ if (!cu_->target64) {
+ rl_src_offset = NarrowRegLoc(rl_src_offset); // ignore high half in info->args[3]
+ }
RegLocation rl_src_expected = info->args[4]; // int, long or Object
// If is_long, high half is in info->args[5]
RegLocation rl_src_new_value = info->args[is_long ? 6 : 5]; // int, long or Object
@@ -846,21 +849,21 @@
if (is_long && cu_->target64) {
// RAX must hold expected for CMPXCHG. Neither rl_new_value, nor r_ptr may be in RAX.
- FlushReg(rs_r0);
- Clobber(rs_r0);
- LockTemp(rs_r0);
+ FlushReg(rs_r0q);
+ Clobber(rs_r0q);
+ LockTemp(rs_r0q);
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
RegLocation rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
- RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
- LoadValueDirectWide(rl_src_expected, rs_r0);
+ RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
+ LoadValueDirectWide(rl_src_expected, rs_r0q);
NewLIR5(kX86LockCmpxchg64AR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
// After a store we need to insert barrier in case of potential load. Since the
// locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
GenMemBarrier(kStoreLoad);
- FreeTemp(rs_r0);
+ FreeTemp(rs_r0q);
} else if (is_long) {
// TODO: avoid unnecessary loads of SI and DI when the values are in registers.
// TODO: CFI support.
@@ -942,7 +945,12 @@
LockTemp(rs_r0);
}
- RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
+ RegLocation rl_offset;
+ if (cu_->target64) {
+ rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
+ } else {
+ rl_offset = LoadValue(rl_src_offset, kCoreReg);
+ }
LoadValueDirect(rl_src_expected, rs_r0);
NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index fdbc9c2..289dc1d 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -26,7 +26,7 @@
#include <fstream>
#include <memory>
-#include "../../external/icu4c/common/unicode/uvernum.h"
+#include "../../external/icu/icu4c/source/common/unicode/uvernum.h"
#include "base/macros.h"
#include "base/stl_util.h"
#include "base/stringprintf.h"
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 6161aff..c95be01 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1996,13 +1996,14 @@
case kTerminated:
return JDWP::TS_ZOMBIE;
case kTimedWaiting:
+ case kWaitingForCheckPointsToRun:
case kWaitingForDebuggerSend:
case kWaitingForDebuggerSuspension:
case kWaitingForDebuggerToAttach:
case kWaitingForDeoptimization:
case kWaitingForGcToComplete:
- case kWaitingForCheckPointsToRun:
case kWaitingForJniOnLoad:
+ case kWaitingForMethodTracingStart:
case kWaitingForSignalCatcherOutput:
case kWaitingInMainDebuggerLoop:
case kWaitingInMainSignalCatcherLoop:
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 6d70a38..511e9f8 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -981,6 +981,7 @@
friend class VerifyReferenceCardVisitor;
friend class VerifyReferenceVisitor;
friend class VerifyObjectVisitor;
+ friend class ScopedHeapFill;
friend class ScopedHeapLock;
friend class space::SpaceTest;
@@ -997,6 +998,25 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};
+// ScopedHeapFill changes the bytes allocated counter to be equal to the growth limit. This
+// causes the next allocation to perform a GC and possibly an OOM. It can be used to ensure that a
+// GC happens in specific methods such as ThrowIllegalMonitorStateExceptionF in Monitor::Wait.
+class ScopedHeapFill {
+ public:
+ explicit ScopedHeapFill(Heap* heap)
+ : heap_(heap),
+ delta_(heap_->GetMaxMemory() - heap_->GetBytesAllocated()) {
+ heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(delta_);
+ }
+ ~ScopedHeapFill() {
+ heap_->num_bytes_allocated_.FetchAndSubSequentiallyConsistent(delta_);
+ }
+
+ private:
+ Heap* const heap_;
+ const int64_t delta_;
+};
+
} // namespace gc
} // namespace art
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 8f5da83..f459b59 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -137,7 +137,8 @@
new_quick_code = GetQuickResolutionTrampoline(class_linker);
}
} else { // !uninstall
- if ((interpreter_stubs_installed_ || IsDeoptimized(method)) && !method->IsNative()) {
+ if ((interpreter_stubs_installed_ || forced_interpret_only_ || IsDeoptimized(method)) &&
+ !method->IsNative()) {
new_portable_code = GetPortableToInterpreterBridge();
new_quick_code = GetQuickToInterpreterBridge();
} else {
@@ -150,7 +151,9 @@
new_quick_code = class_linker->GetQuickOatCodeFor(method);
DCHECK(new_quick_code != GetQuickToInterpreterBridgeTrampoline(class_linker));
if (entry_exit_stubs_installed_ && new_quick_code != GetQuickToInterpreterBridge()) {
- DCHECK(new_portable_code != GetPortableToInterpreterBridge());
+ // TODO: portable to quick bridge. Bug: 8196384. We cannot enable the check below as long
+ // as GetPortableToQuickBridge() == GetPortableToInterpreterBridge().
+ // DCHECK(new_portable_code != GetPortableToInterpreterBridge());
new_portable_code = GetPortableToInterpreterBridge();
new_quick_code = GetQuickInstrumentationEntryPoint();
}
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 86db893..bae67f2 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -85,6 +85,7 @@
case kWaitingForJniOnLoad: return kJavaWaiting;
case kWaitingForSignalCatcherOutput: return kJavaWaiting;
case kWaitingInMainSignalCatcherLoop: return kJavaWaiting;
+ case kWaitingForMethodTracingStart: return kJavaWaiting;
case kSuspended: return kJavaRunnable;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 3b14aaa..efa205e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -147,6 +147,13 @@
}
Runtime::~Runtime() {
+ if (method_trace_ && Thread::Current() == nullptr) {
+ // We need a current thread to shutdown method tracing: re-attach it now.
+ JNIEnv* unused_env;
+ if (GetJavaVM()->AttachCurrentThread(&unused_env, nullptr) != JNI_OK) {
+ LOG(ERROR) << "Could not attach current thread before runtime shutdown.";
+ }
+ }
if (dump_gc_performance_on_shutdown_) {
// This can't be called from the Heap destructor below because it
// could call RosAlloc::InspectAll() which needs the thread_list
@@ -681,6 +688,7 @@
Trace::SetDefaultClockSource(options->profile_clock_source_);
if (options->method_trace_) {
+ ScopedThreadStateChange tsc(self, kWaitingForMethodTracingStart);
Trace::Start(options->method_trace_file_.c_str(), -1, options->method_trace_file_size_, 0,
false, false, 0);
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index d20a459..54732fa 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -153,8 +153,8 @@
#if HAVE_TIMED_RWLOCK
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
-static void UnsafeLogFatalForThreadSuspendAllTimeout(Thread* self) NO_THREAD_SAFETY_ANALYSIS __attribute__((noreturn));
-static void UnsafeLogFatalForThreadSuspendAllTimeout(Thread* self) {
+static void UnsafeLogFatalForThreadSuspendAllTimeout() NO_THREAD_SAFETY_ANALYSIS __attribute__((noreturn));
+static void UnsafeLogFatalForThreadSuspendAllTimeout() {
Runtime* runtime = Runtime::Current();
std::ostringstream ss;
ss << "Thread suspend timeout\n";
@@ -332,7 +332,7 @@
#if HAVE_TIMED_RWLOCK
// Timeout if we wait more than 30 seconds.
if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
- UnsafeLogFatalForThreadSuspendAllTimeout(self);
+ UnsafeLogFatalForThreadSuspendAllTimeout();
}
#else
Locks::mutator_lock_->ExclusiveLock(self);
@@ -351,6 +351,7 @@
void ThreadList::ResumeAll() {
Thread* self = Thread::Current();
+ DCHECK(self != nullptr);
VLOG(threads) << *self << " ResumeAll starting";
@@ -587,7 +588,7 @@
#if HAVE_TIMED_RWLOCK
// Timeout if we wait more than 30 seconds.
if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
- UnsafeLogFatalForThreadSuspendAllTimeout(self);
+ UnsafeLogFatalForThreadSuspendAllTimeout();
} else {
Locks::mutator_lock_->ExclusiveUnlock(self);
}
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index 57bf4f1..0e47d21 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -38,6 +38,7 @@
kWaitingForSignalCatcherOutput, // WAITING TS_WAIT waiting for signal catcher IO to complete
kWaitingInMainSignalCatcherLoop, // WAITING TS_WAIT blocking/reading/processing signals
kWaitingForDeoptimization, // WAITING TS_WAIT waiting for deoptimization suspend all
+ kWaitingForMethodTracingStart, // WAITING TS_WAIT waiting for method tracing to start
kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger
diff --git a/runtime/trace.h b/runtime/trace.h
index 08da16f..3f5d80a 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -65,11 +65,14 @@
static void Start(const char* trace_filename, int trace_fd, int buffer_size, int flags,
bool direct_to_ddms, bool sampling_enabled, int interval_us)
- LOCKS_EXCLUDED(Locks::mutator_lock_,
- Locks::thread_list_lock_,
- Locks::thread_suspend_count_lock_,
- Locks::trace_lock_);
- static void Stop() LOCKS_EXCLUDED(Locks::trace_lock_);
+ LOCKS_EXCLUDED(Locks::mutator_lock_,
+ Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_,
+ Locks::trace_lock_);
+ static void Stop()
+ LOCKS_EXCLUDED(Locks::mutator_lock_,
+ Locks::thread_list_lock_,
+ Locks::trace_lock_);
static void Shutdown() LOCKS_EXCLUDED(Locks::trace_lock_);
static TracingMode GetMethodTracingMode() LOCKS_EXCLUDED(Locks::trace_lock_);
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 26e7d31..5a5805f 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -101,11 +101,6 @@
}
const struct sigaction& action = user_sigactions[sig].GetAction();
-
- // Only deliver the signal if the signal was not masked out.
- if (sigismember(&action.sa_mask, sig)) {
- return;
- }
if ((action.sa_flags & SA_SIGINFO) == 0) {
if (action.sa_handler != NULL) {
action.sa_handler(sig);
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 3b11879..f412034 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -15,9 +15,11 @@
*/
import junit.framework.Assert;
+import java.util.Arrays;
+import java.lang.reflect.Method;
public class Main {
- public static void main(String args[]) {
+ public static void main(String args[]) throws Exception {
test_Double_doubleToRawLongBits();
test_Double_longBitsToDouble();
test_Float_floatToRawIntBits();
@@ -50,6 +52,18 @@
test_String_isEmpty();
test_String_length();
test_Thread_currentThread();
+ initSupportMethodsForPeekPoke();
+ test_Memory_peekByte();
+ test_Memory_peekShort();
+ test_Memory_peekInt();
+ test_Memory_peekLong();
+ test_Memory_pokeByte();
+ test_Memory_pokeShort();
+ test_Memory_pokeInt();
+ test_Memory_pokeLong();
+ test_AtomicBoolean_compareAndSet();
+ test_AtomicInteger_compareAndSet();
+ test_AtomicLong_compareAndSet();
}
/*
@@ -82,6 +96,60 @@
Assert.assertNotNull(Thread.currentThread());
}
+ /**
+ * Will test inlining CAS, by inclusion of AtomicBoolean in core.oat.
+ */
+ public static void test_AtomicBoolean_compareAndSet() {
+ java.util.concurrent.atomic.AtomicBoolean ab = new java.util.concurrent.atomic.AtomicBoolean();
+ Assert.assertEquals(ab.compareAndSet(false, false), true);
+ Assert.assertEquals(ab.compareAndSet(true, false), false);
+ Assert.assertEquals(ab.compareAndSet(true, true), false);
+ Assert.assertEquals(ab.compareAndSet(false, true), true);
+ Assert.assertEquals(ab.compareAndSet(false, true), false);
+ Assert.assertEquals(ab.compareAndSet(false, false), false);
+ Assert.assertEquals(ab.compareAndSet(true, true), true);
+ Assert.assertEquals(ab.compareAndSet(true, false), true);
+ Assert.assertEquals(ab.compareAndSet(true, false), false);
+ Assert.assertEquals(ab.compareAndSet(true, true), false);
+ Assert.assertEquals(ab.compareAndSet(false, false), true);
+ }
+
+ /**
+ * Will test inlining CAS, by inclusion of AtomicInteger in core.oat.
+ */
+ public static void test_AtomicInteger_compareAndSet() {
+ java.util.concurrent.atomic.AtomicInteger ab = new java.util.concurrent.atomic.AtomicInteger();
+ Assert.assertEquals(ab.compareAndSet(0, 0), true);
+ Assert.assertEquals(ab.compareAndSet(0x12345678, 0), false);
+ Assert.assertEquals(ab.compareAndSet(0x12345678, 0x12345678), false);
+ Assert.assertEquals(ab.compareAndSet(0, 0x12345678), true);
+ Assert.assertEquals(ab.compareAndSet(0, 0x12345678), false);
+ Assert.assertEquals(ab.compareAndSet(0, 0), false);
+ Assert.assertEquals(ab.compareAndSet(0x12345678, 0x12345678), true);
+ Assert.assertEquals(ab.compareAndSet(0x12345678, 0), true);
+ Assert.assertEquals(ab.compareAndSet(0x12345678, 0), false);
+ Assert.assertEquals(ab.compareAndSet(0x12345678, 0x12345678), false);
+ Assert.assertEquals(ab.compareAndSet(0, 0), true);
+ }
+
+ /**
+ * Will test inlining CAS, by inclusion of AtomicLong in core.oat.
+ */
+ public static void test_AtomicLong_compareAndSet() {
+ java.util.concurrent.atomic.AtomicLong ab = new java.util.concurrent.atomic.AtomicLong();
+ Assert.assertEquals(ab.compareAndSet(0l, 0l), true);
+ Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0l), false);
+ Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0x1234567890l), false);
+ Assert.assertEquals(ab.compareAndSet(0l, 0x1234567890l), true);
+ Assert.assertEquals(ab.compareAndSet(0l, 0x1234567890l), false);
+ Assert.assertEquals(ab.compareAndSet(0l, 0l), false);
+ Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0x1234567890l), true);
+ Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0l), true);
+ Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0l), false);
+ Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0x1234567890l), false);
+ Assert.assertEquals(ab.compareAndSet(0l, 0l), true);
+ }
+
public static void test_String_length() {
String str0 = "";
String str1 = "x";
@@ -510,4 +578,131 @@
Assert.assertEquals(Long.reverse(Long.MIN_VALUE), 1L);
}
+ static Object runtime;
+ static Method address_of;
+ static Method peek_byte;
+ static Method peek_short;
+ static Method peek_int;
+ static Method peek_long;
+ static Method poke_byte;
+ static Method poke_short;
+ static Method poke_int;
+ static Method poke_long;
+
+ public static void initSupportMethodsForPeekPoke() throws Exception {
+ Class<?> vm_runtime = Class.forName("dalvik.system.VMRuntime");
+ Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
+ runtime = get_runtime.invoke(null);
+ address_of = vm_runtime.getDeclaredMethod("addressOf", Object.class);
+
+ Class<?> io_memory = Class.forName("libcore.io.Memory");
+ peek_byte = io_memory.getDeclaredMethod("peekByte", Long.TYPE);
+ peek_int = io_memory.getDeclaredMethod("peekInt", Long.TYPE, Boolean.TYPE);
+ peek_short = io_memory.getDeclaredMethod("peekShort", Long.TYPE, Boolean.TYPE);
+ peek_long = io_memory.getDeclaredMethod("peekLong", Long.TYPE, Boolean.TYPE);
+ poke_byte = io_memory.getDeclaredMethod("pokeByte", Long.TYPE, Byte.TYPE);
+ poke_short = io_memory.getDeclaredMethod("pokeShort", Long.TYPE, Short.TYPE, Boolean.TYPE);
+ poke_int = io_memory.getDeclaredMethod("pokeInt", Long.TYPE, Integer.TYPE, Boolean.TYPE);
+ poke_long = io_memory.getDeclaredMethod("pokeLong", Long.TYPE, Long.TYPE, Boolean.TYPE);
+ }
+
+ public static void test_Memory_peekByte() throws Exception {
+ byte[] b = new byte [2];
+ b[0] = 0x12;
+ b[1] = 0x11;
+ long address = (long)address_of.invoke(runtime, b);
+ Assert.assertEquals((byte)peek_byte.invoke(null, address), 0x12);
+ Assert.assertEquals((byte)peek_byte.invoke(null, address + 1), 0x11);
+ }
+
+ public static void test_Memory_peekShort() throws Exception {
+ byte[] b = new byte [3];
+ b[0] = 0x13;
+ b[1] = 0x12;
+ b[2] = 0x11;
+ long address = (long)address_of.invoke(runtime, b);
+ Assert.assertEquals((short)peek_short.invoke(null, address, false), 0x1213); // Aligned read
+ Assert.assertEquals((short)peek_short.invoke(null, address + 1, false), 0x1112); // Unaligned read
+ }
+
+ public static void test_Memory_peekInt() throws Exception {
+ byte[] b = new byte [5];
+ b[0] = 0x15;
+ b[1] = 0x14;
+ b[2] = 0x13;
+ b[3] = 0x12;
+ b[4] = 0x11;
+ long address = (long)address_of.invoke(runtime, b);
+ Assert.assertEquals((int)peek_int.invoke(null, address, false), 0x12131415);
+ Assert.assertEquals((int)peek_int.invoke(null, address + 1, false), 0x11121314);
+ }
+
+ public static void test_Memory_peekLong() throws Exception {
+ byte[] b = new byte [9];
+ b[0] = 0x19;
+ b[1] = 0x18;
+ b[2] = 0x17;
+ b[3] = 0x16;
+ b[4] = 0x15;
+ b[5] = 0x14;
+ b[6] = 0x13;
+ b[7] = 0x12;
+ b[8] = 0x11;
+ long address = (long)address_of.invoke(runtime, b);
+ Assert.assertEquals((long)peek_long.invoke(null, address, false), 0x1213141516171819L);
+ Assert.assertEquals((long)peek_long.invoke(null, address + 1, false), 0x1112131415161718L);
+ }
+
+ public static void test_Memory_pokeByte() throws Exception {
+ byte[] r = {0x11, 0x12};
+ byte[] b = new byte [2];
+ long address = (long)address_of.invoke(runtime, b);
+ poke_byte.invoke(null, address, (byte)0x11);
+ poke_byte.invoke(null, address + 1, (byte)0x12);
+ Assert.assertTrue(Arrays.equals(r, b));
+ }
+
+ public static void test_Memory_pokeShort() throws Exception {
+ byte[] ra = {0x12, 0x11, 0x13};
+ byte[] ru = {0x12, 0x22, 0x21};
+ byte[] b = new byte [3];
+ long address = (long)address_of.invoke(runtime, b);
+
+ // Aligned write
+ b[2] = 0x13;
+ poke_short.invoke(null, address, (short)0x1112, false);
+ Assert.assertTrue(Arrays.equals(ra, b));
+
+ // Unaligned write
+ poke_short.invoke(null, address + 1, (short)0x2122, false);
+ Assert.assertTrue(Arrays.equals(ru, b));
+ }
+
+ public static void test_Memory_pokeInt() throws Exception {
+ byte[] ra = {0x14, 0x13, 0x12, 0x11, 0x15};
+ byte[] ru = {0x14, 0x24, 0x23, 0x22, 0x21};
+ byte[] b = new byte [5];
+ long address = (long)address_of.invoke(runtime, b);
+
+ b[4] = 0x15;
+ poke_int.invoke(null, address, (int)0x11121314, false);
+ Assert.assertTrue(Arrays.equals(ra, b));
+
+ poke_int.invoke(null, address + 1, (int)0x21222324, false);
+ Assert.assertTrue(Arrays.equals(ru, b));
+ }
+
+ public static void test_Memory_pokeLong() throws Exception {
+ byte[] ra = {0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x19};
+ byte[] ru = {0x18, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21};
+ byte[] b = new byte [9];
+ long address = (long)address_of.invoke(runtime, b);
+
+ b[8] = 0x19;
+ poke_long.invoke(null, address, (long)0x1112131415161718L, false);
+ Assert.assertTrue(Arrays.equals(ra, b));
+
+ poke_long.invoke(null, address + 1, (long)0x2122232425262728L, false);
+ Assert.assertTrue(Arrays.equals(ru, b));
+ }
}
diff --git a/test/304-method-tracing/expected.txt b/test/304-method-tracing/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/304-method-tracing/expected.txt
diff --git a/test/304-method-tracing/info.txt b/test/304-method-tracing/info.txt
new file mode 100644
index 0000000..d3154e6
--- /dev/null
+++ b/test/304-method-tracing/info.txt
@@ -0,0 +1 @@
+Test method tracing from command-line.
diff --git a/test/304-method-tracing/run b/test/304-method-tracing/run
new file mode 100755
index 0000000..7bd1895
--- /dev/null
+++ b/test/304-method-tracing/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Runs the test with method tracing enabled.
+exec ${RUN} "$@" --runtime-option -Xmethod-trace --runtime-option -Xmethod-trace-file:${DEX_LOCATION}/trace.bin
diff --git a/test/304-method-tracing/src/Main.java b/test/304-method-tracing/src/Main.java
new file mode 100644
index 0000000..25cee6d
--- /dev/null
+++ b/test/304-method-tracing/src/Main.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+
+public class Main {
+ static class ThreadRunnable implements Runnable {
+ public void run() {
+ for (int i = 0; i < 1000; ++i) {
+ doNothing();
+ }
+ }
+
+ private void doNothing() {}
+ }
+
+ public static void main(String[] args) {
+ ArrayList<Thread> threads = new ArrayList<Thread>();
+ for (int i = 0; i < 10; ++i) {
+ threads.add(new Thread(new ThreadRunnable(), "TestThread-" + i));
+ }
+
+ for (Thread t : threads) {
+ t.start();
+ }
+
+ for (Thread t : threads) {
+ try {
+ t.join();
+ } catch (InterruptedException e) {
+ System.out.println("Thread " + t.getName() + " has been interrupted");
+ }
+ }
+ }
+}
diff --git a/test/run-all-tests b/test/run-all-tests
index 885ee44..25d5c5f 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -80,6 +80,9 @@
elif [ "x$1" = "x--64" ]; then
run_args="${run_args} --64"
shift
+ elif [ "x$1" = "x--trace" ]; then
+ run_args="${run_args} --trace"
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
usage="yes"
diff --git a/test/run-test b/test/run-test
index d1c5bb2..2989f25 100755
--- a/test/run-test
+++ b/test/run-test
@@ -64,7 +64,6 @@
target_mode="yes"
dev_mode="no"
update_mode="no"
-debug_mode="no"
runtime="art"
usage="no"
build_only="no"
@@ -162,6 +161,9 @@
run_args="${run_args} --64"
suffix64="64"
shift
+ elif [ "x$1" = "x--trace" ]; then
+ run_args="${run_args} --runtime-option -Xmethod-trace --runtime-option -Xmethod-trace-file:${DEX_LOCATION}/trace.bin"
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
usage="yes"
@@ -257,6 +259,7 @@
echo " --output-path [path] Location where to store the build" \
"files."
echo " --64 Run the test in 64-bit mode"
+ echo " --trace Run with method tracing"
) 1>&2
exit 1
fi