Merge "Tighten up ahat public API."
diff --git a/build/Android.bp b/build/Android.bp
index 2c959d4..6766dd6 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -4,6 +4,7 @@
deps: [
"blueprint",
"blueprint-pathtools",
+ "blueprint-proptools",
"soong",
"soong-android",
"soong-cc",
diff --git a/build/art.go b/build/art.go
index 452b348..4e48d2d 100644
--- a/build/art.go
+++ b/build/art.go
@@ -19,6 +19,8 @@
"android/soong/cc"
"fmt"
"sync"
+
+ "github.com/google/blueprint/proptools"
)
var supportedArches = []string{"arm", "arm64", "mips", "mips64", "x86", "x86_64"}
@@ -210,31 +212,33 @@
func customLinker(ctx android.LoadHookContext) {
linker := envDefault(ctx, "CUSTOM_TARGET_LINKER", "")
- if linker != "" {
- type props struct {
- DynamicLinker string
- }
-
- p := &props{}
- p.DynamicLinker = linker
- ctx.AppendProperties(p)
+ type props struct {
+ DynamicLinker string
}
+
+ p := &props{}
+ if linker != "" {
+ p.DynamicLinker = linker
+ }
+
+ ctx.AppendProperties(p)
}
func prefer32Bit(ctx android.LoadHookContext) {
- if envTrue(ctx, "HOST_PREFER_32_BIT") {
- type props struct {
- Target struct {
- Host struct {
- Compile_multilib string
- }
+ type props struct {
+ Target struct {
+ Host struct {
+ Compile_multilib *string
}
}
-
- p := &props{}
- p.Target.Host.Compile_multilib = "prefer32"
- ctx.AppendProperties(p)
}
+
+ p := &props{}
+ if envTrue(ctx, "HOST_PREFER_32_BIT") {
+ p.Target.Host.Compile_multilib = proptools.StringPtr("prefer32")
+ }
+
+ ctx.AppendProperties(p)
}
func testMap(config android.Config) map[string][]string {
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index c5d3a6b..2929f36 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -26,6 +26,7 @@
#include "android-base/stringprintf.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/stringpiece.h"
#include "noop_compiler_callbacks.h"
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 547ffbc..135f9c7 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -66,6 +66,7 @@
#include "nativehelper/ScopedLocalRef.h"
#include "object_lock.h"
#include "runtime.h"
+#include "runtime_intrinsics.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread_list.h"
@@ -365,28 +366,6 @@
}
#undef CREATE_TRAMPOLINE
-static void SetupIntrinsic(Thread* self,
- Intrinsics intrinsic,
- InvokeType invoke_type,
- const char* class_name,
- const char* method_name,
- const char* signature)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- PointerSize image_size = class_linker->GetImagePointerSize();
- ObjPtr<mirror::Class> cls = class_linker->FindSystemClass(self, class_name);
- if (cls == nullptr) {
- LOG(FATAL) << "Could not find class of intrinsic " << class_name;
- }
- ArtMethod* method = cls->FindClassMethod(method_name, signature, image_size);
- if (method == nullptr || method->GetDeclaringClass() != cls) {
- LOG(FATAL) << "Could not find method of intrinsic "
- << class_name << " " << method_name << " " << signature;
- }
- DCHECK_EQ(method->GetInvokeType(), invoke_type);
- method->SetIntrinsic(static_cast<uint32_t>(intrinsic));
-}
-
void CompilerDriver::CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
@@ -405,14 +384,7 @@
// We don't need to setup the intrinsics for non boot image compilation, as
// those compilations will pick up a boot image that have the ArtMethod already
// set with the intrinsics flag.
- ScopedObjectAccess soa(Thread::Current());
-#define SETUP_INTRINSICS(Name, InvokeType, NeedsEnvironmentOrCache, SideEffects, Exceptions, \
- ClassName, MethodName, Signature) \
- SetupIntrinsic(soa.Self(), Intrinsics::k##Name, InvokeType, ClassName, MethodName, Signature);
-#include "intrinsics_list.h"
- INTRINSICS_LIST(SETUP_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef SETUP_INTRINSICS
+ InitializeIntrinsics();
}
// Compile:
// 1) Compile all classes and methods enabled for compilation. May fall back to dex-to-dex
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9d0b5c8..b8d1f52 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -536,6 +536,7 @@
break;
case kVirtual:
case kInterface:
+ case kPolymorphic:
LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
UNREACHABLE();
}
@@ -563,6 +564,9 @@
case kInterface:
entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
break;
+ case kPolymorphic:
+ LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
+ UNREACHABLE();
}
InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 3851877..f7fd910 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -527,6 +527,10 @@
StartAttributeStream("packed_type") << vec_operation->GetPackedType();
}
+ void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) OVERRIDE {
+ StartAttributeStream("alignment") << vec_mem_operation->GetAlignment().ToString();
+ }
+
void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
VisitVecBinaryOperation(hadd);
StartAttributeStream("unsigned") << std::boolalpha << hadd->IsUnsigned() << std::noboolalpha;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 4429e6e..bdeb261 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -256,30 +256,63 @@
LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
<< " should have been converted to HIR"; \
}
-#define UNREACHABLE_INTRINSICS(Arch) \
-UNREACHABLE_INTRINSIC(Arch, FloatFloatToIntBits) \
-UNREACHABLE_INTRINSIC(Arch, DoubleDoubleToLongBits) \
-UNREACHABLE_INTRINSIC(Arch, FloatIsNaN) \
-UNREACHABLE_INTRINSIC(Arch, DoubleIsNaN) \
-UNREACHABLE_INTRINSIC(Arch, IntegerRotateLeft) \
-UNREACHABLE_INTRINSIC(Arch, LongRotateLeft) \
-UNREACHABLE_INTRINSIC(Arch, IntegerRotateRight) \
-UNREACHABLE_INTRINSIC(Arch, LongRotateRight) \
-UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \
-UNREACHABLE_INTRINSIC(Arch, LongCompare) \
-UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \
-UNREACHABLE_INTRINSIC(Arch, LongSignum) \
-UNREACHABLE_INTRINSIC(Arch, StringCharAt) \
-UNREACHABLE_INTRINSIC(Arch, StringIsEmpty) \
-UNREACHABLE_INTRINSIC(Arch, StringLength) \
-UNREACHABLE_INTRINSIC(Arch, UnsafeLoadFence) \
-UNREACHABLE_INTRINSIC(Arch, UnsafeStoreFence) \
-UNREACHABLE_INTRINSIC(Arch, UnsafeFullFence) \
-UNREACHABLE_INTRINSIC(Arch, VarHandleFullFence) \
-UNREACHABLE_INTRINSIC(Arch, VarHandleAcquireFence) \
-UNREACHABLE_INTRINSIC(Arch, VarHandleReleaseFence) \
-UNREACHABLE_INTRINSIC(Arch, VarHandleLoadLoadFence) \
-UNREACHABLE_INTRINSIC(Arch, VarHandleStoreStoreFence)
+#define UNREACHABLE_INTRINSICS(Arch) \
+UNREACHABLE_INTRINSIC(Arch, FloatFloatToIntBits) \
+UNREACHABLE_INTRINSIC(Arch, DoubleDoubleToLongBits) \
+UNREACHABLE_INTRINSIC(Arch, FloatIsNaN) \
+UNREACHABLE_INTRINSIC(Arch, DoubleIsNaN) \
+UNREACHABLE_INTRINSIC(Arch, IntegerRotateLeft) \
+UNREACHABLE_INTRINSIC(Arch, LongRotateLeft) \
+UNREACHABLE_INTRINSIC(Arch, IntegerRotateRight) \
+UNREACHABLE_INTRINSIC(Arch, LongRotateRight) \
+UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \
+UNREACHABLE_INTRINSIC(Arch, LongCompare) \
+UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \
+UNREACHABLE_INTRINSIC(Arch, LongSignum) \
+UNREACHABLE_INTRINSIC(Arch, StringCharAt) \
+UNREACHABLE_INTRINSIC(Arch, StringIsEmpty) \
+UNREACHABLE_INTRINSIC(Arch, StringLength) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeLoadFence) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeStoreFence) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeFullFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleFullFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleAcquireFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleReleaseFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleLoadLoadFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleStoreStoreFence) \
+UNREACHABLE_INTRINSIC(Arch, MethodHandleInvokeExact) \
+UNREACHABLE_INTRINSIC(Arch, MethodHandleInvoke) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleCompareAndExchange) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleCompareAndExchangeAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleCompareAndExchangeRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleCompareAndSet) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGet) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndAdd) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndAddAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndAddRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseAnd) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseAndAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseAndRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseOr) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseOrAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseOrRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseXor) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseXorAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseXorRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndSet) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndSetAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndSetRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetOpaque) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetVolatile) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleSet) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleSetOpaque) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleSetRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleSetVolatile) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleWeakCompareAndSet) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleWeakCompareAndSetAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleWeakCompareAndSetPlain) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleWeakCompareAndSetRelease)
template <typename IntrinsicLocationsBuilder, typename Codegenerator>
bool IsCallFreeIntrinsic(HInvoke* invoke, Codegenerator* codegen) {
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 8f84796..74de077 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -25,6 +25,8 @@
#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "driver/compiler_driver.h"
#include "linear_order.h"
+#include "mirror/array-inl.h"
+#include "mirror/string.h"
namespace art {
@@ -71,12 +73,25 @@
// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;
-// All current SIMD targets want 16-byte alignment.
-static constexpr size_t kAlignedBase = 16;
-
// No loop unrolling factor (just one copy of the loop-body).
static constexpr uint32_t kNoUnrollingFactor = 1;
+//
+// Static helpers.
+//
+
+// Base alignment for arrays/strings guaranteed by the Android runtime.
+static uint32_t BaseAlignment() {
+ return kObjectAlignment;
+}
+
+// Hidden offset for arrays/strings guaranteed by the Android runtime.
+static uint32_t HiddenOffset(DataType::Type type, bool is_string_char_at) {
+ return is_string_char_at
+ ? mirror::String::ValueOffset().Uint32Value()
+ : mirror::Array::DataOffset(DataType::Size(type)).Uint32Value();
+}
+
// Remove the instruction from the graph. A bit more elaborate than the usual
// instruction removal, since there may be a cycle in the use structure.
static void RemoveFromCycle(HInstruction* instruction) {
@@ -288,7 +303,7 @@
}
// Compute relative vector length based on type difference.
-static size_t GetOtherVL(DataType::Type other_type, DataType::Type vector_type, size_t vl) {
+static uint32_t GetOtherVL(DataType::Type other_type, DataType::Type vector_type, uint32_t vl) {
DCHECK(DataType::IsIntegralType(other_type));
DCHECK(DataType::IsIntegralType(vector_type));
DCHECK_GE(DataType::SizeShift(other_type), DataType::SizeShift(vector_type));
@@ -395,7 +410,7 @@
} else if (reduction->IsVecMax()) {
return HVecReduce::kMax;
}
- LOG(FATAL) << "Unsupported SIMD reduction";
+ LOG(FATAL) << "Unsupported SIMD reduction " << reduction->GetId();
UNREACHABLE();
}
@@ -446,7 +461,8 @@
simplified_(false),
vector_length_(0),
vector_refs_(nullptr),
- vector_peeling_candidate_(nullptr),
+ vector_static_peeling_factor_(0),
+ vector_dynamic_peeling_candidate_(nullptr),
vector_runtime_test_a_(nullptr),
vector_runtime_test_b_(nullptr),
vector_map_(nullptr),
@@ -746,7 +762,8 @@
// Reset vector bookkeeping.
vector_length_ = 0;
vector_refs_->clear();
- vector_peeling_candidate_ = nullptr;
+ vector_static_peeling_factor_ = 0;
+ vector_dynamic_peeling_candidate_ = nullptr;
vector_runtime_test_a_ =
vector_runtime_test_b_= nullptr;
@@ -763,10 +780,17 @@
}
}
- // Does vectorization seem profitable?
- if (!IsVectorizationProfitable(trip_count)) {
- return false;
- }
+ // Prepare alignment analysis:
+ // (1) find desired alignment (SIMD vector size in bytes).
+ // (2) initialize static loop peeling votes (peeling factor that will
+ // make one particular reference aligned), never to exceed (1).
+  // (3) variable to record how many references share the same alignment.
+  // (4) variable to record a suitable candidate for dynamic loop peeling.
+ uint32_t desired_alignment = GetVectorSizeInBytes();
+ DCHECK_LE(desired_alignment, 16u);
+ uint32_t peeling_votes[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ uint32_t max_num_same_alignment = 0;
+ const ArrayReference* peeling_candidate = nullptr;
// Data dependence analysis. Find each pair of references with same type, where
// at least one is a write. Each such pair denotes a possible data dependence.
@@ -774,9 +798,10 @@
// aliased, as well as the property that references either point to the same
// array or to two completely disjoint arrays, i.e., no partial aliasing.
   // Other than a few simple heuristics, no detailed subscript analysis is done.
- // The scan over references also finds a suitable dynamic loop peeling candidate.
- const ArrayReference* candidate = nullptr;
+ // The scan over references also prepares finding a suitable alignment strategy.
for (auto i = vector_refs_->begin(); i != vector_refs_->end(); ++i) {
+ uint32_t num_same_alignment = 0;
+ // Scan over all next references.
for (auto j = i; ++j != vector_refs_->end(); ) {
if (i->type == j->type && (i->lhs || j->lhs)) {
// Found same-typed a[i+x] vs. b[i+y], where at least one is a write.
@@ -790,6 +815,10 @@
if (x != y) {
return false;
}
+ // Count the number of references that have the same alignment (since
+ // base and offset are the same) and where at least one is a write, so
+          // e.g. a[i] = a[i] + b[i] counts a[i] but not b[i].
+ num_same_alignment++;
} else {
// Found a[i+x] vs. b[i+y]. Accept if x == y (at worst loop-independent data dependence).
// Conservatively assume a potential loop-carried data dependence otherwise, avoided by
@@ -808,10 +837,38 @@
}
}
}
- }
+    // Update information for finding a suitable alignment strategy:
+ // (1) update votes for static loop peeling,
+ // (2) update suitable candidate for dynamic loop peeling.
+ Alignment alignment = ComputeAlignment(i->offset, i->type, i->is_string_char_at);
+ if (alignment.Base() >= desired_alignment) {
+ // If the array/string object has a known, sufficient alignment, use the
+ // initial offset to compute the static loop peeling vote (this always
+ // works, since elements have natural alignment).
+ uint32_t offset = alignment.Offset() & (desired_alignment - 1u);
+ uint32_t vote = (offset == 0)
+ ? 0
+ : ((desired_alignment - offset) >> DataType::SizeShift(i->type));
+ DCHECK_LT(vote, 16u);
+ ++peeling_votes[vote];
+ } else if (BaseAlignment() >= desired_alignment &&
+ num_same_alignment > max_num_same_alignment) {
+ // Otherwise, if the array/string object has a known, sufficient alignment
+ // for just the base but with an unknown offset, record the candidate with
+ // the most occurrences for dynamic loop peeling (again, the peeling always
+ // works, since elements have natural alignment).
+ max_num_same_alignment = num_same_alignment;
+ peeling_candidate = &(*i);
+ }
+ } // for i
- // Consider dynamic loop peeling for alignment.
- SetPeelingCandidate(candidate, trip_count);
+ // Find a suitable alignment strategy.
+ SetAlignmentStrategy(peeling_votes, peeling_candidate);
+
+ // Does vectorization seem profitable?
+ if (!IsVectorizationProfitable(trip_count)) {
+ return false;
+ }
// Success!
return true;
@@ -828,9 +885,12 @@
uint32_t unroll = GetUnrollingFactor(block, trip_count);
uint32_t chunk = vector_length_ * unroll;
+ DCHECK(trip_count == 0 || (trip_count >= MaxNumberPeeled() + chunk));
+
// A cleanup loop is needed, at least, for any unknown trip count or
// for a known trip count with remainder iterations after vectorization.
- bool needs_cleanup = trip_count == 0 || (trip_count % chunk) != 0;
+ bool needs_cleanup = trip_count == 0 ||
+ ((trip_count - vector_static_peeling_factor_) % chunk) != 0;
// Adjust vector bookkeeping.
HPhi* main_phi = nullptr;
@@ -844,21 +904,40 @@
DCHECK(induc_type == DataType::Type::kInt32 || induc_type == DataType::Type::kInt64)
<< induc_type;
- // Generate dynamic loop peeling trip count, if needed, under the assumption
- // that the Android runtime guarantees at least "component size" alignment:
- // ptc = (ALIGN - (&a[initial] % ALIGN)) / type-size
+ // Generate the trip count for static or dynamic loop peeling, if needed:
+ // ptc = <peeling factor>;
HInstruction* ptc = nullptr;
- if (vector_peeling_candidate_ != nullptr) {
- DCHECK_LT(vector_length_, trip_count) << "dynamic peeling currently requires known trip count";
- //
- // TODO: Implement this. Compute address of first access memory location and
- // compute peeling factor to obtain kAlignedBase alignment.
- //
- needs_cleanup = true;
+ if (vector_static_peeling_factor_ != 0) {
+ // Static loop peeling for SIMD alignment (using the most suitable
+ // fixed peeling factor found during prior alignment analysis).
+ DCHECK(vector_dynamic_peeling_candidate_ == nullptr);
+ ptc = graph_->GetConstant(induc_type, vector_static_peeling_factor_);
+ } else if (vector_dynamic_peeling_candidate_ != nullptr) {
+ // Dynamic loop peeling for SIMD alignment (using the most suitable
+ // candidate found during prior alignment analysis):
+ // rem = offset % ALIGN; // adjusted as #elements
+ // ptc = rem == 0 ? 0 : (ALIGN - rem);
+ uint32_t shift = DataType::SizeShift(vector_dynamic_peeling_candidate_->type);
+ uint32_t align = GetVectorSizeInBytes() >> shift;
+ uint32_t hidden_offset = HiddenOffset(vector_dynamic_peeling_candidate_->type,
+ vector_dynamic_peeling_candidate_->is_string_char_at);
+ HInstruction* adjusted_offset = graph_->GetConstant(induc_type, hidden_offset >> shift);
+ HInstruction* offset = Insert(preheader, new (global_allocator_) HAdd(
+ induc_type, vector_dynamic_peeling_candidate_->offset, adjusted_offset));
+ HInstruction* rem = Insert(preheader, new (global_allocator_) HAnd(
+ induc_type, offset, graph_->GetConstant(induc_type, align - 1u)));
+ HInstruction* sub = Insert(preheader, new (global_allocator_) HSub(
+ induc_type, graph_->GetConstant(induc_type, align), rem));
+ HInstruction* cond = Insert(preheader, new (global_allocator_) HEqual(
+ rem, graph_->GetConstant(induc_type, 0)));
+ ptc = Insert(preheader, new (global_allocator_) HSelect(
+ cond, graph_->GetConstant(induc_type, 0), sub, kNoDexPc));
+ needs_cleanup = true; // don't know the exact amount
}
// Generate loop control:
// stc = <trip-count>;
+ // ptc = min(stc, ptc);
// vtc = stc - (stc - ptc) % chunk;
// i = 0;
HInstruction* stc = induction_range_.GenerateTripCount(node->loop_info, graph_, preheader);
@@ -867,6 +946,10 @@
DCHECK(IsPowerOfTwo(chunk));
HInstruction* diff = stc;
if (ptc != nullptr) {
+ if (trip_count == 0) {
+ HInstruction* cond = Insert(preheader, new (global_allocator_) HAboveOrEqual(stc, ptc));
+ ptc = Insert(preheader, new (global_allocator_) HSelect(cond, ptc, stc, kNoDexPc));
+ }
diff = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, ptc));
}
HInstruction* rem = Insert(
@@ -889,9 +972,13 @@
needs_cleanup = true;
}
- // Generate dynamic peeling loop for alignment, if needed:
+ // Generate alignment peeling loop, if needed:
// for ( ; i < ptc; i += 1)
// <loop-body>
+ //
+ // NOTE: The alignment forced by the peeling loop is preserved even if data is
+ // moved around during suspend checks, since all analysis was based on
+ // nothing more than the Android runtime alignment conventions.
if (ptc != nullptr) {
vector_mode_ = kSequential;
GenerateNewLoop(node,
@@ -1118,7 +1205,7 @@
GenerateVecSub(index, offset);
GenerateVecMem(instruction, vector_map_->Get(index), nullptr, offset, type);
} else {
- vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ false));
+ vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ false, is_string_char_at));
}
return true;
}
@@ -1144,9 +1231,9 @@
DataType::Type from = conversion->GetInputType();
DataType::Type to = conversion->GetResultType();
if (DataType::IsIntegralType(from) && DataType::IsIntegralType(to)) {
- size_t size_vec = DataType::Size(type);
- size_t size_from = DataType::Size(from);
- size_t size_to = DataType::Size(to);
+ uint32_t size_vec = DataType::Size(type);
+ uint32_t size_from = DataType::Size(from);
+ uint32_t size_to = DataType::Size(to);
// Accept an integral conversion
// (1a) narrowing into vector type, "wider" operations cannot bring in higher order bits, or
// (1b) widening from at least vector type, and
@@ -1325,6 +1412,16 @@
return false;
}
+uint32_t HLoopOptimization::GetVectorSizeInBytes() {
+ switch (compiler_driver_->GetInstructionSet()) {
+ case kArm:
+ case kThumb2:
+ return 8; // 64-bit SIMD
+ default:
+ return 16; // 128-bit SIMD
+ }
+}
+
bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) {
const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures();
switch (compiler_driver_->GetInstructionSet()) {
@@ -1537,12 +1634,13 @@
HInstruction* vector = nullptr;
if (vector_mode_ == kVector) {
// Vector store or load.
+ bool is_string_char_at = false;
HInstruction* base = org->InputAt(0);
if (opb != nullptr) {
vector = new (global_allocator_) HVecStore(
global_allocator_, base, opa, opb, type, org->GetSideEffects(), vector_length_, dex_pc);
} else {
- bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
+ is_string_char_at = org->AsArrayGet()->IsStringCharAt();
vector = new (global_allocator_) HVecLoad(global_allocator_,
base,
opa,
@@ -1552,11 +1650,17 @@
is_string_char_at,
dex_pc);
}
- // Known dynamically enforced alignment?
- if (vector_peeling_candidate_ != nullptr &&
- vector_peeling_candidate_->base == base &&
- vector_peeling_candidate_->offset == offset) {
- vector->AsVecMemoryOperation()->SetAlignment(Alignment(kAlignedBase, 0));
+ // Known (forced/adjusted/original) alignment?
+ if (vector_dynamic_peeling_candidate_ != nullptr) {
+ if (vector_dynamic_peeling_candidate_->offset == offset && // TODO: diffs too?
+ DataType::Size(vector_dynamic_peeling_candidate_->type) == DataType::Size(type) &&
+ vector_dynamic_peeling_candidate_->is_string_char_at == is_string_char_at) {
+ vector->AsVecMemoryOperation()->SetAlignment( // forced
+ Alignment(GetVectorSizeInBytes(), 0));
+ }
+ } else {
+ vector->AsVecMemoryOperation()->SetAlignment( // adjusted/original
+ ComputeAlignment(offset, type, is_string_char_at, vector_static_peeling_factor_));
}
} else {
// Scalar store or load.
@@ -1612,7 +1716,7 @@
// a [initial, initial, .., initial] vector for min/max.
HVecOperation* red_vector = new_red->AsVecOperation();
HVecReduce::ReductionKind kind = GetReductionKind(red_vector);
- size_t vector_length = red_vector->GetVectorLength();
+ uint32_t vector_length = red_vector->GetVectorLength();
DataType::Type type = red_vector->GetPackedType();
if (kind == HVecReduce::ReductionKind::kSum) {
new_init = Insert(vector_preheader_,
@@ -1644,9 +1748,9 @@
HInstruction* HLoopOptimization::ReduceAndExtractIfNeeded(HInstruction* instruction) {
if (instruction->IsPhi()) {
HInstruction* input = instruction->InputAt(1);
- if (input->IsVecOperation()) {
+ if (input->IsVecOperation() && !input->IsVecExtractScalar()) {
HVecOperation* input_vector = input->AsVecOperation();
- size_t vector_length = input_vector->GetVectorLength();
+ uint32_t vector_length = input_vector->GetVectorLength();
DataType::Type type = input_vector->GetPackedType();
HVecReduce::ReductionKind kind = GetReductionKind(input_vector);
HBasicBlock* exit = instruction->GetBlock()->GetSuccessors()[0];
@@ -1774,7 +1878,7 @@
break;
}
default:
- LOG(FATAL) << "Unsupported SIMD intrinsic";
+ LOG(FATAL) << "Unsupported SIMD intrinsic " << org->GetId();
UNREACHABLE();
} // switch invoke
} else {
@@ -2005,35 +2109,72 @@
// Vectorization heuristics.
//
+Alignment HLoopOptimization::ComputeAlignment(HInstruction* offset,
+ DataType::Type type,
+ bool is_string_char_at,
+ uint32_t peeling) {
+ // Combine the alignment and hidden offset that is guaranteed by
+ // the Android runtime with a known starting index adjusted as bytes.
+ int64_t value = 0;
+ if (IsInt64AndGet(offset, /*out*/ &value)) {
+ uint32_t start_offset =
+ HiddenOffset(type, is_string_char_at) + (value + peeling) * DataType::Size(type);
+ return Alignment(BaseAlignment(), start_offset & (BaseAlignment() - 1u));
+ }
+ // Otherwise, the Android runtime guarantees at least natural alignment.
+ return Alignment(DataType::Size(type), 0);
+}
+
+void HLoopOptimization::SetAlignmentStrategy(uint32_t peeling_votes[],
+ const ArrayReference* peeling_candidate) {
+ // Current heuristic: pick the best static loop peeling factor, if any,
+ // or otherwise use dynamic loop peeling on suggested peeling candidate.
+ uint32_t max_vote = 0;
+ for (int32_t i = 0; i < 16; i++) {
+ if (peeling_votes[i] > max_vote) {
+ max_vote = peeling_votes[i];
+ vector_static_peeling_factor_ = i;
+ }
+ }
+ if (max_vote == 0) {
+ vector_dynamic_peeling_candidate_ = peeling_candidate;
+ }
+}
+
+uint32_t HLoopOptimization::MaxNumberPeeled() {
+ if (vector_dynamic_peeling_candidate_ != nullptr) {
+ return vector_length_ - 1u; // worst-case
+ }
+ return vector_static_peeling_factor_; // known exactly
+}
+
bool HLoopOptimization::IsVectorizationProfitable(int64_t trip_count) {
- // Current heuristic: non-empty body with sufficient number
- // of iterations (if known).
+ // Current heuristic: non-empty body with sufficient number of iterations (if known).
// TODO: refine by looking at e.g. operation count, alignment, etc.
+  // TODO: trip count is really an unsigned entity, provided the guarding test
+  // is satisfied; deal with this more carefully later.
+ uint32_t max_peel = MaxNumberPeeled();
if (vector_length_ == 0) {
return false; // nothing found
- } else if (0 < trip_count && trip_count < vector_length_) {
+ } else if (trip_count < 0) {
+ return false; // guard against non-taken/large
+ } else if ((0 < trip_count) && (trip_count < (vector_length_ + max_peel))) {
return false; // insufficient iterations
}
return true;
}
-void HLoopOptimization::SetPeelingCandidate(const ArrayReference* candidate,
- int64_t trip_count ATTRIBUTE_UNUSED) {
- // Current heuristic: none.
- // TODO: implement
- vector_peeling_candidate_ = candidate;
-}
-
static constexpr uint32_t ARM64_SIMD_MAXIMUM_UNROLL_FACTOR = 8;
static constexpr uint32_t ARM64_SIMD_HEURISTIC_MAX_BODY_SIZE = 50;
uint32_t HLoopOptimization::GetUnrollingFactor(HBasicBlock* block, int64_t trip_count) {
+ uint32_t max_peel = MaxNumberPeeled();
switch (compiler_driver_->GetInstructionSet()) {
case kArm64: {
// Don't unroll with insufficient iterations.
// TODO: Unroll loops with unknown trip count.
DCHECK_NE(vector_length_, 0u);
- if (trip_count < 2 * vector_length_) {
+ if (trip_count < (2 * vector_length_ + max_peel)) {
return kNoUnrollingFactor;
}
// Don't unroll for large loop body size.
@@ -2045,7 +2186,7 @@
// - At least one iteration of the transformed loop should be executed.
// - The loop body shouldn't be "too big" (heuristic).
uint32_t uf1 = ARM64_SIMD_HEURISTIC_MAX_BODY_SIZE / instruction_count;
- uint32_t uf2 = trip_count / vector_length_;
+ uint32_t uf2 = (trip_count - max_peel) / vector_length_;
uint32_t unroll_factor =
TruncToPowerOfTwo(std::min({uf1, uf2, ARM64_SIMD_MAXIMUM_UNROLL_FACTOR}));
DCHECK_GE(unroll_factor, 1u);
@@ -2112,7 +2253,7 @@
HInstruction* reduction = inputs[1];
if (HasReductionFormat(reduction, phi)) {
HLoopInformation* loop_info = phi->GetBlock()->GetLoopInformation();
- int32_t use_count = 0;
+ uint32_t use_count = 0;
bool single_use_inside_loop =
// Reduction update only used by phi.
reduction->GetUses().HasExactlyOneElement() &&
@@ -2205,7 +2346,7 @@
bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
bool collect_loop_uses,
- /*out*/ int32_t* use_count) {
+ /*out*/ uint32_t* use_count) {
// Deal with regular uses.
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
@@ -2276,7 +2417,7 @@
// Assigning the last value is always successful if there are no uses.
// Otherwise, it succeeds in a no early-exit loop by generating the
// proper last value assignment.
- int32_t use_count = 0;
+ uint32_t use_count = 0;
return IsOnlyUsedAfterLoop(loop_info, instruction, collect_loop_uses, &use_count) &&
(use_count == 0 ||
(!IsEarlyExit(loop_info) && TryReplaceWithLastValue(loop_info, instruction, block)));
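
Note on the alignment analysis above: for a reference whose base object has a statically known, sufficient alignment, the static peeling vote is simply the number of scalar iterations needed to bring the first accessed element onto a SIMD-width boundary. A minimal standalone sketch of that arithmetic follows (hypothetical helper name and example values, not the ART implementation):

#include <cstdint>
#include <iostream>

// Sketch only: how many scalar iterations to peel so that the first vector
// access starts on a `desired`-byte boundary (desired must be a power of two,
// and elements are naturally aligned, so the division is exact).
static uint32_t PeelingVote(uint32_t start_offset_bytes,
                            uint32_t element_size,
                            uint32_t desired) {
  uint32_t misalignment = start_offset_bytes & (desired - 1u);
  return misalignment == 0u ? 0u : (desired - misalignment) / element_size;
}

int main() {
  // int32_t elements starting 12 bytes past a 16-byte aligned base address:
  // peeling one iteration makes the remaining accesses 16-byte aligned.
  std::cout << PeelingVote(/*start_offset_bytes=*/ 12u, /*element_size=*/ 4u, /*desired=*/ 16u)
            << std::endl;
  return 0;
}
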
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 768fe55..51e0a98 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -94,20 +94,24 @@
* Representation of a unit-stride array reference.
*/
struct ArrayReference {
- ArrayReference(HInstruction* b, HInstruction* o, DataType::Type t, bool l)
- : base(b), offset(o), type(t), lhs(l) { }
+ ArrayReference(HInstruction* b, HInstruction* o, DataType::Type t, bool l, bool c = false)
+ : base(b), offset(o), type(t), lhs(l), is_string_char_at(c) { }
bool operator<(const ArrayReference& other) const {
return
(base < other.base) ||
(base == other.base &&
(offset < other.offset || (offset == other.offset &&
(type < other.type ||
- (type == other.type && lhs < other.lhs)))));
+ (type == other.type &&
+ (lhs < other.lhs ||
+ (lhs == other.lhs &&
+ is_string_char_at < other.is_string_char_at)))))));
}
- HInstruction* base; // base address
- HInstruction* offset; // offset + i
- DataType::Type type; // component type
- bool lhs; // def/use
+ HInstruction* base; // base address
+ HInstruction* offset; // offset + i
+ DataType::Type type; // component type
+ bool lhs; // def/use
+ bool is_string_char_at; // compressed string read
};
//
@@ -152,6 +156,7 @@
bool generate_code,
DataType::Type type,
uint64_t restrictions);
+ uint32_t GetVectorSizeInBytes();
bool TrySetVectorType(DataType::Type type, /*out*/ uint64_t* restrictions);
bool TrySetVectorLength(uint32_t length);
void GenerateVecInv(HInstruction* org, DataType::Type type);
@@ -183,8 +188,14 @@
uint64_t restrictions);
// Vectorization heuristics.
+ Alignment ComputeAlignment(HInstruction* offset,
+ DataType::Type type,
+ bool is_string_char_at,
+ uint32_t peeling = 0);
+ void SetAlignmentStrategy(uint32_t peeling_votes[],
+ const ArrayReference* peeling_candidate);
+ uint32_t MaxNumberPeeled();
bool IsVectorizationProfitable(int64_t trip_count);
- void SetPeelingCandidate(const ArrayReference* candidate, int64_t trip_count);
uint32_t GetUnrollingFactor(HBasicBlock* block, int64_t trip_count);
//
@@ -202,7 +213,7 @@
bool IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
bool collect_loop_uses,
- /*out*/ int32_t* use_count);
+ /*out*/ uint32_t* use_count);
bool IsUsedOutsideLoop(HLoopInformation* loop_info,
HInstruction* instruction);
bool TryReplaceWithLastValue(HLoopInformation* loop_info,
@@ -254,8 +265,9 @@
// Contents reside in phase-local heap memory.
ScopedArenaSet<ArrayReference>* vector_refs_;
- // Dynamic loop peeling candidate for alignment.
- const ArrayReference* vector_peeling_candidate_;
+ // Static or dynamic loop peeling for alignment.
+ uint32_t vector_static_peeling_factor_;
+ const ArrayReference* vector_dynamic_peeling_candidate_;
// Dynamic data dependence test of the form a != b.
HInstruction* vector_runtime_test_a_;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index affe639..1cd9142 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -43,6 +43,7 @@
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/dumpable.h"
+#include "base/file_utils.h"
#include "base/macros.h"
#include "base/scoped_flock.h"
#include "base/stl_util.h"
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 0a50681..a02fbf8 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -24,6 +24,7 @@
#include "common_runtime_test.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/unix_file/fd_file.h"
diff --git a/dex2oat/linker/elf_writer_test.cc b/dex2oat/linker/elf_writer_test.cc
index 9f8ed77..8427e7b 100644
--- a/dex2oat/linker/elf_writer_test.cc
+++ b/dex2oat/linker/elf_writer_test.cc
@@ -16,6 +16,7 @@
#include "elf_file.h"
+#include "base/file_utils.h"
#include "base/unix_file/fd_file.h"
#include "common_compiler_test.h"
#include "elf_file.h"
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index dc570da..d3d42b9 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -26,6 +26,7 @@
#include "android-base/stringprintf.h"
#include "art_method-inl.h"
+#include "base/file_utils.h"
#include "base/unix_file/fd_file.h"
#include "class_linker-inl.h"
#include "common_compiler_test.h"
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 8c4ee6e..23c3a5c 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -167,10 +167,17 @@
std::vector<MethodId*>* method_ids,
std::vector<FieldId*>* field_ids) {
bool has_id = false;
- for (const Instruction& instruction : code->Instructions()) {
- CHECK_GT(instruction.SizeInCodeUnits(), 0u);
+ IterationRange<DexInstructionIterator> instructions = code->Instructions();
+ SafeDexInstructionIterator it(instructions.begin(), instructions.end());
+ for (; !it.IsErrorState() && it < instructions.end(); ++it) {
+ // In case the instruction goes past the end of the code item, make sure to not process it.
+ SafeDexInstructionIterator next = it;
+ ++next;
+ if (next.IsErrorState() || next > instructions.end()) {
+ break;
+ }
has_id |= GetIdFromInstruction(collections,
- &instruction,
+ it.Inst(),
type_ids,
string_ids,
method_ids,
diff --git a/dexlayout/dexdiag_test.cc b/dexlayout/dexdiag_test.cc
index 6fcd6ff..9927576 100644
--- a/dexlayout/dexdiag_test.cc
+++ b/dexlayout/dexdiag_test.cc
@@ -19,6 +19,7 @@
#include "common_runtime_test.h"
+#include "base/file_utils.h"
#include "exec_utils.h"
#include "oat_file.h"
#include "os.h"
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index f34e7ec..38d3c6e 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -317,6 +317,30 @@
return true;
}
+ template <typename Mutator>
+ bool MutateDexFile(File* output_dex, const std::string& input_jar, const Mutator& mutator) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ std::string error_msg;
+ CHECK(DexFileLoader::Open(input_jar.c_str(),
+ input_jar.c_str(),
+ /*verify*/ true,
+ /*verify_checksum*/ true,
+ &error_msg,
+ &dex_files)) << error_msg;
+ EXPECT_EQ(dex_files.size(), 1u) << "Only one input dex is supported";
+ for (const std::unique_ptr<const DexFile>& dex : dex_files) {
+ CHECK(dex->EnableWrite()) << "Failed to enable write";
+ mutator(const_cast<DexFile*>(dex.get()));
+ if (!output_dex->WriteFully(dex->Begin(), dex->Size())) {
+ return false;
+ }
+ }
+ if (output_dex->Flush() != 0) {
+ PLOG(FATAL) << "Could not flush the output file.";
+ }
+ return true;
+ }
+
// Create a profile with some subset of methods and classes.
void CreateProfile(const std::string& input_dex,
const std::string& out_profile,
@@ -518,8 +542,10 @@
const char* dex_filename,
ScratchFile* profile_file,
std::vector<std::string>& dexlayout_exec_argv) {
- WriteBase64ToFile(dex_filename, dex_file->GetFile());
- EXPECT_EQ(dex_file->GetFile()->Flush(), 0);
+ if (dex_filename != nullptr) {
+ WriteBase64ToFile(dex_filename, dex_file->GetFile());
+ EXPECT_EQ(dex_file->GetFile()->Flush(), 0);
+ }
if (profile_file != nullptr) {
CreateProfile(dex_file->GetFilename(), profile_file->GetFilename(), dex_file->GetFilename());
}
@@ -673,4 +699,58 @@
dexlayout_exec_argv));
}
+// Test that instructions that go past the end of the code items don't cause crashes.
+TEST_F(DexLayoutTest, CodeItemOverrun) {
+ ScratchFile temp_dex;
+ MutateDexFile(temp_dex.GetFile(), GetTestDexFileName("ManyMethods"), [] (DexFile* dex) {
+ bool mutated_successfully = false;
+ // Change the dex instructions to make an opcode that spans past the end of the code item.
+ for (size_t i = 0; i < dex->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& def = dex->GetClassDef(i);
+ const uint8_t* data = dex->GetClassData(def);
+ if (data == nullptr) {
+ continue;
+ }
+ ClassDataItemIterator it(*dex, data);
+ it.SkipAllFields();
+ while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
+ DexFile::CodeItem* item = const_cast<DexFile::CodeItem*>(it.GetMethodCodeItem());
+ if (item != nullptr) {
+ IterationRange<DexInstructionIterator> instructions = item->Instructions();
+ if (instructions.begin() != instructions.end()) {
+ DexInstructionIterator last_instruction = instructions.begin();
+ for (auto dex_it = instructions.begin(); dex_it != instructions.end(); ++dex_it) {
+ last_instruction = dex_it;
+ }
+ if (last_instruction->SizeInCodeUnits() == 1) {
+ // Set the opcode to something that will go past the end of the code item.
+ const_cast<Instruction*>(last_instruction.Inst())->SetOpcode(
+ Instruction::CONST_STRING_JUMBO);
+ mutated_successfully = true;
+ // Test that the safe iterator doesn't go past the end.
+ SafeDexInstructionIterator it2(instructions.begin(), instructions.end());
+ while (!it2.IsErrorState()) {
+ ++it2;
+ }
+ EXPECT_TRUE(it2 == last_instruction);
+ EXPECT_TRUE(it2 < instructions.end());
+ }
+ }
+ }
+ it.Next();
+ }
+ }
+ CHECK(mutated_successfully)
+ << "Failed to find candidate code item with only one code unit in last instruction.";
+ });
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-i", "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ /*dex_filename*/ nullptr,
+ nullptr /* profile_file */,
+ dexlayout_exec_argv));
+}
+
} // namespace art
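
Both the dex_ir.cc iterator change and the CodeItemOverrun test above guard against a final instruction whose encoded size runs past the end of the code item. A minimal sketch of that guard over generic variable-length records (hypothetical encoding and names, not the dex instruction API):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Sketch only: walk variable-length records, but refuse to process a record
// whose declared size would run past the end of the buffer (the same
// peek-before-processing idea as the SafeDexInstructionIterator loop above).
static size_t CountValidRecords(const std::vector<uint8_t>& buf) {
  size_t count = 0;
  size_t pos = 0;
  while (pos < buf.size()) {
    size_t record_size = static_cast<size_t>(buf[pos]) + 1u;  // first byte = payload length
    if (pos + record_size > buf.size()) {
      break;  // truncated final record: stop instead of reading past the end
    }
    ++count;
    pos += record_size;
  }
  return count;
}

int main() {
  // One complete record (payload 0xAA 0xBB) followed by a truncated one.
  std::vector<uint8_t> buf = {2, 0xAA, 0xBB, 3, 0x01};
  std::cout << CountValidRecords(buf) << std::endl;  // prints 1
  return 0;
}
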
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index 08d38d5..2c57e40 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -18,7 +18,9 @@
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
+#include "base/file_utils.h"
#include "compiler_filter.h"
+#include "class_loader_context.h"
#include "dex_file.h"
#include "noop_compiler_callbacks.h"
#include "oat_file_assistant.h"
@@ -175,7 +177,15 @@
oat_fd_ = std::stoi(option.substr(strlen("--oat-fd=")).ToString(), nullptr, 0);
} else if (option.starts_with("--vdex-fd")) {
vdex_fd_ = std::stoi(option.substr(strlen("--vdex-fd=")).ToString(), nullptr, 0);
- } else { Usage("Unknown argument '%s'", option.data()); }
+ } else if (option.starts_with("--class-loader-context=")) {
+ std::string context_str = option.substr(strlen("--class-loader-context=")).ToString();
+ class_loader_context_ = ClassLoaderContext::Create(context_str);
+ if (class_loader_context_ == nullptr) {
+ Usage("Invalid --class-loader-context '%s'", context_str.c_str());
+ }
+ } else {
+ Usage("Unknown argument '%s'", option.data());
+ }
}
if (image_.empty()) {
@@ -255,9 +265,8 @@
return kNoDexOptNeeded;
}
- // TODO(calin): Pass the class loader context as an argument to dexoptanalyzer. b/62269291.
int dexoptNeeded = oat_file_assistant->GetDexOptNeeded(
- compiler_filter_, assume_profile_changed_, downgrade_);
+ compiler_filter_, assume_profile_changed_, downgrade_, class_loader_context_.get());
     // Convert OatFileAssistant codes to dexoptanalyzer codes.
switch (dexoptNeeded) {
@@ -280,6 +289,7 @@
std::string dex_file_;
InstructionSet isa_;
CompilerFilter::Filter compiler_filter_;
+ std::unique_ptr<ClassLoaderContext> class_loader_context_;
bool assume_profile_changed_;
bool downgrade_;
std::string image_;
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index 52fe973..d0f05d9 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -24,6 +24,7 @@
#include "android-base/strings.h"
#include "arch/instruction_set.h"
+#include "base/file_utils.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
#include "exec_utils.h"
diff --git a/openjdkjvmti/Android.bp b/openjdkjvmti/Android.bp
index eebfec4..c6090ef 100644
--- a/openjdkjvmti/Android.bp
+++ b/openjdkjvmti/Android.bp
@@ -24,6 +24,7 @@
defaults: ["art_defaults"],
host_supported: true,
srcs: [
+ "deopt_manager.cc",
"events.cc",
"fixed_up_dex_file.cc",
"object_tagging.cc",
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index c2584e6..5f726b1 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -71,6 +71,7 @@
namespace openjdkjvmti {
EventHandler gEventHandler;
+DeoptManager gDeoptManager;
#define ENSURE_NON_NULL(n) \
do { \
@@ -1711,6 +1712,7 @@
extern "C" bool ArtPlugin_Initialize() {
art::Runtime* runtime = art::Runtime::Current();
+ gDeoptManager.Setup();
if (runtime->IsStarted()) {
PhaseUtil::SetToLive();
} else {
@@ -1731,6 +1733,7 @@
extern "C" bool ArtPlugin_Deinitialize() {
gEventHandler.Shutdown();
+ gDeoptManager.Shutdown();
PhaseUtil::Unregister();
ThreadUtil::Unregister();
ClassUtil::Unregister();
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index 3edefaf..1263460 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -39,6 +39,7 @@
#include <jni.h>
+#include "deopt_manager.h"
#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -60,9 +61,6 @@
class ObjectTagTable;
-// Make sure that the DEFAULT_MUTEX_ACQUIRED_AFTER macro works.
-using art::Locks;
-
// A structure that is a jvmtiEnv with additional information for the runtime.
struct ArtJvmTiEnv : public jvmtiEnv {
art::JavaVMExt* art_vm;
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
new file mode 100644
index 0000000..f843054
--- /dev/null
+++ b/openjdkjvmti/deopt_manager.cc
@@ -0,0 +1,322 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <functional>
+
+#include "deopt_manager.h"
+
+#include "art_jvmti.h"
+#include "art_method-inl.h"
+#include "base/enums.h"
+#include "base/mutex-inl.h"
+#include "dex_file_annotations.h"
+#include "events-inl.h"
+#include "jni_internal.h"
+#include "mirror/class-inl.h"
+#include "mirror/object_array-inl.h"
+#include "modifiers.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "runtime_callbacks.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-current-inl.h"
+#include "thread_list.h"
+#include "ti_phase.h"
+
+namespace openjdkjvmti {
+
+// TODO We should make this much more selective in the future so we only return true when we
+// actually care about the method (i.e. had locals changed, have breakpoints, etc.). For now though
+// we can just assume that we care as long as we are loaded at all.
+//
+// Even if we don't keep track of this at the method level we might want to keep track of it at the
+// level of enabled capabilities.
+bool JvmtiMethodInspectionCallback::IsMethodBeingInspected(
+ art::ArtMethod* method ATTRIBUTE_UNUSED) {
+ return true;
+}
+
+bool JvmtiMethodInspectionCallback::IsMethodSafeToJit(art::ArtMethod* method) {
+ return !manager_->MethodHasBreakpoints(method);
+}
+
+DeoptManager::DeoptManager()
+ : deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock"),
+ deoptimization_condition_("JVMTI_DeoptimizationCondition", deoptimization_status_lock_),
+ performing_deoptimization_(false),
+ global_deopt_count_(0),
+ deopter_count_(0),
+ inspection_callback_(this) { }
+
+void DeoptManager::Setup() {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Add method Inspection Callback");
+ art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
+ callbacks->AddMethodInspectionCallback(&inspection_callback_);
+}
+
+void DeoptManager::Shutdown() {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("remove method Inspection Callback");
+ art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
+ callbacks->RemoveMethodInspectionCallback(&inspection_callback_);
+}
+
+bool DeoptManager::MethodHasBreakpoints(art::ArtMethod* method) {
+ art::MutexLock lk(art::Thread::Current(), deoptimization_status_lock_);
+ return MethodHasBreakpointsLocked(method);
+}
+
+bool DeoptManager::MethodHasBreakpointsLocked(art::ArtMethod* method) {
+ if (deopter_count_ == 0) {
+ return false;
+ }
+ auto elem = breakpoint_status_.find(method);
+ return elem != breakpoint_status_.end() && elem->second != 0;
+}
+
+void DeoptManager::RemoveDeoptimizeAllMethods() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ RemoveDeoptimizeAllMethodsLocked(self);
+}
+
+void DeoptManager::AddDeoptimizeAllMethods() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ AddDeoptimizeAllMethodsLocked(self);
+}
+
+void DeoptManager::AddMethodBreakpoint(art::ArtMethod* method) {
+ DCHECK(method->IsInvokable());
+ DCHECK(!method->IsProxyMethod()) << method->PrettyMethod();
+ DCHECK(!method->IsNative()) << method->PrettyMethod();
+
+ art::Thread* self = art::Thread::Current();
+ method = method->GetCanonicalMethod();
+ bool is_default = method->IsDefault();
+
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+
+  DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
+
+ if (MethodHasBreakpointsLocked(method)) {
+ // Don't need to do anything extra.
+ breakpoint_status_[method]++;
+ // Another thread might be deoptimizing the very method we just added new breakpoints for. Wait
+ // for any deopts to finish before moving on.
+ WaitForDeoptimizationToFinish(self);
+ return;
+ }
+ breakpoint_status_[method] = 1;
+ auto instrumentation = art::Runtime::Current()->GetInstrumentation();
+ if (instrumentation->IsForcedInterpretOnly()) {
+ // We are already interpreting everything so no need to do anything.
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ return;
+ } else if (is_default) {
+ AddDeoptimizeAllMethodsLocked(self);
+ } else {
+ PerformLimitedDeoptimization(self, method);
+ }
+}
+
+void DeoptManager::RemoveMethodBreakpoint(art::ArtMethod* method) {
+ DCHECK(method->IsInvokable()) << method->PrettyMethod();
+ DCHECK(!method->IsProxyMethod()) << method->PrettyMethod();
+ DCHECK(!method->IsNative()) << method->PrettyMethod();
+
+ art::Thread* self = art::Thread::Current();
+ method = method->GetCanonicalMethod();
+ bool is_default = method->IsDefault();
+
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ // Ideally we should do a ScopedSuspendAll right here to get the full mutator_lock_ that we might
+  // need, but since that is very heavy we will instead just use a condition variable to make sure we
+ // don't race with ourselves.
+ deoptimization_status_lock_.ExclusiveLock(self);
+
+  DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
+ DCHECK(MethodHasBreakpointsLocked(method)) << "Breakpoint on a method was removed without "
+ << "breakpoints present!";
+ auto instrumentation = art::Runtime::Current()->GetInstrumentation();
+ breakpoint_status_[method] -= 1;
+ if (UNLIKELY(instrumentation->IsForcedInterpretOnly())) {
+ // We don't need to do anything since we are interpreting everything anyway.
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ return;
+ } else if (breakpoint_status_[method] == 0) {
+ if (UNLIKELY(is_default)) {
+ RemoveDeoptimizeAllMethodsLocked(self);
+ } else {
+ PerformLimitedUndeoptimization(self, method);
+ }
+ } else {
+ // Another thread might be deoptimizing the very methods we just removed breakpoints from. Wait
+ // for any deopts to finish before moving on.
+ WaitForDeoptimizationToFinish(self);
+ }
+}
+
+void DeoptManager::WaitForDeoptimizationToFinishLocked(art::Thread* self) {
+ while (performing_deoptimization_) {
+ deoptimization_condition_.Wait(self);
+ }
+}
+
+void DeoptManager::WaitForDeoptimizationToFinish(art::Thread* self) {
+ WaitForDeoptimizationToFinishLocked(self);
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+}
+
+class ScopedDeoptimizationContext : public art::ValueObject {
+ public:
+ ScopedDeoptimizationContext(art::Thread* self, DeoptManager* deopt)
+ RELEASE(deopt->deoptimization_status_lock_)
+ ACQUIRE(art::Locks::mutator_lock_)
+ ACQUIRE(art::Roles::uninterruptible_)
+ : self_(self), deopt_(deopt), uninterruptible_cause_(nullptr) {
+ deopt_->WaitForDeoptimizationToFinishLocked(self_);
+ DCHECK(!deopt->performing_deoptimization_)
+ << "Already performing deoptimization on another thread!";
+ // Use performing_deoptimization_ to keep track of the lock.
+ deopt_->performing_deoptimization_ = true;
+ deopt_->deoptimization_status_lock_.Unlock(self_);
+    art::Runtime::Current()->GetThreadList()->SuspendAll("JVMTI Deoptimizing methods",
+ /*long_suspend*/ false);
+ uninterruptible_cause_ = self_->StartAssertNoThreadSuspension("JVMTI deoptimizing methods");
+ }
+
+ ~ScopedDeoptimizationContext()
+ RELEASE(art::Locks::mutator_lock_)
+ RELEASE(art::Roles::uninterruptible_) {
+ // Can be suspended again.
+ self_->EndAssertNoThreadSuspension(uninterruptible_cause_);
+ // Release the mutator lock.
+ art::Runtime::Current()->GetThreadList()->ResumeAll();
+ // Let other threads know it's fine to proceed.
+ art::MutexLock lk(self_, deopt_->deoptimization_status_lock_);
+ deopt_->performing_deoptimization_ = false;
+ deopt_->deoptimization_condition_.Broadcast(self_);
+ }
+
+ private:
+ art::Thread* self_;
+ DeoptManager* deopt_;
+ const char* uninterruptible_cause_;
+};
+
+void DeoptManager::AddDeoptimizeAllMethodsLocked(art::Thread* self) {
+ global_deopt_count_++;
+ if (global_deopt_count_ == 1) {
+ PerformGlobalDeoptimization(self);
+ } else {
+ WaitForDeoptimizationToFinish(self);
+ }
+}
+
+void DeoptManager::RemoveDeoptimizeAllMethodsLocked(art::Thread* self) {
+  DCHECK_GT(global_deopt_count_, 0u) << "Request to remove non-existent global deoptimization!";
+ global_deopt_count_--;
+ if (global_deopt_count_ == 0) {
+ PerformGlobalUndeoptimization(self);
+ } else {
+ WaitForDeoptimizationToFinish(self);
+ }
+}
+
+void DeoptManager::PerformLimitedDeoptimization(art::Thread* self, art::ArtMethod* method) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->Deoptimize(method);
+}
+
+void DeoptManager::PerformLimitedUndeoptimization(art::Thread* self, art::ArtMethod* method) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->Undeoptimize(method);
+}
+
+void DeoptManager::PerformGlobalDeoptimization(art::Thread* self) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->DeoptimizeEverything(
+ kDeoptManagerInstrumentationKey);
+}
+
+void DeoptManager::PerformGlobalUndeoptimization(art::Thread* self) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->UndeoptimizeEverything(
+ kDeoptManagerInstrumentationKey);
+}
+
+
+void DeoptManager::RemoveDeoptimizationRequester() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadStateChange sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ DCHECK_GT(deopter_count_, 0u) << "Removing deoptimization requester without any being present";
+ deopter_count_--;
+ if (deopter_count_ == 0) {
+ ScopedDeoptimizationContext sdc(self, this);
+ // TODO Give this a real key.
+ art::Runtime::Current()->GetInstrumentation()->DisableDeoptimization("");
+ return;
+ } else {
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ }
+}
+
+void DeoptManager::AddDeoptimizationRequester() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadStateChange stsc(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ deopter_count_++;
+ if (deopter_count_ == 1) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->EnableDeoptimization();
+ return;
+ } else {
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ }
+}
+
+void DeoptManager::DeoptimizeThread(art::Thread* target) {
+ art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
+}
+
+extern DeoptManager gDeoptManager;
+DeoptManager* DeoptManager::Get() {
+ return &gDeoptManager;
+}
+
+} // namespace openjdkjvmti
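
The global-deoptimization bookkeeping in deopt_manager.cc above is a reference-counting pattern: only the 0 -> 1 transition performs the global deoptimization and only the 1 -> 0 transition undoes it; nested requests merely adjust the count. A minimal sketch of that pattern, with a hypothetical class name and the locking and instrumentation calls stripped out:

#include <cstdint>
#include <iostream>

// Sketch only: reference-counted global deoptimization. The real code does the
// transitions under deoptimization_status_lock_ and calls into the ART
// instrumentation interface instead of printing.
class GlobalDeoptCounter {
 public:
  void Add() {
    if (++count_ == 1u) {
      std::cout << "deoptimize everything" << std::endl;    // 0 -> 1: do the work
    }
  }
  void Remove() {
    if (count_ > 0u && --count_ == 0u) {
      std::cout << "undeoptimize everything" << std::endl;  // 1 -> 0: undo the work
    }
  }
 private:
  uint32_t count_ = 0u;
};

int main() {
  GlobalDeoptCounter deopt;
  deopt.Add();     // first requester: everything is deoptimized
  deopt.Add();     // nested requester: count only
  deopt.Remove();  // one requester still active
  deopt.Remove();  // last requester gone: everything is undeoptimized
  return 0;
}
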
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
new file mode 100644
index 0000000..b265fa8
--- /dev/null
+++ b/openjdkjvmti/deopt_manager.h
@@ -0,0 +1,168 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
+#define ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
+
+#include <unordered_map>
+
+#include "jni.h"
+#include "jvmti.h"
+
+#include "base/mutex.h"
+#include "runtime_callbacks.h"
+#include "ti_breakpoint.h"
+
+namespace art {
+class ArtMethod;
+namespace mirror {
+class Class;
+} // namespace mirror
+} // namespace art
+
+namespace openjdkjvmti {
+
+class DeoptManager;
+
+struct JvmtiMethodInspectionCallback : public art::MethodInspectionCallback {
+ public:
+ explicit JvmtiMethodInspectionCallback(DeoptManager* manager) : manager_(manager) {}
+
+ bool IsMethodBeingInspected(art::ArtMethod* method)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ bool IsMethodSafeToJit(art::ArtMethod* method)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ private:
+ DeoptManager* manager_;
+};
+
+class ScopedDeoptimizationContext;
+
+class DeoptManager {
+ public:
+ DeoptManager();
+
+ void Setup();
+ void Shutdown();
+
+ void RemoveDeoptimizationRequester() REQUIRES(!deoptimization_status_lock_,
+ !art::Roles::uninterruptible_);
+ void AddDeoptimizationRequester() REQUIRES(!deoptimization_status_lock_,
+ !art::Roles::uninterruptible_);
+ bool MethodHasBreakpoints(art::ArtMethod* method)
+ REQUIRES(!deoptimization_status_lock_);
+
+ void RemoveMethodBreakpoint(art::ArtMethod* method)
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void AddMethodBreakpoint(art::ArtMethod* method)
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void AddDeoptimizeAllMethods()
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void RemoveDeoptimizeAllMethods()
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void DeoptimizeThread(art::Thread* target) REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void DeoptimizeAllThreads() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ static DeoptManager* Get();
+
+ private:
+ bool MethodHasBreakpointsLocked(art::ArtMethod* method)
+ REQUIRES(deoptimization_status_lock_);
+
+ // Wait until no thread is in the middle of deoptimizing or undeoptimizing anything. This is
+ // needed to keep everything synchronized, since threads must drop the
+ // deoptimization_status_lock_ while deoptimizing methods.
+ void WaitForDeoptimizationToFinish(art::Thread* self)
+ RELEASE(deoptimization_status_lock_) REQUIRES(!art::Locks::mutator_lock_);
+
+ void WaitForDeoptimizationToFinishLocked(art::Thread* self)
+ REQUIRES(deoptimization_status_lock_, !art::Locks::mutator_lock_);
+
+ void AddDeoptimizeAllMethodsLocked(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void RemoveDeoptimizeAllMethodsLocked(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformGlobalDeoptimization(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformGlobalUndeoptimization(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformLimitedDeoptimization(art::Thread* self, art::ArtMethod* method)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformLimitedUndeoptimization(art::Thread* self, art::ArtMethod* method)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ static constexpr const char* kDeoptManagerInstrumentationKey = "JVMTI_DeoptManager";
+ // static constexpr const char* kDeoptManagerThreadName = "JVMTI_DeoptManagerWorkerThread";
+
+ art::Mutex deoptimization_status_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ art::ConditionVariable deoptimization_condition_ GUARDED_BY(deoptimization_status_lock_);
+ bool performing_deoptimization_ GUARDED_BY(deoptimization_status_lock_);
+
+ // Number of outstanding requests to deoptimize everything.
+ uint32_t global_deopt_count_ GUARDED_BY(deoptimization_status_lock_);
+
+ // Number of current users of deoptimization.
+ uint32_t deopter_count_ GUARDED_BY(deoptimization_status_lock_);
+
+ // A map from methods to the number of breakpoints in them from all envs.
+ std::unordered_map<art::ArtMethod*, uint32_t> breakpoint_status_
+ GUARDED_BY(deoptimization_status_lock_);
+
+ // The MethodInspectionCallback we use to tell the runtime if we care about particular methods.
+ JvmtiMethodInspectionCallback inspection_callback_;
+
+ // Helper for setting up and tearing down deoptimization.
+ friend class ScopedDeoptimizationContext;
+};
+
+} // namespace openjdkjvmti
+#endif // ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
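
breakpoint_status_ above is a per-method counter: a method stays interesting to the DeoptManager as long as any environment still has at least one breakpoint in it. A hypothetical standalone sketch of that counting-map idiom, with illustrative Add/Remove/Has names and return values rather than the real AddMethodBreakpoint/RemoveMethodBreakpoint API:

#include <cassert>
#include <cstdint>
#include <unordered_map>

class BreakpointCounts {
 public:
  // Returns true if this was the first breakpoint added for the method.
  bool Add(const void* method) {
    return counts_[method]++ == 0;
  }

  // Returns true if this was the last breakpoint removed for the method.
  bool Remove(const void* method) {
    auto it = counts_.find(method);
    assert(it != counts_.end() && "removing a breakpoint that was never added");
    if (--it->second == 0) {
      counts_.erase(it);
      return true;
    }
    return false;
  }

  bool Has(const void* method) const {
    return counts_.count(method) != 0;
  }

 private:
  std::unordered_map<const void*, uint32_t> counts_;
};
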
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index d97916f..7f77f90 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -480,6 +480,7 @@
ArtJvmtiEvent event = added ? ArtJvmtiEvent::kClassFileLoadHookNonRetransformable
: ArtJvmtiEvent::kClassFileLoadHookRetransformable;
return (added && caps.can_access_local_variables == 1) ||
+ caps.can_generate_breakpoint_events == 1 ||
(caps.can_retransform_classes == 1 &&
IsEventEnabledAnywhere(event) &&
env->event_masks.IsEnabledAnywhere(event));
@@ -497,6 +498,9 @@
if (added && caps.can_access_local_variables == 1) {
HandleLocalAccessCapabilityAdded();
}
+ if (caps.can_generate_breakpoint_events == 1) {
+ HandleBreakpointEventsChanged(added);
+ }
}
}
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 381dc1f..6a64441 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -37,6 +37,7 @@
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "base/logging.h"
+#include "deopt_manager.h"
#include "dex_file_types.h"
#include "gc/allocation_listener.h"
#include "gc/gc_pause_listener.h"
@@ -810,9 +811,49 @@
}
}
+static bool EventNeedsFullDeopt(ArtJvmtiEvent event) {
+ switch (event) {
+ case ArtJvmtiEvent::kBreakpoint:
+ case ArtJvmtiEvent::kException:
+ return false;
+ // TODO We should support more of these, or at least make them discriminate by thread.
+ case ArtJvmtiEvent::kMethodEntry:
+ case ArtJvmtiEvent::kExceptionCatch:
+ case ArtJvmtiEvent::kMethodExit:
+ case ArtJvmtiEvent::kFieldModification:
+ case ArtJvmtiEvent::kFieldAccess:
+ case ArtJvmtiEvent::kSingleStep:
+ case ArtJvmtiEvent::kFramePop:
+ return true;
+ default:
+ LOG(FATAL) << "Unexpected event type!";
+ UNREACHABLE();
+ }
+}
+
static void SetupTraceListener(JvmtiMethodTraceListener* listener,
ArtJvmtiEvent event,
bool enable) {
+ bool needs_full_deopt = EventNeedsFullDeopt(event);
+ // Make sure we can deopt.
+ {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ DeoptManager* deopt_manager = DeoptManager::Get();
+ if (enable) {
+ deopt_manager->AddDeoptimizationRequester();
+ if (needs_full_deopt) {
+ deopt_manager->AddDeoptimizeAllMethods();
+ }
+ } else {
+ if (needs_full_deopt) {
+ deopt_manager->RemoveDeoptimizeAllMethods();
+ }
+ deopt_manager->RemoveDeoptimizationRequester();
+ }
+ }
+
+ // Add the actual listeners.
art::ScopedThreadStateChange stsc(art::Thread::Current(), art::ThreadState::kNative);
uint32_t new_events = GetInstrumentationEventsFor(event);
art::instrumentation::Instrumentation* instr = art::Runtime::Current()->GetInstrumentation();
@@ -821,11 +862,6 @@
art::gc::kCollectorTypeInstrumentation);
art::ScopedSuspendAll ssa("jvmti method tracing installation");
if (enable) {
- // TODO Depending on the features being used we should be able to avoid deoptimizing everything
- // like we do here.
- if (!instr->AreAllMethodsDeoptimized()) {
- instr->EnableMethodTracing("jvmti-tracing", /*needs_interpreter*/true);
- }
instr->AddListener(listener, new_events);
} else {
instr->RemoveListener(listener, new_events);
@@ -910,6 +946,7 @@
}
// FramePop can never be disabled once it's been turned on since we would either need to deal
// with dangling pointers or have missed events.
+ // TODO We really need to make this not the case anymore.
case ArtJvmtiEvent::kFramePop:
if (!enable || (enable && frame_pop_enabled)) {
break;
@@ -1046,6 +1083,14 @@
return ERR(NONE);
}
+void EventHandler::HandleBreakpointEventsChanged(bool added) {
+ if (added) {
+ DeoptManager::Get()->AddDeoptimizationRequester();
+ } else {
+ DeoptManager::Get()->RemoveDeoptimizationRequester();
+ }
+}
+
void EventHandler::Shutdown() {
// Need to remove the method_trace_listener_ if it's there.
art::Thread* self = art::Thread::Current();
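
The SetupTraceListener() change above swaps the old unconditional EnableMethodTracing() call for explicit DeoptManager bookkeeping: enabling an event registers a deoptimization requester and, for events that cannot yet be handled selectively, a global deoptimization; disabling releases them in the reverse order so the counts stay balanced. A hypothetical standalone sketch of that symmetric bracketing, with a stub standing in for the real DeoptManager:

#include <iostream>

struct FakeDeoptManager {  // Hypothetical stand-in for DeoptManager.
  void AddDeoptimizationRequester() { std::cout << "+requester\n"; }
  void RemoveDeoptimizationRequester() { std::cout << "-requester\n"; }
  void AddDeoptimizeAllMethods() { std::cout << "+deopt-all\n"; }
  void RemoveDeoptimizeAllMethods() { std::cout << "-deopt-all\n"; }
};

// Acquire in one order on enable, release in the reverse order on disable.
void SetupTracing(FakeDeoptManager* deopt, bool enable, bool needs_full_deopt) {
  if (enable) {
    deopt->AddDeoptimizationRequester();
    if (needs_full_deopt) {
      deopt->AddDeoptimizeAllMethods();
    }
  } else {
    if (needs_full_deopt) {
      deopt->RemoveDeoptimizeAllMethods();
    }
    deopt->RemoveDeoptimizationRequester();
  }
}

int main() {
  FakeDeoptManager deopt;
  SetupTracing(&deopt, /*enable=*/true, /*needs_full_deopt=*/true);
  SetupTracing(&deopt, /*enable=*/false, /*needs_full_deopt=*/true);
  return 0;
}
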
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index a062e15..aed24e5 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -232,6 +232,7 @@
void HandleEventType(ArtJvmtiEvent event, bool enable);
void HandleLocalAccessCapabilityAdded();
+ void HandleBreakpointEventsChanged(bool enable);
bool OtherMonitorEventsEnabledAnywhere(ArtJvmtiEvent event);
diff --git a/openjdkjvmti/ti_breakpoint.cc b/openjdkjvmti/ti_breakpoint.cc
index 9c3687f..8e5b56e 100644
--- a/openjdkjvmti/ti_breakpoint.cc
+++ b/openjdkjvmti/ti_breakpoint.cc
@@ -37,6 +37,7 @@
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/mutex-inl.h"
+#include "deopt_manager.h"
#include "dex_file_annotations.h"
#include "events-inl.h"
#include "jni_internal.h"
@@ -63,18 +64,30 @@
}
void BreakpointUtil::RemoveBreakpointsInClass(ArtJvmTiEnv* env, art::mirror::Class* klass) {
- art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
std::vector<Breakpoint> to_remove;
- for (const Breakpoint& b : env->breakpoints) {
- if (b.GetMethod()->GetDeclaringClass() == klass) {
- to_remove.push_back(b);
+ {
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
+ for (const Breakpoint& b : env->breakpoints) {
+ if (b.GetMethod()->GetDeclaringClass() == klass) {
+ to_remove.push_back(b);
+ }
+ }
+ for (const Breakpoint& b : to_remove) {
+ auto it = env->breakpoints.find(b);
+ DCHECK(it != env->breakpoints.end());
+ env->breakpoints.erase(it);
}
}
- for (const Breakpoint& b : to_remove) {
- auto it = env->breakpoints.find(b);
- DCHECK(it != env->breakpoints.end());
- env->breakpoints.erase(it);
+ if (!to_remove.empty()) {
+ LOG(WARNING) << "Methods with breakpoints potentially not being un-deoptimized.";
}
+ // TODO Figure out how to do this.
+ // DeoptManager* deopt = DeoptManager::Get();
+ // for (const Breakpoint& b : to_remove) {
+ // // TODO It might be good to send these all at once instead.
+ // // deopt->RemoveMethodBreakpointSuspended(b.GetMethod());
+ // LOG(WARNING) << "not un-deopting methods! :-0";
+ // }
}
jvmtiError BreakpointUtil::SetBreakpoint(jvmtiEnv* jenv, jmethodID method, jlocation location) {
@@ -82,20 +95,23 @@
if (method == nullptr) {
return ERR(INVALID_METHODID);
}
- // Need to get mutator_lock_ so we can find the interface version of any default methods.
art::ScopedObjectAccess soa(art::Thread::Current());
art::ArtMethod* art_method = art::jni::DecodeArtMethod(method)->GetCanonicalMethod();
- art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
if (location < 0 || static_cast<uint32_t>(location) >=
art_method->GetCodeItem()->insns_size_in_code_units_) {
return ERR(INVALID_LOCATION);
}
- auto res_pair = env->breakpoints.insert(/* Breakpoint */ {art_method, location});
- if (!res_pair.second) {
- // Didn't get inserted because it's already present!
- return ERR(DUPLICATE);
+ DeoptManager::Get()->AddMethodBreakpoint(art_method);
+ {
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
+ auto res_pair = env->breakpoints.insert(/* Breakpoint */ {art_method, location});
+ if (LIKELY(res_pair.second)) {
+ return OK;
+ }
}
- return OK;
+ // Didn't get inserted because it's already present!
+ DeoptManager::Get()->RemoveMethodBreakpoint(art_method);
+ return ERR(DUPLICATE);
}
jvmtiError BreakpointUtil::ClearBreakpoint(jvmtiEnv* jenv, jmethodID method, jlocation location) {
@@ -103,15 +119,17 @@
if (method == nullptr) {
return ERR(INVALID_METHODID);
}
- // Need to get mutator_lock_ so we can find the interface version of any default methods.
art::ScopedObjectAccess soa(art::Thread::Current());
art::ArtMethod* art_method = art::jni::DecodeArtMethod(method)->GetCanonicalMethod();
- art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
- auto pos = env->breakpoints.find(/* Breakpoint */ {art_method, location});
- if (pos == env->breakpoints.end()) {
- return ERR(NOT_FOUND);
+ {
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
+ auto pos = env->breakpoints.find(/* Breakpoint */ {art_method, location});
+ if (pos == env->breakpoints.end()) {
+ return ERR(NOT_FOUND);
+ }
+ env->breakpoints.erase(pos);
}
- env->breakpoints.erase(pos);
+ DeoptManager::Get()->RemoveMethodBreakpoint(art_method);
return OK;
}
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 41679da..5d63285 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -86,21 +86,6 @@
TiMethodCallback gMethodCallback;
-// TODO We should make this much more selective in the future so we only return true when we
-// actually care about the method (i.e. had locals changed, have breakpoints, etc.). For now though
-// we can just assume that we care we are loaded at all.
-//
-// Even if we don't keep track of this at the method level we might want to keep track of it at the
-// level of enabled capabilities.
-struct TiMethodInspectionCallback : public art::MethodInspectionCallback {
- bool IsMethodBeingInspected(art::ArtMethod* method ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return true;
- }
-};
-
-TiMethodInspectionCallback gMethodInspectionCallback;
-
void MethodUtil::Register(EventHandler* handler) {
gMethodCallback.event_handler = handler;
art::ScopedThreadStateChange stsc(art::Thread::Current(),
@@ -108,7 +93,6 @@
art::ScopedSuspendAll ssa("Add method callback");
art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
callbacks->AddMethodCallback(&gMethodCallback);
- callbacks->AddMethodInspectionCallback(&gMethodInspectionCallback);
}
void MethodUtil::Unregister() {
@@ -117,7 +101,6 @@
art::ScopedSuspendAll ssa("Remove method callback");
art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
callbacks->RemoveMethodCallback(&gMethodCallback);
- callbacks->AddMethodInspectionCallback(&gMethodInspectionCallback);
}
jvmtiError MethodUtil::GetBytecodes(jvmtiEnv* env,
diff --git a/runtime/Android.bp b/runtime/Android.bp
index a615437..afc7d27 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -35,6 +35,7 @@
"base/arena_bit_vector.cc",
"base/bit_vector.cc",
"base/file_magic.cc",
+ "base/file_utils.cc",
"base/hex_dump.cc",
"base/logging.cc",
"base/mutex.cc",
@@ -203,6 +204,7 @@
"runtime.cc",
"runtime_callbacks.cc",
"runtime_common.cc",
+ "runtime_intrinsics.cc",
"runtime_options.cc",
"scoped_thread_state_change.cc",
"signal_catcher.cc",
@@ -402,7 +404,6 @@
export_generated_headers: ["cpp-define-generator-asm-support"],
include_dirs: [
"art/sigchainlib",
- "art",
],
header_libs: [
"art_cmdlineparser_headers",
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index adfc88f..280e593 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -2921,7 +2921,7 @@
INCREASE_FRAME 16 // Reserve space for JValue result.
str xzr, [sp, #0] // Initialize result to zero.
mov x0, sp // Set r0 to point to result.
- bl artInvokePolymorphic // ArtInvokePolymorphic(result, receiver, thread, save_area)
+ bl artInvokePolymorphic // artInvokePolymorphic(result, receiver, thread, save_area)
uxtb w0, w0 // Result is the return type descriptor as a char.
sub w0, w0, 'A' // Convert to zero based index.
cmp w0, 'Z' - 'A'
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index ee3f17d..489c52c 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -3228,7 +3228,7 @@
sw $zero, 20($sp) # Initialize JValue result.
sw $zero, 16($sp)
la $t9, artInvokePolymorphic
- jalr $t9 # (result, receiver, Thread*, context)
+ jalr $t9 # artInvokePolymorphic(result, receiver, Thread*, context)
addiu $a0, $sp, 16 # Make $a0 a pointer to the JValue result
.macro MATCH_RETURN_TYPE c, handler
li $t0, \c
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index d4ad275..98ffe65 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -3028,7 +3028,7 @@
daddiu $sp, $sp, -8 # Reserve space for JValue result.
.cfi_adjust_cfa_offset 8
sd $zero, 0($sp) # Initialize JValue result.
- jal artInvokePolymorphic # (result, receiver, Thread*, context)
+ jal artInvokePolymorphic # artInvokePolymorphic(result, receiver, Thread*, context)
move $a0, $sp # Make $a0 a pointer to the JValue result
.macro MATCH_RETURN_TYPE c, handler
li $t0, \c
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index eecca58..25716dc 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -2419,7 +2419,7 @@
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass receiver (method handle)
PUSH eax // pass JResult
- call SYMBOL(artInvokePolymorphic) // (result, receiver, Thread*, SP)
+ call SYMBOL(artInvokePolymorphic) // artInvokePolymorphic(result, receiver, Thread*, SP)
subl LITERAL('A'), %eax // Eliminate out of bounds options
cmpb LITERAL('Z' - 'A'), %al
ja .Lcleanup_and_return
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index d4297df..80f5c34 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -166,6 +166,8 @@
return kInterface;
} else if (IsDirect()) {
return kDirect;
+ } else if (IsPolymorphicSignature()) {
+ return kPolymorphic;
} else {
return kVirtual;
}
@@ -427,6 +429,12 @@
/* lookup_in_resolved_boot_classes */ true);
}
+bool ArtMethod::IsAnnotatedWithPolymorphicSignature() {
+ return IsAnnotatedWith(WellKnownClasses::java_lang_invoke_MethodHandle_PolymorphicSignature,
+ DexFile::kDexVisibilityRuntime,
+ /* lookup_in_resolved_boot_classes */ true);
+}
+
bool ArtMethod::IsAnnotatedWith(jclass klass,
uint32_t visibility,
bool lookup_in_resolved_boot_classes) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index caef81c..fe85cb4 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -271,6 +271,12 @@
bool IsProxyMethod() REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsPolymorphicSignature() REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Methods with a polymorphic signature are required to be native and varargs.
+ // Check those cheap flags first, before the potentially expensive annotation lookup.
+ return IsNative() && IsVarargs() && IsAnnotatedWithPolymorphicSignature();
+ }
+
bool SkipAccessChecks() {
return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
}
@@ -316,6 +322,10 @@
// -- Unrelated to the GC notion of "critical".
bool IsAnnotatedWithCriticalNative();
+ // Checks to see if the method was annotated with
+ // @java.lang.invoke.MethodHandle.PolymorphicSignature.
+ bool IsAnnotatedWithPolymorphicSignature();
+
// Returns true if this method could be overridden by a default method.
bool IsOverridableByDefaultMethod() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/base/file_utils.cc b/runtime/base/file_utils.cc
new file mode 100644
index 0000000..323a065
--- /dev/null
+++ b/runtime/base/file_utils.cc
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "file_utils.h"
+
+#include <inttypes.h>
+#include <pthread.h>
+#include <sys/mman.h> // For madvise
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+// We need dladdr.
+#ifndef __APPLE__
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#define DEFINED_GNU_SOURCE
+#endif
+#include <dlfcn.h>
+#include <libgen.h>
+#ifdef DEFINED_GNU_SOURCE
+#undef _GNU_SOURCE
+#undef DEFINED_GNU_SOURCE
+#endif
+#endif
+
+
+#include <memory>
+
+#include "android-base/stringprintf.h"
+#include "android-base/strings.h"
+
+#include "base/stl_util.h"
+#include "base/unix_file/fd_file.h"
+#include "dex_file-inl.h"
+#include "dex_file_loader.h"
+#include "dex_instruction.h"
+#include "oat_quick_method_header.h"
+#include "os.h"
+#include "scoped_thread_state_change-inl.h"
+#include "utf-inl.h"
+
+#if defined(__APPLE__)
+#include <crt_externs.h>
+#include <sys/syscall.h>
+#include "AvailabilityMacros.h" // For MAC_OS_X_VERSION_MAX_ALLOWED
+#endif
+
+#if defined(__linux__)
+#include <linux/unistd.h>
+#endif
+
+namespace art {
+
+using android::base::StringAppendF;
+using android::base::StringPrintf;
+
+bool ReadFileToString(const std::string& file_name, std::string* result) {
+ File file(file_name, O_RDONLY, false);
+ if (!file.IsOpened()) {
+ return false;
+ }
+
+ std::vector<char> buf(8 * KB);
+ while (true) {
+ int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[0], buf.size()));
+ if (n == -1) {
+ return false;
+ }
+ if (n == 0) {
+ return true;
+ }
+ result->append(&buf[0], n);
+ }
+}
+
+bool PrintFileToLog(const std::string& file_name, LogSeverity level) {
+ File file(file_name, O_RDONLY, false);
+ if (!file.IsOpened()) {
+ return false;
+ }
+
+ constexpr size_t kBufSize = 256; // Small buffer. Avoid stack overflow and stack size warnings.
+ char buf[kBufSize + 1]; // +1 for terminator.
+ size_t filled_to = 0;
+ while (true) {
+ DCHECK_LT(filled_to, kBufSize);
+ int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[filled_to], kBufSize - filled_to));
+ if (n <= 0) {
+ // Print the rest of the buffer, if it exists.
+ if (filled_to > 0) {
+ buf[filled_to] = 0;
+ LOG(level) << buf;
+ }
+ return n == 0;
+ }
+ // Scan for '\n'.
+ size_t i = filled_to;
+ bool found_newline = false;
+ for (; i < filled_to + n; ++i) {
+ if (buf[i] == '\n') {
+ // Found a line break; print the buffered line now.
+ buf[i] = 0;
+ LOG(level) << buf;
+ // Copy the rest to the front.
+ if (i + 1 < filled_to + n) {
+ memmove(&buf[0], &buf[i + 1], filled_to + n - i - 1);
+ filled_to = filled_to + n - i - 1;
+ } else {
+ filled_to = 0;
+ }
+ found_newline = true;
+ break;
+ }
+ }
+ if (found_newline) {
+ continue;
+ } else {
+ filled_to += n;
+ // Check if we must flush now.
+ if (filled_to == kBufSize) {
+ buf[kBufSize] = 0;
+ LOG(level) << buf;
+ filled_to = 0;
+ }
+ }
+ }
+}
+
+std::string GetAndroidRootSafe(std::string* error_msg) {
+ // Prefer ANDROID_ROOT if it's set.
+ const char* android_dir = getenv("ANDROID_ROOT");
+ if (android_dir != nullptr) {
+ if (!OS::DirectoryExists(android_dir)) {
+ *error_msg = StringPrintf("Failed to find ANDROID_ROOT directory %s", android_dir);
+ return "";
+ }
+ return android_dir;
+ }
+
+ // Check where libart is from, and derive from there. Only do this for non-Mac.
+#ifndef __APPLE__
+ {
+ Dl_info info;
+ if (dladdr(reinterpret_cast<const void*>(&GetAndroidRootSafe), /* out */ &info) != 0) {
+ // Make a duplicate of the fname so dirname can modify it.
+ UniqueCPtr<char> fname(strdup(info.dli_fname));
+
+ char* dir1 = dirname(fname.get()); // This is the lib directory.
+ char* dir2 = dirname(dir1); // This is the "system" directory.
+ if (OS::DirectoryExists(dir2)) {
+ std::string tmp = dir2; // Make a copy here so that fname can be released.
+ return tmp;
+ }
+ }
+ }
+#endif
+
+ // Try "/system".
+ if (!OS::DirectoryExists("/system")) {
+ *error_msg = "Failed to find ANDROID_ROOT directory /system";
+ return "";
+ }
+ return "/system";
+}
+
+std::string GetAndroidRoot() {
+ std::string error_msg;
+ std::string ret = GetAndroidRootSafe(&error_msg);
+ if (ret.empty()) {
+ LOG(FATAL) << error_msg;
+ UNREACHABLE();
+ }
+ return ret;
+}
+
+
+static const char* GetAndroidDirSafe(const char* env_var,
+ const char* default_dir,
+ std::string* error_msg) {
+ const char* android_dir = getenv(env_var);
+ if (android_dir == nullptr) {
+ if (OS::DirectoryExists(default_dir)) {
+ android_dir = default_dir;
+ } else {
+ *error_msg = StringPrintf("%s not set and %s does not exist", env_var, default_dir);
+ return nullptr;
+ }
+ }
+ if (!OS::DirectoryExists(android_dir)) {
+ *error_msg = StringPrintf("Failed to find %s directory %s", env_var, android_dir);
+ return nullptr;
+ }
+ return android_dir;
+}
+
+static const char* GetAndroidDir(const char* env_var, const char* default_dir) {
+ std::string error_msg;
+ const char* dir = GetAndroidDirSafe(env_var, default_dir, &error_msg);
+ if (dir != nullptr) {
+ return dir;
+ } else {
+ LOG(FATAL) << error_msg;
+ return nullptr;
+ }
+}
+
+const char* GetAndroidData() {
+ return GetAndroidDir("ANDROID_DATA", "/data");
+}
+
+const char* GetAndroidDataSafe(std::string* error_msg) {
+ return GetAndroidDirSafe("ANDROID_DATA", "/data", error_msg);
+}
+
+std::string GetDefaultBootImageLocation(std::string* error_msg) {
+ std::string android_root = GetAndroidRootSafe(error_msg);
+ if (android_root.empty()) {
+ return "";
+ }
+ return StringPrintf("%s/framework/boot.art", android_root.c_str());
+}
+
+void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
+ bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache) {
+ CHECK(subdir != nullptr);
+ std::string error_msg;
+ const char* android_data = GetAndroidDataSafe(&error_msg);
+ if (android_data == nullptr) {
+ *have_android_data = false;
+ *dalvik_cache_exists = false;
+ *is_global_cache = false;
+ return;
+ } else {
+ *have_android_data = true;
+ }
+ const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
+ *dalvik_cache = dalvik_cache_root + subdir;
+ *dalvik_cache_exists = OS::DirectoryExists(dalvik_cache->c_str());
+ *is_global_cache = strcmp(android_data, "/data") == 0;
+ if (create_if_absent && !*dalvik_cache_exists && !*is_global_cache) {
+ // Don't create the system's /data/dalvik-cache/... because it needs special permissions.
+ *dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) &&
+ (mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST));
+ }
+}
+
+std::string GetDalvikCache(const char* subdir) {
+ CHECK(subdir != nullptr);
+ const char* android_data = GetAndroidData();
+ const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
+ const std::string dalvik_cache = dalvik_cache_root + subdir;
+ if (!OS::DirectoryExists(dalvik_cache.c_str())) {
+ // TODO: Check callers. Traditional behavior is to not abort.
+ return "";
+ }
+ return dalvik_cache;
+}
+
+bool GetDalvikCacheFilename(const char* location, const char* cache_location,
+ std::string* filename, std::string* error_msg) {
+ if (location[0] != '/') {
+ *error_msg = StringPrintf("Expected path in location to be absolute: %s", location);
+ return false;
+ }
+ std::string cache_file(&location[1]); // skip leading slash
+ if (!android::base::EndsWith(location, ".dex") &&
+ !android::base::EndsWith(location, ".art") &&
+ !android::base::EndsWith(location, ".oat")) {
+ cache_file += "/";
+ cache_file += DexFileLoader::kClassesDex;
+ }
+ std::replace(cache_file.begin(), cache_file.end(), '/', '@');
+ *filename = StringPrintf("%s/%s", cache_location, cache_file.c_str());
+ return true;
+}
+
+std::string GetVdexFilename(const std::string& oat_location) {
+ return ReplaceFileExtension(oat_location, "vdex");
+}
+
+static void InsertIsaDirectory(const InstructionSet isa, std::string* filename) {
+ // in = /foo/bar/baz
+ // out = /foo/bar/<isa>/baz
+ size_t pos = filename->rfind('/');
+ CHECK_NE(pos, std::string::npos) << *filename << " " << isa;
+ filename->insert(pos, "/", 1);
+ filename->insert(pos + 1, GetInstructionSetString(isa));
+}
+
+std::string GetSystemImageFilename(const char* location, const InstructionSet isa) {
+ // location = /system/framework/boot.art
+ // filename = /system/framework/<isa>/boot.art
+ std::string filename(location);
+ InsertIsaDirectory(isa, &filename);
+ return filename;
+}
+
+bool FileExists(const std::string& filename) {
+ struct stat buffer;
+ return stat(filename.c_str(), &buffer) == 0;
+}
+
+bool FileExistsAndNotEmpty(const std::string& filename) {
+ struct stat buffer;
+ if (stat(filename.c_str(), &buffer) != 0) {
+ return false;
+ }
+ return buffer.st_size > 0;
+}
+
+std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension) {
+ const size_t last_ext = filename.find_last_of('.');
+ if (last_ext == std::string::npos) {
+ return filename + "." + new_extension;
+ } else {
+ return filename.substr(0, last_ext + 1) + new_extension;
+ }
+}
+
+int64_t GetFileSizeBytes(const std::string& filename) {
+ struct stat stat_buf;
+ int rc = stat(filename.c_str(), &stat_buf);
+ return rc == 0 ? stat_buf.st_size : -1;
+}
+
+int MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, int advice) {
+ DCHECK_LE(begin, end);
+ begin = AlignUp(begin, kPageSize);
+ end = AlignDown(end, kPageSize);
+ if (begin < end) {
+ int result = madvise(const_cast<uint8_t*>(begin), end - begin, advice);
+ if (result != 0) {
+ PLOG(WARNING) << "madvise failed " << result;
+ }
+ return result;
+ }
+ return 0;
+}
+
+} // namespace art
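
GetDalvikCacheFilename() and ReplaceFileExtension() above are plain string surgery: a cache key is the absolute location with '/' flattened to '@', and a vdex name is the oat name with its extension swapped. A standalone sketch of the core of those two transforms using only the standard library (it omits the classes.dex special case, and the helper names here are illustrative, not the ART API):

#include <algorithm>
#include <cassert>
#include <string>

// Flatten an absolute dex location into a dalvik-cache file name under cache_dir,
// e.g. "/system/app/Foo.apk" -> "<cache_dir>/system@app@Foo.apk".
std::string CacheFileFor(const std::string& location, const std::string& cache_dir) {
  assert(!location.empty() && location[0] == '/');
  std::string cache_file = location.substr(1);  // Skip the leading slash.
  std::replace(cache_file.begin(), cache_file.end(), '/', '@');
  return cache_dir + "/" + cache_file;
}

// Swap the extension, appending one if the name has none, as ReplaceFileExtension does.
std::string WithExtension(const std::string& filename, const std::string& new_extension) {
  const std::string::size_type last_dot = filename.find_last_of('.');
  if (last_dot == std::string::npos) {
    return filename + "." + new_extension;
  }
  return filename.substr(0, last_dot + 1) + new_extension;
}

int main() {
  assert(CacheFileFor("/system/app/Foo.apk", "/data/dalvik-cache/arm64") ==
         "/data/dalvik-cache/arm64/system@app@Foo.apk");
  assert(WithExtension("boot.oat", "vdex") == "boot.vdex");
  assert(WithExtension("boot", "vdex") == "boot.vdex");
  return 0;
}
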
diff --git a/runtime/base/file_utils.h b/runtime/base/file_utils.h
new file mode 100644
index 0000000..007f3b4
--- /dev/null
+++ b/runtime/base/file_utils.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_FILE_UTILS_H_
+#define ART_RUNTIME_BASE_FILE_UTILS_H_
+
+#include <stdlib.h>
+
+#include <string>
+
+#include "arch/instruction_set.h"
+#include "base/logging.h"
+
+namespace art {
+
+bool ReadFileToString(const std::string& file_name, std::string* result);
+bool PrintFileToLog(const std::string& file_name, LogSeverity level);
+
+// Find $ANDROID_ROOT, /system, or abort.
+std::string GetAndroidRoot();
+// Find $ANDROID_ROOT, /system, or return an empty string.
+std::string GetAndroidRootSafe(std::string* error_msg);
+
+// Find $ANDROID_DATA, /data, or abort.
+const char* GetAndroidData();
+// Find $ANDROID_DATA, /data, or return null.
+const char* GetAndroidDataSafe(std::string* error_msg);
+
+// Returns the default boot image location (ANDROID_ROOT/framework/boot.art).
+// Returns an empty string if ANDROID_ROOT is not set.
+std::string GetDefaultBootImageLocation(std::string* error_msg);
+
+// Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
+// could not be found.
+std::string GetDalvikCache(const char* subdir);
+// Find the dalvik cache for subdir and store it in the dalvik_cache argument.
+// have_android_data will be set to true if we have an ANDROID_DATA that exists,
+// dalvik_cache_exists will be true if there is a dalvik-cache directory that is present.
+// The flag is_global_cache tells whether this cache is /data/dalvik-cache.
+void GetDalvikCache(const char* subdir, bool create_if_absent, std::string* dalvik_cache,
+ bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache);
+
+// Returns the absolute dalvik-cache path for a DexFile or OatFile. The path returned will be
+// rooted at cache_location.
+bool GetDalvikCacheFilename(const char* file_location, const char* cache_location,
+ std::string* filename, std::string* error_msg);
+
+// Returns the system location for an image.
+std::string GetSystemImageFilename(const char* location, InstructionSet isa);
+
+// Returns the vdex filename for the given oat filename.
+std::string GetVdexFilename(const std::string& oat_filename);
+
+// Returns true if the file exists.
+bool FileExists(const std::string& filename);
+bool FileExistsAndNotEmpty(const std::string& filename);
+
+// Returns `filename` with the text after the last occurrence of '.' replaced with
+// `new_extension`. If `filename` does not contain a period, returns a string containing `filename`,
+// a period, and `new_extension`.
+// Example: ReplaceFileExtension("foo.bar", "abc") == "foo.abc"
+// ReplaceFileExtension("foo", "abc") == "foo.abc"
+std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension);
+
+// Return the file size in bytes or -1 if the file does not exist.
+int64_t GetFileSizeBytes(const std::string& filename);
+
+// Madvise the largest page aligned region within begin and end.
+int MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, int advice);
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_FILE_UTILS_H_
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 189c0d0..4b56d3b 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -664,7 +664,7 @@
// When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to hold a higher level Mutex.
- #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
+ #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
diff --git a/runtime/base/safe_copy.cc b/runtime/base/safe_copy.cc
index c76ea11..b46b921 100644
--- a/runtime/base/safe_copy.cc
+++ b/runtime/base/safe_copy.cc
@@ -24,7 +24,7 @@
#include <android-base/macros.h>
-#include "runtime/base/bit_utils.h"
+#include "bit_utils.h"
namespace art {
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 149c33f..f15acf9 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -27,6 +27,7 @@
#include "android-base/stringprintf.h"
#include "art_field-inl.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/stl_util.h"
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 85df14a..8898afe 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -346,6 +346,10 @@
return Dbg::IsDebuggerActive();
}
+bool DebuggerActiveMethodInspectionCallback::IsMethodSafeToJit(ArtMethod* m) {
+ return !Dbg::MethodHasAnyBreakpoints(m);
+}
+
// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 18126b1..ec37833 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -55,6 +55,7 @@
struct DebuggerActiveMethodInspectionCallback : public MethodInspectionCallback {
bool IsMethodBeingInspected(ArtMethod* m ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsMethodSafeToJit(ArtMethod* m) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 57cef3d..5f9b3cf 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -23,6 +23,7 @@
#include <gtest/gtest.h>
+#include "base/file_utils.h"
#include "base/stl_util.h"
#include "common_runtime_test.h"
#include "compiler_callbacks.h"
diff --git a/runtime/dex_file_layout.cc b/runtime/dex_file_layout.cc
index 4375d7f..c3fae15 100644
--- a/runtime/dex_file_layout.cc
+++ b/runtime/dex_file_layout.cc
@@ -18,6 +18,7 @@
#include <sys/mman.h>
+#include "base/file_utils.h"
#include "dex_file.h"
#include "utils.h"
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 99fe53b..e64c0f6 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -119,6 +119,26 @@
}
}
+size_t Instruction::CodeUnitsRequiredForSizeOfComplexOpcode() const {
+ const uint16_t* insns = reinterpret_cast<const uint16_t*>(this);
+ // Handle special NOP encoded variable length sequences.
+ switch (*insns) {
+ case kPackedSwitchSignature:
+ FALLTHROUGH_INTENDED;
+ case kSparseSwitchSignature:
+ return 2;
+ case kArrayDataSignature:
+ return 4;
+ default:
+ if ((*insns & 0xFF) == 0) {
+ return 1; // NOP.
+ } else {
+ LOG(FATAL) << "Unreachable: " << DumpString(nullptr);
+ UNREACHABLE();
+ }
+ }
+}
+
std::string Instruction::DumpHex(size_t code_units) const {
size_t inst_length = SizeInCodeUnits();
if (inst_length > code_units) {
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 2f28dff..09c78b2 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -225,6 +225,12 @@
}
}
+ // Code units required to calculate the size of the instruction.
+ size_t CodeUnitsRequiredForSizeComputation() const {
+ const int8_t result = kInstructionDescriptors[Opcode()].size_in_code_units;
+ return UNLIKELY(result < 0) ? CodeUnitsRequiredForSizeOfComplexOpcode() : 1;
+ }
+
// Reads an instruction out of the stream at the specified address.
static const Instruction* At(const uint16_t* code) {
DCHECK(code != nullptr);
@@ -638,6 +644,9 @@
private:
size_t SizeInCodeUnitsComplexOpcode() const;
+ // Return how many code unit words are required to compute the size of the opcode.
+ size_t CodeUnitsRequiredForSizeOfComplexOpcode() const;
+
uint32_t Fetch32(size_t offset) const {
return (Fetch16(offset) | ((uint32_t) Fetch16(offset + 1) << 16));
}
diff --git a/runtime/dex_instruction_iterator.h b/runtime/dex_instruction_iterator.h
index 280746e..9e4dea3 100644
--- a/runtime/dex_instruction_iterator.h
+++ b/runtime/dex_instruction_iterator.h
@@ -24,19 +24,68 @@
namespace art {
-class DexInstructionIterator : public std::iterator<std::forward_iterator_tag, Instruction> {
+// Base helper class to prevent duplicated comparators.
+class DexInstructionIteratorBase : public std::iterator<std::forward_iterator_tag, Instruction> {
public:
using value_type = std::iterator<std::forward_iterator_tag, Instruction>::value_type;
using difference_type = std::iterator<std::forward_iterator_tag, value_type>::difference_type;
- DexInstructionIterator() = default;
- DexInstructionIterator(const DexInstructionIterator&) = default;
- DexInstructionIterator(DexInstructionIterator&&) = default;
- DexInstructionIterator& operator=(const DexInstructionIterator&) = default;
- DexInstructionIterator& operator=(DexInstructionIterator&&) = default;
+ DexInstructionIteratorBase() = default;
+ explicit DexInstructionIteratorBase(const value_type* inst) : inst_(inst) {}
- explicit DexInstructionIterator(const value_type* inst) : inst_(inst) {}
- explicit DexInstructionIterator(const uint16_t* inst) : inst_(value_type::At(inst)) {}
+ const value_type* Inst() const {
+ return inst_;
+ }
+
+ // Return the dex pc for an iterator compared to the code item begin.
+ uint32_t GetDexPC(const DexInstructionIteratorBase& code_item_begin) {
+ return reinterpret_cast<const uint16_t*>(inst_) -
+ reinterpret_cast<const uint16_t*>(code_item_begin.inst_);
+ }
+
+ protected:
+ const value_type* inst_ = nullptr;
+};
+
+
+static ALWAYS_INLINE inline bool operator==(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
+ return lhs.Inst() == rhs.Inst();
+}
+
+static inline bool operator!=(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
+ return !(lhs == rhs);
+}
+
+static inline bool operator<(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
+ return lhs.Inst() < rhs.Inst();
+}
+
+static inline bool operator>(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
+ return rhs < lhs;
+}
+
+static inline bool operator<=(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
+ return !(rhs < lhs);
+}
+
+static inline bool operator>=(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
+ return !(lhs < rhs);
+}
+
+class DexInstructionIterator : public DexInstructionIteratorBase {
+ public:
+ using value_type = std::iterator<std::forward_iterator_tag, Instruction>::value_type;
+ using difference_type = std::iterator<std::forward_iterator_tag, value_type>::difference_type;
+ using DexInstructionIteratorBase::DexInstructionIteratorBase;
+
+ explicit DexInstructionIterator(const uint16_t* inst)
+ : DexInstructionIteratorBase(value_type::At(inst)) {}
// Value after modification.
DexInstructionIterator& operator++() {
@@ -58,51 +107,68 @@
const value_type* operator->() const {
return &**this;
}
+};
- // Return the dex pc for an iterator compared to the code item begin.
- uint32_t GetDexPC(const DexInstructionIterator& code_item_begin) {
- return reinterpret_cast<const uint16_t*>(inst_) -
- reinterpret_cast<const uint16_t*>(code_item_begin.inst_);
+class SafeDexInstructionIterator : public DexInstructionIteratorBase {
+ public:
+ explicit SafeDexInstructionIterator(const DexInstructionIteratorBase& start,
+ const DexInstructionIteratorBase& end)
+ : DexInstructionIteratorBase(start.Inst())
+ , end_(end.Inst()) {}
+
+ // Value after modification; does not read past the end of the allowed region. It may still
+ // increment past the end of the code item.
+ SafeDexInstructionIterator& operator++() {
+ AssertValid();
+ const size_t size_code_units = Inst()->CodeUnitsRequiredForSizeComputation();
+ const size_t available = reinterpret_cast<const uint16_t*>(end_) -
+ reinterpret_cast<const uint16_t*>(Inst());
+ if (UNLIKELY(size_code_units > available)) {
+ error_state_ = true;
+ return *this;
+ }
+ const size_t instruction_size = inst_->SizeInCodeUnits();
+ if (UNLIKELY(instruction_size > available)) {
+ error_state_ = true;
+ return *this;
+ }
+ inst_ = inst_->RelativeAt(instruction_size);
+ return *this;
}
- const value_type* Inst() const {
- return inst_;
+ // Value before modification.
+ SafeDexInstructionIterator operator++(int) {
+ SafeDexInstructionIterator temp = *this;
+ ++*this;
+ return temp;
+ }
+
+ const value_type& operator*() const {
+ AssertValid();
+ return *inst_;
+ }
+
+ const value_type* operator->() const {
+ AssertValid();
+ return &**this;
+ }
+
+ // Returns true if the iterator is in an error state. This occurs when an instruction couldn't
+ // have its size computed without reading past the end iterator.
+ bool IsErrorState() const {
+ return error_state_;
}
private:
- const value_type* inst_ = nullptr;
+ ALWAYS_INLINE void AssertValid() const {
+ DCHECK(!IsErrorState());
+ DCHECK_LT(Inst(), end_);
+ }
+
+ const value_type* end_ = nullptr;
+ bool error_state_ = false;
};
-static ALWAYS_INLINE inline bool operator==(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
- return lhs.Inst() == rhs.Inst();
-}
-
-static inline bool operator!=(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
- return !(lhs == rhs);
-}
-
-static inline bool operator<(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
- return lhs.Inst() < rhs.Inst();
-}
-
-static inline bool operator>(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
- return rhs < lhs;
-}
-
-static inline bool operator<=(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
- return !(rhs < lhs);
-}
-
-static inline bool operator>=(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
- return !(lhs < rhs);
-}
-
} // namespace art
#endif // ART_RUNTIME_DEX_INSTRUCTION_ITERATOR_H_
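
SafeDexInstructionIterator above never trusts an instruction's size field blindly: before computing a size it first asks how many code units that computation itself needs, and checks that both those units and the full instruction still fit inside [start, end). A hypothetical standalone sketch of the same two-step bounds check over a raw uint16_t buffer, with made-up DecodeSizeHeaderUnits()/DecodeSizeUnits() standing in for the real instruction decoding:

#include <cstddef>
#include <cstdint>
#include <vector>

// How many code units must be read just to determine this instruction's size.
// (Illustrative stand-in for Instruction::CodeUnitsRequiredForSizeComputation().)
size_t DecodeSizeHeaderUnits(uint16_t opcode_unit) {
  return (opcode_unit & 0xFF) == 0 ? 2 : 1;  // Pretend NOP-like payloads need 2 units.
}

// The full size of the instruction in code units, given enough readable units.
// (Illustrative stand-in for Instruction::SizeInCodeUnits().)
size_t DecodeSizeUnits(const uint16_t* insn) {
  return (insn[0] & 0xFF) == 0 ? 2 + insn[1] : 1;  // Payload length lives in the next unit.
}

// Walk [begin, end) without ever reading past end; returns false on a truncated stream.
bool WalkSafely(const uint16_t* begin, const uint16_t* end) {
  const uint16_t* insn = begin;
  while (insn < end) {
    const size_t available = static_cast<size_t>(end - insn);
    if (DecodeSizeHeaderUnits(*insn) > available) {
      return false;  // Cannot even compute the size without overrunning the buffer.
    }
    const size_t size = DecodeSizeUnits(insn);
    if (size > available) {
      return false;  // The instruction itself runs past the end.
    }
    insn += size;
  }
  return true;
}

int main() {
  std::vector<uint16_t> ok = {0x0012, 0x0012, 0x0000, 0x0001, 0x0012};  // Payload fits exactly.
  std::vector<uint16_t> truncated = {0x0012, 0x0000, 0x0005};           // Payload claims 5 more units.
  return WalkSafely(ok.data(), ok.data() + ok.size()) &&
         !WalkSafely(truncated.data(), truncated.data() + truncated.size()) ? 0 : 1;
}
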
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index cf5cc11..4d7c2a1 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2574,7 +2574,7 @@
// each type.
extern "C" uintptr_t artInvokePolymorphic(
JValue* result,
- mirror::Object* raw_method_handle,
+ mirror::Object* raw_receiver,
Thread* self,
ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -2602,26 +2602,29 @@
RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa);
gc_visitor.VisitArguments();
- // Wrap raw_method_handle in a Handle for safety.
- StackHandleScope<2> hs(self);
- Handle<mirror::MethodHandle> method_handle(
- hs.NewHandle(ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(raw_method_handle))));
- raw_method_handle = nullptr;
+ // Wrap raw_receiver in a Handle for safety.
+ StackHandleScope<3> hs(self);
+ Handle<mirror::Object> receiver_handle(hs.NewHandle(raw_receiver));
+ raw_receiver = nullptr;
self->EndAssertNoThreadSuspension(old_cause);
- // Resolve method - it's either MethodHandle.invoke() or MethodHandle.invokeExact().
+ // Resolve method.
ClassLinker* linker = Runtime::Current()->GetClassLinker();
ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
self, inst.VRegB(), caller_method, kVirtual);
- DCHECK((resolved_method ==
- jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact)) ||
- (resolved_method ==
- jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invoke)));
- if (UNLIKELY(method_handle.IsNull())) {
+
+ if (UNLIKELY(receiver_handle.IsNull())) {
ThrowNullPointerExceptionForMethodAccess(resolved_method, InvokeType::kVirtual);
return static_cast<uintptr_t>('V');
}
+ // TODO(oth): Ensure this path isn't taken for VarHandle accessors (b/65872996).
+ DCHECK_EQ(resolved_method->GetDeclaringClass(),
+ WellKnownClasses::ToClass(WellKnownClasses::java_lang_invoke_MethodHandle));
+
+ Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
+ ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(receiver_handle.Get()))));
+
Handle<mirror::MethodType> method_type(
hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
@@ -2662,16 +2665,28 @@
// consecutive order.
uint32_t unused_args[Instruction::kMaxVarArgRegs] = {};
uint32_t first_callee_arg = first_arg + 1;
- if (!DoInvokePolymorphic<true /* is_range */>(self,
- resolved_method,
- *shadow_frame,
- method_handle,
- method_type,
- unused_args,
- first_callee_arg,
- result)) {
- DCHECK(self->IsExceptionPending());
+
+ bool isExact = (jni::EncodeArtMethod(resolved_method) ==
+ WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact);
+ bool success = false;
+ if (isExact) {
+ success = MethodHandleInvokeExact<true/*is_range*/>(self,
+ *shadow_frame,
+ method_handle,
+ method_type,
+ unused_args,
+ first_callee_arg,
+ result);
+ } else {
+ success = MethodHandleInvoke<true/*is_range*/>(self,
+ *shadow_frame,
+ method_handle,
+ method_type,
+ unused_args,
+ first_callee_arg,
+ result);
}
+ DCHECK(success || self->IsExceptionPending());
// Pop transition record.
self->PopManagedStackFragment(fragment);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 4d4d8ff..7beff96 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 2dc5acc..c6caf4b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -24,6 +24,7 @@
#include "base/bounded_fifo.h"
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4f54582..9f62666 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -28,6 +28,7 @@
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
+#include "base/file_utils.h"
#include "base/histogram-inl.h"
#include "base/memory_tool.h"
#include "base/stl_util.h"
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index f0eada3..74813b4 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -30,6 +30,7 @@
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/macros.h"
#include "base/scoped_flock.h"
#include "base/stl_util.h"
diff --git a/runtime/gc/space/image_space_fs.h b/runtime/gc/space/image_space_fs.h
index 5999548..bdb2eda 100644
--- a/runtime/gc/space/image_space_fs.h
+++ b/runtime/gc/space/image_space_fs.h
@@ -22,6 +22,7 @@
#include "android-base/stringprintf.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/unix_file/fd_file.h"
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index beb43df..3cd04a6 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -20,6 +20,7 @@
#include <sstream>
#include "art_field-inl.h"
+#include "base/file_utils.h"
#include "mirror/class-inl.h"
#include "mirror/object-refvisitor-inl.h"
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 071d1ae..74265bb 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -39,23 +39,23 @@
#define RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET 0x28
DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverythingForSuspendCheck))))
#define THREAD_FLAGS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread:: ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread::ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_ID_OFFSET 12
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_ID_OFFSET), (static_cast<int32_t>(art::Thread:: ThinLockIdOffset<art::kRuntimePointerSize>().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_ID_OFFSET), (static_cast<int32_t>(art::Thread::ThinLockIdOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_IS_GC_MARKING_OFFSET 52
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_IS_GC_MARKING_OFFSET), (static_cast<int32_t>(art::Thread:: IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_IS_GC_MARKING_OFFSET), (static_cast<int32_t>(art::Thread::IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_CARD_TABLE_OFFSET 136
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CARD_TABLE_OFFSET), (static_cast<int32_t>(art::Thread:: CardTableOffset<art::kRuntimePointerSize>().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CARD_TABLE_OFFSET), (static_cast<int32_t>(art::Thread::CardTableOffset<art::kRuntimePointerSize>().Int32Value())))
#define CODEITEM_INSNS_OFFSET 16
DEFINE_CHECK_EQ(static_cast<int32_t>(CODEITEM_INSNS_OFFSET), (static_cast<int32_t>(__builtin_offsetof(art::DexFile::CodeItem, insns_))))
#define MIRROR_CLASS_DEX_CACHE_OFFSET 16
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_CLASS_DEX_CACHE_OFFSET), (static_cast<int32_t>(art::mirror::Class:: DexCacheOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_CLASS_DEX_CACHE_OFFSET), (static_cast<int32_t>(art::mirror::Class::DexCacheOffset().Int32Value())))
#define MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET 48
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET), (static_cast<int32_t>(art::mirror::DexCache:: ResolvedMethodsOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET), (static_cast<int32_t>(art::mirror::DexCache::ResolvedMethodsOffset().Int32Value())))
#define MIRROR_OBJECT_CLASS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_CLASS_OFFSET), (static_cast<int32_t>(art::mirror::Object:: ClassOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_CLASS_OFFSET), (static_cast<int32_t>(art::mirror::Object::ClassOffset().Int32Value())))
#define MIRROR_OBJECT_LOCK_WORD_OFFSET 4
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_LOCK_WORD_OFFSET), (static_cast<int32_t>(art::mirror::Object:: MonitorOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_LOCK_WORD_OFFSET), (static_cast<int32_t>(art::mirror::Object::MonitorOffset().Int32Value())))
#define MIRROR_CLASS_STATUS_INITIALIZED 0xb
DEFINE_CHECK_EQ(static_cast<uint32_t>(MIRROR_CLASS_STATUS_INITIALIZED), (static_cast<uint32_t>((art::mirror::Class::kStatusInitialized))))
#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE 0x80000000
@@ -65,17 +65,17 @@
#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT 0x1f
DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT), (static_cast<uint32_t>((art::MostSignificantBit(art::kAccClassIsFinalizable)))))
#define ART_METHOD_JNI_OFFSET_32 20
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromJniOffset(art::PointerSize::k32).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_32), (static_cast<int32_t>(art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_JNI_OFFSET_64 24
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromJniOffset(art::PointerSize::k64).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_64), (static_cast<int32_t>(art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k64).Int32Value())))
#define ART_METHOD_QUICK_CODE_OFFSET_32 24
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_QUICK_CODE_OFFSET_64 32
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
#define ART_METHOD_DECLARING_CLASS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DECLARING_CLASS_OFFSET), (static_cast<int32_t>(art::ArtMethod:: DeclaringClassOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DECLARING_CLASS_OFFSET), (static_cast<int32_t>(art::ArtMethod::DeclaringClassOffset().Int32Value())))
#define ART_METHOD_ACCESS_FLAGS_OFFSET 4
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_ACCESS_FLAGS_OFFSET), (static_cast<int32_t>(art::ArtMethod:: AccessFlagsOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_ACCESS_FLAGS_OFFSET), (static_cast<int32_t>(art::ArtMethod::AccessFlagsOffset().Int32Value())))
#define STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT 3
DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT), (static_cast<int32_t>(art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))))
#define STRING_DEX_CACHE_SIZE_MINUS_ONE 1023
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 2c82cb1..49f2021 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -137,11 +137,12 @@
method->SetEntryPointFromQuickCompiledCode(quick_code);
}
-bool Instrumentation::NeedDebugVersionFor(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
- return Dbg::IsDebuggerActive() &&
- Runtime::Current()->IsJavaDebuggable() &&
+bool Instrumentation::NeedDebugVersionFor(ArtMethod* method) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Runtime::Current()->IsJavaDebuggable() &&
!method->IsNative() &&
- !method->IsProxyMethod();
+ !method->IsProxyMethod() &&
+ Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method);
}
void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 9fb9fe7..0a1ae36 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -22,6 +22,7 @@
#include "debugger.h"
#include "dex_file_types.h"
#include "entrypoints/runtime_asm_entrypoints.h"
+#include "intrinsics_enum.h"
#include "jit/jit.h"
#include "jvalue.h"
#include "method_handles-inl.h"
@@ -588,11 +589,12 @@
}
template<bool is_range>
-bool DoInvokePolymorphic(Thread* self,
- ShadowFrame& shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result)
+static bool DoMethodHandleInvokeCommon(Thread* self,
+ ShadowFrame& shadow_frame,
+ bool invoke_exact,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Make sure to check for async exceptions
if (UNLIKELY(self->ObserveAsyncException())) {
@@ -638,41 +640,381 @@
return false;
}
- ArtMethod* invoke_method =
- class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
- self, invoke_method_idx, shadow_frame.GetMethod(), kVirtual);
-
// There is a common dispatch method for method handles that takes
// arguments either from a range or an array of arguments depending
// on whether the DEX instruction is invoke-polymorphic/range or
// invoke-polymorphic. The array here is for the latter.
uint32_t args[Instruction::kMaxVarArgRegs] = {};
- if (is_range) {
+ if (UNLIKELY(is_range)) {
// VRegC is the register holding the method handle. Arguments passed
// to the method handle's target do not include the method handle.
uint32_t first_arg = inst->VRegC_4rcc() + 1;
- return DoInvokePolymorphic<is_range>(self,
- invoke_method,
- shadow_frame,
- method_handle,
- callsite_type,
- args /* unused */,
- first_arg,
- result);
+ static const bool kIsRange = true;
+ if (invoke_exact) {
+ return art::MethodHandleInvokeExact<kIsRange>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args /* unused */,
+ first_arg,
+ result);
+ } else {
+ return art::MethodHandleInvoke<kIsRange>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args /* unused */,
+ first_arg,
+ result);
+ }
} else {
// Get the register arguments for the invoke.
inst->GetVarArgs(args, inst_data);
// Drop the first register which is the method handle performing the invoke.
memmove(args, args + 1, sizeof(args[0]) * (Instruction::kMaxVarArgRegs - 1));
args[Instruction::kMaxVarArgRegs - 1] = 0;
- return DoInvokePolymorphic<is_range>(self,
- invoke_method,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- args[0],
- result);
+ static const bool kIsRange = false;
+ if (invoke_exact) {
+ return art::MethodHandleInvokeExact<kIsRange>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ args[0],
+ result);
+ } else {
+ return art::MethodHandleInvoke<kIsRange>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ args[0],
+ result);
+ }
+ }
+}
+
+bool DoMethodHandleInvokeExact(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
+ static const bool kIsRange = false;
+ return DoMethodHandleInvokeCommon<kIsRange>(
+ self, shadow_frame, true /* invoke_exact */, inst, inst_data, result);
+ } else {
+ DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
+ static const bool kIsRange = true;
+ return DoMethodHandleInvokeCommon<kIsRange>(
+ self, shadow_frame, true /* invoke_exact */, inst, inst_data, result);
+ }
+}
+
+bool DoMethodHandleInvoke(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
+ static const bool kIsRange = false;
+ return DoMethodHandleInvokeCommon<kIsRange>(
+ self, shadow_frame, false /* invoke_exact */, inst, inst_data, result);
+ } else {
+ DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
+ static const bool kIsRange = true;
+ return DoMethodHandleInvokeCommon<kIsRange>(
+ self, shadow_frame, false /* invoke_exact */, inst, inst_data, result);
+ }
+}
+
+static bool UnimplementedSignaturePolymorphicMethod(Thread* self ATTRIBUTE_UNUSED,
+ ShadowFrame& shadow_frame ATTRIBUTE_UNUSED,
+ const Instruction* inst ATTRIBUTE_UNUSED,
+ uint16_t inst_data ATTRIBUTE_UNUSED,
+ JValue* result ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ UNIMPLEMENTED(FATAL) << "TODO(oth): b/65872996";
+ return false;
+}
+
+bool DoVarHandleCompareAndExchange(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleCompareAndExchangeAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleCompareAndExchangeRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleCompareAndSet(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGet(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndAdd(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndAddAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndAddRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseAnd(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseAndAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseAndRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseOr(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseOrAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseOrRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseXor(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseXorAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseXorRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndSet(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndSetAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndSetRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetOpaque(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetVolatile(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleSet(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleSetOpaque(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleSetRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleSetVolatile(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleWeakCompareAndSet(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleWeakCompareAndSetAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleWeakCompareAndSetPlain(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleWeakCompareAndSetRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+template<bool is_range>
+bool DoInvokePolymorphic(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) {
+ const int invoke_method_idx = inst->VRegB();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ArtMethod* invoke_method =
+ class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
+ self, invoke_method_idx, shadow_frame.GetMethod(), kVirtual);
+
+ // Ensure intrinsic identifiers are initialized.
+ DCHECK(invoke_method->IsIntrinsic());
+
+ // Dispatch based on intrinsic identifier associated with method.
+ switch (static_cast<art::Intrinsics>(invoke_method->GetIntrinsic())) {
+#define CASE_SIGNATURE_POLYMORPHIC_INTRINSIC(Name, ...) \
+ case Intrinsics::k##Name: \
+ return Do ## Name(self, shadow_frame, inst, inst_data, result);
+#include "intrinsics_list.h"
+ SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(CASE_SIGNATURE_POLYMORPHIC_INTRINSIC)
+#undef INTRINSICS_LIST
+#undef SIGNATURE_POLYMORPHIC_INTRINSICS_LIST
+#undef CASE_SIGNATURE_POLYMORPHIC_INTRINSIC
+ default:
+ LOG(FATAL) << "Unreachable: " << invoke_method->GetIntrinsic();
+ UNREACHABLE();
+ return false;
}
}
@@ -839,19 +1181,16 @@
// Invoke the bootstrap method handle.
JValue result;
- // This array of arguments is unused. DoInvokePolymorphic() operates on either a
+ // This array of arguments is unused. DoMethodHandleInvokeExact() operates on either
// an argument array or a range, but always takes an array argument.
uint32_t args_unused[Instruction::kMaxVarArgRegs];
- ArtMethod* invoke_exact =
- jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact);
- bool invoke_success = DoInvokePolymorphic<true /* is_range */>(self,
- invoke_exact,
- *bootstrap_frame,
- bootstrap,
- bootstrap_method_type,
- args_unused,
- 0,
- &result);
+ bool invoke_success = art::MethodHandleInvokeExact<true /* is_range */>(self,
+ *bootstrap_frame,
+ bootstrap,
+ bootstrap_method_type,
+ args_unused,
+ 0,
+ &result);
if (!invoke_success) {
DCHECK(self->IsExceptionPending());
return nullptr;
@@ -942,16 +1281,13 @@
inst->GetVarArgs(args, inst_data);
}
- ArtMethod* invoke_exact =
- jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact);
- return DoInvokePolymorphic<is_range>(self,
- invoke_exact,
- shadow_frame,
- target,
- target_method_type,
- args,
- args[0],
- result);
+ return art::MethodHandleInvokeExact<is_range>(self,
+ shadow_frame,
+ target,
+ target_method_type,
+ args,
+ args[0],
+ result);
}
template <bool is_range>
@@ -1344,16 +1680,6 @@
EXPLICIT_DO_CALL_TEMPLATE_DECL(true, true);
#undef EXPLICIT_DO_CALL_TEMPLATE_DECL
-// Explicit DoInvokeCustom template function declarations.
-#define EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(_is_range) \
- template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoInvokeCustom<_is_range>( \
- Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
- uint16_t inst_data, JValue* result)
-EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(false);
-EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(true);
-#undef EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL
-
// Explicit DoInvokePolymorphic template function declarations.
#define EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(_is_range) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
@@ -1364,6 +1690,16 @@
EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(true);
#undef EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL
+// Explicit DoInvokeCustom template function declarations.
+#define EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(_is_range) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
+ bool DoInvokeCustom<_is_range>( \
+ Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
+ uint16_t inst_data, JValue* result)
+EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(false);
+EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(true);
+#undef EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL
+
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index e7f67eb..f097bc7 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -222,7 +222,18 @@
return class_linker->ResolveMethodType(self, method_type_index, referrer);
}
-// Performs a signature polymorphic invoke (invoke-polymorphic/invoke-polymorphic-range).
+#define DECLARE_SIGNATURE_POLYMORPHIC_HANDLER(Name, ...) \
+bool Do ## Name(Thread* self, \
+ ShadowFrame& shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
+#include "intrinsics_list.h"
+INTRINSICS_LIST(DECLARE_SIGNATURE_POLYMORPHIC_HANDLER)
+#undef INTRINSICS_LIST
+#undef DECLARE_SIGNATURE_POLYMORPHIC_HANDLER
+
+// Performs an invoke-polymorphic or invoke-polymorphic-range.
template<bool is_range>
bool DoInvokePolymorphic(Thread* self,
ShadowFrame& shadow_frame,
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 26de6b4..37593bc 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -16,8 +16,8 @@
#include "interpreter/interpreter_intrinsics.h"
-#include "compiler/intrinsics_enum.h"
#include "dex_instruction.h"
+#include "intrinsics_enum.h"
#include "interpreter/interpreter_common.h"
namespace art {
@@ -323,14 +323,14 @@
return true;
}
-#define VARHANDLE_FENCE_INTRINSIC(name, std_memory_operation) \
-static ALWAYS_INLINE bool name(ShadowFrame* /* shadow_frame */, \
- const Instruction* /* inst */, \
- uint16_t /* inst_data */, \
- JValue* /* result_register */) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- std::atomic_thread_fence(std_memory_operation); \
- return true; \
+#define VARHANDLE_FENCE_INTRINSIC(name, std_memory_operation) \
+static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame ATTRIBUTE_UNUSED, \
+ const Instruction* inst ATTRIBUTE_UNUSED, \
+ uint16_t inst_data ATTRIBUTE_UNUSED, \
+ JValue* result_register ATTRIBUTE_UNUSED) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ std::atomic_thread_fence(std_memory_operation); \
+ return true; \
}
// The VarHandle fence methods are static (unlike sun.misc.Unsafe versions).
@@ -342,6 +342,63 @@
VARHANDLE_FENCE_INTRINSIC(MterpVarHandleLoadLoadFence, std::memory_order_acquire)
VARHANDLE_FENCE_INTRINSIC(MterpVarHandleStoreStoreFence, std::memory_order_release)
+#define METHOD_HANDLE_INVOKE_INTRINSIC(name) \
+static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) { \
+ return DoInvokePolymorphic<false>(Thread::Current(), *shadow_frame, inst, inst_data, result); \
+ } else { \
+ return DoInvokePolymorphic<true>(Thread::Current(), *shadow_frame, inst, inst_data, result); \
+ } \
+}
+
+METHOD_HANDLE_INVOKE_INTRINSIC(MethodHandleInvokeExact)
+METHOD_HANDLE_INVOKE_INTRINSIC(MethodHandleInvoke)
+
+#define VAR_HANDLE_ACCESSOR_INTRINSIC(name) \
+static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return Do##name(Thread::Current(), *shadow_frame, inst, inst_data, result); \
+}
+
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchange)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchangeAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchangeRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndSet)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGet)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAdd)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAddAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAddRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAnd)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAndAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAndRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOr)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOrAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOrRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXor)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXorAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXorRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSet)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSetAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSetRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetOpaque)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetVolatile)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSet)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetOpaque)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetVolatile)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSet)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetPlain)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetRelease)
+
// Macro to help keep track of what's left to implement.
#define UNIMPLEMENTED_CASE(name) \
case Intrinsics::k##name: \
@@ -494,6 +551,39 @@
INTRINSIC_CASE(VarHandleReleaseFence)
INTRINSIC_CASE(VarHandleLoadLoadFence)
INTRINSIC_CASE(VarHandleStoreStoreFence)
+ INTRINSIC_CASE(MethodHandleInvokeExact)
+ INTRINSIC_CASE(MethodHandleInvoke)
+ INTRINSIC_CASE(VarHandleCompareAndExchange)
+ INTRINSIC_CASE(VarHandleCompareAndExchangeAcquire)
+ INTRINSIC_CASE(VarHandleCompareAndExchangeRelease)
+ INTRINSIC_CASE(VarHandleCompareAndSet)
+ INTRINSIC_CASE(VarHandleGet)
+ INTRINSIC_CASE(VarHandleGetAcquire)
+ INTRINSIC_CASE(VarHandleGetAndAdd)
+ INTRINSIC_CASE(VarHandleGetAndAddAcquire)
+ INTRINSIC_CASE(VarHandleGetAndAddRelease)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseAnd)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseAndAcquire)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseAndRelease)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseOr)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseOrAcquire)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseOrRelease)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseXor)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseXorAcquire)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseXorRelease)
+ INTRINSIC_CASE(VarHandleGetAndSet)
+ INTRINSIC_CASE(VarHandleGetAndSetAcquire)
+ INTRINSIC_CASE(VarHandleGetAndSetRelease)
+ INTRINSIC_CASE(VarHandleGetOpaque)
+ INTRINSIC_CASE(VarHandleGetVolatile)
+ INTRINSIC_CASE(VarHandleSet)
+ INTRINSIC_CASE(VarHandleSetOpaque)
+ INTRINSIC_CASE(VarHandleSetRelease)
+ INTRINSIC_CASE(VarHandleSetVolatile)
+ INTRINSIC_CASE(VarHandleWeakCompareAndSet)
+ INTRINSIC_CASE(VarHandleWeakCompareAndSetAcquire)
+ INTRINSIC_CASE(VarHandleWeakCompareAndSetPlain)
+ INTRINSIC_CASE(VarHandleWeakCompareAndSetRelease)
case Intrinsics::kNone:
res = false;
break;
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 7a8ae9a..31e7986 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1636,6 +1636,18 @@
result->SetI((obj != nullptr) ? obj->IdentityHashCode() : 0);
}
+// Checks whether the runtime is 64-bit. This is needed for the clinit of
+// java.lang.invoke.VarHandle. The clinit determines the set of available
+// VarHandle accessors and these differ based on the machine word size.
+void UnstartedRuntime::UnstartedJNIVMRuntimeIs64Bit(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ jboolean is64bit = (pointer_size == PointerSize::k64) ? JNI_TRUE : JNI_FALSE;
+ result->SetZ(is64bit);
+}
+
void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args, JValue* result) {
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index e7047c7..c029e07 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -80,6 +80,7 @@
// Methods that are native.
#define UNSTARTED_RUNTIME_JNI_LIST(V) \
+ V(VMRuntimeIs64Bit, "boolean dalvik.system.VMRuntime.is64Bit()") \
V(VMRuntimeNewUnpaddedArray, "java.lang.Object dalvik.system.VMRuntime.newUnpaddedArray(java.lang.Class, int)") \
V(VMStackGetCallingClassLoader, "java.lang.ClassLoader dalvik.system.VMStack.getCallingClassLoader()") \
V(VMStackGetStackClass2, "java.lang.Class dalvik.system.VMStack.getStackClass2()") \
diff --git a/compiler/intrinsics_enum.h b/runtime/intrinsics_enum.h
similarity index 88%
rename from compiler/intrinsics_enum.h
rename to runtime/intrinsics_enum.h
index 5528181..d46d0cc 100644
--- a/compiler/intrinsics_enum.h
+++ b/runtime/intrinsics_enum.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_INTRINSICS_ENUM_H_
-#define ART_COMPILER_INTRINSICS_ENUM_H_
+#ifndef ART_RUNTIME_INTRINSICS_ENUM_H_
+#define ART_RUNTIME_INTRINSICS_ENUM_H_
namespace art {
@@ -32,4 +32,4 @@
} // namespace art
-#endif // ART_COMPILER_INTRINSICS_ENUM_H_
+#endif // ART_RUNTIME_INTRINSICS_ENUM_H_
diff --git a/compiler/intrinsics_list.h b/runtime/intrinsics_list.h
similarity index 71%
rename from compiler/intrinsics_list.h
rename to runtime/intrinsics_list.h
index bfefead..d007728 100644
--- a/compiler/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -14,23 +14,76 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_INTRINSICS_LIST_H_
-#define ART_COMPILER_INTRINSICS_LIST_H_
+#ifndef ART_RUNTIME_INTRINSICS_LIST_H_
+#define ART_RUNTIME_INTRINSICS_LIST_H_
-// All intrinsics supported by ART. Format is name, then whether it is expected
-// to be a HInvokeStaticOrDirect node (compared to HInvokeVirtual), then whether it requires an
-// environment, may have side effects, or may throw exceptions.
-
+// This file defines the set of intrinsics that are supported by ART
+// in the compiler and runtime. Neither the compiler nor the runtime
+// implements intrinsics for all of the methods listed here.
+//
+// The entries in the INTRINSICS_LIST below have the following format:
+//
+// 1. name
+// 2. invocation-type (art::InvokeType value)
+// 3. needs-environment (art::IntrinsicNeedsEnvironmentOrCache value)
+// 4. side-effects (art::IntrinsicSideEffects value)
+// 5. exception-info (art::IntrinsicExceptions value)
+// 6. declaring class descriptor
+// 7. method name
+// 8. method descriptor
+//
+// The needs-environment, side-effects and exception-info are
+// compiler-related properties (compiler/optimizing/nodes.h) that should
+// not be used outside of the compiler.
+//
// Note: adding a new intrinsic requires an art image version change,
// as the modifiers flag for some ArtMethods will need to be changed.
+//
+// Note: j.l.Integer.valueOf says kNoThrow even though it could throw an
+// OOME. The kNoThrow should be renamed to kNoVisibleThrow, as it is ok to
+// GVN Integer.valueOf (kNoSideEffects), and it is also OK to remove it if
+// it's unused.
+//
+// Note: Thread.interrupted is marked with kAllSideEffects due to the lack
+// of finer grain side effects representation.
-// Note: j.l.Integer.valueOf says kNoThrow even though it could throw an OOME.
-// The kNoThrow should be renamed to kNoVisibleThrow, as it is ok to GVN Integer.valueOf
-// (kNoSideEffects), and it is also OK to remove it if it's unused.
+// Intrinsics for methods with signature polymorphic behaviours.
+#define SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V) \
+ V(MethodHandleInvokeExact, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/MethodHandle;", "invokeExact", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(MethodHandleInvoke, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/MethodHandle;", "invoke", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleCompareAndExchange, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "compareAndExchange", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleCompareAndExchangeAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "compareAndExchangeAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleCompareAndExchangeRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "compareAndExchangeRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleCompareAndSet, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "compareAndSet", "([Ljava/lang/Object;)Z") \
+ V(VarHandleGet, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "get", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndAdd, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndAdd", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndAddAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndAddAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndAddRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndAddRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseAnd, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseAnd", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseAndAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseAndAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseAndRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseAndRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseOr, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseOr", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseOrAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseOrAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseOrRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseOrRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseXor, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseXor", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseXorAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseXorAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseXorRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseXorRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndSet, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndSet", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndSetAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndSetAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndSetRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndSetRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetOpaque, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getOpaque", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetVolatile, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getVolatile", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleSet, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "set", "([Ljava/lang/Object;)V") \
+ V(VarHandleSetOpaque, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "setOpaque", "([Ljava/lang/Object;)V") \
+ V(VarHandleSetRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "setRelease", "([Ljava/lang/Object;)V") \
+ V(VarHandleSetVolatile, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "setVolatile", "([Ljava/lang/Object;)V") \
+ V(VarHandleWeakCompareAndSet, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "weakCompareAndSet", "([Ljava/lang/Object;)Z") \
+ V(VarHandleWeakCompareAndSetAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "weakCompareAndSetAcquire", "([Ljava/lang/Object;)Z") \
+ V(VarHandleWeakCompareAndSetPlain, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "weakCompareAndSetPlain", "([Ljava/lang/Object;)Z") \
+ V(VarHandleWeakCompareAndSetRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "weakCompareAndSetRelease", "([Ljava/lang/Object;)Z")
-// Note: Thread.interrupted is marked with kAllSideEffects due to the lack of finer grain
-// side effects representation.
-
+// The complete list of intrinsics.
#define INTRINSICS_LIST(V) \
V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J") \
V(DoubleDoubleToLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToLongBits", "(D)J") \
@@ -164,6 +217,7 @@
V(VarHandleReleaseFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "releaseFence", "()V") \
V(VarHandleLoadLoadFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "loadLoadFence", "()V") \
V(VarHandleStoreStoreFence, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "storeStoreFence", "()V") \
+ SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V)
-#endif // ART_COMPILER_INTRINSICS_LIST_H_
-#undef ART_COMPILER_INTRINSICS_LIST_H_ // #define is only for lint.
+#endif // ART_RUNTIME_INTRINSICS_LIST_H_
+#undef ART_RUNTIME_INTRINSICS_LIST_H_ // #define is only for lint.
diff --git a/runtime/invoke_type.h b/runtime/invoke_type.h
index a003f7f..2b877e6 100644
--- a/runtime/invoke_type.h
+++ b/runtime/invoke_type.h
@@ -22,12 +22,13 @@
namespace art {
enum InvokeType : uint32_t {
- kStatic, // <<static>>
- kDirect, // <<direct>>
- kVirtual, // <<virtual>>
- kSuper, // <<super>>
- kInterface, // <<interface>>
- kMaxInvokeType = kInterface
+ kStatic, // <<static>>
+ kDirect, // <<direct>>
+ kVirtual, // <<virtual>>
+ kSuper, // <<super>>
+ kInterface, // <<interface>>
+ kPolymorphic, // <<polymorphic>>
+ kMaxInvokeType = kPolymorphic
};
std::ostream& operator<<(std::ostream& os, const InvokeType& rhs);
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 97a3b71..72b5a94 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -272,9 +272,12 @@
DCHECK(Runtime::Current()->UseJitCompilation());
DCHECK(!method->IsRuntimeMethod());
+ RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
// Don't compile the method if it has breakpoints.
- if (Dbg::IsDebuggerActive() && Dbg::MethodHasAnyBreakpoints(method)) {
- VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to breakpoint";
+ if (cb->IsMethodBeingInspected(method) && !cb->IsMethodSafeToJit(method)) {
+ VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
+ << " due to not being safe to jit according to runtime-callbacks. For example, there"
+ << " could be breakpoints in this method.";
return false;
}
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 19501de..805b9c1 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -34,6 +34,7 @@
#include "base/arena_allocator.h"
#include "base/dumpable.h"
+#include "base/file_utils.h"
#include "base/mutex.h"
#include "base/scoped_flock.h"
#include "base/stl_util.h"
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 743604c..7f68d2f 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -34,6 +34,7 @@
#include "base/allocator.h"
#include "base/bit_utils.h"
+#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "globals.h"
#include "utils.h"
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 65f39e4..5a5d571 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -355,15 +355,6 @@
num_method_params);
}
-inline bool IsMethodHandleInvokeExact(const ArtMethod* const method) {
- if (method == jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact)) {
- return true;
- } else {
- DCHECK_EQ(method, jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invoke));
- return false;
- }
-}
-
inline bool IsInvoke(const mirror::MethodHandle::Kind handle_kind) {
return handle_kind <= mirror::MethodHandle::Kind::kLastInvokeKind;
}
@@ -416,15 +407,14 @@
}
template <bool is_range>
-static inline bool DoCallPolymorphic(ArtMethod* called_method,
- Handle<mirror::MethodType> callsite_type,
- Handle<mirror::MethodType> target_type,
- Thread* self,
- ShadowFrame& shadow_frame,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+static inline bool MethodHandleInvokeMethod(ArtMethod* called_method,
+ Handle<mirror::MethodType> callsite_type,
+ Handle<mirror::MethodType> target_type,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
// Compute method information.
const DexFile::CodeItem* code_item = called_method->GetCodeItem();
@@ -552,15 +542,15 @@
}
template <bool is_range>
-static inline bool DoCallTransform(ArtMethod* called_method,
- Handle<mirror::MethodType> callsite_type,
- Handle<mirror::MethodType> callee_type,
- Thread* self,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> receiver,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+static inline bool MethodHandleInvokeTransform(ArtMethod* called_method,
+ Handle<mirror::MethodType> callsite_type,
+ Handle<mirror::MethodType> callee_type,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> receiver,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
// This can be fixed to two, because the method we're calling here
// (MethodHandle.transformInternal) doesn't have any locals and the signature
@@ -753,34 +743,34 @@
Handle<mirror::MethodType> callee_type =
(handle_kind == mirror::MethodHandle::Kind::kInvokeCallSiteTransform) ? callsite_type
: handle_type;
- return DoCallTransform<is_range>(called_method,
- callsite_type,
- callee_type,
- self,
- shadow_frame,
- method_handle /* receiver */,
- args,
- first_arg,
- result);
+ return MethodHandleInvokeTransform<is_range>(called_method,
+ callsite_type,
+ callee_type,
+ self,
+ shadow_frame,
+ method_handle /* receiver */,
+ args,
+ first_arg,
+ result);
} else {
- return DoCallPolymorphic<is_range>(called_method,
- callsite_type,
- handle_type,
- self,
- shadow_frame,
- args,
- first_arg,
- result);
+ return MethodHandleInvokeMethod<is_range>(called_method,
+ callsite_type,
+ handle_type,
+ self,
+ shadow_frame,
+ args,
+ first_arg,
+ result);
}
}
// Helper for getters in invoke-polymorphic.
-inline static void DoFieldGetForInvokePolymorphic(Thread* self,
- const ShadowFrame& shadow_frame,
- ObjPtr<mirror::Object>& obj,
- ArtField* field,
- Primitive::Type field_type,
- JValue* result)
+inline static void MethodHandleFieldGet(Thread* self,
+ const ShadowFrame& shadow_frame,
+ ObjPtr<mirror::Object>& obj,
+ ArtField* field,
+ Primitive::Type field_type,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
switch (field_type) {
case Primitive::kPrimBoolean:
@@ -817,12 +807,12 @@
}
// Helper for setters in invoke-polymorphic.
-inline bool DoFieldPutForInvokePolymorphic(Thread* self,
- ShadowFrame& shadow_frame,
- ObjPtr<mirror::Object>& obj,
- ArtField* field,
- Primitive::Type field_type,
- JValue& value)
+inline bool MethodHandleFieldPut(Thread* self,
+ ShadowFrame& shadow_frame,
+ ObjPtr<mirror::Object>& obj,
+ ArtField* field,
+ Primitive::Type field_type,
+ JValue& value)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!Runtime::Current()->IsActiveTransaction());
static const bool kTransaction = false; // Not in a transaction.
@@ -895,14 +885,13 @@
}
template <bool is_range, bool do_conversions>
-bool DoInvokePolymorphicFieldAccess(Thread* self,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+bool MethodHandleFieldAccess(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::MethodType> handle_type(hs.NewHandle(method_handle->GetMethodType()));
const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
@@ -913,7 +902,7 @@
case mirror::MethodHandle::kInstanceGet: {
size_t obj_reg = is_range ? first_arg : args[0];
ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(obj_reg);
- DoFieldGetForInvokePolymorphic(self, shadow_frame, obj, field, field_type, result);
+ MethodHandleFieldGet(self, shadow_frame, obj, field, field_type, result);
if (do_conversions && !ConvertReturnValue(callsite_type, handle_type, result)) {
DCHECK(self->IsExceptionPending());
return false;
@@ -926,7 +915,7 @@
DCHECK(self->IsExceptionPending());
return false;
}
- DoFieldGetForInvokePolymorphic(self, shadow_frame, obj, field, field_type, result);
+ MethodHandleFieldGet(self, shadow_frame, obj, field, field_type, result);
if (do_conversions && !ConvertReturnValue(callsite_type, handle_type, result)) {
DCHECK(self->IsExceptionPending());
return false;
@@ -951,7 +940,7 @@
return false;
}
ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(obj_reg);
- return DoFieldPutForInvokePolymorphic(self, shadow_frame, obj, field, field_type, value);
+ return MethodHandleFieldPut(self, shadow_frame, obj, field, field_type, value);
}
case mirror::MethodHandle::kStaticPut: {
ObjPtr<mirror::Object> obj = GetAndInitializeDeclaringClass(self, field);
@@ -974,7 +963,7 @@
DCHECK(self->IsExceptionPending());
return false;
}
- return DoFieldPutForInvokePolymorphic(self, shadow_frame, obj, field, field_type, value);
+ return MethodHandleFieldPut(self, shadow_frame, obj, field, field_type, value);
}
default:
LOG(FATAL) << "Unreachable: " << handle_kind;
@@ -983,26 +972,24 @@
}
template <bool is_range>
-static inline bool DoInvokePolymorphicNonExact(Thread* self,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+static inline bool MethodHandleInvokeInternal(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
- ObjPtr<mirror::MethodType> handle_type(method_handle->GetMethodType());
- CHECK(handle_type != nullptr);
-
if (IsFieldAccess(handle_kind)) {
+ ObjPtr<mirror::MethodType> handle_type(method_handle->GetMethodType());
DCHECK(!callsite_type->IsExactMatch(handle_type.Ptr()));
if (!callsite_type->IsConvertible(handle_type.Ptr())) {
ThrowWrongMethodTypeException(handle_type.Ptr(), callsite_type.Get());
return false;
}
const bool do_convert = true;
- return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
+ return MethodHandleFieldAccess<is_range, do_convert>(
self,
shadow_frame,
method_handle,
@@ -1011,7 +998,6 @@
first_arg,
result);
}
-
return DoInvokePolymorphicMethod<is_range>(self,
shadow_frame,
method_handle,
@@ -1022,27 +1008,32 @@
}
template <bool is_range>
-bool DoInvokePolymorphicExact(Thread* self,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+static inline bool MethodHandleInvokeExactInternal(
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
- const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
Handle<mirror::MethodType> method_handle_type(hs.NewHandle(method_handle->GetMethodType()));
+ if (!callsite_type->IsExactMatch(method_handle_type.Get())) {
+ ThrowWrongMethodTypeException(method_handle_type.Get(), callsite_type.Get());
+ return false;
+ }
+
+ const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
if (IsFieldAccess(handle_kind)) {
const bool do_convert = false;
- return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
- self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
+ return MethodHandleFieldAccess<is_range, do_convert>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
}
// Slow-path check.
@@ -1120,77 +1111,77 @@
} // namespace
template <bool is_range>
-bool DoInvokePolymorphic(Thread* self,
- ArtMethod* invoke_method,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+inline bool MethodHandleInvoke(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::MethodType> method_handle_type = method_handle->GetMethodType();
- if (IsMethodHandleInvokeExact(invoke_method)) {
- // We need to check the nominal type of the handle in addition to the
- // real type. The "nominal" type is present when MethodHandle.asType is
- // called any handle, and results in the declared type of the handle
- // changing.
- ObjPtr<mirror::MethodType> nominal_type(method_handle->GetNominalType());
- if (UNLIKELY(nominal_type != nullptr)) {
- if (UNLIKELY(!callsite_type->IsExactMatch(nominal_type.Ptr()))) {
- ThrowWrongMethodTypeException(nominal_type.Ptr(), callsite_type.Get());
- return false;
- }
-
- if (LIKELY(!nominal_type->IsExactMatch(method_handle_type.Ptr()))) {
- // Different nominal type means we have to treat as non-exact.
- return DoInvokePolymorphicNonExact<is_range>(self,
+ if (UNLIKELY(callsite_type->IsExactMatch(method_handle->GetMethodType()))) {
+ // A non-exact invoke that can be invoked exactly.
+ return MethodHandleInvokeExactInternal<is_range>(self,
shadow_frame,
method_handle,
callsite_type,
args,
first_arg,
result);
- }
- }
-
- if (!callsite_type->IsExactMatch(method_handle_type.Ptr())) {
- ThrowWrongMethodTypeException(method_handle_type.Ptr(), callsite_type.Get());
- return false;
- }
- return DoInvokePolymorphicExact<is_range>(self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
} else {
- if (UNLIKELY(callsite_type->IsExactMatch(method_handle_type.Ptr()))) {
- // A non-exact invoke that can be invoked exactly.
- return DoInvokePolymorphicExact<is_range>(self,
+ return MethodHandleInvokeInternal<is_range>(self,
shadow_frame,
method_handle,
callsite_type,
args,
first_arg,
result);
- }
- return DoInvokePolymorphicNonExact<is_range>(self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
}
}
-#define EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(_is_range) \
+template <bool is_range>
+bool MethodHandleInvokeExact(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // We need to check the nominal type of the handle in addition to the
+ // real type. The "nominal" type is present when MethodHandle.asType is
+ // called on a handle, and results in the declared type of the handle
+ // changing.
+ ObjPtr<mirror::MethodType> nominal_type(method_handle->GetNominalType());
+ if (UNLIKELY(nominal_type != nullptr)) {
+ if (UNLIKELY(!callsite_type->IsExactMatch(nominal_type.Ptr()))) {
+ ThrowWrongMethodTypeException(nominal_type.Ptr(), callsite_type.Get());
+ return false;
+ }
+ if (LIKELY(!nominal_type->IsExactMatch(method_handle->GetMethodType()))) {
+ // Different nominal type means we have to treat as non-exact.
+ return MethodHandleInvokeInternal<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
+ }
+ }
+ return MethodHandleInvokeExactInternal<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
+}
+
+#define EXPLICIT_DO_METHOD_HANDLE_METHOD(_name, _is_range) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoInvokePolymorphic<_is_range>( \
+ bool MethodHandle##_name<_is_range>( \
Thread* self, \
- ArtMethod* invoke_method, \
ShadowFrame& shadow_frame, \
Handle<mirror::MethodHandle> method_handle, \
Handle<mirror::MethodType> callsite_type, \
@@ -1198,8 +1189,10 @@
uint32_t first_arg, \
JValue* result)
-EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(true);
-EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(false);
-#undef EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL
+EXPLICIT_DO_METHOD_HANDLE_METHOD(Invoke, true);
+EXPLICIT_DO_METHOD_HANDLE_METHOD(Invoke, false);
+EXPLICIT_DO_METHOD_HANDLE_METHOD(InvokeExact, true);
+EXPLICIT_DO_METHOD_HANDLE_METHOD(InvokeExact, false);
+#undef EXPLICIT_DO_METHOD_HANDLE_METHOD
} // namespace art
diff --git a/runtime/method_handles.h b/runtime/method_handles.h
index 55680f0..8641918 100644
--- a/runtime/method_handles.h
+++ b/runtime/method_handles.h
@@ -202,14 +202,23 @@
};
template <bool is_range>
-bool DoInvokePolymorphic(Thread* self,
- ArtMethod* invoke_method,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+bool MethodHandleInvoke(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+template <bool is_range>
+bool MethodHandleInvokeExact(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace art
diff --git a/runtime/mirror/emulated_stack_frame.cc b/runtime/mirror/emulated_stack_frame.cc
index a6129ccc..f82bfbf 100644
--- a/runtime/mirror/emulated_stack_frame.cc
+++ b/runtime/mirror/emulated_stack_frame.cc
@@ -289,7 +289,7 @@
static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
-// Explicit DoInvokePolymorphic template function declarations.
+// Explicit CreateFromShadowFrameAndArgs template function declarations.
#define EXPLICIT_CREATE_FROM_SHADOW_FRAME_AND_ARGS_DECL(_is_range) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
mirror::EmulatedStackFrame* EmulatedStackFrame::CreateFromShadowFrameAndArgs<_is_range>( \
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 4ab8908..7d9d8be 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -20,9 +20,11 @@
#include "android-base/stringprintf.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "class_linker.h"
+#include <class_loader_context.h>
#include "common_throws.h"
#include "compiler_filter.h"
#include "dex_file-inl.h"
@@ -459,6 +461,7 @@
const char* filename,
const char* instruction_set,
const char* compiler_filter_name,
+ const char* class_loader_context,
bool profile_changed,
bool downgrade) {
if ((filename == nullptr) || !OS::FileExists(filename)) {
@@ -485,6 +488,19 @@
return -1;
}
+ std::unique_ptr<ClassLoaderContext> context = nullptr;
+ if (class_loader_context != nullptr) {
+ context = ClassLoaderContext::Create(class_loader_context);
+
+ if (context == nullptr) {
+ ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
+ std::string message(StringPrintf("Class loader context '%s' is invalid.",
+ class_loader_context));
+ env->ThrowNew(iae.get(), message.c_str());
+ return -1;
+ }
+ }
+
// TODO: Verify the dex location is well formed, and throw an IOException if
// not?
@@ -495,8 +511,10 @@
return OatFileAssistant::kNoDexOptNeeded;
}
- // TODO(calin): Extend DexFile.getDexOptNeeded to accept the class loader context. b/62269291.
- return oat_file_assistant.GetDexOptNeeded(filter, profile_changed, downgrade);
+ return oat_file_assistant.GetDexOptNeeded(filter,
+ profile_changed,
+ downgrade,
+ context.get());
}
static jstring DexFile_getDexFileStatus(JNIEnv* env,
@@ -532,6 +550,7 @@
jstring javaFilename,
jstring javaInstructionSet,
jstring javaTargetCompilerFilter,
+ jstring javaClassLoaderContext,
jboolean newProfile,
jboolean downgrade) {
ScopedUtfChars filename(env, javaFilename);
@@ -549,10 +568,16 @@
return -1;
}
+ NullableScopedUtfChars class_loader_context(env, javaClassLoaderContext);
+ if (env->ExceptionCheck()) {
+ return -1;
+ }
+
return GetDexOptNeeded(env,
filename.c_str(),
instruction_set.c_str(),
target_compiler_filter.c_str(),
+ class_loader_context.c_str(),
newProfile == JNI_TRUE,
downgrade == JNI_TRUE);
}
@@ -731,7 +756,7 @@
NATIVE_METHOD(DexFile, getClassNameList, "(Ljava/lang/Object;)[Ljava/lang/String;"),
NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
NATIVE_METHOD(DexFile, getDexOptNeeded,
- "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;ZZ)I"),
+ "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;ZZ)I"),
NATIVE_METHOD(DexFile, openDexFileNative,
"(Ljava/lang/String;"
"Ljava/lang/String;"
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index dd98e25..f5057b0 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -16,6 +16,7 @@
#include "org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "debugger.h"
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 7e16357..f166714 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -40,6 +40,7 @@
#include "android-base/stringprintf.h"
#include "arch/instruction_set.h"
+#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/unix_file/fd_file.h"
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 1269dca..d64986e 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -38,6 +38,7 @@
#include "art_method.h"
#include "base/bit_vector.h"
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index a7fe9b1..9f6bf69 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -23,6 +23,7 @@
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "class_linker.h"
@@ -1168,12 +1169,13 @@
const OatFile* file = GetFile();
if (file == nullptr) {
- return false;
+ // No oat file means we have nothing to verify.
+ return true;
}
- size_t dir_index = file->GetLocation().rfind('/');
+ size_t dir_index = oat_file_assistant_->dex_location_.rfind('/');
std::string classpath_dir = (dir_index != std::string::npos)
- ? file->GetLocation().substr(0, dir_index)
+ ? oat_file_assistant_->dex_location_.substr(0, dir_index)
: "";
if (!context->OpenDexFiles(oat_file_assistant_->isa_, classpath_dir)) {
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index d99036d..6d14971 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -1466,6 +1466,33 @@
default_filter, false, false, updated_context.get()));
}
+TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ std::string context_location = GetScratchDir() + "/ContextDex.jar";
+ Copy(GetDexSrc1(), dex_location);
+ Copy(GetDexSrc2(), context_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ const CompilerFilter::Filter default_filter =
+ OatFileAssistant::kDefaultCompilerFilterForDexLoading;
+ std::string error_msg;
+ std::string context_str = "PCL[" + context_location + "]";
+ std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
+ ASSERT_TRUE(context != nullptr);
+ ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
+
+ int status = oat_file_assistant.MakeUpToDate(false, context.get(), &error_msg);
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+
+ // A relative context simulates a dependent split context.
+ std::unique_ptr<ClassLoaderContext> relative_context =
+ ClassLoaderContext::Create("PCL[ContextDex.jar]");
+ EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(
+ default_filter, false, false, relative_context.get()));
+}
+
// TODO: More Tests:
// * Test class linker falls back to unquickened dex for DexNoOat
// * Test class linker falls back to unquickened dex for MultiDexNoOat
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 940195c..ee35d9c 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -25,6 +25,7 @@
#include "art_field-inl.h"
#include "base/bit_vector-inl.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/systrace.h"
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 71d7b6c..526f6d1 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -18,6 +18,7 @@
#include <sstream>
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/stringpiece.h"
#include "debugger.h"
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index c88799c..139de2b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -60,6 +60,7 @@
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "base/stl_util.h"
#include "base/systrace.h"
@@ -143,6 +144,7 @@
#include "quick/quick_method_frame_info.h"
#include "reflection.h"
#include "runtime_callbacks.h"
+#include "runtime_intrinsics.h"
#include "runtime_options.h"
#include "scoped_thread_state_change-inl.h"
#include "sigchain.h"
@@ -738,6 +740,11 @@
InitNativeMethods();
}
+ // InitializeIntrinsics needs to be called after WellKnownClasses::Init in InitNativeMethods
+ // because checking the invocation types of intrinsic methods requires ArtMethod::GetInvokeType(),
+ // which needs the SignaturePolymorphic annotation class initialized in WellKnownClasses::Init.
+ InitializeIntrinsics();
+
// Initialize well known thread group values that may be accessed by threads while attaching.
InitThreadGroups(self);
diff --git a/runtime/runtime_callbacks.cc b/runtime/runtime_callbacks.cc
index f164f7c..339fe82 100644
--- a/runtime/runtime_callbacks.cc
+++ b/runtime/runtime_callbacks.cc
@@ -43,6 +43,17 @@
Remove(cb, &method_inspection_callbacks_);
}
+bool RuntimeCallbacks::IsMethodSafeToJit(ArtMethod* m) {
+ for (MethodInspectionCallback* cb : method_inspection_callbacks_) {
+ if (!cb->IsMethodSafeToJit(m)) {
+ DCHECK(cb->IsMethodBeingInspected(m))
+ << "Contract requires that !IsMethodSafeToJit(m) -> IsMethodBeingInspected(m)";
+ return false;
+ }
+ }
+ return true;
+}
+
bool RuntimeCallbacks::IsMethodBeingInspected(ArtMethod* m) {
for (MethodInspectionCallback* cb : method_inspection_callbacks_) {
if (cb->IsMethodBeingInspected(m)) {
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index c936049..c1ba964 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -104,6 +104,11 @@
// Returns true if the method is being inspected currently and the runtime should not modify it in
// potentially dangerous ways (i.e. replace with compiled version, JIT it, etc).
virtual bool IsMethodBeingInspected(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+ // Returns true if the method is safe to JIT, false otherwise.
+ // Note that '!IsMethodSafeToJit(m) implies IsMethodBeingInspected(m)'. That is, if this
+ // method returns false, IsMethodBeingInspected must return true.
+ virtual bool IsMethodSafeToJit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
class RuntimeCallbacks {
@@ -167,6 +172,11 @@
// on by some code.
bool IsMethodBeingInspected(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns false if some MethodInspectionCallback indicates the method cannot be safely JITed
+ // (which implies that it is being inspected). Returns true otherwise. If it returns false, the
+ // entrypoint should not be changed to JITed code.
+ bool IsMethodSafeToJit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+
void AddMethodInspectionCallback(MethodInspectionCallback* cb)
REQUIRES_SHARED(Locks::mutator_lock_);
void RemoveMethodInspectionCallback(MethodInspectionCallback* cb)
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
index 940e461..eb69d91 100644
--- a/runtime/runtime_common.cc
+++ b/runtime/runtime_common.cc
@@ -25,6 +25,7 @@
#include "android-base/stringprintf.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
diff --git a/runtime/runtime_intrinsics.cc b/runtime/runtime_intrinsics.cc
new file mode 100644
index 0000000..f710ebe
--- /dev/null
+++ b/runtime/runtime_intrinsics.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runtime_intrinsics.h"
+
+#include "art_method-inl.h"
+#include "class_linker.h"
+#include "intrinsics_enum.h"
+#include "invoke_type.h"
+#include "mirror/class.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread.h"
+
+namespace art {
+
+namespace {
+
+// Initialize an intrinsic. Returns true if the intrinsic is already
+// initialized, false otherwise.
+bool InitializeIntrinsic(Thread* self,
+ Intrinsics intrinsic,
+ InvokeType invoke_type,
+ const char* class_name,
+ const char* method_name,
+ const char* signature)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ PointerSize image_size = class_linker->GetImagePointerSize();
+ ObjPtr<mirror::Class> cls = class_linker->FindSystemClass(self, class_name);
+ if (cls == nullptr) {
+ LOG(FATAL) << "Could not find class of intrinsic " << class_name;
+ }
+
+ ArtMethod* method = cls->FindClassMethod(method_name, signature, image_size);
+ if (method == nullptr || method->GetDeclaringClass() != cls) {
+ LOG(FATAL) << "Could not find method of intrinsic "
+ << class_name << " " << method_name << " " << signature;
+ }
+
+ CHECK_EQ(method->GetInvokeType(), invoke_type);
+ if (method->IsIntrinsic()) {
+ CHECK_EQ(method->GetIntrinsic(), static_cast<uint32_t>(intrinsic));
+ return true;
+ } else {
+ method->SetIntrinsic(static_cast<uint32_t>(intrinsic));
+ return false;
+ }
+}
+
+} // namespace
+
+void InitializeIntrinsics() {
+ ScopedObjectAccess soa(Thread::Current());
+ // Initialization here uses the short-circuit operator || to stop
+ // initializing if there's an already initialized intrinsic.
+#define SETUP_INTRINSICS(Name, InvokeType, _, __, ___, ClassName, MethodName, Signature) \
+ InitializeIntrinsic(soa.Self(), \
+ Intrinsics::k##Name, \
+ InvokeType, \
+ ClassName, \
+ MethodName, \
+ Signature) ||
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(SETUP_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef SETUP_INTRINSICS
+ true;
+}
+
+} // namespace art
diff --git a/runtime/runtime_intrinsics.h b/runtime/runtime_intrinsics.h
new file mode 100644
index 0000000..98dc9bc
--- /dev/null
+++ b/runtime/runtime_intrinsics.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_RUNTIME_INTRINSICS_H_
+#define ART_RUNTIME_RUNTIME_INTRINSICS_H_
+
+namespace art {
+
+void InitializeIntrinsics();
+
+} // namespace art
+
+#endif // ART_RUNTIME_RUNTIME_INTRINSICS_H_
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index a1f14be..bf5d718 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -34,6 +34,7 @@
#endif
#include "arch/instruction_set.h"
+#include "base/file_utils.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 47ffb4e..065b6e2 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -39,6 +39,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_utils.h"
+#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/systrace.h"
@@ -1346,36 +1347,26 @@
}
void Thread::RunCheckpointFunction() {
- bool done = false;
- do {
- // Grab the suspend_count lock and copy the checkpoints one by one. When the last checkpoint is
- // copied, clear the list and the flag. The RequestCheckpoint function will also grab this lock
- // to prevent a race between setting the kCheckpointRequest flag and clearing it.
- Closure* checkpoint = nullptr;
- {
- MutexLock mu(this, *Locks::thread_suspend_count_lock_);
- if (tlsPtr_.checkpoint_function != nullptr) {
- checkpoint = tlsPtr_.checkpoint_function;
- if (!checkpoint_overflow_.empty()) {
- // Overflow list not empty, copy the first one out and continue.
- tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
- checkpoint_overflow_.pop_front();
- } else {
- // No overflow checkpoints, this means that we are on the last pending checkpoint.
- tlsPtr_.checkpoint_function = nullptr;
- AtomicClearFlag(kCheckpointRequest);
- done = true;
- }
- } else {
- LOG(FATAL) << "Checkpoint flag set without pending checkpoint";
- }
+ // Grab the suspend_count lock, get the next checkpoint and update all the checkpoint fields. If
+ // there are no more checkpoints, we will also clear the kCheckpointRequest flag.
+ Closure* checkpoint;
+ {
+ MutexLock mu(this, *Locks::thread_suspend_count_lock_);
+ checkpoint = tlsPtr_.checkpoint_function;
+ if (!checkpoint_overflow_.empty()) {
+ // Overflow list not empty, copy the first one out and continue.
+ tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
+ checkpoint_overflow_.pop_front();
+ } else {
+ // No overflow checkpoints. Clear the kCheckpointRequest flag.
+ tlsPtr_.checkpoint_function = nullptr;
+ AtomicClearFlag(kCheckpointRequest);
}
-
- // Outside the lock, run the checkpoint functions that we collected.
- ScopedTrace trace("Run checkpoint function");
- DCHECK(checkpoint != nullptr);
- checkpoint->Run(this);
- } while (!done);
+ }
+ // Outside the lock, run the checkpoint function.
+ ScopedTrace trace("Run checkpoint function");
+ CHECK(checkpoint != nullptr) << "Checkpoint flag set without pending checkpoint";
+ checkpoint->Run(this);
}
void Thread::RunEmptyCheckpoint() {
diff --git a/runtime/thread.h b/runtime/thread.h
index 42b38da..3b917ba 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1356,6 +1356,9 @@
WARN_UNUSED
REQUIRES(Locks::thread_suspend_count_lock_);
+ // Runs a single checkpoint function. If there are no more pending checkpoint functions, it will
+ // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
+ // the kCheckpointRequest flag is cleared.
void RunCheckpointFunction();
void RunEmptyCheckpoint();
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 1f6bd74..f6533a7 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -18,41 +18,20 @@
#include <inttypes.h>
#include <pthread.h>
-#include <sys/mman.h> // For madvise
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
-// We need dladdr.
-#ifndef __APPLE__
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#define DEFINED_GNU_SOURCE
-#endif
-#include <dlfcn.h>
-#include <libgen.h>
-#ifdef DEFINED_GNU_SOURCE
-#undef _GNU_SOURCE
-#undef DEFINED_GNU_SOURCE
-#endif
-#endif
-
-
#include <memory>
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
-#include "base/stl_util.h"
-#include "base/unix_file/fd_file.h"
+#include "base/file_utils.h"
#include "dex_file-inl.h"
-#include "dex_file_loader.h"
-#include "dex_instruction.h"
-#include "oat_quick_method_header.h"
#include "os.h"
-#include "scoped_thread_state_change-inl.h"
#include "utf-inl.h"
#if defined(__APPLE__)
@@ -92,78 +71,6 @@
return result;
}
-bool ReadFileToString(const std::string& file_name, std::string* result) {
- File file(file_name, O_RDONLY, false);
- if (!file.IsOpened()) {
- return false;
- }
-
- std::vector<char> buf(8 * KB);
- while (true) {
- int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[0], buf.size()));
- if (n == -1) {
- return false;
- }
- if (n == 0) {
- return true;
- }
- result->append(&buf[0], n);
- }
-}
-
-bool PrintFileToLog(const std::string& file_name, LogSeverity level) {
- File file(file_name, O_RDONLY, false);
- if (!file.IsOpened()) {
- return false;
- }
-
- constexpr size_t kBufSize = 256; // Small buffer. Avoid stack overflow and stack size warnings.
- char buf[kBufSize + 1]; // +1 for terminator.
- size_t filled_to = 0;
- while (true) {
- DCHECK_LT(filled_to, kBufSize);
- int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[filled_to], kBufSize - filled_to));
- if (n <= 0) {
- // Print the rest of the buffer, if it exists.
- if (filled_to > 0) {
- buf[filled_to] = 0;
- LOG(level) << buf;
- }
- return n == 0;
- }
- // Scan for '\n'.
- size_t i = filled_to;
- bool found_newline = false;
- for (; i < filled_to + n; ++i) {
- if (buf[i] == '\n') {
- // Found a line break, that's something to print now.
- buf[i] = 0;
- LOG(level) << buf;
- // Copy the rest to the front.
- if (i + 1 < filled_to + n) {
- memmove(&buf[0], &buf[i + 1], filled_to + n - i - 1);
- filled_to = filled_to + n - i - 1;
- } else {
- filled_to = 0;
- }
- found_newline = true;
- break;
- }
- }
- if (found_newline) {
- continue;
- } else {
- filled_to += n;
- // Check if we must flush now.
- if (filled_to == kBufSize) {
- buf[kBufSize] = 0;
- LOG(level) << buf;
- filled_to = 0;
- }
- }
- }
-}
-
void AppendPrettyDescriptor(const char* descriptor, std::string* result) {
// Count the number of '['s to get the dimensionality.
const char* c = descriptor;
@@ -718,197 +625,6 @@
*task_cpu = strtoull(fields[36].c_str(), nullptr, 10);
}
-std::string GetAndroidRootSafe(std::string* error_msg) {
- // Prefer ANDROID_ROOT if it's set.
- const char* android_dir = getenv("ANDROID_ROOT");
- if (android_dir != nullptr) {
- if (!OS::DirectoryExists(android_dir)) {
- *error_msg = StringPrintf("Failed to find ANDROID_ROOT directory %s", android_dir);
- return "";
- }
- return android_dir;
- }
-
- // Check where libart is from, and derive from there. Only do this for non-Mac.
-#ifndef __APPLE__
- {
- Dl_info info;
- if (dladdr(reinterpret_cast<const void*>(&GetAndroidRootSafe), /* out */ &info) != 0) {
- // Make a duplicate of the fname so dirname can modify it.
- UniqueCPtr<char> fname(strdup(info.dli_fname));
-
- char* dir1 = dirname(fname.get()); // This is the lib directory.
- char* dir2 = dirname(dir1); // This is the "system" directory.
- if (OS::DirectoryExists(dir2)) {
- std::string tmp = dir2; // Make a copy here so that fname can be released.
- return tmp;
- }
- }
- }
-#endif
-
- // Try "/system".
- if (!OS::DirectoryExists("/system")) {
- *error_msg = "Failed to find ANDROID_ROOT directory /system";
- return "";
- }
- return "/system";
-}
-
-std::string GetAndroidRoot() {
- std::string error_msg;
- std::string ret = GetAndroidRootSafe(&error_msg);
- if (ret.empty()) {
- LOG(FATAL) << error_msg;
- UNREACHABLE();
- }
- return ret;
-}
-
-
-static const char* GetAndroidDirSafe(const char* env_var,
- const char* default_dir,
- std::string* error_msg) {
- const char* android_dir = getenv(env_var);
- if (android_dir == nullptr) {
- if (OS::DirectoryExists(default_dir)) {
- android_dir = default_dir;
- } else {
- *error_msg = StringPrintf("%s not set and %s does not exist", env_var, default_dir);
- return nullptr;
- }
- }
- if (!OS::DirectoryExists(android_dir)) {
- *error_msg = StringPrintf("Failed to find %s directory %s", env_var, android_dir);
- return nullptr;
- }
- return android_dir;
-}
-
-static const char* GetAndroidDir(const char* env_var, const char* default_dir) {
- std::string error_msg;
- const char* dir = GetAndroidDirSafe(env_var, default_dir, &error_msg);
- if (dir != nullptr) {
- return dir;
- } else {
- LOG(FATAL) << error_msg;
- return nullptr;
- }
-}
-
-const char* GetAndroidData() {
- return GetAndroidDir("ANDROID_DATA", "/data");
-}
-
-const char* GetAndroidDataSafe(std::string* error_msg) {
- return GetAndroidDirSafe("ANDROID_DATA", "/data", error_msg);
-}
-
-std::string GetDefaultBootImageLocation(std::string* error_msg) {
- std::string android_root = GetAndroidRootSafe(error_msg);
- if (android_root.empty()) {
- return "";
- }
- return StringPrintf("%s/framework/boot.art", android_root.c_str());
-}
-
-void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
- bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache) {
- CHECK(subdir != nullptr);
- std::string error_msg;
- const char* android_data = GetAndroidDataSafe(&error_msg);
- if (android_data == nullptr) {
- *have_android_data = false;
- *dalvik_cache_exists = false;
- *is_global_cache = false;
- return;
- } else {
- *have_android_data = true;
- }
- const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
- *dalvik_cache = dalvik_cache_root + subdir;
- *dalvik_cache_exists = OS::DirectoryExists(dalvik_cache->c_str());
- *is_global_cache = strcmp(android_data, "/data") == 0;
- if (create_if_absent && !*dalvik_cache_exists && !*is_global_cache) {
- // Don't create the system's /data/dalvik-cache/... because it needs special permissions.
- *dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) &&
- (mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST));
- }
-}
-
-std::string GetDalvikCache(const char* subdir) {
- CHECK(subdir != nullptr);
- const char* android_data = GetAndroidData();
- const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
- const std::string dalvik_cache = dalvik_cache_root + subdir;
- if (!OS::DirectoryExists(dalvik_cache.c_str())) {
- // TODO: Check callers. Traditional behavior is to not abort.
- return "";
- }
- return dalvik_cache;
-}
-
-bool GetDalvikCacheFilename(const char* location, const char* cache_location,
- std::string* filename, std::string* error_msg) {
- if (location[0] != '/') {
- *error_msg = StringPrintf("Expected path in location to be absolute: %s", location);
- return false;
- }
- std::string cache_file(&location[1]); // skip leading slash
- if (!android::base::EndsWith(location, ".dex") &&
- !android::base::EndsWith(location, ".art") &&
- !android::base::EndsWith(location, ".oat")) {
- cache_file += "/";
- cache_file += DexFileLoader::kClassesDex;
- }
- std::replace(cache_file.begin(), cache_file.end(), '/', '@');
- *filename = StringPrintf("%s/%s", cache_location, cache_file.c_str());
- return true;
-}
-
-std::string GetVdexFilename(const std::string& oat_location) {
- return ReplaceFileExtension(oat_location, "vdex");
-}
-
-static void InsertIsaDirectory(const InstructionSet isa, std::string* filename) {
- // in = /foo/bar/baz
- // out = /foo/bar/<isa>/baz
- size_t pos = filename->rfind('/');
- CHECK_NE(pos, std::string::npos) << *filename << " " << isa;
- filename->insert(pos, "/", 1);
- filename->insert(pos + 1, GetInstructionSetString(isa));
-}
-
-std::string GetSystemImageFilename(const char* location, const InstructionSet isa) {
- // location = /system/framework/boot.art
- // filename = /system/framework/<isa>/boot.art
- std::string filename(location);
- InsertIsaDirectory(isa, &filename);
- return filename;
-}
-
-bool FileExists(const std::string& filename) {
- struct stat buffer;
- return stat(filename.c_str(), &buffer) == 0;
-}
-
-bool FileExistsAndNotEmpty(const std::string& filename) {
- struct stat buffer;
- if (stat(filename.c_str(), &buffer) != 0) {
- return false;
- }
- return buffer.st_size > 0;
-}
-
-std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension) {
- const size_t last_ext = filename.find_last_of('.');
- if (last_ext == std::string::npos) {
- return filename + "." + new_extension;
- } else {
- return filename.substr(0, last_ext + 1) + new_extension;
- }
-}
-
std::string PrettyDescriptor(Primitive::Type type) {
return PrettyDescriptor(Primitive::Descriptor(type));
}
@@ -952,30 +668,10 @@
*parsed_value = value;
}
-int64_t GetFileSizeBytes(const std::string& filename) {
- struct stat stat_buf;
- int rc = stat(filename.c_str(), &stat_buf);
- return rc == 0 ? stat_buf.st_size : -1;
-}
-
void SleepForever() {
while (true) {
usleep(1000000);
}
}
-int MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, int advice) {
- DCHECK_LE(begin, end);
- begin = AlignUp(begin, kPageSize);
- end = AlignDown(end, kPageSize);
- if (begin < end) {
- int result = madvise(const_cast<uint8_t*>(begin), end - begin, advice);
- if (result != 0) {
- PLOG(WARNING) << "madvise failed " << result;
- }
- return result;
- }
- return 0;
-}
-
} // namespace art
diff --git a/runtime/utils.h b/runtime/utils.h
index fbf812a..ede32dc 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -20,12 +20,8 @@
#include <pthread.h>
#include <stdlib.h>
-#include <limits>
-#include <memory>
#include <random>
#include <string>
-#include <type_traits>
-#include <vector>
#include "arch/instruction_set.h"
#include "base/casts.h"
@@ -118,9 +114,6 @@
// additionally allowing names that begin with '<' and end with '>'.
bool IsValidMemberName(const char* s);
-bool ReadFileToString(const std::string& file_name, std::string* result);
-bool PrintFileToLog(const std::string& file_name, LogSeverity level);
-
// Splits a string using the given separator character into a vector of
// strings. Empty strings will be omitted.
void Split(const std::string& s, char separator, std::vector<std::string>* result);
@@ -131,58 +124,12 @@
// Returns the given thread's name.
std::string GetThreadName(pid_t tid);
-// Reads data from "/proc/self/task/${tid}/stat".
-void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu);
-
// Sets the name of the current thread. The name may be truncated to an
// implementation-defined limit.
void SetThreadName(const char* thread_name);
-// Find $ANDROID_ROOT, /system, or abort.
-std::string GetAndroidRoot();
-// Find $ANDROID_ROOT, /system, or return an empty string.
-std::string GetAndroidRootSafe(std::string* error_msg);
-
-// Find $ANDROID_DATA, /data, or abort.
-const char* GetAndroidData();
-// Find $ANDROID_DATA, /data, or return null.
-const char* GetAndroidDataSafe(std::string* error_msg);
-
-// Returns the default boot image location (ANDROID_ROOT/framework/boot.art).
-// Returns an empty string if ANDROID_ROOT is not set.
-std::string GetDefaultBootImageLocation(std::string* error_msg);
-
-// Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
-// could not be found.
-std::string GetDalvikCache(const char* subdir);
-// Return true if we found the dalvik cache and stored it in the dalvik_cache argument.
-// have_android_data will be set to true if we have an ANDROID_DATA that exists,
-// dalvik_cache_exists will be true if there is a dalvik-cache directory that is present.
-// The flag is_global_cache tells whether this cache is /data/dalvik-cache.
-void GetDalvikCache(const char* subdir, bool create_if_absent, std::string* dalvik_cache,
- bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache);
-
-// Returns the absolute dalvik-cache path for a DexFile or OatFile. The path returned will be
-// rooted at cache_location.
-bool GetDalvikCacheFilename(const char* file_location, const char* cache_location,
- std::string* filename, std::string* error_msg);
-
-// Returns the system location for an image
-std::string GetSystemImageFilename(const char* location, InstructionSet isa);
-
-// Returns the vdex filename for the given oat filename.
-std::string GetVdexFilename(const std::string& oat_filename);
-
-// Returns true if the file exists.
-bool FileExists(const std::string& filename);
-bool FileExistsAndNotEmpty(const std::string& filename);
-
-// Returns `filename` with the text after the last occurrence of '.' replaced with
-// `extension`. If `filename` does not contain a period, returns a string containing `filename`,
-// a period, and `new_extension`.
-// Example: ReplaceFileExtension("foo.bar", "abc") == "foo.abc"
-// ReplaceFileExtension("foo", "abc") == "foo.abc"
-std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension);
+// Reads data from "/proc/self/task/${tid}/stat".
+void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu);
class VoidFunctor {
public:
@@ -273,9 +220,6 @@
return dist(rng);
}
-// Return the file size in bytes or -1 if the file does not exists.
-int64_t GetFileSizeBytes(const std::string& filename);
-
// Sleep forever and never come back.
NO_RETURN void SleepForever();
@@ -335,9 +279,6 @@
return (opnd < 0) ? -1 : ((opnd == 0) ? 0 : 1);
}
-// Madvise the largest page aligned region within begin and end.
-int MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, int advice);
-
template <typename Func, typename... Args>
static inline void CheckedCall(const Func& function, const char* what, Args... args) {
int rc = function(args...);
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index e846c98..efb20ba 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -20,6 +20,7 @@
#include <stdlib.h>
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/stl_util.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 0033167..6555e14 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -987,9 +987,17 @@
size_t monitor_enter_count = 0;
IterationRange<DexInstructionIterator> instructions = code_item_->Instructions();
- DexInstructionIterator inst = instructions.begin();
- for ( ; inst < instructions.end(); ++inst) {
- Instruction::Code opcode = inst->Opcode();
+ // We can't assume the instruction is well formed, so handle the case where calculating the size
+ // goes past the end of the code item.
+ SafeDexInstructionIterator it(instructions.begin(), instructions.end());
+ for ( ; !it.IsErrorState() && it < instructions.end(); ++it) {
+ // In case the instruction goes past the end of the code item, make sure to not process it.
+ SafeDexInstructionIterator next = it;
+ ++next;
+ if (next.IsErrorState() || next > instructions.end()) {
+ break;
+ }
+ Instruction::Code opcode = it->Opcode();
switch (opcode) {
case Instruction::APUT_OBJECT:
case Instruction::CHECK_CAST:
@@ -1010,13 +1018,13 @@
default:
break;
}
- GetInstructionFlags(inst.GetDexPC(instructions.begin())).SetIsOpcode();
+ GetInstructionFlags(it.GetDexPC(instructions.begin())).SetIsOpcode();
}
- if (inst != instructions.end()) {
+ if (it != instructions.end()) {
const size_t insns_size = code_item_->insns_size_in_code_units_;
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "code did not end where expected ("
- << inst.GetDexPC(instructions.begin()) << " vs. "
+ << it.GetDexPC(instructions.begin()) << " vs. "
<< insns_size << ")";
return false;
}
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index bfcd95c..829dea9 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -52,6 +52,7 @@
jclass WellKnownClasses::java_lang_Daemons;
jclass WellKnownClasses::java_lang_Error;
jclass WellKnownClasses::java_lang_invoke_MethodHandle;
+jclass WellKnownClasses::java_lang_invoke_MethodHandle_PolymorphicSignature;
jclass WellKnownClasses::java_lang_IllegalAccessError;
jclass WellKnownClasses::java_lang_NoClassDefFoundError;
jclass WellKnownClasses::java_lang_Object;
@@ -298,6 +299,7 @@
java_lang_Error = CacheClass(env, "java/lang/Error");
java_lang_IllegalAccessError = CacheClass(env, "java/lang/IllegalAccessError");
java_lang_invoke_MethodHandle = CacheClass(env, "java/lang/invoke/MethodHandle");
+ java_lang_invoke_MethodHandle_PolymorphicSignature = CacheClass(env, "java/lang/invoke/MethodHandle$PolymorphicSignature");
java_lang_NoClassDefFoundError = CacheClass(env, "java/lang/NoClassDefFoundError");
java_lang_reflect_Constructor = CacheClass(env, "java/lang/reflect/Constructor");
java_lang_reflect_Executable = CacheClass(env, "java/lang/reflect/Executable");
@@ -334,6 +336,7 @@
java_lang_invoke_MethodHandle_invokeExact = CacheMethod(env, java_lang_invoke_MethodHandle, false, "invokeExact", "([Ljava/lang/Object;)Ljava/lang/Object;");
java_lang_invoke_MethodHandles_lookup = CacheMethod(env, "java/lang/invoke/MethodHandles", true, "lookup", "()Ljava/lang/invoke/MethodHandles$Lookup;");
java_lang_invoke_MethodHandles_Lookup_findConstructor = CacheMethod(env, "java/lang/invoke/MethodHandles$Lookup", false, "findConstructor", "(Ljava/lang/Class;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/MethodHandle;");
+
java_lang_ref_FinalizerReference_add = CacheMethod(env, "java/lang/ref/FinalizerReference", true, "add", "(Ljava/lang/Object;)V");
java_lang_ref_ReferenceQueue_add = CacheMethod(env, "java/lang/ref/ReferenceQueue", true, "add", "(Ljava/lang/ref/Reference;)V");
@@ -434,6 +437,7 @@
java_lang_Error = nullptr;
java_lang_IllegalAccessError = nullptr;
java_lang_invoke_MethodHandle = nullptr;
+ java_lang_invoke_MethodHandle_PolymorphicSignature = nullptr;
java_lang_NoClassDefFoundError = nullptr;
java_lang_Object = nullptr;
java_lang_OutOfMemoryError = nullptr;
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 7deef63..b2fd4d6 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -64,6 +64,7 @@
static jclass java_lang_Error;
static jclass java_lang_IllegalAccessError;
static jclass java_lang_invoke_MethodHandle;
+ static jclass java_lang_invoke_MethodHandle_PolymorphicSignature;
static jclass java_lang_NoClassDefFoundError;
static jclass java_lang_Object;
static jclass java_lang_OutOfMemoryError;
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index ad705c5..58b33be 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -28,6 +28,7 @@
#include <backtrace/Backtrace.h>
#include "android-base/stringprintf.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "gc/heap.h"
diff --git a/test/203-multi-checkpoint/expected.txt b/test/203-multi-checkpoint/expected.txt
new file mode 100644
index 0000000..e1e30e3
--- /dev/null
+++ b/test/203-multi-checkpoint/expected.txt
@@ -0,0 +1,5 @@
+JNI_OnLoad called
+Other thread running
+pushing checkpoints
+checkpoints pushed
+Passed!
diff --git a/test/203-multi-checkpoint/info.txt b/test/203-multi-checkpoint/info.txt
new file mode 100644
index 0000000..a96ba97
--- /dev/null
+++ b/test/203-multi-checkpoint/info.txt
@@ -0,0 +1,4 @@
+Test that we correctly handle checkpoints that suspend.
+
+This could cause problems with asserts when multiple checkpoints were
+queued and earlier ones suspended.
diff --git a/test/203-multi-checkpoint/multi_checkpoint.cc b/test/203-multi-checkpoint/multi_checkpoint.cc
new file mode 100644
index 0000000..0799b6e
--- /dev/null
+++ b/test/203-multi-checkpoint/multi_checkpoint.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_method-inl.h"
+#include "base/mutex-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_pool.h"
+
+namespace art {
+
+struct TestClosure : public Closure {
+ bool first_run_start;
+ bool first_run_end;
+ bool second_run;
+ bool second_run_interleaved;
+
+ void Run(Thread* self) OVERRIDE {
+ CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
+ if (!first_run_start) {
+ CHECK(!second_run);
+ first_run_start = true;
+ // Suspend ourself so that we will perform the second run.
+ {
+ ScopedObjectAccess soa(self);
+ self->FullSuspendCheck();
+ }
+ first_run_end = true;
+ } else {
+ CHECK(!second_run);
+ CHECK(first_run_start);
+ second_run = true;
+ second_run_interleaved = !first_run_end;
+ }
+ }
+
+ void Check() {
+ CHECK(first_run_start);
+ CHECK(first_run_end);
+ CHECK(second_run);
+ CHECK(second_run_interleaved);
+ }
+};
+
+static TestClosure gTestClosure = {};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_checkCheckpointsRun(JNIEnv*, jclass) {
+ gTestClosure.Check();
+}
+
+struct SetupClosure : public Closure {
+ void Run(Thread* self) OVERRIDE {
+ CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
+ ScopedObjectAccess soa(self);
+ MutexLock tscl_mu(self, *Locks::thread_suspend_count_lock_);
+ // Both should succeed since we are in runnable and have the lock.
+ CHECK(self->RequestCheckpoint(&gTestClosure)) << "Could not set first checkpoint.";
+ CHECK(self->RequestCheckpoint(&gTestClosure)) << "Could not set second checkpoint.";
+ }
+};
+
+static SetupClosure gSetupClosure = {};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_pushCheckpoints(JNIEnv*, jclass, jobject thr) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ MutexLock tll_mu(self, *Locks::thread_list_lock_);
+ Thread* target = Thread::FromManagedThread(soa, thr);
+ while (true) {
+ MutexLock tscl_mu(self, *Locks::thread_suspend_count_lock_);
+ if (target->RequestCheckpoint(&gSetupClosure)) {
+ break;
+ }
+ }
+}
+
+} // namespace art
diff --git a/test/203-multi-checkpoint/src/Main.java b/test/203-multi-checkpoint/src/Main.java
new file mode 100644
index 0000000..187f622
--- /dev/null
+++ b/test/203-multi-checkpoint/src/Main.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.Semaphore;
+
+public class Main {
+ static final Semaphore start = new Semaphore(0);
+ static volatile boolean continue_loop = true;
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ Thread t = new Thread(Main::runTargetThread, "Target Thread");
+
+ t.start();
+ // Wait for other thread to start.
+ start.acquire();
+
+ System.out.println("pushing checkpoints");
+ pushCheckpoints(t);
+
+ System.out.println("checkpoints pushed");
+ continue_loop = false;
+
+ t.join();
+
+ checkCheckpointsRun();
+
+ System.out.println("Passed!");
+ }
+
+ public static native void pushCheckpoints(Thread t);
+ public static native void checkCheckpointsRun();
+
+ public static void doNothing() {}
+ public static void runTargetThread() {
+ System.out.println("Other thread running");
+ try {
+ start.release();
+ while (continue_loop) {
+ doNothing();
+ }
+ } catch (Exception e) {
+ throw new Error("Exception occurred!", e);
+ }
+ }
+}
diff --git a/test/593-checker-boolean-2-integral-conv/build b/test/593-checker-boolean-2-integral-conv/build
index 3721955..49292c9 100755
--- a/test/593-checker-boolean-2-integral-conv/build
+++ b/test/593-checker-boolean-2-integral-conv/build
@@ -20,7 +20,4 @@
# Also disable desugar because it is missing in jack platform builds.
export DESUGAR=false
-# See b/65168732
-export USE_D8=false
-
./default-build "$@"
diff --git a/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali b/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
index 00ebaaf..494ab95 100644
--- a/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
+++ b/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
@@ -30,6 +30,143 @@
return-void
.end method
+## CHECK-START: byte SmaliTests.booleanToByte(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Phi>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: byte SmaliTests.booleanToByte(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Sel>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: byte SmaliTests.booleanToByte(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToByte(Z)B
+ .registers 2
+ if-eqz p0, :cond_5
+ const/4 v0, 0x1
+
+ :goto_3
+ int-to-byte v0, v0
+ return v0
+
+ :cond_5
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
+## CHECK-START: short SmaliTests.booleanToShort(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Phi>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: short SmaliTests.booleanToShort(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Sel>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: short SmaliTests.booleanToShort(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToShort(Z)S
+ .registers 2
+ if-eqz p0, :cond_5
+ const/4 v0, 0x1
+
+ :goto_3
+ int-to-short v0, v0
+ return v0
+
+ :cond_5
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
+## CHECK-START: char SmaliTests.booleanToChar(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Phi>>]
+## CHECK-DAG: Return [<<IToC>>]
+
+## CHECK-START: char SmaliTests.booleanToChar(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Sel>>]
+## CHECK-DAG: Return [<<IToC>>]
+
+## CHECK-START: char SmaliTests.booleanToChar(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToChar(Z)C
+ .registers 2
+ if-eqz p0, :cond_5
+ const/4 v0, 0x1
+
+ :goto_3
+ int-to-char v0, v0
+ return v0
+
+ :cond_5
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
+## CHECK-START: int SmaliTests.booleanToInt(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: Return [<<Phi>>]
+
+## CHECK-START: int SmaliTests.booleanToInt(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: Return [<<Sel>>]
+
+## CHECK-START: int SmaliTests.booleanToInt(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToInt(Z)I
+ .registers 2
+ if-eqz p0, :cond_4
+ const/4 v0, 0x1
+
+ :goto_3
+ return v0
+
+ :cond_4
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
## CHECK-START: long SmaliTests.booleanToLong(boolean) builder (after)
## CHECK-DAG: <<Arg:z\d+>> ParameterValue
## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
diff --git a/test/593-checker-boolean-2-integral-conv/src/Main.java b/test/593-checker-boolean-2-integral-conv/src/Main.java
index 3503b2e..fdc0919 100644
--- a/test/593-checker-boolean-2-integral-conv/src/Main.java
+++ b/test/593-checker-boolean-2-integral-conv/src/Main.java
@@ -32,24 +32,6 @@
System.out.println("passed");
}
- /// CHECK-START: byte Main.booleanToByte(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Phi>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
- /// CHECK-START: byte Main.booleanToByte(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Sel>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
/// CHECK-START: byte Main.booleanToByte(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -58,24 +40,6 @@
return (byte)(b ? 1 : 0);
}
- /// CHECK-START: short Main.booleanToShort(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Phi>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
- /// CHECK-START: short Main.booleanToShort(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Sel>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
/// CHECK-START: short Main.booleanToShort(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -84,24 +48,6 @@
return (short)(b ? 1 : 0);
}
- /// CHECK-START: char Main.booleanToChar(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Phi>>]
- /// CHECK-DAG: Return [<<IToC>>]
-
- /// CHECK-START: char Main.booleanToChar(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Sel>>]
- /// CHECK-DAG: Return [<<IToC>>]
-
/// CHECK-START: char Main.booleanToChar(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -110,22 +56,6 @@
return (char)(b ? 1 : 0);
}
- /// CHECK-START: int Main.booleanToInt(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: Return [<<Phi>>]
-
- /// CHECK-START: int Main.booleanToInt(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: Return [<<Sel>>]
-
/// CHECK-START: int Main.booleanToInt(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
diff --git a/test/667-checker-simd-alignment/expected.txt b/test/667-checker-simd-alignment/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/667-checker-simd-alignment/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/667-checker-simd-alignment/info.txt b/test/667-checker-simd-alignment/info.txt
new file mode 100644
index 0000000..a46bfaa
--- /dev/null
+++ b/test/667-checker-simd-alignment/info.txt
@@ -0,0 +1 @@
+Test SIMD vectorization alignment optimizations.
diff --git a/test/667-checker-simd-alignment/src/Main.java b/test/667-checker-simd-alignment/src/Main.java
new file mode 100644
index 0000000..a6235b8
--- /dev/null
+++ b/test/667-checker-simd-alignment/src/Main.java
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SIMD vectorization alignment optimizations.
+ */
+public class Main {
+
+ /// CHECK-START: void Main.staticallyAligned(int[]) loop_optimization (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Phi>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyAligned(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Nrm>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Nrm>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: ArrayGet
+ /// CHECK-NOT: ArraySet
+ static void staticallyAligned(int[] a) {
+ // Starts at offset 12 (hidden) + 1 * 4 relative to base alignment.
+ // So no peeling, aligned vector, no cleanup.
+ for (int i = 1; i < 9; i++) {
+ a[i] += 1;
+ }
+ }
+
+ /// CHECK-START: void Main.staticallyAlignedN(int[]) loop_optimization (before)
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Par:l\d+>> NullCheck loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Phi>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyAlignedN(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Par:l\d+>> NullCheck loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Nrm>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Nrm>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<PhiC:i\d+>> Phi [<<Phi>>,<<AddIC:i\d+>>] loop:<<Clean:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<NrmC:i\d+>> Add [<<PhiC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<NrmC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<Get>>,<<One>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<NrmC>>,<<AddC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddIC>> Add [<<PhiC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ static void staticallyAlignedN(int[] a) {
+ // Starts at offset 12 (hidden) + 1 * 4 relative to base alignment.
+ // So no peeling, aligned vector, cleanup.
+ for (int i = 1; i < a.length; i++) {
+ a[i] += 1;
+ }
+ }
+
+ /// CHECK-START: void Main.staticallyMisaligned(int[]) loop_optimization (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Phi>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyMisaligned(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<PhiP:i\d+>> Phi [<<Zero>>,<<AddIP:i\d+>>] loop:<<Peel:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<PhiP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddP:i\d+>> Add [<<Get>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<PhiP>>,<<AddP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddIP>> Add [<<PhiP>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<PhiP>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Phi>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Phi>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: ArrayGet
+ /// CHECK-NOT: ArraySet
+ static void staticallyMisaligned(int[] a) {
+ // Starts at offset 12 (hidden) + 0 * 4 relative to base alignment.
+ // Yes, the ART runtime misaligns the most common access pattern :-(
+ // Static peeling to the rescue, aligned vector, no cleanup.
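+ // (Under the same layout assumption, element 0 sits at byte 12, which is not
+ // a multiple of 8; statically peeling the i = 0 iteration moves the vector
+ // loop to element 1 at byte 16, restoring the ALIGN(8,0) accesses checked
+ // above.)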
+ for (int i = 0; i < 9; i++) {
+ a[i] += 1;
+ }
+ }
+
+ /// CHECK-START: void Main.staticallyMisalignedN(int[]) loop_optimization (before)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Par:l\d+>> NullCheck loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Phi>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyMisalignedN(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Par:l\d+>> NullCheck loop:none
+ /// CHECK-DAG: <<PhiP:i\d+>> Phi [<<Zero>>,<<AddIP:i\d+>>] loop:<<Peel:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<GetP:i\d+>> ArrayGet [<<Par>>,<<PhiP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddP:i\d+>> Add [<<GetP>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<PhiP>>,<<AddP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddIP>> Add [<<PhiP>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<PhiP>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Phi>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Phi>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<PhiC:i\d+>> Phi [<<Phi>>,<<AddIC:i\d+>>] loop:<<Clean:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<GetC:i\d+>> ArrayGet [<<Par>>,<<PhiC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<GetC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<PhiC>>,<<AddC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddIC>> Add [<<PhiC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ static void staticallyMisalignedN(int[] a) {
+ // Starts at offset 12 (hidden) + 0 * 4 relative to base alignment.
+ // Yes, the ART runtime misaligns the most common access pattern :-(
+ // Static peeling to the rescue, aligned vector, cleanup.
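+ // (This combines both transformations: one statically peeled iteration to
+ // reach an 8-byte aligned element, the two-lane vector loop, and a scalar
+ // cleanup loop for any remaining iteration, since a.length is unknown here.)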
+ for (int i = 0; i < a.length; i++) {
+ a[i] += 1;
+ }
+ }
+
+ /// CHECK-START: void Main.staticallyUnknownAligned(int[], int) loop_optimization (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Off:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Off>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Nrm>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Nrm>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyUnknownAligned(int[], int) loop_optimization (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Off:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<PhiP:i\d+>> Phi [<<Zero>>,<<AddIP:i\d+>>] loop:<<Peel:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<NrmP:i\d+>> Add [<<PhiP>>,<<Off>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<NrmP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddP:i\d+>> Add [<<Get>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<NrmP>>,<<AddP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddIP>> Add [<<PhiP>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<PhiP>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Phi>>,<<Off>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Nrm>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Nrm>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<PhiC:i\d+>> Phi [<<Phi>>,<<AddIC:i\d+>>] loop:<<Clean:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<NrmC:i\d+>> Add [<<PhiC>>,<<Off>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<GetC:i\d+>> ArrayGet [<<Par>>,<<NrmC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<GetC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<NrmC>>,<<AddC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddIC>> Add [<<PhiC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ static void staticallyUnknownAligned(int[] a, int off) {
+ // Starts at an unknown offset due to parameter off.
+ // Dynamic peeling to the rescue, aligned vector, cleanup.
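+ // (Since off is only known at runtime, the peel count is derived from it
+ // dynamically; for this two-lane ARM case the peel loop checked above should
+ // run at most one scalar iteration before the aligned vector loop.)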
+ for (int i = 0; i < 9; i++) {
+ a[off + i] += 1;
+ }
+ }
+
+ /// CHECK-START: void Main.staticallyUnknownAlignedN(int[], int, int) loop_optimization (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Off:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Off>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Nrm>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Nrm>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyUnknownAlignedN(int[], int, int) loop_optimization (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Off:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<PhiP:i\d+>> Phi [<<Zero>>,<<AddIP:i\d+>>] loop:<<Peel:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<NrmP:i\d+>> Add [<<PhiP>>,<<Off>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<NrmP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddP:i\d+>> Add [<<Get>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<NrmP>>,<<AddP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddIP>> Add [<<PhiP>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<PhiP>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Phi>>,<<Off>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Nrm>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Nrm>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<PhiC:i\d+>> Phi [<<Phi>>,<<AddIC:i\d+>>] loop:<<Clean:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<NrmC:i\d+>> Add [<<PhiC>>,<<Off>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<GetC:i\d+>> ArrayGet [<<Par>>,<<NrmC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<GetC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<NrmC>>,<<AddC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddIC>> Add [<<PhiC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ static void staticallyUnknownAlignedN(int[] a, int off, int n) {
+ // Starts at an unknown offset due to parameter off.
+ // Dynamic peeling to the rescue, aligned vector, cleanup.
+ for (int i = 0; i < n; i++) {
+ a[off + i] += 1;
+ }
+ }
+
+ //
+ // Test drivers.
+ //
+
+ private static void test1() {
+ int[] a = new int[9];
+ staticallyAligned(a);
+ for (int i = 0; i < a.length; i++) {
+ int e = i > 0 ? 1 : 0;
+ expectEquals(e, a[i]);
+ }
+ }
+
+ private static void test2() {
+ for (int n = 0; n <= 71; n++) {
+ int[] a = new int[n];
+ staticallyAlignedN(a);
+ for (int i = 0; i < a.length; i++) {
+ int e = i > 0 ? 1 : 0;
+ expectEquals(e, a[i]);
+ }
+ }
+ }
+
+ private static void test3() {
+ int[] a = new int[9];
+ staticallyMisaligned(a);
+ for (int i = 0; i < a.length; i++) {
+ expectEquals(1, a[i]);
+ }
+ }
+
+ private static void test4() {
+ for (int n = 0; n <= 71; n++) {
+ int[] a = new int[n];
+ staticallyMisalignedN(a);
+ for (int i = 0; i < a.length; i++) {
+ expectEquals(1, a[i]);
+ }
+ }
+ }
+
+ private static void test5() {
+ for (int off = 0; off <= 8; off++) {
+ int[] a = new int[17];
+ staticallyUnknownAligned(a, off);
+ for (int i = 0; i < a.length; i++) {
+ int e = (off <= i && i < off + 9) ? 1 : 0;
+ expectEquals(e, a[i]);
+ }
+ }
+ }
+
+ private static void test6() {
+ for (int off = 0; off <= 8; off++) {
+ for (int n = 0; n <= 9; n++) {
+ int[] a = new int[17];
+ staticallyUnknownAlignedN(a, off, n);
+ for (int i = 0; i < a.length; i++) {
+ int e = (off <= i && i < off + n) ? 1 : 0;
+ expectEquals(e, a[i]);
+ }
+ }
+ }
+ }
+
+ public static void main(String[] args) {
+ test1();
+ test2();
+ test3();
+ test4();
+ test5();
+ test6();
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/988-method-trace/gen_srcs.py b/test/988-method-trace/gen_srcs.py
index 8f1082f..225f41b 100755
--- a/test/988-method-trace/gen_srcs.py
+++ b/test/988-method-trace/gen_srcs.py
@@ -28,8 +28,8 @@
from string import Template
-# Relative path to art/compiler/intrinsics_list.h
-INTRINSICS_LIST_H = os.path.dirname(os.path.realpath(__file__)) + "/../../compiler/intrinsics_list.h"
+# Relative path to art/runtime/intrinsics_list.h
+INTRINSICS_LIST_H = os.path.dirname(os.path.realpath(__file__)) + "/../../runtime/intrinsics_list.h"
# Macro parameter index to V(). Negative means from the end.
IDX_STATIC_OR_VIRTUAL = 1
@@ -39,7 +39,8 @@
# Exclude all hidden API.
KLASS_BLACK_LIST = ['sun.misc.Unsafe', 'libcore.io.Memory', 'java.lang.StringFactory',
- 'java.lang.invoke.VarHandle' ] # TODO(b/65872996): Enable when VarHandle is visible.
+ 'java.lang.invoke.MethodHandle', # invokes are tested by 956-method-handles
+ 'java.lang.invoke.VarHandle' ] # TODO(b/65872996): will be tested separately
METHOD_BLACK_LIST = [('java.lang.ref.Reference', 'getReferent'),
('java.lang.String', 'getCharsNoCheck'),
('java.lang.System', 'arraycopy')] # arraycopy has a manual test.
@@ -90,7 +91,7 @@
}
static void test() {
- // Call each intrinsic from art/compiler/intrinsics_list.h to make sure they are traced.
+ // Call each intrinsic from art/runtime/intrinsics_list.h to make sure they are traced.
$test_body
}
}
diff --git a/test/988-method-trace/src/art/Test988Intrinsics.java b/test/988-method-trace/src/art/Test988Intrinsics.java
index 099fbf2..3069f1a 100644
--- a/test/988-method-trace/src/art/Test988Intrinsics.java
+++ b/test/988-method-trace/src/art/Test988Intrinsics.java
@@ -44,7 +44,7 @@
}
static void test() {
- // Call each intrinsic from art/compiler/intrinsics_list.h to make sure they are traced.
+ // Call each intrinsic from art/runtime/intrinsics_list.h to make sure they are traced.
java.lang.Double.doubleToRawLongBits(0.0);
java.lang.Double.doubleToLongBits(0.0);
java.lang.Double.isInfinite(0.0);
diff --git a/test/993-breakpoints/breakpoints.cc b/test/993-breakpoints/breakpoints.cc
index 3734ce8..e9cf3b3 100644
--- a/test/993-breakpoints/breakpoints.cc
+++ b/test/993-breakpoints/breakpoints.cc
@@ -49,6 +49,57 @@
}
extern "C" JNIEXPORT
+void JNICALL Java_art_Test993_invokeNativeObject(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject target,
+ jclass clazz,
+ jobject thizz) {
+ jmethodID method = env->FromReflectedMethod(target);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ if (thizz == nullptr) {
+ env->CallStaticObjectMethod(clazz, method);
+ } else {
+ env->CallObjectMethod(thizz, method);
+ }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test993_invokeNativeBool(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject target,
+ jclass clazz,
+ jobject thizz) {
+ jmethodID method = env->FromReflectedMethod(target);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ if (thizz == nullptr) {
+ env->CallStaticBooleanMethod(clazz, method);
+ } else {
+ env->CallBooleanMethod(thizz, method);
+ }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test993_invokeNativeLong(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject target,
+ jclass clazz,
+ jobject thizz) {
+ jmethodID method = env->FromReflectedMethod(target);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ if (thizz == nullptr) {
+ env->CallStaticLongMethod(clazz, method);
+ } else {
+ env->CallLongMethod(thizz, method);
+ }
+}
+
+extern "C" JNIEXPORT
void JNICALL Java_art_Test993_invokeNative(JNIEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jobject target,
diff --git a/test/993-breakpoints/expected.txt b/test/993-breakpoints/expected.txt
index 96215473..1749a77e 100644
--- a/test/993-breakpoints/expected.txt
+++ b/test/993-breakpoints/expected.txt
@@ -552,6 +552,107 @@
Breakpoint: private void art.Test993$TestClass4.privateMethod() @ line=118
Invoking "new TestClass4().callPrivateMethod()"
Breakpoint: private void art.Test993$TestClass4.privateMethod() @ line=118
+Running Vector constructor
+ Breaking on []
+ Native constructor: public java.util.Vector(), type: class java.util.Vector
+ Created: []
+ Reflective constructor: public java.util.Vector()
+ Created: []
+ Constructing: new Vector()
+ Created: []
+ Breaking on [public java.util.Vector() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Vector(), type: class java.util.Vector
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Vector()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Vector()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+Running Stack constructor
+ Breaking on []
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Created: []
+ Constructing: new Stack()
+ Created: []
+ Breaking on [public java.util.Stack() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Breaking on [public java.util.Vector() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Stack()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Breaking on [public java.util.Stack() @ <NON-DETERMINISTIC>, public java.util.Vector() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+Running bcp static invoke
+ Breaking on []
+ Native invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Reflective invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Invoking "Optional::empty"
+ Breaking on [public static java.util.Optional java.util.Optional.empty() @ <NON-DETERMINISTIC>]
+ Native invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Breakpoint: public static java.util.Optional java.util.Optional.empty() @ line=<NON-DETERMINISTIC>
+ Reflective invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Breakpoint: public static java.util.Optional java.util.Optional.empty() @ line=<NON-DETERMINISTIC>
+ Invoking "Optional::empty"
+ Breakpoint: public static java.util.Optional java.util.Optional.empty() @ line=<NON-DETERMINISTIC>
+Running bcp private static invoke
+ Breaking on []
+ Native invoking: private static long java.util.Random.seedUniquifier() args: [this: null]
+ Invoking "Random::seedUniquifier"
+ Breaking on [private static long java.util.Random.seedUniquifier() @ <NON-DETERMINISTIC>]
+ Native invoking: private static long java.util.Random.seedUniquifier() args: [this: null]
+ Breakpoint: private static long java.util.Random.seedUniquifier() @ line=<NON-DETERMINISTIC>
+ Invoking "Random::seedUniquifier"
+ Breakpoint: private static long java.util.Random.seedUniquifier() @ line=<NON-DETERMINISTIC>
+Running bcp private invoke
+ Breaking on []
+ Native invoking: private java.math.BigDecimal java.time.Duration.toSeconds() args: [this: PT336H]
+ Invoking "Duration::toSeconds"
+ Breaking on [private java.math.BigDecimal java.time.Duration.toSeconds() @ <NON-DETERMINISTIC>]
+ Native invoking: private java.math.BigDecimal java.time.Duration.toSeconds() args: [this: PT336H]
+ Breakpoint: private java.math.BigDecimal java.time.Duration.toSeconds() @ line=<NON-DETERMINISTIC>
+ Invoking "Duration::toSeconds"
+ Breakpoint: private java.math.BigDecimal java.time.Duration.toSeconds() @ line=<NON-DETERMINISTIC>
+Running bcp invoke
+ Breaking on []
+ Native invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test]]
+ Reflective invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test2]]
+ Invoking "Optional::isPresent"
+ Breaking on [public boolean java.util.Optional.isPresent() @ <NON-DETERMINISTIC>]
+ Native invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test]]
+ Breakpoint: public boolean java.util.Optional.isPresent() @ line=<NON-DETERMINISTIC>
+ Reflective invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test2]]
+ Breakpoint: public boolean java.util.Optional.isPresent() @ line=<NON-DETERMINISTIC>
+ Invoking "Optional::isPresent"
+ Breakpoint: public boolean java.util.Optional.isPresent() @ line=<NON-DETERMINISTIC>
Running TestClass1 constructor
Breaking on []
Native constructor: public art.Test993$TestClass1(), type: class art.Test993$TestClass1
diff --git a/test/993-breakpoints/src/art/Test993.java b/test/993-breakpoints/src/art/Test993.java
index 781ebff..d6a6a67 100644
--- a/test/993-breakpoints/src/art/Test993.java
+++ b/test/993-breakpoints/src/art/Test993.java
@@ -16,20 +16,20 @@
package art;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.Arrays;
import java.lang.reflect.Executable;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
-import java.util.List;
-import java.util.Set;
-import java.util.Spliterator;
-import java.util.Spliterators;
-import java.util.Collection;
+
+import java.time.Duration;
+
import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.function.IntUnaryOperator;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Stack;
+import java.util.Vector;
+
import java.util.function.Supplier;
public class Test993 {
@@ -120,7 +120,13 @@
}
public static void notifyBreakpointReached(Thread thr, Executable e, long loc) {
- System.out.println("\t\t\tBreakpoint: " + e + " @ line=" + Breakpoint.locationToLine(e, loc));
+ String line;
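+ // Line numbers inside bootclasspath methods depend on the core library
+ // sources in use, so only methods from this test's own package report a
+ // concrete line; everything else is normalized to <NON-DETERMINISTIC> to
+ // match expected.txt.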
+ if (e.getDeclaringClass().getPackage().equals(Test993.class.getPackage())) {
+ line = Integer.valueOf(Breakpoint.locationToLine(e, loc)).toString();
+ } else {
+ line = "<NON-DETERMINISTIC>";
+ }
+ System.out.println("\t\t\tBreakpoint: " + e + " @ line=" + line);
}
public static interface ThrowRunnable extends Runnable {
@@ -180,6 +186,57 @@
public static native void invokeNative(Method m, Class<?> clazz, Object thizz);
+ public static class InvokeNativeBool implements Runnable {
+ Method m;
+ Object this_arg;
+ public InvokeNativeBool(Method m, Object this_arg) {
+ this.m = m;
+ this.this_arg = this_arg;
+ }
+
+ @Override
+ public void run() {
+ System.out.println("\t\tNative invoking: " + m + " args: [this: " + this_arg + "]");
+ invokeNativeBool(m, m.getDeclaringClass(), this_arg);
+ }
+ }
+
+ public static native void invokeNativeBool(Method m, Class<?> clazz, Object thizz);
+
+ public static class InvokeNativeObject implements Runnable {
+ Method m;
+ Object this_arg;
+ public InvokeNativeObject(Method m, Object this_arg) {
+ this.m = m;
+ this.this_arg = this_arg;
+ }
+
+ @Override
+ public void run() {
+ System.out.println("\t\tNative invoking: " + m + " args: [this: " + this_arg + "]");
+ invokeNativeObject(m, m.getDeclaringClass(), this_arg);
+ }
+ }
+
+ public static native void invokeNativeObject(Method m, Class<?> clazz, Object thizz);
+
+ public static class InvokeNativeLong implements Runnable {
+ Method m;
+ Object this_arg;
+ public InvokeNativeLong(Method m, Object this_arg) {
+ this.m = m;
+ this.this_arg = this_arg;
+ }
+
+ @Override
+ public void run() {
+ System.out.println("\t\tNative invoking: " + m + " args: [this: " + this_arg + "]");
+ invokeNativeLong(m, m.getDeclaringClass(), this_arg);
+ }
+ }
+
+ public static native void invokeNativeLong(Method m, Class<?> clazz, Object thizz);
+
public static class ConstructDirect implements Runnable {
String msg;
Supplier<Object> s;
@@ -258,7 +315,15 @@
}
private static Breakpoint.Manager.BP BP(Executable m) {
- return new Breakpoint.Manager.BP(m);
+ return new Breakpoint.Manager.BP(m) {
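+ // Same normalization as notifyBreakpointReached: breakpoints on
+ // bootclasspath methods are printed with <NON-DETERMINISTIC> in place of a
+ // line number, since those line numbers vary with the core library sources.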
+ public String toString() {
+ if (method.getDeclaringClass().getPackage().equals(Test993.class.getPackage())) {
+ return super.toString();
+ } else {
+ return method.toString() + " @ <NON-DETERMINISTIC>";
+ }
+ }
+ };
}
public static void run() throws Exception {
@@ -271,6 +336,7 @@
Thread.currentThread());
runMethodTests();
+ runBCPMethodTests();
runConstructorTests();
Breakpoint.stopBreakpointWatch(Thread.currentThread());
@@ -302,6 +368,94 @@
runTestGroups("TestClass1ext constructor", tc1ext_constructors, tc1ext_bps);
}
+ // These tests make sure we are able to break on functions that might have been quickened or
+ // inlined from the boot-image. The methods were all chosen for being in the bootclasspath,
+ // being short enough that they can be inlined, and not being used by the testing framework.
+ public static void runBCPMethodTests() throws Exception {
+ // The methods we will be breaking on.
+ Method bcp_private_method = Duration.class.getDeclaredMethod("toSeconds");
+ Method bcp_virtual_method = Optional.class.getDeclaredMethod("isPresent");
+ Method bcp_static_method = Optional.class.getDeclaredMethod("empty");
+ Method bcp_private_static_method = Random.class.getDeclaredMethod("seedUniquifier");
+
+ // Some constructors we will break on.
+ Constructor<?> bcp_stack_constructor = Stack.class.getConstructor();
+ Constructor<?> bcp_vector_constructor = Vector.class.getConstructor();
+ if (!(Vector.class.isAssignableFrom(Stack.class))) {
+ throw new Error("Expected Stack to extend Vector!");
+ }
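+ // (Stack's constructor chains into Vector's, which is why the expected
+ // output also reports the Vector breakpoint when a Stack is constructed
+ // with the Vector-constructor breakpoint set.)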
+
+ // BCP constructors.
+ Runnable[] vector_constructors = new Runnable[] {
+ new ConstructNative(bcp_vector_constructor),
+ new ConstructReflect(bcp_vector_constructor),
+ new ConstructDirect("new Vector()", Vector::new),
+ };
+ Breakpoint.Manager.BP[] vector_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_vector_constructor),
+ };
+ runTestGroups("Vector constructor", vector_constructors, vector_breakpoints);
+
+ Runnable[] stack_constructors = new Runnable[] {
+ new ConstructNative(bcp_stack_constructor),
+ new ConstructReflect(bcp_stack_constructor),
+ new ConstructDirect("new Stack()", Stack::new),
+ };
+ Breakpoint.Manager.BP[] stack_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_stack_constructor), BP(bcp_vector_constructor),
+ };
+ runTestGroups("Stack constructor", stack_constructors, stack_breakpoints);
+
+ // Static function
+ Runnable[] static_invokes = new Runnable[] {
+ new InvokeNativeObject(bcp_static_method, null),
+
+ new InvokeReflect(bcp_static_method, null),
+
+ new InvokeDirect("Optional::empty", () -> { Optional.empty(); }),
+ };
+ Breakpoint.Manager.BP[] static_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_static_method)
+ };
+ runTestGroups("bcp static invoke", static_invokes, static_breakpoints);
+
+ // Static private class function
+ Runnable[] private_static_invokes = new Runnable[] {
+ new InvokeNativeLong(bcp_private_static_method, null),
+
+ new InvokeDirect("Random::seedUniquifier", () -> { new Random(); }),
+ };
+ Breakpoint.Manager.BP[] private_static_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_private_static_method)
+ };
+ runTestGroups("bcp private static invoke", private_static_invokes, private_static_breakpoints);
+
+ // private class method
+ Duration test_duration = Duration.ofDays(14);
+ Runnable[] private_invokes = new Runnable[] {
+ new InvokeNativeObject(bcp_private_method, test_duration),
+
+ new InvokeDirect("Duration::toSeconds", () -> { test_duration.multipliedBy(2); }),
+ };
+ Breakpoint.Manager.BP[] private_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_private_method)
+ };
+ runTestGroups("bcp private invoke", private_invokes, private_breakpoints);
+
+ // class method
+ Runnable[] public_invokes = new Runnable[] {
+ new InvokeNativeBool(bcp_virtual_method, Optional.of("test")),
+
+ new InvokeReflect(bcp_virtual_method, Optional.of("test2")),
+
+ new InvokeDirect("Optional::isPresent", () -> { Optional.of("test3").isPresent(); }),
+ };
+ Breakpoint.Manager.BP[] public_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_virtual_method)
+ };
+ runTestGroups("bcp invoke", public_invokes, public_breakpoints);
+ }
+
public static void runMethodTests() throws Exception {
// The methods we will be breaking on.
Method breakpoint_method = Test993.class.getDeclaredMethod("breakpoint");
diff --git a/test/Android.bp b/test/Android.bp
index 16b30f9..17ef114 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -359,6 +359,7 @@
"141-class-unload/jni_unload.cc",
"148-multithread-gc-annotations/gc_coverage.cc",
"149-suspend-all-stress/suspend_all.cc",
+ "203-multi-checkpoint/multi_checkpoint.cc",
"154-gc-loop/heap_interface.cc",
"454-get-vreg/get_vreg_jni.cc",
"457-regs/regs_jni.cc",
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 3e162dd..a9a0492 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -30,6 +30,10 @@
# Let users with Java 7 run ahat (b/28303627)
LOCAL_JAVA_LANGUAGE_VERSION := 1.7
+# Make this available on the classpath of the general-tests tradefed suite.
+# It is used by libcore tests that run there.
+LOCAL_COMPATIBILITY_SUITE := general-tests
+
include $(BUILD_HOST_JAVA_LIBRARY)
AHAT_JAR := $(LOCAL_BUILT_MODULE)
AHAT_API := $(intermediates.COMMON)/ahat_api.txt