Merge "ART: Add class instance fields in hprof dumps"
diff --git a/compiler/Android.bp b/compiler/Android.bp
index df896dc..307a42c 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -67,6 +67,7 @@
"optimizing/intrinsics.cc",
"optimizing/licm.cc",
"optimizing/linear_order.cc",
+ "optimizing/load_store_analysis.cc",
"optimizing/load_store_elimination.cc",
"optimizing/locations.cc",
"optimizing/loop_optimization.cc",
@@ -374,6 +375,7 @@
"jni/jni_cfi_test.cc",
"optimizing/codegen_test.cc",
+ "optimizing/load_store_analysis_test.cc",
"optimizing/optimizing_cfi_test.cc",
"optimizing/scheduler_test.cc",
],
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 912c964..0ca23a5 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -119,7 +119,7 @@
// choose to squeeze the Type into fewer than 8 bits, we'll have to declare
// patch_type_ as an uintN_t and do explicit static_cast<>s.
enum class Type : uint8_t {
- kMethod,
+ kMethodRelative, // NOTE: Actual patching is instruction_set-dependent.
kCall,
kCallRelative, // NOTE: Actual patching is instruction_set-dependent.
kTypeRelative, // NOTE: Actual patching is instruction_set-dependent.
@@ -130,11 +130,13 @@
kBakerReadBarrierBranch, // NOTE: Actual patching is instruction_set-dependent.
};
- static LinkerPatch MethodPatch(size_t literal_offset,
- const DexFile* target_dex_file,
- uint32_t target_method_idx) {
- LinkerPatch patch(literal_offset, Type::kMethod, target_dex_file);
+ static LinkerPatch RelativeMethodPatch(size_t literal_offset,
+ const DexFile* target_dex_file,
+ uint32_t pc_insn_offset,
+ uint32_t target_method_idx) {
+ LinkerPatch patch(literal_offset, Type::kMethodRelative, target_dex_file);
patch.method_idx_ = target_method_idx;
+ patch.pc_insn_offset_ = pc_insn_offset;
return patch;
}
@@ -226,6 +228,7 @@
bool IsPcRelative() const {
switch (GetType()) {
+ case Type::kMethodRelative:
case Type::kCallRelative:
case Type::kTypeRelative:
case Type::kTypeBssEntry:
@@ -240,7 +243,7 @@
}
MethodReference TargetMethod() const {
- DCHECK(patch_type_ == Type::kMethod ||
+ DCHECK(patch_type_ == Type::kMethodRelative ||
patch_type_ == Type::kCall ||
patch_type_ == Type::kCallRelative);
return MethodReference(target_dex_file_, method_idx_);
@@ -281,7 +284,8 @@
}
uint32_t PcInsnOffset() const {
- DCHECK(patch_type_ == Type::kTypeRelative ||
+ DCHECK(patch_type_ == Type::kMethodRelative ||
+ patch_type_ == Type::kTypeRelative ||
patch_type_ == Type::kTypeBssEntry ||
patch_type_ == Type::kStringRelative ||
patch_type_ == Type::kStringBssEntry ||
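
Illustrative sketch (not part of the patch): how the new factory and accessors fit together. The values below are placeholders; only LinkerPatch::RelativeMethodPatch(), IsPcRelative(), TargetMethod() and PcInsnOffset() come from the header above.

    const DexFile* dex_file = reinterpret_cast<const DexFile*>(1);  // dummy pointer, as in the tests below
    LinkerPatch patch = LinkerPatch::RelativeMethodPatch(/* literal_offset */ 16u,
                                                         dex_file,
                                                         /* pc_insn_offset */ 3000u,
                                                         /* target_method_idx */ 1000u);
    DCHECK(patch.IsPcRelative());
    DCHECK_EQ(patch.PcInsnOffset(), 3000u);
    DCHECK_EQ(patch.TargetMethod().dex_method_index, 1000u);
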
diff --git a/compiler/compiled_method_test.cc b/compiler/compiled_method_test.cc
index 81a2cde..72b2282 100644
--- a/compiler/compiled_method_test.cc
+++ b/compiler/compiled_method_test.cc
@@ -50,10 +50,14 @@
const DexFile* dex_file1 = reinterpret_cast<const DexFile*>(1);
const DexFile* dex_file2 = reinterpret_cast<const DexFile*>(2);
LinkerPatch patches[] = {
- LinkerPatch::MethodPatch(16u, dex_file1, 1000u),
- LinkerPatch::MethodPatch(16u, dex_file1, 1001u),
- LinkerPatch::MethodPatch(16u, dex_file2, 1000u),
- LinkerPatch::MethodPatch(16u, dex_file2, 1001u), // Index 3.
+ LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3000u, 1000u),
+ LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1000u),
+ LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3000u, 1001u),
+ LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1001u), // Index 3.
+ LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3000u, 1000u),
+ LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3001u, 1000u),
+ LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3000u, 1001u),
+ LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3001u, 1001u),
LinkerPatch::CodePatch(16u, dex_file1, 1000u),
LinkerPatch::CodePatch(16u, dex_file1, 1001u),
LinkerPatch::CodePatch(16u, dex_file2, 1000u),
@@ -107,10 +111,14 @@
LinkerPatch::BakerReadBarrierBranchPatch(16u, 1u, 0u),
LinkerPatch::BakerReadBarrierBranchPatch(16u, 1u, 1u),
- LinkerPatch::MethodPatch(32u, dex_file1, 1000u),
- LinkerPatch::MethodPatch(32u, dex_file1, 1001u),
- LinkerPatch::MethodPatch(32u, dex_file2, 1000u),
- LinkerPatch::MethodPatch(32u, dex_file2, 1001u), // Index 3.
+ LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3000u, 1000u),
+ LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3001u, 1000u),
+ LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3000u, 1001u),
+ LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3001u, 1001u),
+ LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3000u, 1000u),
+ LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3001u, 1000u),
+ LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3000u, 1001u),
+ LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3001u, 1001u),
LinkerPatch::CodePatch(32u, dex_file1, 1000u),
LinkerPatch::CodePatch(32u, dex_file1, 1001u),
LinkerPatch::CodePatch(32u, dex_file2, 1000u),
@@ -164,7 +172,7 @@
LinkerPatch::BakerReadBarrierBranchPatch(32u, 1u, 0u),
LinkerPatch::BakerReadBarrierBranchPatch(32u, 1u, 1u),
- LinkerPatch::MethodPatch(16u, dex_file2, 1001u), // identical with patch at index 3.
+ LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1001u), // Same as patch at index 3.
};
constexpr size_t last_index = arraysize(patches) - 1u;
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 6572d17..bbd28b2 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -71,11 +71,11 @@
};
const LinkerPatch raw_patches1[] = {
LinkerPatch::CodePatch(0u, nullptr, 1u),
- LinkerPatch::MethodPatch(4u, nullptr, 1u),
+ LinkerPatch::RelativeMethodPatch(4u, nullptr, 0u, 1u),
};
const LinkerPatch raw_patches2[] = {
LinkerPatch::CodePatch(0u, nullptr, 1u),
- LinkerPatch::MethodPatch(4u, nullptr, 2u),
+ LinkerPatch::RelativeMethodPatch(4u, nullptr, 0u, 2u),
};
ArrayRef<const LinkerPatch> patches[] = {
ArrayRef<const LinkerPatch>(raw_patches1),
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index c033c2d..6d2b243 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -54,7 +54,7 @@
inline bool IsAdrpPatch(const LinkerPatch& patch) {
switch (patch.GetType()) {
- case LinkerPatch::Type::kMethod:
+ case LinkerPatch::Type::kMethodRelative:
case LinkerPatch::Type::kCall:
case LinkerPatch::Type::kCallRelative:
case LinkerPatch::Type::kBakerReadBarrierBranch:
@@ -250,11 +250,13 @@
if ((insn & 0xfffffc00) == 0x91000000) {
// ADD immediate, 64-bit with imm12 == 0 (unset).
if (!kEmitCompilerReadBarrier) {
- DCHECK(patch.GetType() == LinkerPatch::Type::kStringRelative ||
+ DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
+ patch.GetType() == LinkerPatch::Type::kStringRelative ||
patch.GetType() == LinkerPatch::Type::kTypeRelative) << patch.GetType();
} else {
// With the read barrier (non-Baker) enabled, it could be kStringBssEntry or kTypeBssEntry.
- DCHECK(patch.GetType() == LinkerPatch::Type::kStringRelative ||
+ DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
+ patch.GetType() == LinkerPatch::Type::kStringRelative ||
patch.GetType() == LinkerPatch::Type::kTypeRelative ||
patch.GetType() == LinkerPatch::Type::kStringBssEntry ||
patch.GetType() == LinkerPatch::Type::kTypeBssEntry) << patch.GetType();
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5091c0b..bc411df 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1331,9 +1331,12 @@
PatchCodeAddress(&patched_code_, literal_offset, target_offset);
break;
}
- case LinkerPatch::Type::kMethod: {
- ArtMethod* method = GetTargetMethod(patch);
- PatchMethodAddress(&patched_code_, literal_offset, method);
+ case LinkerPatch::Type::kMethodRelative: {
+ uint32_t target_offset = GetTargetMethodOffset(GetTargetMethod(patch));
+ writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
+ patch,
+ offset_ + literal_offset,
+ target_offset);
break;
}
case LinkerPatch::Type::kBakerReadBarrierBranch: {
@@ -1457,6 +1460,15 @@
}
}
+ uint32_t GetTargetMethodOffset(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(writer_->HasBootImage());
+ method = writer_->image_writer_->GetImageMethodAddress(method);
+ size_t oat_index = writer_->image_writer_->GetOatIndexForDexFile(dex_file_);
+ uintptr_t oat_data_begin = writer_->image_writer_->GetOatDataBegin(oat_index);
+ // TODO: Clean up offset types. The target offset must be treated as signed.
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method) - oat_data_begin);
+ }
+
uint32_t GetTargetObjectOffset(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(writer_->HasBootImage());
object = writer_->image_writer_->GetImageAddress(object);
@@ -1486,34 +1498,6 @@
data[3] = (address >> 24) & 0xffu;
}
- void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (writer_->HasBootImage()) {
- method = writer_->image_writer_->GetImageMethodAddress(method);
- } else if (kIsDebugBuild) {
- // NOTE: We're using linker patches for app->boot references when the image can
- // be relocated and therefore we need to emit .oat_patches. We're not using this
- // for app->app references, so check that the method is an image method.
- std::vector<gc::space::ImageSpace*> image_spaces =
- Runtime::Current()->GetHeap()->GetBootImageSpaces();
- bool contains_method = false;
- for (gc::space::ImageSpace* image_space : image_spaces) {
- size_t method_offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
- contains_method |=
- image_space->GetImageHeader().GetMethodsSection().Contains(method_offset);
- }
- CHECK(contains_method);
- }
- // Note: We only patch targeting ArtMethods in image which is in the low 4gb.
- uint32_t address = PointerToLowMemUInt32(method);
- DCHECK_LE(offset + 4, code->size());
- uint8_t* data = &(*code)[offset];
- data[0] = address & 0xffu;
- data[1] = (address >> 8) & 0xffu;
- data[2] = (address >> 16) & 0xffu;
- data[3] = (address >> 24) & 0xffu;
- }
-
void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t address = target_offset;
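
Illustrative sketch (not part of the patch) of the arithmetic in GetTargetMethodOffset() above: the displacement is taken relative to the start of the oat data section, and since boot image ArtMethods are typically mapped below the oat data it is usually negative when reinterpreted as a signed value (hence the TODO about signedness). The addresses here are assumed.

    uintptr_t image_method   = 0x70001000u;  // assumed address of the ArtMethod in the boot image
    uintptr_t oat_data_begin = 0x70800000u;  // assumed start of the corresponding oat data section
    uint32_t target_offset = static_cast<uint32_t>(image_method - oat_data_begin);
    // target_offset == 0xff801000u, i.e. -0x7ff000 as int32_t; the relative patcher later
    // uses it together with the patch offset to compute the final PC-relative displacement.
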
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index cb6e14b..a949c33 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -43,7 +43,7 @@
void RunBCE() {
graph_->BuildDominatorTree();
- InstructionSimplifier(graph_, /* codegen */ nullptr).Run();
+ InstructionSimplifier(graph_, /* codegen */ nullptr, /* driver */ nullptr).Run();
SideEffectsAnalysis side_effects(graph_);
side_effects.Run();
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 87ce3f6..c66bd77 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2397,9 +2397,10 @@
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -8967,6 +8968,18 @@
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(GetCompilerOptions().IsBootImage());
+ Register temp_reg = temp.AsRegister<Register>();
+ PcRelativePatchInfo* labels = NewPcRelativeMethodPatch(invoke->GetTargetMethod());
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(temp_reg, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(temp_reg, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(temp_reg, temp_reg, ShifterOperand(PC));
+ break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
__ LoadImmediate(temp.AsRegister<Register>(), invoke->GetMethodAddress());
break;
@@ -9059,9 +9072,11 @@
__ blx(LR);
}
-CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeStringPatch(
- const DexFile& dex_file, dex::StringIndex string_index) {
- return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
+CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeMethodPatch(
+ MethodReference target_method) {
+ return NewPcRelativePatch(*target_method.dex_file,
+ target_method.dex_method_index,
+ &pc_relative_method_patches_);
}
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeTypePatch(
@@ -9074,6 +9089,11 @@
return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
}
+CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeStringPatch(
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
+}
+
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -9138,22 +9158,26 @@
DCHECK(linker_patches->empty());
size_t size =
/* MOVW+MOVT for each entry */ 2u * pc_relative_dex_cache_patches_.size() +
- /* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * pc_relative_method_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
baker_read_barrier_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
- if (!GetCompilerOptions().IsBootImage()) {
- DCHECK(pc_relative_type_patches_.empty());
- EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ if (GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(pc_relative_method_patches_,
linker_patches);
- } else {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
+ } else {
+ DCHECK(pc_relative_method_patches_.empty());
+ DCHECK(pc_relative_type_patches_.empty());
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ linker_patches);
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 4763cbd..fa1c14d 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -481,10 +481,11 @@
Label add_pc_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
- dex::StringIndex string_index);
+ PcRelativePatchInfo* NewPcRelativeMethodPatch(MethodReference target_method);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
@@ -669,12 +670,14 @@
Uint32ToLiteralMap uint32_literals_;
// PC-relative patch info for each HArmDexCacheArraysBase.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
- ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
+ // PC-relative method patch info for kBootImageLinkTimePcRelative.
+ ArenaDeque<PcRelativePatchInfo> pc_relative_method_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
+ // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
+ ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Baker read barrier patch info.
ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 7ff100d..096eb07 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1449,9 +1449,10 @@
uint64_literals_(std::less<uint64_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -4510,6 +4511,17 @@
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(GetCompilerOptions().IsBootImage());
+ // Add ADRP with its PC-relative method patch.
+ vixl::aarch64::Label* adrp_label = NewPcRelativeMethodPatch(invoke->GetTargetMethod());
+ EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
+ // Add ADD with its PC-relative method patch.
+ vixl::aarch64::Label* add_label =
+ NewPcRelativeMethodPatch(invoke->GetTargetMethod(), adrp_label);
+ EmitAddPlaceholder(add_label, XRegisterFrom(temp), XRegisterFrom(temp));
+ break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
// Load method address from literal pool.
__ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress()));
@@ -4633,12 +4645,13 @@
codegen_->GenerateInvokePolymorphicCall(invoke);
}
-vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeStringPatch(
- const DexFile& dex_file,
- dex::StringIndex string_index,
+vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeMethodPatch(
+ MethodReference target_method,
vixl::aarch64::Label* adrp_label) {
- return
- NewPcRelativePatch(dex_file, string_index.index_, adrp_label, &pc_relative_string_patches_);
+ return NewPcRelativePatch(*target_method.dex_file,
+ target_method.dex_method_index,
+ adrp_label,
+ &pc_relative_method_patches_);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeTypePatch(
@@ -4655,6 +4668,14 @@
return NewPcRelativePatch(dex_file, type_index.index_, adrp_label, &type_bss_entry_patches_);
}
+vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeStringPatch(
+ const DexFile& dex_file,
+ dex::StringIndex string_index,
+ vixl::aarch64::Label* adrp_label) {
+ return
+ NewPcRelativePatch(dex_file, string_index.index_, adrp_label, &pc_relative_string_patches_);
+}
+
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file,
uint32_t element_offset,
@@ -4747,9 +4768,10 @@
DCHECK(linker_patches->empty());
size_t size =
pc_relative_dex_cache_patches_.size() +
- pc_relative_string_patches_.size() +
+ pc_relative_method_patches_.size() +
pc_relative_type_patches_.size() +
type_bss_entry_patches_.size() +
+ pc_relative_string_patches_.size() +
baker_read_barrier_patches_.size();
linker_patches->reserve(size);
for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
@@ -4758,15 +4780,18 @@
info.pc_insn_label->GetLocation(),
info.offset_or_index));
}
- if (!GetCompilerOptions().IsBootImage()) {
- DCHECK(pc_relative_type_patches_.empty());
- EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ if (GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(pc_relative_method_patches_,
linker_patches);
- } else {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
+ } else {
+ DCHECK(pc_relative_method_patches_.empty());
+ DCHECK(pc_relative_type_patches_.empty());
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ linker_patches);
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
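
Illustrative sketch (not part of the patch): the ADRP/ADD pair emitted above yields two RelativeMethodPatch entries that share the ADRP's code offset as pc_insn_offset, which is what EmitPcRelativeLinkerPatches() expands them into. All offsets and indices below are assumed.

    const DexFile* dex_file = reinterpret_cast<const DexFile*>(1);  // dummy pointer
    uint32_t adrp_offset = 0x100u;  // assumed code offset of the ADRP placeholder
    uint32_t add_offset  = 0x104u;  // assumed code offset of the following ADD placeholder
    uint32_t method_idx  = 42u;     // assumed target dex method index
    LinkerPatch adrp_patch =
        LinkerPatch::RelativeMethodPatch(adrp_offset, dex_file, adrp_offset, method_idx);
    LinkerPatch add_patch =
        LinkerPatch::RelativeMethodPatch(add_offset, dex_file, adrp_offset, method_idx);
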
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 56444dc..71e221d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -549,12 +549,11 @@
UNIMPLEMENTED(FATAL);
}
- // Add a new PC-relative string patch for an instruction and return the label
+ // Add a new PC-relative method patch for an instruction and return the label
// to be bound before the instruction. The instruction will be either the
// ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
// to the associated ADRP patch label).
- vixl::aarch64::Label* NewPcRelativeStringPatch(const DexFile& dex_file,
- dex::StringIndex string_index,
+ vixl::aarch64::Label* NewPcRelativeMethodPatch(MethodReference target_method,
vixl::aarch64::Label* adrp_label = nullptr);
// Add a new PC-relative type patch for an instruction and return the label
@@ -573,6 +572,14 @@
dex::TypeIndex type_index,
vixl::aarch64::Label* adrp_label = nullptr);
+ // Add a new PC-relative string patch for an instruction and return the label
+ // to be bound before the instruction. The instruction will be either the
+ // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
+ // to the associated ADRP patch label).
+ vixl::aarch64::Label* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index,
+ vixl::aarch64::Label* adrp_label = nullptr);
+
// Add a new PC-relative dex cache array patch for an instruction and return
// the label to be bound before the instruction. The instruction will be
// either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label`
@@ -787,12 +794,14 @@
Uint64ToLiteralMap uint64_literals_;
// PC-relative DexCache access info.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
- ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
+ // PC-relative method patch info for kBootImageLinkTimePcRelative.
+ ArenaDeque<PcRelativePatchInfo> pc_relative_method_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
+ // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
+ ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Baker read barrier patch info.
ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index c4844c1..34821f8 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2500,9 +2500,10 @@
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -9130,6 +9131,13 @@
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(GetCompilerOptions().IsBootImage());
+ PcRelativePatchInfo* labels = NewPcRelativeMethodPatch(invoke->GetTargetMethod());
+ vixl32::Register temp_reg = RegisterFrom(temp);
+ EmitMovwMovtPlaceholder(labels, temp_reg);
+ break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
__ Mov(RegisterFrom(temp), Operand::From(invoke->GetMethodAddress()));
break;
@@ -9246,9 +9254,11 @@
__ blx(lr);
}
-CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeStringPatch(
- const DexFile& dex_file, dex::StringIndex string_index) {
- return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeMethodPatch(
+ MethodReference target_method) {
+ return NewPcRelativePatch(*target_method.dex_file,
+ target_method.dex_method_index,
+ &pc_relative_method_patches_);
}
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeTypePatch(
@@ -9261,6 +9271,11 @@
return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
}
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeStringPatch(
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
+}
+
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -9330,22 +9345,26 @@
DCHECK(linker_patches->empty());
size_t size =
/* MOVW+MOVT for each entry */ 2u * pc_relative_dex_cache_patches_.size() +
- /* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * pc_relative_method_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
baker_read_barrier_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
- if (!GetCompilerOptions().IsBootImage()) {
- DCHECK(pc_relative_type_patches_.empty());
- EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ if (GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(pc_relative_method_patches_,
linker_patches);
- } else {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
+ } else {
+ DCHECK(pc_relative_method_patches_.empty());
+ DCHECK(pc_relative_type_patches_.empty());
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ linker_patches);
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 1cf9923..91f7524 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -563,10 +563,11 @@
vixl::aarch32::Label add_pc_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
- dex::StringIndex string_index);
+ PcRelativePatchInfo* NewPcRelativeMethodPatch(MethodReference target_method);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
@@ -765,12 +766,14 @@
Uint32ToLiteralMap uint32_literals_;
// PC-relative patch info for each HArmDexCacheArraysBase.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
- ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
+ // PC-relative method patch info for kBootImageLinkTimePcRelative.
+ ArenaDeque<PcRelativePatchInfo> pc_relative_method_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
+ // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
+ ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Baker read barrier patch info.
ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 95be3d7..d8ac99a 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1061,9 +1061,10 @@
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
clobbered_ra_(false) {
@@ -1602,30 +1603,36 @@
DCHECK(linker_patches->empty());
size_t size =
pc_relative_dex_cache_patches_.size() +
- pc_relative_string_patches_.size() +
+ pc_relative_method_patches_.size() +
pc_relative_type_patches_.size() +
- type_bss_entry_patches_.size();
+ type_bss_entry_patches_.size() +
+ pc_relative_string_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
- if (!GetCompilerOptions().IsBootImage()) {
- DCHECK(pc_relative_type_patches_.empty());
- EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ if (GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(pc_relative_method_patches_,
linker_patches);
- } else {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
+ } else {
+ DCHECK(pc_relative_method_patches_.empty());
+ DCHECK(pc_relative_type_patches_.empty());
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ linker_patches);
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
DCHECK_EQ(size, linker_patches->size());
}
-CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeStringPatch(
- const DexFile& dex_file, dex::StringIndex string_index) {
- return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
+CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeMethodPatch(
+ MethodReference target_method) {
+ return NewPcRelativePatch(*target_method.dex_file,
+ target_method.dex_method_index,
+ &pc_relative_method_patches_);
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeTypePatch(
@@ -1638,6 +1645,11 @@
return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
}
+CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeStringPatch(
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
+}
+
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -6947,7 +6959,7 @@
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
- bool has_extra_input = invoke->HasPcRelativeDexCache() && !is_r6;
+ bool has_extra_input = invoke->HasPcRelativeMethodLoadKind() && !is_r6;
IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
@@ -7084,6 +7096,7 @@
bool is_r6 = GetInstructionSetFeatures().IsR6();
bool fallback_load = has_irreducible_loops && !is_r6;
switch (dispatch_info.method_load_kind) {
+ case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
break;
default:
@@ -7103,7 +7116,7 @@
HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
bool is_r6 = GetInstructionSetFeatures().IsR6();
- Register base_reg = (invoke->HasPcRelativeDexCache() && !is_r6)
+ Register base_reg = (invoke->HasPcRelativeMethodLoadKind() && !is_r6)
? GetInvokeStaticOrDirectExtraParameter(invoke, temp.AsRegister<Register>())
: ZERO;
@@ -7121,6 +7134,16 @@
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(GetCompilerOptions().IsBootImage());
+ PcRelativePatchInfo* info = NewPcRelativeMethodPatch(invoke->GetTargetMethod());
+ bool reordering = __ SetReorder(false);
+ Register temp_reg = temp.AsRegister<Register>();
+ EmitPcRelativeAddressPlaceholderHigh(info, temp_reg, base_reg);
+ __ Addiu(temp_reg, temp_reg, /* placeholder */ 0x5678);
+ __ SetReorder(reordering);
+ break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
__ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
break;
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 449cb4c..ff1fde6 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -582,10 +582,11 @@
MipsLabel pc_rel_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
- dex::StringIndex string_index);
+ PcRelativePatchInfo* NewPcRelativeMethodPatch(MethodReference target_method);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
@@ -644,12 +645,15 @@
Uint32ToLiteralMap uint32_literals_;
// PC-relative patch info for each HMipsDexCacheArraysBase.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
- ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
+ // PC-relative method patch info for kBootImageLinkTimePcRelative.
+ ArenaDeque<PcRelativePatchInfo> pc_relative_method_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
+ // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
+ ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
+
// Patches for string root accesses in JIT compiled code.
ArenaDeque<JitPatchInfo> jit_string_patches_;
// Patches for class root accesses in JIT compiled code.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 5cdff5a..0961391 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -958,9 +958,10 @@
uint64_literals_(std::less<uint64_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
@@ -1440,30 +1441,36 @@
DCHECK(linker_patches->empty());
size_t size =
pc_relative_dex_cache_patches_.size() +
- pc_relative_string_patches_.size() +
+ pc_relative_method_patches_.size() +
pc_relative_type_patches_.size() +
- type_bss_entry_patches_.size();
+ type_bss_entry_patches_.size() +
+ pc_relative_string_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
- if (!GetCompilerOptions().IsBootImage()) {
- DCHECK(pc_relative_type_patches_.empty());
- EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ if (GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(pc_relative_method_patches_,
linker_patches);
- } else {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
+ } else {
+ DCHECK(pc_relative_method_patches_.empty());
+ DCHECK(pc_relative_type_patches_.empty());
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ linker_patches);
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
DCHECK_EQ(size, linker_patches->size());
}
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeStringPatch(
- const DexFile& dex_file, dex::StringIndex string_index) {
- return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeMethodPatch(
+ MethodReference target_method) {
+ return NewPcRelativePatch(*target_method.dex_file,
+ target_method.dex_method_index,
+ &pc_relative_method_patches_);
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeTypePatch(
@@ -1476,6 +1483,11 @@
return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
}
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeStringPatch(
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
+}
+
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -4923,6 +4935,14 @@
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(GetCompilerOptions().IsBootImage());
+ CodeGeneratorMIPS64::PcRelativePatchInfo* info =
+ NewPcRelativeMethodPatch(invoke->GetTargetMethod());
+ EmitPcRelativeAddressPlaceholderHigh(info, AT);
+ __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
__ LoadLiteral(temp.AsRegister<GpuRegister>(),
kLoadDoubleword,
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 1f34ced..f49ad49 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -545,10 +545,11 @@
Mips64Label pc_rel_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
- dex::StringIndex string_index);
+ PcRelativePatchInfo* NewPcRelativeMethodPatch(MethodReference target_method);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
PcRelativePatchInfo* NewPcRelativeCallPatch(const DexFile& dex_file,
@@ -605,12 +606,15 @@
Uint64ToLiteralMap uint64_literals_;
// PC-relative patch info.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
- ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
+ // PC-relative method patch info for kBootImageLinkTimePcRelative.
+ ArenaDeque<PcRelativePatchInfo> pc_relative_method_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
+ // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
+ ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
+
// Patches for string root accesses in JIT compiled code.
StringToLiteralMap jit_string_patches_;
// Patches for class root accesses in JIT compiled code.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 4a279d8..f3ec112 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1032,9 +1032,10 @@
assembler_(graph->GetArena()),
isa_features_(isa_features),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
constant_area_start_(-1),
@@ -2167,7 +2168,7 @@
IntrinsicLocationsBuilderX86 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
- if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeDexCache()) {
+ if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeMethodLoadKind()) {
invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
}
return;
@@ -2176,7 +2177,7 @@
HandleInvoke(invoke);
// For PC-relative dex cache the invoke has an extra input, the PC-relative address base.
- if (invoke->HasPcRelativeDexCache()) {
+ if (invoke->HasPcRelativeMethodLoadKind()) {
invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
}
}
@@ -4543,6 +4544,14 @@
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(GetCompilerOptions().IsBootImage());
+ Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
+ temp.AsRegister<Register>());
+ __ leal(temp.AsRegister<Register>(), Address(base_reg, CodeGeneratorX86::kDummy32BitOffset));
+ RecordBootMethodPatch(invoke);
+ break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
__ movl(temp.AsRegister<Register>(), Immediate(invoke->GetMethodAddress()));
break;
@@ -4631,13 +4640,14 @@
temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize).Int32Value()));
}
-void CodeGeneratorX86::RecordBootStringPatch(HLoadString* load_string) {
- DCHECK(GetCompilerOptions().IsBootImage());
- HX86ComputeBaseMethodAddress* address = load_string->InputAt(0)->AsX86ComputeBaseMethodAddress();
- string_patches_.emplace_back(address,
- load_string->GetDexFile(),
- load_string->GetStringIndex().index_);
- __ Bind(&string_patches_.back().label);
+void CodeGeneratorX86::RecordBootMethodPatch(HInvokeStaticOrDirect* invoke) {
+ DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
+ HX86ComputeBaseMethodAddress* address =
+ invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
+ boot_image_method_patches_.emplace_back(address,
+ *invoke->GetTargetMethod().dex_file,
+ invoke->GetTargetMethod().dex_method_index);
+ __ Bind(&boot_image_method_patches_.back().label);
}
void CodeGeneratorX86::RecordBootTypePatch(HLoadClass* load_class) {
@@ -4656,6 +4666,15 @@
return &type_bss_entry_patches_.back().label;
}
+void CodeGeneratorX86::RecordBootStringPatch(HLoadString* load_string) {
+ DCHECK(GetCompilerOptions().IsBootImage());
+ HX86ComputeBaseMethodAddress* address = load_string->InputAt(0)->AsX86ComputeBaseMethodAddress();
+ string_patches_.emplace_back(address,
+ load_string->GetDexFile(),
+ load_string->GetStringIndex().index_);
+ __ Bind(&string_patches_.back().label);
+}
+
Label* CodeGeneratorX86::NewStringBssEntryPatch(HLoadString* load_string) {
DCHECK(!GetCompilerOptions().IsBootImage());
HX86ComputeBaseMethodAddress* address =
@@ -4693,17 +4712,21 @@
DCHECK(linker_patches->empty());
size_t size =
pc_relative_dex_cache_patches_.size() +
- string_patches_.size() +
+ boot_image_method_patches_.size() +
boot_image_type_patches_.size() +
- type_bss_entry_patches_.size();
+ type_bss_entry_patches_.size() +
+ string_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
if (GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(boot_image_method_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(boot_image_type_patches_,
linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
} else {
+ DCHECK(boot_image_method_patches_.empty());
DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index f08d642..21c527e 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -413,9 +413,10 @@
// Generate a call to a virtual method.
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
- void RecordBootStringPatch(HLoadString* load_string);
+ void RecordBootMethodPatch(HInvokeStaticOrDirect* invoke);
void RecordBootTypePatch(HLoadClass* load_class);
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
+ void RecordBootStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(HX86ComputeBaseMethodAddress* method_address,
const DexFile& dex_file,
@@ -633,16 +634,17 @@
// PC-relative DexCache access info.
ArenaDeque<X86PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // String patch locations; type depends on configuration (app .bss or boot image).
- ArenaDeque<X86PcRelativePatchInfo> string_patches_;
+ // PC-relative method patch info for kBootImageLinkTimePcRelative.
+ ArenaDeque<X86PcRelativePatchInfo> boot_image_method_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<X86PcRelativePatchInfo> boot_image_type_patches_;
// Type patch locations for kBssEntry.
ArenaDeque<X86PcRelativePatchInfo> type_bss_entry_patches_;
+ // String patch locations; type depends on configuration (app .bss or boot image).
+ ArenaDeque<X86PcRelativePatchInfo> string_patches_;
// Patches for string root accesses in JIT compiled code.
ArenaDeque<PatchInfo<Label>> jit_string_patches_;
-
// Patches for class root accesses in JIT compiled code.
ArenaDeque<PatchInfo<Label>> jit_class_patches_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ac0f37b..bf1c42a 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -991,6 +991,12 @@
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
+ DCHECK(GetCompilerOptions().IsBootImage());
+ __ leal(temp.AsRegister<CpuRegister>(),
+ Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+ RecordBootMethodPatch(invoke);
+ break;
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
Load64BitValue(temp.AsRegister<CpuRegister>(), invoke->GetMethodAddress());
break;
@@ -1079,10 +1085,10 @@
kX86_64PointerSize).SizeValue()));
}
-void CodeGeneratorX86_64::RecordBootStringPatch(HLoadString* load_string) {
- DCHECK(GetCompilerOptions().IsBootImage());
- string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
- __ Bind(&string_patches_.back().label);
+void CodeGeneratorX86_64::RecordBootMethodPatch(HInvokeStaticOrDirect* invoke) {
+ boot_image_method_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+ invoke->GetTargetMethod().dex_method_index);
+ __ Bind(&boot_image_method_patches_.back().label);
}
void CodeGeneratorX86_64::RecordBootTypePatch(HLoadClass* load_class) {
@@ -1096,6 +1102,12 @@
return &type_bss_entry_patches_.back().label;
}
+void CodeGeneratorX86_64::RecordBootStringPatch(HLoadString* load_string) {
+ DCHECK(GetCompilerOptions().IsBootImage());
+ string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
+ __ Bind(&string_patches_.back().label);
+}
+
Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
DCHECK(!GetCompilerOptions().IsBootImage());
string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
@@ -1128,17 +1140,21 @@
DCHECK(linker_patches->empty());
size_t size =
pc_relative_dex_cache_patches_.size() +
- string_patches_.size() +
+ boot_image_method_patches_.size() +
boot_image_type_patches_.size() +
- type_bss_entry_patches_.size();
+ type_bss_entry_patches_.size() +
+ string_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
if (GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(boot_image_method_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(boot_image_type_patches_,
linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
} else {
+ DCHECK(boot_image_method_patches_.empty());
DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
}
@@ -1231,12 +1247,13 @@
isa_features_(isa_features),
constant_area_start_(0),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d8005cc..3039e05 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -408,9 +408,10 @@
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
- void RecordBootStringPatch(HLoadString* load_string);
+ void RecordBootMethodPatch(HInvokeStaticOrDirect* invoke);
void RecordBootTypePatch(HLoadClass* load_class);
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
+ void RecordBootStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
Label* NewJitRootStringPatch(const DexFile& dex_file,
@@ -603,22 +604,23 @@
// PC-relative DexCache access info.
ArenaDeque<PatchInfo<Label>> pc_relative_dex_cache_patches_;
- // String patch locations; type depends on configuration (app .bss or boot image).
- ArenaDeque<PatchInfo<Label>> string_patches_;
+ // PC-relative method patch info for kBootImageLinkTimePcRelative.
+ ArenaDeque<PatchInfo<Label>> boot_image_method_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PatchInfo<Label>> boot_image_type_patches_;
// Type patch locations for kBssEntry.
ArenaDeque<PatchInfo<Label>> type_bss_entry_patches_;
-
- // Fixups for jump tables need to be handled specially.
- ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+ // String patch locations; type depends on configuration (app .bss or boot image).
+ ArenaDeque<PatchInfo<Label>> string_patches_;
// Patches for string literals in JIT compiled code.
ArenaDeque<PatchInfo<Label>> jit_string_patches_;
-
// Patches for class literals in JIT compiled code.
ArenaDeque<PatchInfo<Label>> jit_class_patches_;
+ // Fixups for jump tables need to be handled specially.
+ ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 8674e72..0ec6ee2 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1856,7 +1856,7 @@
HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner");
HConstantFolding fold(callee_graph, "constant_folding$inliner");
HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_, handles_);
- InstructionSimplifier simplify(callee_graph, codegen_, inline_stats_);
+ InstructionSimplifier simplify(callee_graph, codegen_, compiler_driver_, inline_stats_);
IntrinsicsRecognizer intrinsics(callee_graph, inline_stats_);
HOptimization* optimizations[] = {
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 2cedde9..d147166 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -30,9 +30,11 @@
public:
InstructionSimplifierVisitor(HGraph* graph,
CodeGenerator* codegen,
+ CompilerDriver* compiler_driver,
OptimizingCompilerStats* stats)
: HGraphDelegateVisitor(graph),
codegen_(codegen),
+ compiler_driver_(compiler_driver),
stats_(stats) {}
void Run();
@@ -119,6 +121,7 @@
void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind);
CodeGenerator* codegen_;
+ CompilerDriver* compiler_driver_;
OptimizingCompilerStats* stats_;
bool simplification_occurred_ = false;
int simplifications_at_current_position_ = 0;
@@ -130,7 +133,7 @@
};
void InstructionSimplifier::Run() {
- InstructionSimplifierVisitor visitor(graph_, codegen_, stats_);
+ InstructionSimplifierVisitor visitor(graph_, codegen_, compiler_driver_, stats_);
visitor.Run();
}
@@ -1896,7 +1899,7 @@
// the invoke, as we would need to look it up in the current dex file, and it
// is unlikely that it exists. The most usual situation for such typed
// arraycopy methods is a direct pointer to the boot image.
- HSharpening::SharpenInvokeStaticOrDirect(invoke, codegen_);
+ HSharpening::SharpenInvokeStaticOrDirect(invoke, codegen_, compiler_driver_);
}
}
}
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index f7329a4..5e20455 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -24,6 +24,7 @@
namespace art {
class CodeGenerator;
+class CompilerDriver;
/**
* Implements optimizations specific to each instruction.
@@ -37,12 +38,14 @@
*/
class InstructionSimplifier : public HOptimization {
public:
- explicit InstructionSimplifier(HGraph* graph,
- CodeGenerator* codegen,
- OptimizingCompilerStats* stats = nullptr,
- const char* name = kInstructionSimplifierPassName)
+ InstructionSimplifier(HGraph* graph,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver,
+ OptimizingCompilerStats* stats = nullptr,
+ const char* name = kInstructionSimplifierPassName)
: HOptimization(graph, name, stats),
- codegen_(codegen) {}
+ codegen_(codegen),
+ compiler_driver_(compiler_driver) {}
static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
@@ -50,6 +53,7 @@
private:
CodeGenerator* codegen_;
+ CompilerDriver* compiler_driver_;
DISALLOW_COPY_AND_ASSIGN(InstructionSimplifier);
};
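
With CompilerDriver added to the constructor, every call site now has to thread the driver through, as the inliner hunk above already does. A minimal sketch of such a call site; the variable names are assumptions, only the constructor shape comes from this change:

// Illustrative call site; mirrors the inliner update above.
InstructionSimplifier simplify(graph, codegen, compiler_driver, stats);
simplify.Run();
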
diff --git a/compiler/optimizing/load_store_analysis.cc b/compiler/optimizing/load_store_analysis.cc
new file mode 100644
index 0000000..f2ee345
--- /dev/null
+++ b/compiler/optimizing/load_store_analysis.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "load_store_analysis.h"
+
+namespace art {
+
+// A cap for the number of heap locations to prevent pathological time/space consumption.
+// The number of heap locations for most of the methods stays below this threshold.
+constexpr size_t kMaxNumberOfHeapLocations = 32;
+
+void LoadStoreAnalysis::Run() {
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ heap_location_collector_.VisitBasicBlock(block);
+ }
+
+ if (heap_location_collector_.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
+ // Bail out if there are too many heap locations to deal with.
+ heap_location_collector_.CleanUp();
+ return;
+ }
+ if (!heap_location_collector_.HasHeapStores()) {
+ // Without heap stores, this pass would act mostly as GVN on heap accesses.
+ heap_location_collector_.CleanUp();
+ return;
+ }
+ if (heap_location_collector_.HasVolatile() || heap_location_collector_.HasMonitorOps()) {
+ // Don't do load/store elimination if the method has volatile field accesses or
+ // monitor operations, for now.
+ // TODO: do it right.
+ heap_location_collector_.CleanUp();
+ return;
+ }
+
+ heap_location_collector_.BuildAliasingMatrix();
+}
+
+} // namespace art
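
Because every bail-out path above calls CleanUp(), a consuming pass can treat an empty collector as "no information" instead of re-checking the individual bail-out conditions. A hedged sketch of that consumer pattern; the surrounding pass body is assumed, the accessors are the ones declared in load_store_analysis.h below:

// Illustrative consumer of the analysis results.
LoadStoreAnalysis lsa(graph);
lsa.Run();
const HeapLocationCollector& collector = lsa.GetHeapLocationCollector();
if (collector.GetNumberOfHeapLocations() == 0) {
  return;  // LSA bailed out: too many locations, no stores, or volatile/monitor use.
}
// From here on, collector.MayAlias(i, j) and friends are valid queries.
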
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
new file mode 100644
index 0000000..4e940f3
--- /dev/null
+++ b/compiler/optimizing/load_store_analysis.h
@@ -0,0 +1,518 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_LOAD_STORE_ANALYSIS_H_
+#define ART_COMPILER_OPTIMIZING_LOAD_STORE_ANALYSIS_H_
+
+#include "escape.h"
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+// A ReferenceInfo contains additional info about a reference such as
+// whether it's a singleton, returned, etc.
+class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
+ public:
+ ReferenceInfo(HInstruction* reference, size_t pos)
+ : reference_(reference),
+ position_(pos),
+ is_singleton_(true),
+ is_singleton_and_not_returned_(true),
+ is_singleton_and_not_deopt_visible_(true),
+ has_index_aliasing_(false) {
+ CalculateEscape(reference_,
+ nullptr,
+ &is_singleton_,
+ &is_singleton_and_not_returned_,
+ &is_singleton_and_not_deopt_visible_);
+ }
+
+ HInstruction* GetReference() const {
+ return reference_;
+ }
+
+ size_t GetPosition() const {
+ return position_;
+ }
+
+ // Returns true if reference_ is the only name that can refer to its value during
+ // the lifetime of the method. So it's guaranteed to not have any alias in
+ // the method (including its callees).
+ bool IsSingleton() const {
+ return is_singleton_;
+ }
+
+ // Returns true if reference_ is a singleton and not returned to the caller or
+ // used as an environment local of an HDeoptimize instruction.
+ // The allocation and stores into reference_ may be eliminated for such cases.
+ bool IsSingletonAndRemovable() const {
+ return is_singleton_and_not_returned_ && is_singleton_and_not_deopt_visible_;
+ }
+
+ // Returns true if reference_ is a singleton and returned to the caller or
+ // used as an environment local of an HDeoptimize instruction.
+ bool IsSingletonAndNonRemovable() const {
+ return is_singleton_ &&
+ (!is_singleton_and_not_returned_ || !is_singleton_and_not_deopt_visible_);
+ }
+
+ bool HasIndexAliasing() {
+ return has_index_aliasing_;
+ }
+
+ void SetHasIndexAliasing(bool has_index_aliasing) {
+ // Only allow setting to true.
+ DCHECK(has_index_aliasing);
+ has_index_aliasing_ = has_index_aliasing;
+ }
+
+ private:
+ HInstruction* const reference_;
+ const size_t position_; // position in HeapLocationCollector's ref_info_array_.
+
+ // Can only be referred to by a single name in the method.
+ bool is_singleton_;
+ // Is singleton and not returned to caller.
+ bool is_singleton_and_not_returned_;
+ // Is singleton and not used as an environment local of HDeoptimize.
+ bool is_singleton_and_not_deopt_visible_;
+ // Some heap locations with reference_ have array index aliasing,
+ // e.g. arr[i] and arr[j] may be the same location.
+ bool has_index_aliasing_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
+};
+
+// A heap location is a reference-offset/index pair that a value can be loaded from
+// or stored to.
+class HeapLocation : public ArenaObject<kArenaAllocMisc> {
+ public:
+ static constexpr size_t kInvalidFieldOffset = -1;
+
+ // TODO: more fine-grained array types.
+ static constexpr int16_t kDeclaringClassDefIndexForArrays = -1;
+
+ HeapLocation(ReferenceInfo* ref_info,
+ size_t offset,
+ HInstruction* index,
+ int16_t declaring_class_def_index)
+ : ref_info_(ref_info),
+ offset_(offset),
+ index_(index),
+ declaring_class_def_index_(declaring_class_def_index),
+ value_killed_by_loop_side_effects_(true) {
+ DCHECK(ref_info != nullptr);
+ DCHECK((offset == kInvalidFieldOffset && index != nullptr) ||
+ (offset != kInvalidFieldOffset && index == nullptr));
+ if (ref_info->IsSingleton() && !IsArrayElement()) {
+ // Assume this location's value cannot be killed by loop side effects
+ // until proven otherwise.
+ value_killed_by_loop_side_effects_ = false;
+ }
+ }
+
+ ReferenceInfo* GetReferenceInfo() const { return ref_info_; }
+ size_t GetOffset() const { return offset_; }
+ HInstruction* GetIndex() const { return index_; }
+
+ // Returns the dex def index of the declaring class.
+ // It's kDeclaringClassDefIndexForArrays for an array element.
+ int16_t GetDeclaringClassDefIndex() const {
+ return declaring_class_def_index_;
+ }
+
+ bool IsArrayElement() const {
+ return index_ != nullptr;
+ }
+
+ bool IsValueKilledByLoopSideEffects() const {
+ return value_killed_by_loop_side_effects_;
+ }
+
+ void SetValueKilledByLoopSideEffects(bool val) {
+ value_killed_by_loop_side_effects_ = val;
+ }
+
+ private:
+ ReferenceInfo* const ref_info_; // reference for instance/static field or array access.
+ const size_t offset_; // offset of static/instance field.
+ HInstruction* const index_; // index of an array element.
+ const int16_t declaring_class_def_index_; // declaring class's def's dex index.
+ bool value_killed_by_loop_side_effects_; // value of this location may be killed by loop
+ // side effects because this location is stored
+ // into inside a loop. This gives
+ // better info on whether a singleton's location
+ // value may be killed by loop side effects.
+
+ DISALLOW_COPY_AND_ASSIGN(HeapLocation);
+};
+
+// A HeapLocationCollector collects all relevant heap locations and keeps
+// an aliasing matrix for all locations.
+class HeapLocationCollector : public HGraphVisitor {
+ public:
+ static constexpr size_t kHeapLocationNotFound = -1;
+ // Start with a single uint32_t word. That's enough bits for pair-wise
+ // aliasing matrix of 8 heap locations.
+ static constexpr uint32_t kInitialAliasingMatrixBitVectorSize = 32;
+
+ explicit HeapLocationCollector(HGraph* graph)
+ : HGraphVisitor(graph),
+ ref_info_array_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+ heap_locations_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+ aliasing_matrix_(graph->GetArena(),
+ kInitialAliasingMatrixBitVectorSize,
+ true,
+ kArenaAllocLSE),
+ has_heap_stores_(false),
+ has_volatile_(false),
+ has_monitor_operations_(false) {}
+
+ void CleanUp() {
+ heap_locations_.clear();
+ ref_info_array_.clear();
+ }
+
+ size_t GetNumberOfHeapLocations() const {
+ return heap_locations_.size();
+ }
+
+ HeapLocation* GetHeapLocation(size_t index) const {
+ return heap_locations_[index];
+ }
+
+ HInstruction* HuntForOriginalReference(HInstruction* ref) const {
+ DCHECK(ref != nullptr);
+ while (ref->IsNullCheck() || ref->IsBoundType()) {
+ ref = ref->InputAt(0);
+ }
+ return ref;
+ }
+
+ ReferenceInfo* FindReferenceInfoOf(HInstruction* ref) const {
+ for (size_t i = 0; i < ref_info_array_.size(); i++) {
+ ReferenceInfo* ref_info = ref_info_array_[i];
+ if (ref_info->GetReference() == ref) {
+ DCHECK_EQ(i, ref_info->GetPosition());
+ return ref_info;
+ }
+ }
+ return nullptr;
+ }
+
+ bool HasHeapStores() const {
+ return has_heap_stores_;
+ }
+
+ bool HasVolatile() const {
+ return has_volatile_;
+ }
+
+ bool HasMonitorOps() const {
+ return has_monitor_operations_;
+ }
+
+ // Find and return the heap location index in heap_locations_.
+ size_t FindHeapLocationIndex(ReferenceInfo* ref_info,
+ size_t offset,
+ HInstruction* index,
+ int16_t declaring_class_def_index) const {
+ for (size_t i = 0; i < heap_locations_.size(); i++) {
+ HeapLocation* loc = heap_locations_[i];
+ if (loc->GetReferenceInfo() == ref_info &&
+ loc->GetOffset() == offset &&
+ loc->GetIndex() == index &&
+ loc->GetDeclaringClassDefIndex() == declaring_class_def_index) {
+ return i;
+ }
+ }
+ return kHeapLocationNotFound;
+ }
+
+ // Returns true if heap_locations_[index1] and heap_locations_[index2] may alias.
+ bool MayAlias(size_t index1, size_t index2) const {
+ if (index1 < index2) {
+ return aliasing_matrix_.IsBitSet(AliasingMatrixPosition(index1, index2));
+ } else if (index1 > index2) {
+ return aliasing_matrix_.IsBitSet(AliasingMatrixPosition(index2, index1));
+ } else {
+ DCHECK(false) << "index1 and index2 are expected to be different";
+ return true;
+ }
+ }
+
+ void BuildAliasingMatrix() {
+ const size_t number_of_locations = heap_locations_.size();
+ if (number_of_locations == 0) {
+ return;
+ }
+ size_t pos = 0;
+ // Compute aliasing info between every pair of different heap locations.
+ // Save the result in a matrix represented as a BitVector.
+ for (size_t i = 0; i < number_of_locations - 1; i++) {
+ for (size_t j = i + 1; j < number_of_locations; j++) {
+ if (ComputeMayAlias(i, j)) {
+ aliasing_matrix_.SetBit(CheckedAliasingMatrixPosition(i, j, pos));
+ }
+ pos++;
+ }
+ }
+ }
+
+ private:
+ // An allocation cannot alias with a name which already exists at the point
+ // of the allocation, such as a parameter or a load happening before the allocation.
+ bool MayAliasWithPreexistenceChecking(ReferenceInfo* ref_info1, ReferenceInfo* ref_info2) const {
+ if (ref_info1->GetReference()->IsNewInstance() || ref_info1->GetReference()->IsNewArray()) {
+ // Any reference that can alias with the allocation must appear after it in the block/in
+ // the block's successors. In reverse post order, those instructions will be visited after
+ // the allocation.
+ return ref_info2->GetPosition() >= ref_info1->GetPosition();
+ }
+ return true;
+ }
+
+ bool CanReferencesAlias(ReferenceInfo* ref_info1, ReferenceInfo* ref_info2) const {
+ if (ref_info1 == ref_info2) {
+ return true;
+ } else if (ref_info1->IsSingleton()) {
+ return false;
+ } else if (ref_info2->IsSingleton()) {
+ return false;
+ } else if (!MayAliasWithPreexistenceChecking(ref_info1, ref_info2) ||
+ !MayAliasWithPreexistenceChecking(ref_info2, ref_info1)) {
+ return false;
+ }
+ return true;
+ }
+
+ // `index1` and `index2` are indices in the array of collected heap locations.
+ // Returns the position in the bit vector that tracks whether the two heap
+ // locations may alias.
+ size_t AliasingMatrixPosition(size_t index1, size_t index2) const {
+ DCHECK(index2 > index1);
+ const size_t number_of_locations = heap_locations_.size();
+ // It's (num_of_locations - 1) + ... + (num_of_locations - index1) + (index2 - index1 - 1).
+ return (number_of_locations * index1 - (1 + index1) * index1 / 2 + (index2 - index1 - 1));
+ }
+
+ // An additional position is passed in to make sure the calculated position is correct.
+ size_t CheckedAliasingMatrixPosition(size_t index1, size_t index2, size_t position) {
+ size_t calculated_position = AliasingMatrixPosition(index1, index2);
+ DCHECK_EQ(calculated_position, position);
+ return calculated_position;
+ }
+
+ // Compute if two locations may alias to each other.
+ bool ComputeMayAlias(size_t index1, size_t index2) const {
+ HeapLocation* loc1 = heap_locations_[index1];
+ HeapLocation* loc2 = heap_locations_[index2];
+ if (loc1->GetOffset() != loc2->GetOffset()) {
+ // Either two different instance fields, or one is an instance
+ // field and the other is an array element.
+ return false;
+ }
+ if (loc1->GetDeclaringClassDefIndex() != loc2->GetDeclaringClassDefIndex()) {
+ // Different types.
+ return false;
+ }
+ if (!CanReferencesAlias(loc1->GetReferenceInfo(), loc2->GetReferenceInfo())) {
+ return false;
+ }
+ if (loc1->IsArrayElement() && loc2->IsArrayElement()) {
+ HInstruction* array_index1 = loc1->GetIndex();
+ HInstruction* array_index2 = loc2->GetIndex();
+ DCHECK(array_index1 != nullptr);
+ DCHECK(array_index2 != nullptr);
+ if (array_index1->IsIntConstant() &&
+ array_index2->IsIntConstant() &&
+ array_index1->AsIntConstant()->GetValue() != array_index2->AsIntConstant()->GetValue()) {
+ // Different constant indices do not alias.
+ return false;
+ }
+ ReferenceInfo* ref_info = loc1->GetReferenceInfo();
+ ref_info->SetHasIndexAliasing(true);
+ }
+ return true;
+ }
+
+ ReferenceInfo* GetOrCreateReferenceInfo(HInstruction* instruction) {
+ ReferenceInfo* ref_info = FindReferenceInfoOf(instruction);
+ if (ref_info == nullptr) {
+ size_t pos = ref_info_array_.size();
+ ref_info = new (GetGraph()->GetArena()) ReferenceInfo(instruction, pos);
+ ref_info_array_.push_back(ref_info);
+ }
+ return ref_info;
+ }
+
+ void CreateReferenceInfoForReferenceType(HInstruction* instruction) {
+ if (instruction->GetType() != Primitive::kPrimNot) {
+ return;
+ }
+ DCHECK(FindReferenceInfoOf(instruction) == nullptr);
+ GetOrCreateReferenceInfo(instruction);
+ }
+
+ HeapLocation* GetOrCreateHeapLocation(HInstruction* ref,
+ size_t offset,
+ HInstruction* index,
+ int16_t declaring_class_def_index) {
+ HInstruction* original_ref = HuntForOriginalReference(ref);
+ ReferenceInfo* ref_info = GetOrCreateReferenceInfo(original_ref);
+ size_t heap_location_idx = FindHeapLocationIndex(
+ ref_info, offset, index, declaring_class_def_index);
+ if (heap_location_idx == kHeapLocationNotFound) {
+ HeapLocation* heap_loc = new (GetGraph()->GetArena())
+ HeapLocation(ref_info, offset, index, declaring_class_def_index);
+ heap_locations_.push_back(heap_loc);
+ return heap_loc;
+ }
+ return heap_locations_[heap_location_idx];
+ }
+
+ HeapLocation* VisitFieldAccess(HInstruction* ref, const FieldInfo& field_info) {
+ if (field_info.IsVolatile()) {
+ has_volatile_ = true;
+ }
+ const uint16_t declaring_class_def_index = field_info.GetDeclaringClassDefIndex();
+ const size_t offset = field_info.GetFieldOffset().SizeValue();
+ return GetOrCreateHeapLocation(ref, offset, nullptr, declaring_class_def_index);
+ }
+
+ void VisitArrayAccess(HInstruction* array, HInstruction* index) {
+ GetOrCreateHeapLocation(array, HeapLocation::kInvalidFieldOffset,
+ index, HeapLocation::kDeclaringClassDefIndexForArrays);
+ }
+
+ void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+ VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
+ void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+ HeapLocation* location = VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
+ has_heap_stores_ = true;
+ if (location->GetReferenceInfo()->IsSingleton()) {
+ // A singleton's location value may be killed by loop side effects if it's
+ // defined before that loop, and it's stored into inside that loop.
+ HLoopInformation* loop_info = instruction->GetBlock()->GetLoopInformation();
+ if (loop_info != nullptr) {
+ HInstruction* ref = location->GetReferenceInfo()->GetReference();
+ DCHECK(ref->IsNewInstance());
+ if (loop_info->IsDefinedOutOfTheLoop(ref)) {
+ // ref's location value may be killed by this loop's side effects.
+ location->SetValueKilledByLoopSideEffects(true);
+ } else {
+ // ref is defined inside this loop so this loop's side effects cannot
+ // kill its location value at the loop header since ref/its location doesn't
+ // exist yet at the loop header.
+ }
+ }
+ } else {
+ // For non-singletons, value_killed_by_loop_side_effects_ is initialized
+ // to true.
+ DCHECK_EQ(location->IsValueKilledByLoopSideEffects(), true);
+ }
+ }
+
+ void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+ VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
+ void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+ VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
+ has_heap_stores_ = true;
+ }
+
+ // We intentionally don't collect HUnresolvedInstanceField/HUnresolvedStaticField accesses
+ // since we cannot accurately track the fields.
+
+ void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+ VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
+ void VisitArraySet(HArraySet* instruction) OVERRIDE {
+ VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+ has_heap_stores_ = true;
+ }
+
+ void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
+ // Any references appearing in the ref_info_array_ so far cannot alias with new_instance.
+ CreateReferenceInfoForReferenceType(new_instance);
+ }
+
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* instruction) OVERRIDE {
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
+ void VisitInvokeVirtual(HInvokeVirtual* instruction) OVERRIDE {
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
+ void VisitInvokeInterface(HInvokeInterface* instruction) OVERRIDE {
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
+ void VisitParameterValue(HParameterValue* instruction) OVERRIDE {
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
+ void VisitSelect(HSelect* instruction) OVERRIDE {
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
+ void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE {
+ has_monitor_operations_ = true;
+ }
+
+ ArenaVector<ReferenceInfo*> ref_info_array_; // All references used for heap accesses.
+ ArenaVector<HeapLocation*> heap_locations_; // All heap locations.
+ ArenaBitVector aliasing_matrix_; // aliasing info between each pair of locations.
+ bool has_heap_stores_; // If there are no heap stores, LSE acts as GVN with better
+ // alias analysis and won't be as effective.
+ bool has_volatile_; // If there are volatile field accesses.
+ bool has_monitor_operations_; // If there are monitor operations.
+
+ DISALLOW_COPY_AND_ASSIGN(HeapLocationCollector);
+};
+
+class LoadStoreAnalysis : public HOptimization {
+ public:
+ explicit LoadStoreAnalysis(HGraph* graph)
+ : HOptimization(graph, kLoadStoreAnalysisPassName),
+ heap_location_collector_(graph) {}
+
+ const HeapLocationCollector& GetHeapLocationCollector() const {
+ return heap_location_collector_;
+ }
+
+ void Run() OVERRIDE;
+
+ static constexpr const char* kLoadStoreAnalysisPassName = "load_store_analysis";
+
+ private:
+ HeapLocationCollector heap_location_collector_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStoreAnalysis);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_LOAD_STORE_ANALYSIS_H_
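
AliasingMatrixPosition packs the strictly upper triangle of the pair-wise matrix row by row into the bit vector. A standalone worked example, outside any ART types, that reproduces the formula and checks it against the sequential numbering used by BuildAliasingMatrix; for n = 4 locations the pairs map to bits (0,1)->0, (0,2)->1, (0,3)->2, (1,2)->3, (1,3)->4, (2,3)->5:

#include <cassert>
#include <cstddef>

// Same formula as AliasingMatrixPosition: row index1 of the upper triangle
// starts after (n - 1) + ... + (n - index1) earlier bits, and index2 lands
// (index2 - index1 - 1) bits into that row.
size_t PairToBit(size_t n, size_t index1, size_t index2) {
  return n * index1 - (1 + index1) * index1 / 2 + (index2 - index1 - 1);
}

int main() {
  const size_t n = 4;  // four heap locations -> 6 distinct pairs
  size_t pos = 0;
  for (size_t i = 0; i + 1 < n; ++i) {    // iteration order of BuildAliasingMatrix
    for (size_t j = i + 1; j < n; ++j) {
      assert(PairToBit(n, i, j) == pos);
      ++pos;
    }
  }
  return 0;
}
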
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
new file mode 100644
index 0000000..2418777
--- /dev/null
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "load_store_analysis.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+class LoadStoreAnalysisTest : public CommonCompilerTest {
+ public:
+ LoadStoreAnalysisTest() : pool_(), allocator_(&pool_) {
+ graph_ = CreateGraph(&allocator_);
+ }
+
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ HGraph* graph_;
+};
+
+TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+
+ // entry:
+ // array ParameterValue
+ // index ParameterValue
+ // c1 IntConstant
+ // c2 IntConstant
+ // c3 IntConstant
+ // array_get1 ArrayGet [array, c1]
+ // array_get2 ArrayGet [array, c2]
+ // array_set1 ArraySet [array, c1, c3]
+ // array_set2 ArraySet [array, index, c3]
+ HInstruction* array = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot);
+ HInstruction* index = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), dex::TypeIndex(1), 1, Primitive::kPrimInt);
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* array_get1 = new (&allocator_) HArrayGet(array, c1, Primitive::kPrimInt, 0);
+ HInstruction* array_get2 = new (&allocator_) HArrayGet(array, c2, Primitive::kPrimInt, 0);
+ HInstruction* array_set1 = new (&allocator_) HArraySet(array, c1, c3, Primitive::kPrimInt, 0);
+ HInstruction* array_set2 = new (&allocator_) HArraySet(array, index, c3, Primitive::kPrimInt, 0);
+ entry->AddInstruction(array);
+ entry->AddInstruction(index);
+ entry->AddInstruction(array_get1);
+ entry->AddInstruction(array_get2);
+ entry->AddInstruction(array_set1);
+ entry->AddInstruction(array_set2);
+
+ // Test HeapLocationCollector initialization.
+ // Should be no heap locations, no operations on the heap.
+ HeapLocationCollector heap_location_collector(graph_);
+ ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 0U);
+ ASSERT_FALSE(heap_location_collector.HasHeapStores());
+
+ // Test that after visiting the graph, the collector sees the following heap
+ // locations: array[c1], array[c2], array[index], and that it sees heap stores.
+ heap_location_collector.VisitBasicBlock(entry);
+ ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 3U);
+ ASSERT_TRUE(heap_location_collector.HasHeapStores());
+
+ // Test queries on HeapLocationCollector's ref info and index records.
+ ReferenceInfo* ref = heap_location_collector.FindReferenceInfoOf(array);
+ size_t field_off = HeapLocation::kInvalidFieldOffset;
+ size_t class_def = HeapLocation::kDeclaringClassDefIndexForArrays;
+ size_t loc1 = heap_location_collector.FindHeapLocationIndex(ref, field_off, c1, class_def);
+ size_t loc2 = heap_location_collector.FindHeapLocationIndex(ref, field_off, c2, class_def);
+ size_t loc3 = heap_location_collector.FindHeapLocationIndex(ref, field_off, index, class_def);
+ // must find this reference info for array in HeapLocationCollector.
+ ASSERT_TRUE(ref != nullptr);
+ // must find these heap locations;
+ // and array[c1], array[c2], array[index] should be different heap locations.
+ ASSERT_TRUE(loc1 != HeapLocationCollector::kHeapLocationNotFound);
+ ASSERT_TRUE(loc2 != HeapLocationCollector::kHeapLocationNotFound);
+ ASSERT_TRUE(loc3 != HeapLocationCollector::kHeapLocationNotFound);
+ ASSERT_TRUE(loc1 != loc2);
+ ASSERT_TRUE(loc2 != loc3);
+ ASSERT_TRUE(loc1 != loc3);
+
+ // Test alias relationships after building aliasing matrix.
+ // array[1] and array[2] clearly should not alias;
+ // array[index] should alias with the others, because index is an unknown value.
+ heap_location_collector.BuildAliasingMatrix();
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc3));
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc2, loc3));
+}
+
+TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+
+ // entry:
+ // object ParameterValue
+ // c1 IntConstant
+ // set_field10 InstanceFieldSet [object, c1, 10]
+ // get_field10 InstanceFieldGet [object, 10]
+ // get_field20 InstanceFieldGet [object, 20]
+
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* object = new (&allocator_) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ Primitive::kPrimNot);
+ HInstanceFieldSet* set_field10 = new (&allocator_) HInstanceFieldSet(object,
+ c1,
+ nullptr,
+ Primitive::kPrimInt,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
+ HInstanceFieldGet* get_field10 = new (&allocator_) HInstanceFieldGet(object,
+ nullptr,
+ Primitive::kPrimInt,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
+ HInstanceFieldGet* get_field20 = new (&allocator_) HInstanceFieldGet(object,
+ nullptr,
+ Primitive::kPrimInt,
+ MemberOffset(20),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
+ entry->AddInstruction(object);
+ entry->AddInstruction(set_field10);
+ entry->AddInstruction(get_field10);
+ entry->AddInstruction(get_field20);
+
+ // Test HeapLocationCollector initialization.
+ // Should be no heap locations, no operations on the heap.
+ HeapLocationCollector heap_location_collector(graph_);
+ ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 0U);
+ ASSERT_FALSE(heap_location_collector.HasHeapStores());
+
+ // Test that after visiting the graph, the collector sees the following heap
+ // locations: object.field10 and object.field20, and that it sees heap stores.
+ heap_location_collector.VisitBasicBlock(entry);
+ ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 2U);
+ ASSERT_TRUE(heap_location_collector.HasHeapStores());
+
+ // Test queries on HeapLocationCollector's ref info and index records.
+ ReferenceInfo* ref = heap_location_collector.FindReferenceInfoOf(object);
+ size_t loc1 = heap_location_collector.FindHeapLocationIndex(
+ ref, 10, nullptr, kUnknownClassDefIndex);
+ size_t loc2 = heap_location_collector.FindHeapLocationIndex(
+ ref, 20, nullptr, kUnknownClassDefIndex);
+ // must find the reference info for object in HeapLocationCollector.
+ ASSERT_TRUE(ref != nullptr);
+ // must find these heap locations.
+ ASSERT_TRUE(loc1 != HeapLocationCollector::kHeapLocationNotFound);
+ ASSERT_TRUE(loc2 != HeapLocationCollector::kHeapLocationNotFound);
+ // different fields of the same object.
+ ASSERT_TRUE(loc1 != loc2);
+ // accesses to different fields of the same object should not alias.
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+}
+
+} // namespace art
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 76c9d23..211528b 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "load_store_analysis.h"
#include "load_store_elimination.h"
#include "escape.h"
@@ -23,477 +24,6 @@
namespace art {
-class ReferenceInfo;
-
-// A cap for the number of heap locations to prevent pathological time/space consumption.
-// The number of heap locations for most of the methods stays below this threshold.
-constexpr size_t kMaxNumberOfHeapLocations = 32;
-
-// A ReferenceInfo contains additional info about a reference such as
-// whether it's a singleton, returned, etc.
-class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
- public:
- ReferenceInfo(HInstruction* reference, size_t pos)
- : reference_(reference),
- position_(pos),
- is_singleton_(true),
- is_singleton_and_not_returned_(true),
- is_singleton_and_not_deopt_visible_(true),
- has_index_aliasing_(false) {
- CalculateEscape(reference_,
- nullptr,
- &is_singleton_,
- &is_singleton_and_not_returned_,
- &is_singleton_and_not_deopt_visible_);
- }
-
- HInstruction* GetReference() const {
- return reference_;
- }
-
- size_t GetPosition() const {
- return position_;
- }
-
- // Returns true if reference_ is the only name that can refer to its value during
- // the lifetime of the method. So it's guaranteed to not have any alias in
- // the method (including its callees).
- bool IsSingleton() const {
- return is_singleton_;
- }
-
- // Returns true if reference_ is a singleton and not returned to the caller or
- // used as an environment local of an HDeoptimize instruction.
- // The allocation and stores into reference_ may be eliminated for such cases.
- bool IsSingletonAndRemovable() const {
- return is_singleton_and_not_returned_ && is_singleton_and_not_deopt_visible_;
- }
-
- // Returns true if reference_ is a singleton and returned to the caller or
- // used as an environment local of an HDeoptimize instruction.
- bool IsSingletonAndNonRemovable() const {
- return is_singleton_ &&
- (!is_singleton_and_not_returned_ || !is_singleton_and_not_deopt_visible_);
- }
-
- bool HasIndexAliasing() {
- return has_index_aliasing_;
- }
-
- void SetHasIndexAliasing(bool has_index_aliasing) {
- // Only allow setting to true.
- DCHECK(has_index_aliasing);
- has_index_aliasing_ = has_index_aliasing;
- }
-
- private:
- HInstruction* const reference_;
- const size_t position_; // position in HeapLocationCollector's ref_info_array_.
-
- // Can only be referred to by a single name in the method.
- bool is_singleton_;
- // Is singleton and not returned to caller.
- bool is_singleton_and_not_returned_;
- // Is singleton and not used as an environment local of HDeoptimize.
- bool is_singleton_and_not_deopt_visible_;
- // Some heap locations with reference_ have array index aliasing,
- // e.g. arr[i] and arr[j] may be the same location.
- bool has_index_aliasing_;
-
- DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
-};
-
-// A heap location is a reference-offset/index pair that a value can be loaded from
-// or stored to.
-class HeapLocation : public ArenaObject<kArenaAllocMisc> {
- public:
- static constexpr size_t kInvalidFieldOffset = -1;
-
- // TODO: more fine-grained array types.
- static constexpr int16_t kDeclaringClassDefIndexForArrays = -1;
-
- HeapLocation(ReferenceInfo* ref_info,
- size_t offset,
- HInstruction* index,
- int16_t declaring_class_def_index)
- : ref_info_(ref_info),
- offset_(offset),
- index_(index),
- declaring_class_def_index_(declaring_class_def_index),
- value_killed_by_loop_side_effects_(true) {
- DCHECK(ref_info != nullptr);
- DCHECK((offset == kInvalidFieldOffset && index != nullptr) ||
- (offset != kInvalidFieldOffset && index == nullptr));
- if (ref_info->IsSingleton() && !IsArrayElement()) {
- // Assume this location's value cannot be killed by loop side effects
- // until proven otherwise.
- value_killed_by_loop_side_effects_ = false;
- }
- }
-
- ReferenceInfo* GetReferenceInfo() const { return ref_info_; }
- size_t GetOffset() const { return offset_; }
- HInstruction* GetIndex() const { return index_; }
-
- // Returns the definition of declaring class' dex index.
- // It's kDeclaringClassDefIndexForArrays for an array element.
- int16_t GetDeclaringClassDefIndex() const {
- return declaring_class_def_index_;
- }
-
- bool IsArrayElement() const {
- return index_ != nullptr;
- }
-
- bool IsValueKilledByLoopSideEffects() const {
- return value_killed_by_loop_side_effects_;
- }
-
- void SetValueKilledByLoopSideEffects(bool val) {
- value_killed_by_loop_side_effects_ = val;
- }
-
- private:
- ReferenceInfo* const ref_info_; // reference for instance/static field or array access.
- const size_t offset_; // offset of static/instance field.
- HInstruction* const index_; // index of an array element.
- const int16_t declaring_class_def_index_; // declaring class's def's dex index.
- bool value_killed_by_loop_side_effects_; // value of this location may be killed by loop
- // side effects because this location is stored
- // into inside a loop. This gives
- // better info on whether a singleton's location
- // value may be killed by loop side effects.
-
- DISALLOW_COPY_AND_ASSIGN(HeapLocation);
-};
-
-static HInstruction* HuntForOriginalReference(HInstruction* ref) {
- DCHECK(ref != nullptr);
- while (ref->IsNullCheck() || ref->IsBoundType()) {
- ref = ref->InputAt(0);
- }
- return ref;
-}
-
-// A HeapLocationCollector collects all relevant heap locations and keeps
-// an aliasing matrix for all locations.
-class HeapLocationCollector : public HGraphVisitor {
- public:
- static constexpr size_t kHeapLocationNotFound = -1;
- // Start with a single uint32_t word. That's enough bits for pair-wise
- // aliasing matrix of 8 heap locations.
- static constexpr uint32_t kInitialAliasingMatrixBitVectorSize = 32;
-
- explicit HeapLocationCollector(HGraph* graph)
- : HGraphVisitor(graph),
- ref_info_array_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- heap_locations_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- aliasing_matrix_(graph->GetArena(),
- kInitialAliasingMatrixBitVectorSize,
- true,
- kArenaAllocLSE),
- has_heap_stores_(false),
- has_volatile_(false),
- has_monitor_operations_(false) {}
-
- size_t GetNumberOfHeapLocations() const {
- return heap_locations_.size();
- }
-
- HeapLocation* GetHeapLocation(size_t index) const {
- return heap_locations_[index];
- }
-
- ReferenceInfo* FindReferenceInfoOf(HInstruction* ref) const {
- for (size_t i = 0; i < ref_info_array_.size(); i++) {
- ReferenceInfo* ref_info = ref_info_array_[i];
- if (ref_info->GetReference() == ref) {
- DCHECK_EQ(i, ref_info->GetPosition());
- return ref_info;
- }
- }
- return nullptr;
- }
-
- bool HasHeapStores() const {
- return has_heap_stores_;
- }
-
- bool HasVolatile() const {
- return has_volatile_;
- }
-
- bool HasMonitorOps() const {
- return has_monitor_operations_;
- }
-
- // Find and return the heap location index in heap_locations_.
- size_t FindHeapLocationIndex(ReferenceInfo* ref_info,
- size_t offset,
- HInstruction* index,
- int16_t declaring_class_def_index) const {
- for (size_t i = 0; i < heap_locations_.size(); i++) {
- HeapLocation* loc = heap_locations_[i];
- if (loc->GetReferenceInfo() == ref_info &&
- loc->GetOffset() == offset &&
- loc->GetIndex() == index &&
- loc->GetDeclaringClassDefIndex() == declaring_class_def_index) {
- return i;
- }
- }
- return kHeapLocationNotFound;
- }
-
- // Returns true if heap_locations_[index1] and heap_locations_[index2] may alias.
- bool MayAlias(size_t index1, size_t index2) const {
- if (index1 < index2) {
- return aliasing_matrix_.IsBitSet(AliasingMatrixPosition(index1, index2));
- } else if (index1 > index2) {
- return aliasing_matrix_.IsBitSet(AliasingMatrixPosition(index2, index1));
- } else {
- DCHECK(false) << "index1 and index2 are expected to be different";
- return true;
- }
- }
-
- void BuildAliasingMatrix() {
- const size_t number_of_locations = heap_locations_.size();
- if (number_of_locations == 0) {
- return;
- }
- size_t pos = 0;
- // Compute aliasing info between every pair of different heap locations.
- // Save the result in a matrix represented as a BitVector.
- for (size_t i = 0; i < number_of_locations - 1; i++) {
- for (size_t j = i + 1; j < number_of_locations; j++) {
- if (ComputeMayAlias(i, j)) {
- aliasing_matrix_.SetBit(CheckedAliasingMatrixPosition(i, j, pos));
- }
- pos++;
- }
- }
- }
-
- private:
- // An allocation cannot alias with a name which already exists at the point
- // of the allocation, such as a parameter or a load happening before the allocation.
- bool MayAliasWithPreexistenceChecking(ReferenceInfo* ref_info1, ReferenceInfo* ref_info2) const {
- if (ref_info1->GetReference()->IsNewInstance() || ref_info1->GetReference()->IsNewArray()) {
- // Any reference that can alias with the allocation must appear after it in the block/in
- // the block's successors. In reverse post order, those instructions will be visited after
- // the allocation.
- return ref_info2->GetPosition() >= ref_info1->GetPosition();
- }
- return true;
- }
-
- bool CanReferencesAlias(ReferenceInfo* ref_info1, ReferenceInfo* ref_info2) const {
- if (ref_info1 == ref_info2) {
- return true;
- } else if (ref_info1->IsSingleton()) {
- return false;
- } else if (ref_info2->IsSingleton()) {
- return false;
- } else if (!MayAliasWithPreexistenceChecking(ref_info1, ref_info2) ||
- !MayAliasWithPreexistenceChecking(ref_info2, ref_info1)) {
- return false;
- }
- return true;
- }
-
- // `index1` and `index2` are indices in the array of collected heap locations.
- // Returns the position in the bit vector that tracks whether the two heap
- // locations may alias.
- size_t AliasingMatrixPosition(size_t index1, size_t index2) const {
- DCHECK(index2 > index1);
- const size_t number_of_locations = heap_locations_.size();
- // It's (num_of_locations - 1) + ... + (num_of_locations - index1) + (index2 - index1 - 1).
- return (number_of_locations * index1 - (1 + index1) * index1 / 2 + (index2 - index1 - 1));
- }
-
- // An additional position is passed in to make sure the calculated position is correct.
- size_t CheckedAliasingMatrixPosition(size_t index1, size_t index2, size_t position) {
- size_t calculated_position = AliasingMatrixPosition(index1, index2);
- DCHECK_EQ(calculated_position, position);
- return calculated_position;
- }
-
- // Compute if two locations may alias to each other.
- bool ComputeMayAlias(size_t index1, size_t index2) const {
- HeapLocation* loc1 = heap_locations_[index1];
- HeapLocation* loc2 = heap_locations_[index2];
- if (loc1->GetOffset() != loc2->GetOffset()) {
- // Either two different instance fields, or one is an instance
- // field and the other is an array element.
- return false;
- }
- if (loc1->GetDeclaringClassDefIndex() != loc2->GetDeclaringClassDefIndex()) {
- // Different types.
- return false;
- }
- if (!CanReferencesAlias(loc1->GetReferenceInfo(), loc2->GetReferenceInfo())) {
- return false;
- }
- if (loc1->IsArrayElement() && loc2->IsArrayElement()) {
- HInstruction* array_index1 = loc1->GetIndex();
- HInstruction* array_index2 = loc2->GetIndex();
- DCHECK(array_index1 != nullptr);
- DCHECK(array_index2 != nullptr);
- if (array_index1->IsIntConstant() &&
- array_index2->IsIntConstant() &&
- array_index1->AsIntConstant()->GetValue() != array_index2->AsIntConstant()->GetValue()) {
- // Different constant indices do not alias.
- return false;
- }
- ReferenceInfo* ref_info = loc1->GetReferenceInfo();
- ref_info->SetHasIndexAliasing(true);
- }
- return true;
- }
-
- ReferenceInfo* GetOrCreateReferenceInfo(HInstruction* instruction) {
- ReferenceInfo* ref_info = FindReferenceInfoOf(instruction);
- if (ref_info == nullptr) {
- size_t pos = ref_info_array_.size();
- ref_info = new (GetGraph()->GetArena()) ReferenceInfo(instruction, pos);
- ref_info_array_.push_back(ref_info);
- }
- return ref_info;
- }
-
- void CreateReferenceInfoForReferenceType(HInstruction* instruction) {
- if (instruction->GetType() != Primitive::kPrimNot) {
- return;
- }
- DCHECK(FindReferenceInfoOf(instruction) == nullptr);
- GetOrCreateReferenceInfo(instruction);
- }
-
- HeapLocation* GetOrCreateHeapLocation(HInstruction* ref,
- size_t offset,
- HInstruction* index,
- int16_t declaring_class_def_index) {
- HInstruction* original_ref = HuntForOriginalReference(ref);
- ReferenceInfo* ref_info = GetOrCreateReferenceInfo(original_ref);
- size_t heap_location_idx = FindHeapLocationIndex(
- ref_info, offset, index, declaring_class_def_index);
- if (heap_location_idx == kHeapLocationNotFound) {
- HeapLocation* heap_loc = new (GetGraph()->GetArena())
- HeapLocation(ref_info, offset, index, declaring_class_def_index);
- heap_locations_.push_back(heap_loc);
- return heap_loc;
- }
- return heap_locations_[heap_location_idx];
- }
-
- HeapLocation* VisitFieldAccess(HInstruction* ref, const FieldInfo& field_info) {
- if (field_info.IsVolatile()) {
- has_volatile_ = true;
- }
- const uint16_t declaring_class_def_index = field_info.GetDeclaringClassDefIndex();
- const size_t offset = field_info.GetFieldOffset().SizeValue();
- return GetOrCreateHeapLocation(ref, offset, nullptr, declaring_class_def_index);
- }
-
- void VisitArrayAccess(HInstruction* array, HInstruction* index) {
- GetOrCreateHeapLocation(array, HeapLocation::kInvalidFieldOffset,
- index, HeapLocation::kDeclaringClassDefIndexForArrays);
- }
-
- void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
- VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
- CreateReferenceInfoForReferenceType(instruction);
- }
-
- void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
- HeapLocation* location = VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
- has_heap_stores_ = true;
- if (location->GetReferenceInfo()->IsSingleton()) {
- // A singleton's location value may be killed by loop side effects if it's
- // defined before that loop, and it's stored into inside that loop.
- HLoopInformation* loop_info = instruction->GetBlock()->GetLoopInformation();
- if (loop_info != nullptr) {
- HInstruction* ref = location->GetReferenceInfo()->GetReference();
- DCHECK(ref->IsNewInstance());
- if (loop_info->IsDefinedOutOfTheLoop(ref)) {
- // ref's location value may be killed by this loop's side effects.
- location->SetValueKilledByLoopSideEffects(true);
- } else {
- // ref is defined inside this loop so this loop's side effects cannot
- // kill its location value at the loop header since ref/its location doesn't
- // exist yet at the loop header.
- }
- }
- } else {
- // For non-singletons, value_killed_by_loop_side_effects_ is inited to
- // true.
- DCHECK_EQ(location->IsValueKilledByLoopSideEffects(), true);
- }
- }
-
- void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
- VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
- CreateReferenceInfoForReferenceType(instruction);
- }
-
- void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
- VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
- has_heap_stores_ = true;
- }
-
- // We intentionally don't collect HUnresolvedInstanceField/HUnresolvedStaticField accesses
- // since we cannot accurately track the fields.
-
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
- VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
- CreateReferenceInfoForReferenceType(instruction);
- }
-
- void VisitArraySet(HArraySet* instruction) OVERRIDE {
- VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
- has_heap_stores_ = true;
- }
-
- void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
- // Any references appearing in the ref_info_array_ so far cannot alias with new_instance.
- CreateReferenceInfoForReferenceType(new_instance);
- }
-
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* instruction) OVERRIDE {
- CreateReferenceInfoForReferenceType(instruction);
- }
-
- void VisitInvokeVirtual(HInvokeVirtual* instruction) OVERRIDE {
- CreateReferenceInfoForReferenceType(instruction);
- }
-
- void VisitInvokeInterface(HInvokeInterface* instruction) OVERRIDE {
- CreateReferenceInfoForReferenceType(instruction);
- }
-
- void VisitParameterValue(HParameterValue* instruction) OVERRIDE {
- CreateReferenceInfoForReferenceType(instruction);
- }
-
- void VisitSelect(HSelect* instruction) OVERRIDE {
- CreateReferenceInfoForReferenceType(instruction);
- }
-
- void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE {
- has_monitor_operations_ = true;
- }
-
- ArenaVector<ReferenceInfo*> ref_info_array_; // All references used for heap accesses.
- ArenaVector<HeapLocation*> heap_locations_; // All heap locations.
- ArenaBitVector aliasing_matrix_; // aliasing info between each pair of locations.
- bool has_heap_stores_; // If there is no heap stores, LSE acts as GVN with better
- // alias analysis and won't be as effective.
- bool has_volatile_; // If there are volatile field accesses.
- bool has_monitor_operations_; // If there are monitor operations.
-
- DISALLOW_COPY_AND_ASSIGN(HeapLocationCollector);
-};
-
// An unknown heap value. Loads with such a value in the heap location cannot be eliminated.
// A heap location can be set to kUnknownHeapValue when:
// - initially set a value.
@@ -516,7 +46,7 @@
side_effects_(side_effects),
heap_values_for_(graph->GetBlocks().size(),
ArenaVector<HInstruction*>(heap_locations_collector.
- GetNumberOfHeapLocations(),
+ GetNumberOfHeapLocations(),
kUnknownHeapValue,
graph->GetArena()->Adapter(kArenaAllocLSE)),
graph->GetArena()->Adapter(kArenaAllocLSE)),
@@ -760,7 +290,7 @@
size_t offset,
HInstruction* index,
int16_t declaring_class_def_index) {
- HInstruction* original_ref = HuntForOriginalReference(ref);
+ HInstruction* original_ref = heap_location_collector_.HuntForOriginalReference(ref);
ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
size_t idx = heap_location_collector_.FindHeapLocationIndex(
ref_info, offset, index, declaring_class_def_index);
@@ -827,7 +357,7 @@
HInstruction* index,
int16_t declaring_class_def_index,
HInstruction* value) {
- HInstruction* original_ref = HuntForOriginalReference(ref);
+ HInstruction* original_ref = heap_location_collector_.HuntForOriginalReference(ref);
ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
size_t idx = heap_location_collector_.FindHeapLocationIndex(
ref_info, offset, index, declaring_class_def_index);
@@ -1127,25 +657,12 @@
// Skip this optimization.
return;
}
- HeapLocationCollector heap_location_collector(graph_);
- for (HBasicBlock* block : graph_->GetReversePostOrder()) {
- heap_location_collector.VisitBasicBlock(block);
- }
- if (heap_location_collector.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
- // Bail out if there are too many heap locations to deal with.
+ const HeapLocationCollector& heap_location_collector = lsa_.GetHeapLocationCollector();
+ if (heap_location_collector.GetNumberOfHeapLocations() == 0) {
+ // No HeapLocation information from LSA, skip this optimization.
return;
}
- if (!heap_location_collector.HasHeapStores()) {
- // Without heap stores, this pass would act mostly as GVN on heap accesses.
- return;
- }
- if (heap_location_collector.HasVolatile() || heap_location_collector.HasMonitorOps()) {
- // Don't do load/store elimination if the method has volatile field accesses or
- // monitor operations, for now.
- // TODO: do it right.
- return;
- }
- heap_location_collector.BuildAliasingMatrix();
+
LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_);
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
lse_visitor.VisitBasicBlock(block);
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
index 1d9e5c8..efe71c7 100644
--- a/compiler/optimizing/load_store_elimination.h
+++ b/compiler/optimizing/load_store_elimination.h
@@ -22,12 +22,16 @@
namespace art {
class SideEffectsAnalysis;
+class LoadStoreAnalysis;
class LoadStoreElimination : public HOptimization {
public:
- LoadStoreElimination(HGraph* graph, const SideEffectsAnalysis& side_effects)
+ LoadStoreElimination(HGraph* graph,
+ const SideEffectsAnalysis& side_effects,
+ const LoadStoreAnalysis& lsa)
: HOptimization(graph, kLoadStoreEliminationPassName),
- side_effects_(side_effects) {}
+ side_effects_(side_effects),
+ lsa_(lsa) {}
void Run() OVERRIDE;
@@ -35,6 +39,7 @@
private:
const SideEffectsAnalysis& side_effects_;
+ const LoadStoreAnalysis& lsa_;
DISALLOW_COPY_AND_ASSIGN(LoadStoreElimination);
};
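
The new constructor parameter means LSE now has two prerequisite analyses. A hedged sketch of how the pieces fit together; the SideEffectsAnalysis constructor and the surrounding pass setup are assumptions, while the LoadStoreAnalysis and LoadStoreElimination constructors come from this change:

// Illustrative ordering only: both analyses must have run before LSE reads them.
SideEffectsAnalysis side_effects(graph);
LoadStoreAnalysis lsa(graph);
side_effects.Run();
lsa.Run();
LoadStoreElimination lse(graph, side_effects, lsa);
lse.Run();
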
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 94787c9..963df5a 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -173,39 +173,6 @@
return false;
}
-// Detect situations with same-extension narrower operands.
-// Returns true on success and sets is_unsigned accordingly.
-static bool IsNarrowerOperands(HInstruction* a,
- HInstruction* b,
- Primitive::Type type,
- /*out*/ HInstruction** r,
- /*out*/ HInstruction** s,
- /*out*/ bool* is_unsigned) {
- if (IsSignExtensionAndGet(a, type, r) && IsSignExtensionAndGet(b, type, s)) {
- *is_unsigned = false;
- return true;
- } else if (IsZeroExtensionAndGet(a, type, r) && IsZeroExtensionAndGet(b, type, s)) {
- *is_unsigned = true;
- return true;
- }
- return false;
-}
-
-// As above, single operand.
-static bool IsNarrowerOperand(HInstruction* a,
- Primitive::Type type,
- /*out*/ HInstruction** r,
- /*out*/ bool* is_unsigned) {
- if (IsSignExtensionAndGet(a, type, r)) {
- *is_unsigned = false;
- return true;
- } else if (IsZeroExtensionAndGet(a, type, r)) {
- *is_unsigned = true;
- return true;
- }
- return false;
-}
-
// Detect up to two instructions a and b, and an accumulated constant c.
static bool IsAddConstHelper(HInstruction* instruction,
/*out*/ HInstruction** a,
@@ -789,7 +756,7 @@
return !IsUsedOutsideLoop(node->loop_info, instruction) && !instruction->DoesAnyWrite();
}
-// TODO: saturation arithmetic.
+// TODO: more operations and intrinsics, detect saturation arithmetic, etc.
bool HLoopOptimization::VectorizeUse(LoopNode* node,
HInstruction* instruction,
bool generate_code,
@@ -900,38 +867,25 @@
return true;
}
// Deal with vector restrictions.
- HInstruction* opa = instruction->InputAt(0);
- HInstruction* opb = instruction->InputAt(1);
- HInstruction* r = opa;
- bool is_unsigned = false;
if ((HasVectorRestrictions(restrictions, kNoShift)) ||
(instruction->IsShr() && HasVectorRestrictions(restrictions, kNoShr))) {
return false; // unsupported instruction
- } else if (HasVectorRestrictions(restrictions, kNoHiBits)) {
- // Shifts right need extra care to account for higher order bits.
- // TODO: less likely shr/unsigned and ushr/signed can by flipping signess.
- if (instruction->IsShr() &&
- (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
- return false; // reject, unless all operands are sign-extension narrower
- } else if (instruction->IsUShr() &&
- (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || !is_unsigned)) {
- return false; // reject, unless all operands are zero-extension narrower
- }
+ } else if ((instruction->IsShr() || instruction->IsUShr()) &&
+ HasVectorRestrictions(restrictions, kNoHiBits)) {
+ return false; // hibits may impact lobits; TODO: we can do better!
}
// Accept shift operator for vectorizable/invariant operands.
// TODO: accept symbolic, albeit loop invariant shift factors.
- DCHECK(r != nullptr);
- if (generate_code && vector_mode_ != kVector) { // de-idiom
- r = opa;
- }
+ HInstruction* opa = instruction->InputAt(0);
+ HInstruction* opb = instruction->InputAt(1);
int64_t distance = 0;
- if (VectorizeUse(node, r, generate_code, type, restrictions) &&
+ if (VectorizeUse(node, opa, generate_code, type, restrictions) &&
IsInt64AndGet(opb, /*out*/ &distance)) {
// Restrict shift distance to packed data type width.
int64_t max_distance = Primitive::ComponentSize(type) * 8;
if (0 <= distance && distance < max_distance) {
if (generate_code) {
- GenerateVecOp(instruction, vector_map_->Get(r), opb, type);
+ GenerateVecOp(instruction, vector_map_->Get(opa), opb, type);
}
return true;
}
@@ -945,23 +899,16 @@
case Intrinsics::kMathAbsFloat:
case Intrinsics::kMathAbsDouble: {
// Deal with vector restrictions.
- HInstruction* opa = instruction->InputAt(0);
- HInstruction* r = opa;
- bool is_unsigned = false;
- if (HasVectorRestrictions(restrictions, kNoAbs)) {
+ if (HasVectorRestrictions(restrictions, kNoAbs) ||
+ HasVectorRestrictions(restrictions, kNoHiBits)) {
+ // TODO: we can do better for some hibits cases.
return false;
- } else if (HasVectorRestrictions(restrictions, kNoHiBits) &&
- (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
- return false; // reject, unless operand is sign-extension narrower
}
// Accept ABS(x) for vectorizable operand.
- DCHECK(r != nullptr);
- if (generate_code && vector_mode_ != kVector) { // de-idiom
- r = opa;
- }
- if (VectorizeUse(node, r, generate_code, type, restrictions)) {
+ HInstruction* opa = instruction->InputAt(0);
+ if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
if (generate_code) {
- GenerateVecOp(instruction, vector_map_->Get(r), nullptr, type);
+ GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
}
return true;
}
@@ -976,28 +923,18 @@
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMaxDoubleDouble: {
// Deal with vector restrictions.
- HInstruction* opa = instruction->InputAt(0);
- HInstruction* opb = instruction->InputAt(1);
- HInstruction* r = opa;
- HInstruction* s = opb;
- bool is_unsigned = false;
- if (HasVectorRestrictions(restrictions, kNoMinMax)) {
+ if (HasVectorRestrictions(restrictions, kNoMinMax) ||
+ HasVectorRestrictions(restrictions, kNoHiBits)) {
+ // TODO: we can do better for some hibits cases.
return false;
- } else if (HasVectorRestrictions(restrictions, kNoHiBits) &&
- !IsNarrowerOperands(opa, opb, type, &r, &s, &is_unsigned)) {
- return false; // reject, unless all operands are same-extension narrower
}
// Accept MIN/MAX(x, y) for vectorizable operands.
- DCHECK(r != nullptr && s != nullptr);
- if (generate_code && vector_mode_ != kVector) { // de-idiom
- r = opa;
- s = opb;
- }
- if (VectorizeUse(node, r, generate_code, type, restrictions) &&
- VectorizeUse(node, s, generate_code, type, restrictions)) {
+ HInstruction* opa = instruction->InputAt(0);
+ HInstruction* opb = instruction->InputAt(1);
+ if (VectorizeUse(node, opa, generate_code, type, restrictions) &&
+ VectorizeUse(node, opb, generate_code, type, restrictions)) {
if (generate_code) {
- GenerateVecOp(
- instruction, vector_map_->Get(r), vector_map_->Get(s), type, is_unsigned);
+ GenerateVecOp(instruction, vector_map_->Get(opa), vector_map_->Get(opb), type);
}
return true;
}
@@ -1022,11 +959,11 @@
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoAbs;
return TrySetVectorLength(16);
case Primitive::kPrimChar:
case Primitive::kPrimShort:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoAbs;
return TrySetVectorLength(8);
case Primitive::kPrimInt:
*restrictions |= kNoDiv;
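
The restrictions value filled in above is a per-ISA bit set; adding kNoAbs for the narrow integral lanes is what makes the kNoAbs checks in VectorizeUse reject those cases. A simplified model of the flag handling, assuming HasVectorRestrictions is a plain mask test (illustrative bit values, not the real enum):

    #include <cstdint>

    // Illustrative restriction bits; the real definitions live in loop_optimization.h.
    enum : uint64_t {
      kNoDiv    = 1u << 0,
      kNoAbs    = 1u << 1,
      kNoShr    = 1u << 2,
      kNoHiBits = 1u << 3,
    };

    // Assumed shape of HasVectorRestrictions: a simple bitwise test.
    inline bool HasRestriction(uint64_t restrictions, uint64_t restriction) {
      return (restrictions & restriction) != 0;
    }

    // After the change above, byte/char/short lanes carry kNoDiv | kNoAbs, so
    // HasRestriction(restrictions, kNoAbs) is true and ABS is not vectorized there.
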
@@ -1161,14 +1098,13 @@
void HLoopOptimization::GenerateVecOp(HInstruction* org,
HInstruction* opa,
HInstruction* opb,
- Primitive::Type type,
- bool is_unsigned) {
+ Primitive::Type type) {
if (vector_mode_ == kSequential) {
- // Non-converting scalar code follows implicit integral promotion.
- if (!org->IsTypeConversion() && (type == Primitive::kPrimBoolean ||
- type == Primitive::kPrimByte ||
- type == Primitive::kPrimChar ||
- type == Primitive::kPrimShort)) {
+ // Scalar code follows implicit integral promotion.
+ if (type == Primitive::kPrimBoolean ||
+ type == Primitive::kPrimByte ||
+ type == Primitive::kPrimChar ||
+ type == Primitive::kPrimShort) {
type = Primitive::kPrimInt;
}
}
@@ -1249,6 +1185,7 @@
case Intrinsics::kMathMinLongLong:
case Intrinsics::kMathMinFloatFloat:
case Intrinsics::kMathMinDoubleDouble: {
+ bool is_unsigned = false; // TODO: detect unsigned versions
vector = new (global_allocator_)
HVecMin(global_allocator_, opa, opb, type, vector_length_, is_unsigned);
break;
@@ -1257,6 +1194,7 @@
case Intrinsics::kMathMaxLongLong:
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMaxDoubleDouble: {
+ bool is_unsigned = false; // TODO: detect unsigned versions
vector = new (global_allocator_)
HVecMax(global_allocator_, opa, opb, type, vector_length_, is_unsigned);
break;
@@ -1320,7 +1258,7 @@
Primitive::Type type,
uint64_t restrictions) {
// Test for top level arithmetic shift right x >> 1 or logical shift right x >>> 1
- // (note whether the sign bit in wider precision is shifted in has no effect
+ // (note whether the sign bit in higher precision is shifted in has no effect
// on the narrow precision computed by the idiom).
int64_t distance = 0;
if ((instruction->IsShr() ||
@@ -1331,7 +1269,6 @@
HInstruction* b = nullptr;
int64_t c = 0;
if (IsAddConst(instruction->InputAt(0), /*out*/ &a, /*out*/ &b, /*out*/ &c)) {
- DCHECK(a != nullptr && b != nullptr);
// Accept c == 1 (rounded) or c == 0 (not rounded).
bool is_rounded = false;
if (c == 1) {
@@ -1343,7 +1280,11 @@
HInstruction* r = nullptr;
HInstruction* s = nullptr;
bool is_unsigned = false;
- if (!IsNarrowerOperands(a, b, type, &r, &s, &is_unsigned)) {
+ if (IsZeroExtensionAndGet(a, type, &r) && IsZeroExtensionAndGet(b, type, &s)) {
+ is_unsigned = true;
+ } else if (IsSignExtensionAndGet(a, type, &r) && IsSignExtensionAndGet(b, type, &s)) {
+ is_unsigned = false;
+ } else {
return false;
}
// Deal with vector restrictions.
@@ -1354,10 +1295,6 @@
// Accept recognized halving add for vectorizable operands. Vectorized code uses the
// shorthand idiomatic operation. Sequential code uses the original scalar expressions.
DCHECK(r != nullptr && s != nullptr);
- if (generate_code && vector_mode_ != kVector) { // de-idiom
- r = instruction->InputAt(0);
- s = instruction->InputAt(1);
- }
if (VectorizeUse(node, r, generate_code, type, restrictions) &&
VectorizeUse(node, s, generate_code, type, restrictions)) {
if (generate_code) {
@@ -1371,7 +1308,12 @@
is_unsigned,
is_rounded));
} else {
- GenerateVecOp(instruction, vector_map_->Get(r), vector_map_->Get(s), type);
+ VectorizeUse(node, instruction->InputAt(0), generate_code, type, restrictions);
+ VectorizeUse(node, instruction->InputAt(1), generate_code, type, restrictions);
+ GenerateVecOp(instruction,
+ vector_map_->Get(instruction->InputAt(0)),
+ vector_map_->Get(instruction->InputAt(1)),
+ type);
}
}
return true;
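
For reference, the idiom recognized here computes a halving add: the sum is formed in wider precision (hence the required matching sign or zero extensions on both operands) and then shifted right by one, optionally rounding by adding 1 first. A scalar model for unsigned 8-bit lanes:

    #include <cstdint>

    // Scalar model of the (rounded) halving-add idiom: (a + b + round) >> 1,
    // evaluated in wider precision so the carry out of 8 bits is not lost.
    uint8_t HalvingAddU8(uint8_t a, uint8_t b, bool rounded) {
      const uint32_t sum = static_cast<uint32_t>(a) + b + (rounded ? 1u : 0u);
      return static_cast<uint8_t>(sum >> 1);
    }

    // HalvingAddU8(255, 255, /*rounded=*/ true) == 255: no overflow, which is
    // exactly why both operands must be zero-extended (or both sign-extended)
    // from the narrower type before the idiom is accepted.
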
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 35298d4..6d5978d 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -137,11 +137,7 @@
HInstruction* opa,
HInstruction* opb,
Primitive::Type type);
- void GenerateVecOp(HInstruction* org,
- HInstruction* opa,
- HInstruction* opb,
- Primitive::Type type,
- bool is_unsigned = false);
+ void GenerateVecOp(HInstruction* org, HInstruction* opa, HInstruction* opb, Primitive::Type type);
// Vectorization idioms.
bool VectorizeHalvingAddIdiom(LoopNode* node,
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 833f32b..bde7f2c 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2636,15 +2636,17 @@
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs) {
switch (rhs) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
- return os << "string_init";
+ return os << "StringInit";
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
- return os << "recursive";
+ return os << "Recursive";
+ case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
+ return os << "BootImageLinkTimePcRelative";
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
- return os << "direct";
+ return os << "Direct";
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- return os << "dex_cache_pc_relative";
+ return os << "DexCachePcRelative";
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod:
- return os << "dex_cache_via_method";
+ return os << "DexCacheViaMethod";
default:
LOG(FATAL) << "Unknown MethodLoadKind: " << static_cast<int>(rhs);
UNREACHABLE();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 72774da..4d96fbe 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4153,6 +4153,10 @@
// Use the method's own ArtMethod* loaded by the register allocator.
kRecursive,
+ // Use PC-relative boot image ArtMethod* address that will be known at link time.
+ // Used for boot image methods referenced by boot image code.
+ kBootImageLinkTimePcRelative,
+
// Use ArtMethod* at a known address, embed the direct address in the code.
// Used for app->boot calls with non-relocatable image and for JIT-compiled calls.
kDirectAddress,
@@ -4292,6 +4296,10 @@
bool HasPcRelativeDexCache() const {
return GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative;
}
+ bool HasPcRelativeMethodLoadKind() const {
+ return GetMethodLoadKind() == MethodLoadKind::kBootImageLinkTimePcRelative ||
+ GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative;
+ }
bool HasCurrentMethodInput() const {
// This function can be called only after the invoke has been fully initialized by the builder.
if (NeedsCurrentMethodInput(GetMethodLoadKind())) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f928f71..e5ab00b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -83,6 +83,7 @@
#include "jit/jit_code_cache.h"
#include "jni/quick/jni_compiler.h"
#include "licm.h"
+#include "load_store_analysis.h"
#include "load_store_elimination.h"
#include "loop_optimization.h"
#include "nodes.h"
@@ -465,7 +466,8 @@
const DexCompilationUnit& dex_compilation_unit,
VariableSizedHandleScope* handles,
SideEffectsAnalysis* most_recent_side_effects,
- HInductionVarAnalysis* most_recent_induction) {
+ HInductionVarAnalysis* most_recent_induction,
+ LoadStoreAnalysis* most_recent_lsa) {
std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) {
CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
@@ -499,15 +501,18 @@
} else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
return new (arena) HInductionVarAnalysis(graph);
} else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
- return new (arena) InstructionSimplifier(graph, codegen, stats, pass_name.c_str());
+ return new (arena) InstructionSimplifier(graph, codegen, driver, stats, pass_name.c_str());
} else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
return new (arena) IntrinsicsRecognizer(graph, stats);
} else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
CHECK(most_recent_side_effects != nullptr);
return new (arena) LICM(graph, *most_recent_side_effects, stats);
+ } else if (opt_name == LoadStoreAnalysis::kLoadStoreAnalysisPassName) {
+ return new (arena) LoadStoreAnalysis(graph);
} else if (opt_name == LoadStoreElimination::kLoadStoreEliminationPassName) {
CHECK(most_recent_side_effects != nullptr);
- return new (arena) LoadStoreElimination(graph, *most_recent_side_effects);
+ CHECK(most_recent_lsa != nullptr);
+ return new (arena) LoadStoreElimination(graph, *most_recent_side_effects, *most_recent_lsa);
} else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
return new (arena) SideEffectsAnalysis(graph);
} else if (opt_name == HLoopOptimization::kLoopOptimizationPassName) {
@@ -556,6 +561,7 @@
// in the pass name list.
SideEffectsAnalysis* most_recent_side_effects = nullptr;
HInductionVarAnalysis* most_recent_induction = nullptr;
+ LoadStoreAnalysis* most_recent_lsa = nullptr;
ArenaVector<HOptimization*> ret(arena->Adapter());
for (const std::string& pass_name : pass_names) {
HOptimization* opt = BuildOptimization(
@@ -568,7 +574,8 @@
dex_compilation_unit,
handles,
most_recent_side_effects,
- most_recent_induction);
+ most_recent_induction,
+ most_recent_lsa);
CHECK(opt != nullptr) << "Couldn't build optimization: \"" << pass_name << "\"";
ret.push_back(opt);
@@ -577,6 +584,8 @@
most_recent_side_effects = down_cast<SideEffectsAnalysis*>(opt);
} else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
most_recent_induction = down_cast<HInductionVarAnalysis*>(opt);
+ } else if (opt_name == LoadStoreAnalysis::kLoadStoreAnalysisPassName) {
+ most_recent_lsa = down_cast<LoadStoreAnalysis*>(opt);
}
}
return ret;
@@ -763,7 +772,8 @@
HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination(
graph, stats, "dead_code_elimination$final");
HConstantFolding* fold1 = new (arena) HConstantFolding(graph, "constant_folding");
- InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, codegen, stats);
+ InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(
+ graph, codegen, driver, stats);
HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
HConstantFolding* fold2 = new (arena) HConstantFolding(
graph, "constant_folding$after_inlining");
@@ -777,15 +787,16 @@
HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects1, induction);
HLoopOptimization* loop = new (arena) HLoopOptimization(graph, driver, induction);
- LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects2);
+ LoadStoreAnalysis* lsa = new (arena) LoadStoreAnalysis(graph);
+ LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects2, *lsa);
HSharpening* sharpening = new (arena) HSharpening(
graph, codegen, dex_compilation_unit, driver, handles);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
- graph, codegen, stats, "instruction_simplifier$after_inlining");
+ graph, codegen, driver, stats, "instruction_simplifier$after_inlining");
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
- graph, codegen, stats, "instruction_simplifier$after_bce");
+ graph, codegen, driver, stats, "instruction_simplifier$after_bce");
InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier(
- graph, codegen, stats, "instruction_simplifier$before_codegen");
+ graph, codegen, driver, stats, "instruction_simplifier$before_codegen");
IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, stats);
CHAGuardOptimization* cha_guard = new (arena) CHAGuardOptimization(graph);
CodeSinking* code_sinking = new (arena) CodeSinking(graph, stats);
@@ -817,6 +828,7 @@
fold3, // evaluates code generated by dynamic bce
simplify3,
side_effects2,
+ lsa,
lse,
cha_guard,
dce3,
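
The new pass wiring means load-store elimination consumes the result of the load-store analysis, so lsa has to sit directly before lse in the pass list (the CHECK(most_recent_lsa != nullptr) enforces this for custom pass lists). A toy model of that ordering contract, with invented types standing in for the real passes:

    // Toy model only: invented names, not the ART pass classes.
    struct LoadStoreAnalysisResult { /* heap locations, aliasing facts, ... */ };

    struct AnalysisPass {
      LoadStoreAnalysisResult result;
      void Run() { /* populate result */ }
    };

    struct EliminationPass {
      explicit EliminationPass(const LoadStoreAnalysisResult& lsa) : lsa_(lsa) {}
      void Run() { /* use lsa_ to drop redundant loads/stores */ }
      const LoadStoreAnalysisResult& lsa_;
    };

    void RunPipeline() {
      AnalysisPass lsa;
      EliminationPass lse(lsa.result);
      lsa.Run();  // must come first, mirroring "lsa" preceding "lse" above
      lse.Run();
    }
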
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index ef2c432..bce54bf 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -58,6 +58,19 @@
DCHECK(base_ != nullptr);
}
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ // If this is an invoke with PC-relative pointer to a method,
+ // we need to add the base as the special input.
+ if (invoke->GetMethodLoadKind() ==
+ HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative &&
+ !IsCallFreeIntrinsic<IntrinsicLocationsBuilderMIPS>(invoke, codegen_)) {
+ InitializePCRelativeBasePointer();
+ // Add the special argument base to the method.
+ DCHECK(!invoke->HasCurrentMethodInput());
+ invoke->AddSpecialInput(base_);
+ }
+ }
+
void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
switch (load_kind) {
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index a1c916f..2743df9 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -205,13 +205,13 @@
// method pointer from the invoke.
if (invoke_static_or_direct != nullptr &&
invoke_static_or_direct->HasCurrentMethodInput()) {
- DCHECK(!invoke_static_or_direct->HasPcRelativeDexCache());
+ DCHECK(!invoke_static_or_direct->HasPcRelativeMethodLoadKind());
return;
}
bool base_added = false;
if (invoke_static_or_direct != nullptr &&
- invoke_static_or_direct->HasPcRelativeDexCache() &&
+ invoke_static_or_direct->HasPcRelativeMethodLoadKind() &&
!IsCallFreeIntrinsic<IntrinsicLocationsBuilderX86>(invoke, codegen_)) {
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(invoke);
// Add the extra parameter.
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 9a03163..7b8104b 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -16,6 +16,7 @@
#include "sharpening.h"
+#include "art_method-inl.h"
#include "base/casts.h"
#include "base/enums.h"
#include "class_linker.h"
@@ -41,7 +42,9 @@
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
if (instruction->IsInvokeStaticOrDirect()) {
- SharpenInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect(), codegen_);
+ SharpenInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect(),
+ codegen_,
+ compiler_driver_);
} else if (instruction->IsLoadString()) {
ProcessLoadString(instruction->AsLoadString());
}
@@ -68,9 +71,21 @@
return IsInBootImage(method) && !options.GetCompilePic();
}
+static bool BootImageAOTCanEmbedMethod(ArtMethod* method, CompilerDriver* compiler_driver) {
+ DCHECK(compiler_driver->GetCompilerOptions().IsBootImage());
+ if (!compiler_driver->GetSupportBootImageFixup()) {
+ return false;
+ }
+ ScopedObjectAccess soa(Thread::Current());
+ ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
+ DCHECK(klass != nullptr);
+ const DexFile& dex_file = klass->GetDexFile();
+ return compiler_driver->IsImageClass(dex_file.StringByTypeIdx(klass->GetDexTypeIndex()));
+}
void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
- CodeGenerator* codegen) {
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver) {
if (invoke->IsStringInit()) {
// Not using the dex cache arrays. But we could still try to use a better dispatch...
// TODO: Use direct_method and direct_code for the appropriate StringFactory method.
@@ -108,6 +123,10 @@
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
method_load_data = reinterpret_cast<uintptr_t>(callee);
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+ } else if (codegen->GetCompilerOptions().IsBootImage() &&
+ BootImageAOTCanEmbedMethod(callee, compiler_driver)) {
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative;
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
} else {
// Use PC-relative access to the dex cache arrays.
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
@@ -167,8 +186,8 @@
if (!compiler_driver->GetSupportBootImageFixup()) {
// compiler_driver_test. Do not sharpen.
desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
- } else if ((klass != nullptr) && compiler_driver->IsImageClass(
- dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
+ } else if ((klass != nullptr) &&
+ compiler_driver->IsImageClass(dex_file.StringByTypeIdx(type_index))) {
is_in_boot_image = true;
desired_load_kind = HLoadClass::LoadKind::kBootImageLinkTimePcRelative;
} else {
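
Putting the sharpening branches above together, the method load kind is now chosen roughly in this order (a condensed restatement of the code above, omitting the string-init and recursive special cases):

    // Condensed restatement of the selection logic; not additional behavior.
    enum class MethodLoadKind {
      kDirectAddress,
      kBootImageLinkTimePcRelative,
      kDexCachePcRelative,
    };

    MethodLoadKind ChooseLoadKind(bool can_embed_direct_address,  // non-PIC, callee in boot image
                                  bool compiling_boot_image,
                                  bool callee_is_image_class) {
      if (can_embed_direct_address) {
        return MethodLoadKind::kDirectAddress;
      }
      if (compiling_boot_image && callee_is_image_class) {
        return MethodLoadKind::kBootImageLinkTimePcRelative;  // new in this change
      }
      return MethodLoadKind::kDexCachePcRelative;
    }
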
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index 10707c7..f74b0af 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -55,7 +55,9 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// Used by Sharpening and InstructionSimplifier.
- static void SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke, CodeGenerator* codegen);
+ static void SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver);
private:
void ProcessLoadString(HLoadString* load_string);
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 03fc959..00b5567 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -91,6 +91,7 @@
fd_ = other.fd_;
file_path_ = std::move(other.file_path_);
auto_close_ = other.auto_close_;
+ read_only_mode_ = other.read_only_mode_;
other.Release(); // Release other.
return *this;
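
The fd_file.cc fix is the classic move-assignment bug: every member has to be carried over, or the destination keeps its stale value. A minimal illustration of the pattern with generic names (not the actual FdFile members or semantics):

    #include <string>
    #include <utility>

    class Handle {
     public:
      Handle& operator=(Handle&& other) noexcept {
        fd_ = other.fd_;
        path_ = std::move(other.path_);
        read_only_ = other.read_only_;  // the analogue of the member added above
        other.fd_ = -1;                 // leave the moved-from object released
        return *this;
      }

     private:
      int fd_ = -1;
      std::string path_;
      bool read_only_ = false;
    };
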
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index 7657a38..6aef348 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -186,6 +186,20 @@
ASSERT_EQ(file2.Close(), 0);
}
+TEST_F(FdFileTest, OperatorMoveEquals) {
+ // Make sure the read_only_mode_ flag is correctly copied over.
+ art::ScratchFile tmp;
+ FdFile file(tmp.GetFilename(), O_RDONLY, false);
+ ASSERT_TRUE(file.ReadOnlyMode());
+
+ FdFile file2(tmp.GetFilename(), O_RDWR, false);
+ ASSERT_FALSE(file2.ReadOnlyMode());
+
+ file2 = std::move(file);
+ ASSERT_TRUE(file2.ReadOnlyMode());
+}
+
TEST_F(FdFileTest, EraseWithPathUnlinks) {
// New scratch file, zero-length.
art::ScratchFile tmp;
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index e0af6e8..dd90a71 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -129,6 +129,25 @@
return dex_file;
}
+// A deleter that acts like jvmtiEnv->Deallocate so that ASan does not get tripped up.
+// TODO: We should make everything use the actual jvmtiEnv->Allocate/Deallocate functions
+// once we can figure out which env to use.
+template <typename T>
+class FakeJvmtiDeleter {
+ public:
+ FakeJvmtiDeleter() {}
+
+ FakeJvmtiDeleter(FakeJvmtiDeleter&) = default;
+ FakeJvmtiDeleter(FakeJvmtiDeleter&&) = default;
+ FakeJvmtiDeleter& operator=(const FakeJvmtiDeleter&) = default;
+
+ template <typename U> void operator()(const U* ptr) const {
+ if (ptr != nullptr) {
+ free(const_cast<U*>(ptr));
+ }
+ }
+};
+
struct ClassCallback : public art::ClassLoadCallback {
void ClassPreDefine(const char* descriptor,
art::Handle<art::mirror::Class> klass,
@@ -173,7 +192,8 @@
// Call all Non-retransformable agents.
jint post_no_redefine_len = 0;
unsigned char* post_no_redefine_dex_data = nullptr;
- std::unique_ptr<const unsigned char> post_no_redefine_unique_ptr(nullptr);
+ std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>
+ post_no_redefine_unique_ptr(nullptr, FakeJvmtiDeleter<const unsigned char>());
event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookNonRetransformable>(
self,
static_cast<JNIEnv*>(env),
@@ -190,13 +210,16 @@
post_no_redefine_dex_data = const_cast<unsigned char*>(dex_file_copy->Begin());
post_no_redefine_len = dex_file_copy->Size();
} else {
- post_no_redefine_unique_ptr = std::unique_ptr<const unsigned char>(post_no_redefine_dex_data);
+ post_no_redefine_unique_ptr =
+ std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>(
+ post_no_redefine_dex_data, FakeJvmtiDeleter<const unsigned char>());
DCHECK_GT(post_no_redefine_len, 0);
}
// Call all retransformable agents.
jint final_len = 0;
unsigned char* final_dex_data = nullptr;
- std::unique_ptr<const unsigned char> final_dex_unique_ptr(nullptr);
+ std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>
+ final_dex_unique_ptr(nullptr, FakeJvmtiDeleter<const unsigned char>());
event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
self,
static_cast<JNIEnv*>(env),
@@ -213,7 +236,9 @@
final_dex_data = post_no_redefine_dex_data;
final_len = post_no_redefine_len;
} else {
- final_dex_unique_ptr = std::unique_ptr<const unsigned char>(final_dex_data);
+ final_dex_unique_ptr =
+ std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>(
+ final_dex_data, FakeJvmtiDeleter<const unsigned char>());
DCHECK_GT(final_len, 0);
}
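
The jvmti change replaces std::default_delete (which would run delete on memory the agent presumably handed back from a malloc-style allocator) with a deleter that calls free, keeping ASan happy. A trimmed-down version of that pairing (simplified from the FakeJvmtiDeleter above):

    #include <cstdlib>
    #include <memory>

    struct FreeDeleter {
      template <typename U>
      void operator()(const U* ptr) const {
        if (ptr != nullptr) {
          std::free(const_cast<U*>(ptr));  // matches a malloc-style allocation
        }
      }
    };

    using DexDataPtr = std::unique_ptr<const unsigned char, FreeDeleter>;

    // Usage sketch: wrap a buffer that was allocated with malloc.
    // DexDataPtr data(static_cast<unsigned char*>(std::malloc(len)), FreeDeleter());
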
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 3697f21..c46bd8d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1017,6 +1017,30 @@
MemMap::Init();
+ // Try to reserve a dedicated fault page. This is allocated for clobbered registers and sentinels.
+ // If we cannot reserve it, log a warning.
+ // Note: We allocate this first to have a good chance of grabbing the page. The address (0xebad..)
+ // is out-of-the-way enough that it should not collide with boot image mapping.
+ // Note: Don't request an error message. That will lead to a maps dump in the case of failure,
+ // leading to logspam.
+ {
+ constexpr uintptr_t kSentinelAddr =
+ RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
+ protected_fault_page_.reset(MemMap::MapAnonymous("Sentinel fault page",
+ reinterpret_cast<uint8_t*>(kSentinelAddr),
+ kPageSize,
+ PROT_NONE,
+ /* low_4g */ true,
+ /* reuse */ false,
+ /* error_msg */ nullptr));
+ if (protected_fault_page_ == nullptr) {
+ LOG(WARNING) << "Could not reserve sentinel fault page";
+ } else if (reinterpret_cast<uintptr_t>(protected_fault_page_->Begin()) != kSentinelAddr) {
+ LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
+ protected_fault_page_.reset();
+ }
+ }
+
using Opt = RuntimeArgumentMap;
VLOG(startup) << "Runtime::Init -verbose:startup enabled";
@@ -1401,27 +1425,6 @@
callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInitialAgents);
}
- // Try to reserve a dedicated fault page. This is allocated for clobbered registers and sentinels.
- // If we cannot reserve it, log a warning.
- // Note: This is allocated last so that the heap and other things have priority, if necessary.
- {
- constexpr uintptr_t kSentinelAddr =
- RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
- protected_fault_page_.reset(MemMap::MapAnonymous("Sentinel fault page",
- reinterpret_cast<uint8_t*>(kSentinelAddr),
- kPageSize,
- PROT_NONE,
- true,
- false,
- &error_msg));
- if (protected_fault_page_ == nullptr) {
- LOG(WARNING) << "Could not reserve sentinel fault page: " << error_msg;
- } else if (reinterpret_cast<uintptr_t>(protected_fault_page_->Begin()) != kSentinelAddr) {
- LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
- protected_fault_page_.reset();
- }
- }
-
VLOG(startup) << "Runtime::Init exiting";
return true;
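
Outside ART's MemMap wrapper, reserving the sentinel page amounts to an anonymous PROT_NONE mapping at a requested address plus a check that the kernel actually honored that address. A rough POSIX equivalent (not the MemMap API, and ignoring MemMap's low-4G handling):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // Reserve one inaccessible page at 'addr'; return nullptr if the kernel
    // refused or placed the mapping somewhere else.
    void* ReserveSentinelPage(uintptr_t addr, size_t page_size) {
      void* mapping = mmap(reinterpret_cast<void*>(addr), page_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, /*fd=*/ -1, /*offset=*/ 0);
      if (mapping == MAP_FAILED) {
        return nullptr;
      }
      if (reinterpret_cast<uintptr_t>(mapping) != addr) {
        munmap(mapping, page_size);  // wrong address: give the page back
        return nullptr;
      }
      return mapping;
    }
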
diff --git a/test/476-checker-ctor-memory-barrier/src/Main.java b/test/476-checker-ctor-memory-barrier/src/Main.java
index 70c5121..e887cd3 100644
--- a/test/476-checker-ctor-memory-barrier/src/Main.java
+++ b/test/476-checker-ctor-memory-barrier/src/Main.java
@@ -261,7 +261,7 @@
/// CHECK-START: void Main.testNewString() inliner (after)
/// CHECK-NOT: ConstructorFence
- /// CHECK: InvokeStaticOrDirect method_load_kind:string_init
+ /// CHECK: InvokeStaticOrDirect method_load_kind:StringInit
/// CHECK-NOT: ConstructorFence
/// CHECK-NOT: InvokeStaticOrDirect
public static void testNewString() {
diff --git a/test/488-checker-inline-recursive-calls/src/Main.java b/test/488-checker-inline-recursive-calls/src/Main.java
index 87ff3f7..441dbbf 100644
--- a/test/488-checker-inline-recursive-calls/src/Main.java
+++ b/test/488-checker-inline-recursive-calls/src/Main.java
@@ -25,10 +25,10 @@
}
/// CHECK-START: void Main.doTopCall(boolean) inliner (before)
- /// CHECK-NOT: InvokeStaticOrDirect method_load_kind:recursive
+ /// CHECK-NOT: InvokeStaticOrDirect method_load_kind:Recursive
/// CHECK-START: void Main.doTopCall(boolean) inliner (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:recursive
+ /// CHECK: InvokeStaticOrDirect method_load_kind:Recursive
public static void doTopCall(boolean first_call) {
if (first_call) {
inline1();
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index dd77423..3f81fd6 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -42,27 +42,27 @@
}
/// CHECK-START: int Main.testSimple(int) sharpening (before)
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_via_method
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCacheViaMethod
/// CHECK-START-ARM: int Main.testSimple(int) sharpening (after)
/// CHECK-NOT: ArmDexCacheArraysBase
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-ARM64: int Main.testSimple(int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-MIPS: int Main.testSimple(int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-MIPS64: int Main.testSimple(int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-X86: int Main.testSimple(int) sharpening (after)
/// CHECK-NOT: X86ComputeBaseMethodAddress
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-X86_64: int Main.testSimple(int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-ARM: int Main.testSimple(int) dex_cache_array_fixups_arm (after)
/// CHECK: ArmDexCacheArraysBase
@@ -78,33 +78,33 @@
}
/// CHECK-START: int Main.testDiamond(boolean, int) sharpening (before)
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_via_method
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCacheViaMethod
/// CHECK-START-ARM: int Main.testDiamond(boolean, int) sharpening (after)
/// CHECK-NOT: ArmDexCacheArraysBase
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-ARM64: int Main.testDiamond(boolean, int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-MIPS: int Main.testDiamond(boolean, int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-MIPS64: int Main.testDiamond(boolean, int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-X86: int Main.testDiamond(boolean, int) sharpening (after)
/// CHECK-NOT: X86ComputeBaseMethodAddress
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-X86_64: int Main.testDiamond(boolean, int) sharpening (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-ARM: int Main.testDiamond(boolean, int) dex_cache_array_fixups_arm (after)
/// CHECK: ArmDexCacheArraysBase
@@ -148,7 +148,7 @@
/// CHECK-NEXT: X86ComputeBaseMethodAddress
/// CHECK-NEXT: Goto
/// CHECK: begin_block
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
/// CHECK-START-ARM: int Main.testLoop(int[], int) dex_cache_array_fixups_arm (before)
/// CHECK-NOT: ArmDexCacheArraysBase
@@ -166,7 +166,7 @@
/// CHECK-NEXT: ArmDexCacheArraysBase
/// CHECK-NEXT: Goto
/// CHECK: begin_block
- /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:DexCachePcRelative
public static int testLoop(int[] array, int x) {
// PC-relative bases used by ARM, MIPS and X86 should be pulled before the loop.
@@ -216,33 +216,27 @@
/// CHECK-START-X86: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}}
+ /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry}}
/// CHECK-START-X86_64: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}}
+ /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry}}
/// CHECK-START-ARM: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}}
+ /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry}}
/// CHECK-START-ARM64: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}}
+ /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry}}
/// CHECK-START-MIPS: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}}
+ /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry}}
/// CHECK-START-MIPS64: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}}
+ /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry}}
public static String $noinline$getBootImageString() {
// Prevent inlining to avoid the string comparison being optimized away.
@@ -285,33 +279,27 @@
/// CHECK-START-X86: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry}} class_name:java.lang.String
/// CHECK-START-X86_64: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry}} class_name:java.lang.String
/// CHECK-START-ARM: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry}} class_name:java.lang.String
/// CHECK-START-ARM64: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry}} class_name:java.lang.String
/// CHECK-START-MIPS: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry}} class_name:java.lang.String
/// CHECK-START-MIPS64: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
- // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry}} class_name:java.lang.String
public static Class<?> $noinline$getStringClass() {
// Prevent inlining to avoid the string comparison being optimized away.
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index 3a2145bf..520e7c3 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -351,35 +351,6 @@
}
}
- /// CHECK-START: void Main.typeConv(byte[], byte[]) loop_optimization (before)
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
- //
- /// CHECK-START-ARM64: void Main.typeConv(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Vadd:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi1>>,<<Vadd>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>] loop:<<Loop2>> outer_loop:none
- //
- // Scalar code in cleanup loop uses correct byte type on array get and type conversion.
- private static void typeConv(byte[] a, byte[] b) {
- int len = Math.min(a.length, b.length);
- for (int i = 0; i < len; i++) {
- a[i] = (byte) (b[i] + 1);
- }
- }
-
public static void main(String[] args) {
expectEquals(10, earlyExitFirst(-1));
for (int i = 0; i <= 10; i++) {
@@ -482,17 +453,6 @@
expectEquals(40, bt[i]);
}
- byte[] b1 = new byte[259]; // few extra iterations
- byte[] b2 = new byte[259];
- for (int i = 0; i < 259; i++) {
- b1[i] = 0;
- b2[i] = (byte) i;
- }
- typeConv(b1, b2);
- for (int i = 0; i < 259; i++) {
- expectEquals((byte)(i + 1), b1[i]);
- }
-
System.out.println("passed");
}
diff --git a/test/640-checker-byte-simd/src/Main.java b/test/640-checker-byte-simd/src/Main.java
index 21d71e8..10b20b8 100644
--- a/test/640-checker-byte-simd/src/Main.java
+++ b/test/640-checker-byte-simd/src/Main.java
@@ -135,10 +135,8 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ // TODO: fill in when supported
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -149,9 +147,9 @@
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
- // TODO: would need signess flip.
- /// CHECK-START: void Main.shr2() loop_optimization (after)
- /// CHECK-NOT: VecUShr
+ /// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
diff --git a/test/640-checker-char-simd/src/Main.java b/test/640-checker-char-simd/src/Main.java
index 89d4b6b..0628b36 100644
--- a/test/640-checker-char-simd/src/Main.java
+++ b/test/640-checker-char-simd/src/Main.java
@@ -134,9 +134,9 @@
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
- // TODO: would need signess flip.
- /// CHECK-START: void Main.sar2() loop_optimization (after)
- /// CHECK-NOT: VecShr
+ /// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -148,10 +148,8 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ // TODO: fill in when supported
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
diff --git a/test/640-checker-double-simd/src/Main.java b/test/640-checker-double-simd/src/Main.java
index 5709b5d..0d4f87a 100644
--- a/test/640-checker-double-simd/src/Main.java
+++ b/test/640-checker-double-simd/src/Main.java
@@ -122,10 +122,8 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.conv(long[]) loop_optimization (after)
- /// CHECK-NOT: VecLoad
- /// CHECK-NOT: VecStore
//
- // TODO: fill in when long2double is supported
+ // TODO: fill in when supported
static void conv(long[] b) {
for (int i = 0; i < 128; i++)
a[i] = b[i];
diff --git a/test/640-checker-int-simd/src/Main.java b/test/640-checker-int-simd/src/Main.java
index 9ee553c..97048eb 100644
--- a/test/640-checker-int-simd/src/Main.java
+++ b/test/640-checker-int-simd/src/Main.java
@@ -136,10 +136,8 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ // TODO: fill in when supported
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -151,10 +149,8 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ // TODO: fill in when supported
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
diff --git a/test/640-checker-long-simd/src/Main.java b/test/640-checker-long-simd/src/Main.java
index 8f6af9d..e42c716 100644
--- a/test/640-checker-long-simd/src/Main.java
+++ b/test/640-checker-long-simd/src/Main.java
@@ -134,10 +134,8 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ // TODO: fill in when supported
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -149,10 +147,8 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ // TODO: fill in when supported
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
diff --git a/test/640-checker-short-simd/src/Main.java b/test/640-checker-short-simd/src/Main.java
index f62c726..241f8e6 100644
--- a/test/640-checker-short-simd/src/Main.java
+++ b/test/640-checker-short-simd/src/Main.java
@@ -135,10 +135,8 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ //
+ // TODO: fill in when supported
static void sar2() {
for (int i = 0; i < 128; i++)
a[i] >>= 2;
@@ -149,9 +147,9 @@
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
- // TODO: would need signess flip.
- /// CHECK-START: void Main.shr2() loop_optimization (after)
- /// CHECK-NOT: VecUShr
+ /// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
+ //
+ // TODO: fill in when supported
static void shr2() {
for (int i = 0; i < 128; i++)
a[i] >>>= 2;
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
index 5a63d9f..76850ab 100644
--- a/test/645-checker-abs-simd/src/Main.java
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -22,67 +22,6 @@
private static final int SPQUIET = 1 << 22;
private static final long DPQUIET = 1L << 51;
- /// CHECK-START: void Main.doitByte(byte[]) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
- //
- /// CHECK-START-ARM64: void Main.doitByte(byte[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
- //
- /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
- private static void doitByte(byte[] x) {
- for (int i = 0; i < x.length; i++) {
- x[i] = (byte) Math.abs(x[i]);
- }
- }
-
- /// CHECK-START: void Main.doitChar(char[]) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
- //
- /// CHECK-START: void Main.doitChar(char[]) loop_optimization (after)
- /// CHECK-NOT: VecAbs
- private static void doitChar(char[] x) {
- // Basically a nop due to zero extension.
- for (int i = 0; i < x.length; i++) {
- x[i] = (char) Math.abs(x[i]);
- }
- }
-
- /// CHECK-START: void Main.doitShort(short[]) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
- //
- /// CHECK-START-ARM64: void Main.doitShort(short[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
- //
- /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
- private static void doitShort(short[] x) {
- for (int i = 0; i < x.length; i++) {
- x[i] = (short) Math.abs(x[i]);
- }
- }
-
/// CHECK-START: void Main.doitInt(int[]) loop_optimization (before)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
@@ -113,16 +52,8 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitLong(long[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsLong loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
- /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ // TODO: Not supported yet.
private static void doitLong(long[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = Math.abs(x[i]);
@@ -159,16 +90,8 @@
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitDouble(double[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsDouble loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
- /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ // TODO: Not supported yet.
private static void doitDouble(double[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = Math.abs(x[i]);
@@ -176,31 +99,6 @@
}
public static void main(String[] args) {
- // Bytes, chars, shorts.
- byte[] xb = new byte[256];
- for (int i = 0; i < 256; i++) {
- xb[i] = (byte) i;
- }
- doitByte(xb);
- for (int i = 0; i < 256; i++) {
- expectEquals32((byte) Math.abs((byte) i), xb[i]);
- }
- char[] xc = new char[1024 * 64];
- for (int i = 0; i < 1024 * 64; i++) {
- xc[i] = (char) i;
- }
- doitChar(xc);
- for (int i = 0; i < 1024 *64; i++) {
- expectEquals32((char) Math.abs((char) i), xc[i]);
- }
- short[] xs = new short[1024 * 64];
- for (int i = 0; i < 1024 * 64; i++) {
- xs[i] = (short) i;
- }
- doitShort(xs);
- for (int i = 0; i < 1024 * 64; i++) {
- expectEquals32((short) Math.abs((short) i), xs[i]);
- }
// Set up minint32, maxint32 and some others.
int[] xi = new int[8];
xi[0] = 0x80000000;
diff --git a/test/651-checker-byte-simd-minmax/src/Main.java b/test/651-checker-byte-simd-minmax/src/Main.java
index fe45807..8211ace 100644
--- a/test/651-checker-byte-simd-minmax/src/Main.java
+++ b/test/651-checker-byte-simd-minmax/src/Main.java
@@ -27,12 +27,9 @@
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecMin
private static void doitMin(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -40,30 +37,6 @@
}
}
- /// CHECK-START-ARM64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (before)
- /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
- //
- /// CHECK-START-ARM64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
- private static void doitMinUnsigned(byte[] x, byte[] y, byte[] z) {
- int min = Math.min(x.length, Math.min(y.length, z.length));
- for (int i = 0; i < min; i++) {
- x[i] = (byte) Math.min(y[i] & 0xff, z[i] & 0xff);
- }
- }
-
/// CHECK-START: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (before)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -72,12 +45,9 @@
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecMax
private static void doitMax(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -85,30 +55,6 @@
}
}
- /// CHECK-START-ARM64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (before)
- /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
- //
- /// CHECK-START-ARM64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
- private static void doitMaxUnsigned(byte[] x, byte[] y, byte[] z) {
- int min = Math.min(x.length, Math.min(y.length, z.length));
- for (int i = 0; i < min; i++) {
- x[i] = (byte) Math.max(y[i] & 0xff, z[i] & 0xff);
- }
- }
-
public static void main(String[] args) {
// Initialize cross-values for all possible values.
int total = 256 * 256;
@@ -131,21 +77,11 @@
byte expected = (byte) Math.min(y[i], z[i]);
expectEquals(expected, x[i]);
}
- doitMinUnsigned(x, y, z);
- for (int i = 0; i < total; i++) {
- byte expected = (byte) Math.min(y[i] & 0xff, z[i] & 0xff);
- expectEquals(expected, x[i]);
- }
doitMax(x, y, z);
for (int i = 0; i < total; i++) {
byte expected = (byte) Math.max(y[i], z[i]);
expectEquals(expected, x[i]);
}
- doitMaxUnsigned(x, y, z);
- for (int i = 0; i < total; i++) {
- byte expected = (byte) Math.max(y[i] & 0xff, z[i] & 0xff);
- expectEquals(expected, x[i]);
- }
System.out.println("passed");
}
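
The doitMinUnsigned/doitMaxUnsigned variants removed above get unsigned byte semantics out of Java's signed byte type by masking with 0xff before comparing and narrowing the result back. A minimal standalone sketch of that idiom (illustration only, not part of this change):

public class UnsignedByteMinSketch {
  // Unsigned byte min: widen both operands to [0, 255] with & 0xff,
  // compare in that range, then narrow the result back to byte.
  static byte minUnsigned(byte a, byte b) {
    return (byte) Math.min(a & 0xff, b & 0xff);
  }

  public static void main(String[] args) {
    byte a = (byte) 0x80;  // -128 signed, 128 unsigned.
    byte b = (byte) 0x01;  //    1 signed,   1 unsigned.
    System.out.println(Math.min(a, b));     // -128: signed min picks 0x80.
    System.out.println(minUnsigned(a, b));  //    1: unsigned min picks 0x01.
  }
}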
diff --git a/test/651-checker-char-simd-minmax/src/Main.java b/test/651-checker-char-simd-minmax/src/Main.java
index e2998da..5ce7b94 100644
--- a/test/651-checker-char-simd-minmax/src/Main.java
+++ b/test/651-checker-char-simd-minmax/src/Main.java
@@ -27,12 +27,9 @@
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecMin
private static void doitMin(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -48,12 +45,9 @@
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecMax
private static void doitMax(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-double-simd-minmax/src/Main.java b/test/651-checker-double-simd-minmax/src/Main.java
index cf04f85..e1711ae 100644
--- a/test/651-checker-double-simd-minmax/src/Main.java
+++ b/test/651-checker-double-simd-minmax/src/Main.java
@@ -48,7 +48,7 @@
/// CHECK-DAG: <<Max:d\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxDoubleDouble loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
- // TODO x86: 0.0 vs -0.0?
+ // TODO-x86: 0.0 vs -0.0?
//
/// CHECK-START-ARM64: void Main.doitMax(double[], double[], double[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
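
The TODO-x86 note above points at the signed-zero corner case that makes floating-point min/max awkward to vectorize: Math.max and Math.min treat 0.0 and -0.0 as distinct, whereas a naive compare-and-select does not. A small illustration (not part of this change):

public class SignedZeroMaxSketch {
  // Naive compare-and-select, the shape a simple SIMD max reduction would give.
  static double naiveMax(double a, double b) {
    return a > b ? a : b;
  }

  public static void main(String[] args) {
    double pz = 0.0, nz = -0.0;
    // Math.max orders -0.0 below 0.0, so it returns +0.0 here.
    System.out.println(1.0 / Math.max(pz, nz));  // Infinity
    // 0.0 > -0.0 is false, so the naive form returns the second operand, -0.0.
    System.out.println(1.0 / naiveMax(pz, nz));  // -Infinity
  }
}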
diff --git a/test/651-checker-int-simd-minmax/src/Main.java b/test/651-checker-int-simd-minmax/src/Main.java
index 6cee7b5..4e05a9d 100644
--- a/test/651-checker-int-simd-minmax/src/Main.java
+++ b/test/651-checker-int-simd-minmax/src/Main.java
@@ -30,7 +30,7 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -50,7 +50,7 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
diff --git a/test/651-checker-short-simd-minmax/src/Main.java b/test/651-checker-short-simd-minmax/src/Main.java
index 7cbadaf..f34f526 100644
--- a/test/651-checker-short-simd-minmax/src/Main.java
+++ b/test/651-checker-short-simd-minmax/src/Main.java
@@ -27,12 +27,9 @@
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecMin
private static void doitMin(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -40,30 +37,6 @@
}
}
- /// CHECK-START-ARM64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (before)
- /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
- //
- /// CHECK-START-ARM64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
- private static void doitMinUnsigned(short[] x, short[] y, short[] z) {
- int min = Math.min(x.length, Math.min(y.length, z.length));
- for (int i = 0; i < min; i++) {
- x[i] = (short) Math.min(y[i] & 0xffff, z[i] & 0xffff);
- }
- }
-
/// CHECK-START: void Main.doitMax(short[], short[], short[]) loop_optimization (before)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -72,12 +45,9 @@
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecMax
private static void doitMax(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -85,30 +55,6 @@
}
}
- /// CHECK-START-ARM64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (before)
- /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
- //
- /// CHECK-START-ARM64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
- private static void doitMaxUnsigned(short[] x, short[] y, short[] z) {
- int min = Math.min(x.length, Math.min(y.length, z.length));
- for (int i = 0; i < min; i++) {
- x[i] = (short) Math.max(y[i] & 0xffff, z[i] & 0xffff);
- }
- }
-
public static void main(String[] args) {
short[] interesting = {
(short) 0x0000, (short) 0x0001, (short) 0x007f,
@@ -145,21 +91,11 @@
short expected = (short) Math.min(y[i], z[i]);
expectEquals(expected, x[i]);
}
- doitMinUnsigned(x, y, z);
- for (int i = 0; i < total; i++) {
- short expected = (short) Math.min(y[i] & 0xffff, z[i] & 0xffff);
- expectEquals(expected, x[i]);
- }
doitMax(x, y, z);
for (int i = 0; i < total; i++) {
short expected = (short) Math.max(y[i], z[i]);
expectEquals(expected, x[i]);
}
- doitMaxUnsigned(x, y, z);
- for (int i = 0; i < total; i++) {
- short expected = (short) Math.max(y[i] & 0xffff, z[i] & 0xffff);
- expectEquals(expected, x[i]);
- }
System.out.println("passed");
}
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index a89fe5b..ca52a99 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -668,12 +668,6 @@
# Note: this is required as envsetup right now exports detect_leaks=0.
RUN_TEST_ASAN_OPTIONS=""
-# JVMTI has a mismatch of malloc with delete. b/38322765
-if [ "x$RUN_TEST_ASAN_OPTIONS" != "x" ] ; then
- RUN_TEST_ASAN_OPTIONS="${RUN_TEST_ASAN_OPTIONS}:"
-fi
-RUN_TEST_ASAN_OPTIONS="${RUN_TEST_ASAN_OPTIONS}alloc_dealloc_mismatch=0"
-
# Multiple shutdown leaks. b/38341789
if [ "x$RUN_TEST_ASAN_OPTIONS" != "x" ] ; then
RUN_TEST_ASAN_OPTIONS="${RUN_TEST_ASAN_OPTIONS}:"
diff --git a/tools/ahat/src/heapdump/AhatClassInstance.java b/tools/ahat/src/heapdump/AhatClassInstance.java
index 273530a..c10d604 100644
--- a/tools/ahat/src/heapdump/AhatClassInstance.java
+++ b/tools/ahat/src/heapdump/AhatClassInstance.java
@@ -154,10 +154,7 @@
}
@Override public AhatInstance getAssociatedBitmapInstance() {
- if (isInstanceOfClass("android.graphics.Bitmap")) {
- return this;
- }
- return null;
+ return getBitmapInfo() == null ? null : this;
}
@Override public boolean isClassInstance() {
@@ -178,14 +175,27 @@
* Returns null if the field value is null, not a byte[] or could not be read.
*/
private byte[] getByteArrayField(String fieldName) {
- Value value = getField(fieldName);
- if (!value.isAhatInstance()) {
- return null;
- }
- return value.asAhatInstance().asByteArray();
+ AhatInstance field = getRefField(fieldName);
+ return field == null ? null : field.asByteArray();
}
- public BufferedImage asBitmap() {
+ private static class BitmapInfo {
+ public final int width;
+ public final int height;
+ public final byte[] buffer;
+
+ public BitmapInfo(int width, int height, byte[] buffer) {
+ this.width = width;
+ this.height = height;
+ this.buffer = buffer;
+ }
+ }
+
+ /**
+ * Return bitmap info for this object, or null if no appropriate bitmap
+ * info is available.
+ */
+ private BitmapInfo getBitmapInfo() {
if (!isInstanceOfClass("android.graphics.Bitmap")) {
return null;
}
@@ -205,20 +215,34 @@
return null;
}
+ if (buffer.length < 4 * height * width) {
+ return null;
+ }
+
+ return new BitmapInfo(width, height, buffer);
+
+ }
+
+ public BufferedImage asBitmap() {
+ BitmapInfo info = getBitmapInfo();
+ if (info == null) {
+ return null;
+ }
+
// Convert the raw data to an image
// Convert BGRA to ABGR
- int[] abgr = new int[height * width];
+ int[] abgr = new int[info.height * info.width];
for (int i = 0; i < abgr.length; i++) {
abgr[i] = (
- (((int) buffer[i * 4 + 3] & 0xFF) << 24)
- + (((int) buffer[i * 4 + 0] & 0xFF) << 16)
- + (((int) buffer[i * 4 + 1] & 0xFF) << 8)
- + ((int) buffer[i * 4 + 2] & 0xFF));
+ (((int) info.buffer[i * 4 + 3] & 0xFF) << 24)
+ + (((int) info.buffer[i * 4 + 0] & 0xFF) << 16)
+ + (((int) info.buffer[i * 4 + 1] & 0xFF) << 8)
+ + ((int) info.buffer[i * 4 + 2] & 0xFF));
}
BufferedImage bitmap = new BufferedImage(
- width, height, BufferedImage.TYPE_4BYTE_ABGR);
- bitmap.setRGB(0, 0, width, height, abgr, 0, width);
+ info.width, info.height, BufferedImage.TYPE_4BYTE_ABGR);
+ bitmap.setRGB(0, 0, info.width, info.height, abgr, 0, info.width);
return bitmap;
}
}
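
For reference, the per-pixel packing that asBitmap() now applies to info.buffer, pulled out into a standalone sketch (illustration only; the byte order and validation follow the code above):

import java.awt.image.BufferedImage;

public class BitmapPackingSketch {
  // Mirrors asBitmap(): 4 bytes per pixel, byte 3 of each pixel goes into the
  // top 8 bits of the packed int, bytes 0..2 fill the lower fields, and the
  // packed array is handed to BufferedImage.setRGB(). The & 0xFF masks matter
  // because Java bytes are signed.
  static BufferedImage toBitmap(int width, int height, byte[] buffer) {
    if (buffer == null || buffer.length < 4 * height * width) {
      return null;  // Same validation as getBitmapInfo().
    }
    int[] abgr = new int[height * width];
    for (int i = 0; i < abgr.length; i++) {
      abgr[i] = (((int) buffer[i * 4 + 3] & 0xFF) << 24)
          + (((int) buffer[i * 4 + 0] & 0xFF) << 16)
          + (((int) buffer[i * 4 + 1] & 0xFF) << 8)
          + ((int) buffer[i * 4 + 2] & 0xFF);
    }
    BufferedImage bitmap =
        new BufferedImage(width, height, BufferedImage.TYPE_4BYTE_ABGR);
    bitmap.setRGB(0, 0, width, height, abgr, 0, width);
    return bitmap;
  }
}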