Merge "Thread-local mark stacks for the CC collector."
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 77f39c4..ee0cb09 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -40,7 +40,7 @@
ART_BUILD_HOST_STATIC := false
endif
-ifneq ($$(HOST_OS),linux)
+ifneq ($(HOST_OS),linux)
ART_BUILD_HOST_STATIC := false
endif
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 183f4e3..a561c5f 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -88,4 +88,8 @@
HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
+
+# Classpath for Jack compilation: we only need core-libart.
+HOST_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack)
+TARGET_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack)
endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index a1d8226..a122ceb 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -71,18 +71,18 @@
CompilerOptions::kDefaultSmallMethodThreshold,
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
- false,
+ /* include_patch_information */ false,
CompilerOptions::kDefaultTopKProfileThreshold,
- false, // TODO: Think about debuggability of JIT-compiled code.
+ Runtime::Current()->IsDebuggable(),
CompilerOptions::kDefaultGenerateDebugInfo,
- false,
- false,
- false,
- false, // pic
- nullptr,
+ /* implicit_null_checks */ true,
+ /* implicit_so_checks */ true,
+ /* implicit_suspend_checks */ false,
+ /* pic */ true, // TODO: Support non-PIC in optimizing.
+ /* verbose_methods */ nullptr,
pass_manager_options,
- nullptr,
- false));
+ /* init_failure_output */ nullptr,
+ /* abort_on_hard_verifier_failure */ false));
const InstructionSet instruction_set = kRuntimeISA;
instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
cumulative_logger_.reset(new CumulativeLogger("jit times"));
@@ -92,10 +92,23 @@
method_inliner_map_.get(),
CompilerCallbacks::CallbackMode::kCompileApp));
compiler_driver_.reset(new CompilerDriver(
- compiler_options_.get(), verification_results_.get(), method_inliner_map_.get(),
- Compiler::kQuick, instruction_set, instruction_set_features_.get(), false,
- nullptr, nullptr, nullptr, 1, false, true,
- std::string(), cumulative_logger_.get(), -1, std::string()));
+ compiler_options_.get(),
+ verification_results_.get(),
+ method_inliner_map_.get(),
+ Compiler::kOptimizing,
+ instruction_set,
+ instruction_set_features_.get(),
+ /* image */ false,
+ /* image_classes */ nullptr,
+ /* compiled_classes */ nullptr,
+ /* compiled_methods */ nullptr,
+ /* thread_count */ 1,
+ /* dump_stats */ false,
+ /* dump_passes */ false,
+ /* dump_cfg_file_name */ "",
+ cumulative_logger_.get(),
+ /* swap_fd */ -1,
+ /* profile_file */ ""));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
compiler_driver_->SetSupportBootImageFixup(false);
@@ -195,9 +208,14 @@
std::copy(quick_code->data(), quick_code->data() + code_size, code_ptr);
// After we are done writing we need to update the method header.
// Write out the method header last.
- method_header = new(method_header)OatQuickMethodHeader(
- code_ptr - mapping_table, code_ptr - vmap_table, code_ptr - gc_map, frame_size_in_bytes,
- core_spill_mask, fp_spill_mask, code_size);
+ method_header = new(method_header) OatQuickMethodHeader(
+ (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
+ (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
+ (gc_map == nullptr) ? 0 : code_ptr - gc_map,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code_size);
// Return the code ptr.
return code_ptr;
}
@@ -216,23 +234,35 @@
auto* const mapping_table = compiled_method->GetMappingTable();
auto* const vmap_table = compiled_method->GetVmapTable();
auto* const gc_map = compiled_method->GetGcMap();
- CHECK(gc_map != nullptr) << PrettyMethod(method);
- // Write out pre-header stuff.
- uint8_t* const mapping_table_ptr = code_cache->AddDataArray(
- self, mapping_table->data(), mapping_table->data() + mapping_table->size());
- if (mapping_table_ptr == nullptr) {
- return false; // Out of data cache.
+ uint8_t* mapping_table_ptr = nullptr;
+ uint8_t* vmap_table_ptr = nullptr;
+ uint8_t* gc_map_ptr = nullptr;
+
+ if (mapping_table != nullptr) {
+ // Write out pre-header stuff.
+ mapping_table_ptr = code_cache->AddDataArray(
+ self, mapping_table->data(), mapping_table->data() + mapping_table->size());
+ if (mapping_table_ptr == nullptr) {
+ return false; // Out of data cache.
+ }
}
- uint8_t* const vmap_table_ptr = code_cache->AddDataArray(
- self, vmap_table->data(), vmap_table->data() + vmap_table->size());
- if (vmap_table_ptr == nullptr) {
- return false; // Out of data cache.
+
+ if (vmap_table != nullptr) {
+ vmap_table_ptr = code_cache->AddDataArray(
+ self, vmap_table->data(), vmap_table->data() + vmap_table->size());
+ if (vmap_table_ptr == nullptr) {
+ return false; // Out of data cache.
+ }
}
- uint8_t* const gc_map_ptr = code_cache->AddDataArray(
- self, gc_map->data(), gc_map->data() + gc_map->size());
- if (gc_map_ptr == nullptr) {
- return false; // Out of data cache.
+
+ if (gc_map != nullptr) {
+ gc_map_ptr = code_cache->AddDataArray(
+ self, gc_map->data(), gc_map->data() + gc_map->size());
+ if (gc_map_ptr == nullptr) {
+ return false; // Out of data cache.
+ }
}
+
// Don't touch this until you protect / unprotect the code.
const size_t reserve_size = sizeof(OatQuickMethodHeader) + quick_code->size() + 32;
uint8_t* const code_reserve = code_cache->ReserveCode(self, reserve_size);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index bb1b40a..1319f2c 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -333,18 +333,41 @@
return;
}
- const size_t num_blocks = graph_->GetBlocks().Size();
- ArenaBitVector can_block_throw(arena_, num_blocks, false);
+ // Bit vector stores information on which blocks contain throwing instructions.
+ // Must be expandable because catch blocks may be split into two.
+ ArenaBitVector can_block_throw(arena_, graph_->GetBlocks().Size(), /* expandable */ true);
// Scan blocks and mark those which contain throwing instructions.
- for (size_t block_id = 0; block_id < num_blocks; ++block_id) {
+ for (size_t block_id = 0, e = graph_->GetBlocks().Size(); block_id < e; ++block_id) {
HBasicBlock* block = graph_->GetBlocks().Get(block_id);
+ bool can_throw = false;
for (HInstructionIterator insn(block->GetInstructions()); !insn.Done(); insn.Advance()) {
if (insn.Current()->CanThrow()) {
- can_block_throw.SetBit(block_id);
+ can_throw = true;
break;
}
}
+
+ if (can_throw) {
+ if (block->IsCatchBlock()) {
+ // Catch blocks are always considered an entry point into the TryItem in
+ // order to avoid splitting exceptional edges. We split the block after
+ // the move-exception (if present) and mark the first part non-throwing.
+ // Later on, a TryBoundary will be inserted between the two blocks.
+ HInstruction* first_insn = block->GetFirstInstruction();
+ if (first_insn->IsLoadException()) {
+ // Catch block starts with a LoadException. Split the block after the
+ // StoreLocal that must come after the load.
+ DCHECK(first_insn->GetNext()->IsStoreLocal());
+ block = block->SplitBefore(first_insn->GetNext()->GetNext());
+ } else {
+ // Catch block does not load the exception. Split at the beginning to
+ // create an empty catch block.
+ block = block->SplitBefore(first_insn);
+ }
+ }
+ can_block_throw.SetBit(block->GetBlockId());
+ }
}
// Iterate over all blocks, find those covered by some TryItem and:
@@ -353,7 +376,7 @@
// (c) link the new blocks to corresponding exception handlers.
// We cannot iterate only over blocks in `branch_targets_` because switch-case
// blocks share the same dex_pc.
- for (size_t block_id = 0; block_id < num_blocks; ++block_id) {
+ for (size_t block_id = 0, e = graph_->GetBlocks().Size(); block_id < e; ++block_id) {
HBasicBlock* try_block = graph_->GetBlocks().Get(block_id);
// TryBoundary blocks are added at the end of the list and not iterated over.
@@ -365,57 +388,35 @@
continue;
}
- if (try_block->IsCatchBlock()) {
- // Catch blocks are always considered an entry point into the TryItem in
- // order to avoid splitting exceptional edges (they might not have been
- // created yet). We separate the move-exception (if present) from the
- // rest of the block and insert a TryBoundary after it, creating a
- // landing pad for the exceptional edges.
- HInstruction* first_insn = try_block->GetFirstInstruction();
- HInstruction* split_position = nullptr;
- if (first_insn->IsLoadException()) {
- // Catch block starts with a LoadException. Split the block after the
- // StoreLocal that must come after the load.
- DCHECK(first_insn->GetNext()->IsStoreLocal());
- split_position = first_insn->GetNext()->GetNext();
- } else {
- // Catch block does not obtain the exception. Split at the beginning
- // to create an empty catch block.
- split_position = first_insn;
- }
- DCHECK(split_position != nullptr);
- HBasicBlock* catch_block = try_block;
- try_block = catch_block->SplitBefore(split_position);
- SplitTryBoundaryEdge(catch_block, try_block, HTryBoundary::kEntry, code_item, *try_item);
- } else {
- // For non-catch blocks, find predecessors which are not covered by the
- // same TryItem range. Such edges enter the try block and will have
- // a TryBoundary inserted.
- for (size_t i = 0; i < try_block->GetPredecessors().Size(); ++i) {
- HBasicBlock* predecessor = try_block->GetPredecessors().Get(i);
- if (predecessor->IsSingleTryBoundary()) {
- // The edge was already split because of an exit from a neighbouring
- // TryItem. We split it again and insert an entry point.
- if (kIsDebugBuild) {
- HTryBoundary* last_insn = predecessor->GetLastInstruction()->AsTryBoundary();
- const DexFile::TryItem* predecessor_try_item =
- GetTryItem(predecessor->GetSinglePredecessor(), code_item, can_block_throw);
- DCHECK(!last_insn->IsEntry());
- DCHECK_EQ(last_insn->GetNormalFlowSuccessor(), try_block);
- DCHECK(try_block->IsFirstIndexOfPredecessor(predecessor, i));
- DCHECK_NE(try_item, predecessor_try_item);
- }
- } else if (GetTryItem(predecessor, code_item, can_block_throw) != try_item) {
- // This is an entry point into the TryItem and the edge has not been
- // split yet. That means that `predecessor` is not in a TryItem, or
- // it is in a different TryItem and we happened to iterate over this
- // block first. We split the edge and insert an entry point.
- } else {
- // Not an edge on the boundary of the try block.
- continue;
+ // Catch blocks were split earlier and cannot throw.
+ DCHECK(!try_block->IsCatchBlock());
+
+ // Find predecessors which are not covered by the same TryItem range. Such
+ // edges enter the try block and will have a TryBoundary inserted.
+ for (size_t i = 0; i < try_block->GetPredecessors().Size(); ++i) {
+ HBasicBlock* predecessor = try_block->GetPredecessors().Get(i);
+ if (predecessor->IsSingleTryBoundary()) {
+ // The edge was already split because of an exit from a neighbouring
+ // TryItem. We split it again and insert an entry point.
+ if (kIsDebugBuild) {
+ HTryBoundary* last_insn = predecessor->GetLastInstruction()->AsTryBoundary();
+ const DexFile::TryItem* predecessor_try_item =
+ GetTryItem(predecessor->GetSinglePredecessor(), code_item, can_block_throw);
+ DCHECK(!last_insn->IsEntry());
+ DCHECK_EQ(last_insn->GetNormalFlowSuccessor(), try_block);
+ DCHECK(try_block->IsFirstIndexOfPredecessor(predecessor, i));
+ DCHECK_NE(try_item, predecessor_try_item);
}
- SplitTryBoundaryEdge(predecessor, try_block, HTryBoundary::kEntry, code_item, *try_item);
+ } else if (GetTryItem(predecessor, code_item, can_block_throw) != try_item) {
+ // This is an entry point into the TryItem and the edge has not been
+ // split yet. That means that `predecessor` is not in a TryItem, or
+ // it is in a different TryItem and we happened to iterate over this
+ // block first. We split the edge and insert an entry point.
+ } else {
+ // Not an edge on the boundary of the try block.
+ continue;
}
+ SplitTryBoundaryEdge(predecessor, try_block, HTryBoundary::kEntry, code_item, *try_item);
}
// Find successors which are not covered by the same TryItem range. Such
@@ -755,6 +756,35 @@
current_block_ = nullptr;
}
+void HGraphBuilder::PotentiallySimplifyFakeString(uint16_t original_dex_register,
+ uint32_t dex_pc,
+ HInvoke* actual_string) {
+ if (!graph_->IsDebuggable()) {
+ // Notify that we cannot compile with baseline. The dex registers aliasing
+ // with `original_dex_register` will be handled when we optimize
+ // (see HInstructionSimplifier::VisitFakeString).
+ can_use_baseline_for_string_init_ = false;
+ return;
+ }
+ const VerifiedMethod* verified_method =
+ compiler_driver_->GetVerifiedMethod(dex_file_, dex_compilation_unit_->GetDexMethodIndex());
+ if (verified_method != nullptr) {
+ UpdateLocal(original_dex_register, actual_string);
+ const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
+ verified_method->GetStringInitPcRegMap();
+ auto map_it = string_init_map.find(dex_pc);
+ if (map_it != string_init_map.end()) {
+ std::set<uint32_t> reg_set = map_it->second;
+ for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
+ HInstruction* load_local = LoadLocal(original_dex_register, Primitive::kPrimNot);
+ UpdateLocal(*set_it, load_local);
+ }
+ }
+ } else {
+ can_use_baseline_for_string_init_ = false;
+ }
+}
+
bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
uint32_t dex_pc,
uint32_t method_idx,
@@ -996,34 +1026,23 @@
if (clinit_check_requirement == HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit) {
// Add the class initialization check as last input of `invoke`.
DCHECK(clinit_check != nullptr);
+ DCHECK(!is_string_init);
invoke->SetArgumentAt(argument_index, clinit_check);
+ argument_index++;
}
- current_block_->AddInstruction(invoke);
- latest_result_ = invoke;
-
// Add move-result for StringFactory method.
if (is_string_init) {
uint32_t orig_this_reg = is_range ? register_index : args[0];
- UpdateLocal(orig_this_reg, invoke);
- const VerifiedMethod* verified_method =
- compiler_driver_->GetVerifiedMethod(dex_file_, dex_compilation_unit_->GetDexMethodIndex());
- if (verified_method == nullptr) {
- LOG(WARNING) << "No verified method for method calling String.<init>: "
- << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_);
- return false;
- }
- const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
- verified_method->GetStringInitPcRegMap();
- auto map_it = string_init_map.find(dex_pc);
- if (map_it != string_init_map.end()) {
- std::set<uint32_t> reg_set = map_it->second;
- for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
- HInstruction* load_local = LoadLocal(orig_this_reg, Primitive::kPrimNot);
- UpdateLocal(*set_it, load_local);
- }
- }
+ HInstruction* fake_string = LoadLocal(orig_this_reg, Primitive::kPrimNot);
+ invoke->SetArgumentAt(argument_index, fake_string);
+ current_block_->AddInstruction(invoke);
+ PotentiallySimplifyFakeString(orig_this_reg, dex_pc, invoke);
+ } else {
+ current_block_->AddInstruction(invoke);
}
+ latest_result_ = invoke;
+
return true;
}
@@ -2238,10 +2257,10 @@
case Instruction::NEW_INSTANCE: {
uint16_t type_index = instruction.VRegB_21c();
if (compiler_driver_->IsStringTypeIndex(type_index, dex_file_)) {
- // Turn new-instance of string into a const 0.
int32_t register_index = instruction.VRegA();
- HNullConstant* constant = graph_->GetNullConstant();
- UpdateLocal(register_index, constant);
+ HFakeString* fake_string = new (arena_) HFakeString();
+ current_block_->AddInstruction(fake_string);
+ UpdateLocal(register_index, fake_string);
} else {
QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
? kQuickAllocObjectWithAccessCheck
@@ -2328,27 +2347,27 @@
}
case Instruction::CMP_LONG: {
- Binop_23x_cmp(instruction, Primitive::kPrimLong, kNoBias, dex_pc);
+ Binop_23x_cmp(instruction, Primitive::kPrimLong, ComparisonBias::kNoBias, dex_pc);
break;
}
case Instruction::CMPG_FLOAT: {
- Binop_23x_cmp(instruction, Primitive::kPrimFloat, kGtBias, dex_pc);
+ Binop_23x_cmp(instruction, Primitive::kPrimFloat, ComparisonBias::kGtBias, dex_pc);
break;
}
case Instruction::CMPG_DOUBLE: {
- Binop_23x_cmp(instruction, Primitive::kPrimDouble, kGtBias, dex_pc);
+ Binop_23x_cmp(instruction, Primitive::kPrimDouble, ComparisonBias::kGtBias, dex_pc);
break;
}
case Instruction::CMPL_FLOAT: {
- Binop_23x_cmp(instruction, Primitive::kPrimFloat, kLtBias, dex_pc);
+ Binop_23x_cmp(instruction, Primitive::kPrimFloat, ComparisonBias::kLtBias, dex_pc);
break;
}
case Instruction::CMPL_DOUBLE: {
- Binop_23x_cmp(instruction, Primitive::kPrimDouble, kLtBias, dex_pc);
+ Binop_23x_cmp(instruction, Primitive::kPrimDouble, ComparisonBias::kLtBias, dex_pc);
break;
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 7098eb8..76610f5 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -54,6 +54,7 @@
return_type_(Primitive::GetType(dex_compilation_unit_->GetShorty()[0])),
code_start_(nullptr),
latest_result_(nullptr),
+ can_use_baseline_for_string_init_(true),
compilation_stats_(compiler_stats) {}
// Only for unit testing.
@@ -72,10 +73,15 @@
return_type_(return_type),
code_start_(nullptr),
latest_result_(nullptr),
+ can_use_baseline_for_string_init_(true),
compilation_stats_(nullptr) {}
bool BuildGraph(const DexFile::CodeItem& code);
+ bool CanUseBaselineForStringInit() const {
+ return can_use_baseline_for_string_init_;
+ }
+
static constexpr const char* kBuilderPassName = "builder";
private:
@@ -251,6 +257,10 @@
// Returns whether `type_index` points to the outer-most compiling method's class.
bool IsOutermostCompilingClass(uint16_t type_index) const;
+ void PotentiallySimplifyFakeString(uint16_t original_dex_register,
+ uint32_t dex_pc,
+ HInvoke* invoke);
+
ArenaAllocator* const arena_;
// A list of the size of the dex code holding block information for
@@ -290,6 +300,11 @@
// used by move-result instructions.
HInstruction* latest_result_;
+ // We need to know whether we have built a graph that has calls to StringFactory
+ // and hasn't gone through the verifier. If the following flag is `false`, then
+ // we cannot compile with baseline.
+ bool can_use_baseline_for_string_init_;
+
OptimizingCompilerStats* compilation_stats_;
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e3683ef..75b8f06 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -334,7 +334,7 @@
#undef __
#define __ down_cast<ArmAssembler*>(GetAssembler())->
-inline Condition ARMCondition(IfCondition cond) {
+inline Condition ARMSignedOrFPCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return EQ;
case kCondNE: return NE;
@@ -342,24 +342,22 @@
case kCondLE: return LE;
case kCondGT: return GT;
case kCondGE: return GE;
- default:
- LOG(FATAL) << "Unknown if condition";
}
- return EQ; // Unreachable.
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
}
-inline Condition ARMOppositeCondition(IfCondition cond) {
+inline Condition ARMUnsignedCondition(IfCondition cond) {
switch (cond) {
- case kCondEQ: return NE;
- case kCondNE: return EQ;
- case kCondLT: return GE;
- case kCondLE: return GT;
- case kCondGT: return LE;
- case kCondGE: return LT;
- default:
- LOG(FATAL) << "Unknown if condition";
+ case kCondEQ: return EQ;
+ case kCondNE: return NE;
+ case kCondLT: return LO;
+ case kCondLE: return LS;
+ case kCondGT: return HI;
+ case kCondGE: return HS;
}
- return EQ; // Unreachable.
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
}
void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -1008,6 +1006,142 @@
UNUSED(exit);
}
+void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
+ ShifterOperand operand;
+ if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, right, &operand)) {
+ __ cmp(left, operand);
+ } else {
+ Register temp = IP;
+ __ LoadImmediate(temp, right);
+ __ cmp(left, ShifterOperand(temp));
+ }
+}
+
+void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
+ Label* true_label,
+ Label* false_label) {
+ __ vmstat(); // transfer FP status register to ARM APSR.
+ if (cond->IsFPConditionTrueIfNaN()) {
+ __ b(true_label, VS); // VS for unordered.
+ } else if (cond->IsFPConditionFalseIfNaN()) {
+ __ b(false_label, VS); // VS for unordered.
+ }
+ __ b(true_label, ARMSignedOrFPCondition(cond->GetCondition()));
+}
+
+void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
+ Label* true_label,
+ Label* false_label) {
+ LocationSummary* locations = cond->GetLocations();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+ IfCondition if_cond = cond->GetCondition();
+
+ Register left_high = left.AsRegisterPairHigh<Register>();
+ Register left_low = left.AsRegisterPairLow<Register>();
+ IfCondition true_high_cond = if_cond;
+ IfCondition false_high_cond = cond->GetOppositeCondition();
+ Condition final_condition = ARMUnsignedCondition(if_cond);
+
+ // Set the conditions for the test, remembering that == needs to be
+ // decided using the low words.
+ switch (if_cond) {
+ case kCondEQ:
+ case kCondNE:
+ // Nothing to do.
+ break;
+ case kCondLT:
+ false_high_cond = kCondGT;
+ break;
+ case kCondLE:
+ true_high_cond = kCondLT;
+ break;
+ case kCondGT:
+ false_high_cond = kCondLT;
+ break;
+ case kCondGE:
+ true_high_cond = kCondGT;
+ break;
+ }
+ if (right.IsConstant()) {
+ int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
+ int32_t val_low = Low32Bits(value);
+ int32_t val_high = High32Bits(value);
+
+ GenerateCompareWithImmediate(left_high, val_high);
+ if (if_cond == kCondNE) {
+ __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ } else if (if_cond == kCondEQ) {
+ __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ } else {
+ __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ }
+ // Must be equal high, so compare the lows.
+ GenerateCompareWithImmediate(left_low, val_low);
+ } else {
+ Register right_high = right.AsRegisterPairHigh<Register>();
+ Register right_low = right.AsRegisterPairLow<Register>();
+
+ __ cmp(left_high, ShifterOperand(right_high));
+ if (if_cond == kCondNE) {
+ __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ } else if (if_cond == kCondEQ) {
+ __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ } else {
+ __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ }
+ // Must be equal high, so compare the lows.
+ __ cmp(left_low, ShifterOperand(right_low));
+ }
+ // The last comparison might be unsigned.
+ __ b(true_label, final_condition);
+}
+
+void InstructionCodeGeneratorARM::GenerateCompareTestAndBranch(HIf* if_instr,
+ HCondition* condition,
+ Label* true_target,
+ Label* false_target,
+ Label* always_true_target) {
+ LocationSummary* locations = condition->GetLocations();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+
+ // We don't want true_target as a nullptr.
+ if (true_target == nullptr) {
+ true_target = always_true_target;
+ }
+ bool falls_through = (false_target == nullptr);
+
+ // FP compares don't like null false_targets.
+ if (false_target == nullptr) {
+ false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+ }
+
+ Primitive::Type type = condition->InputAt(0)->GetType();
+ switch (type) {
+ case Primitive::kPrimLong:
+ GenerateLongComparesAndJumps(condition, true_target, false_target);
+ break;
+ case Primitive::kPrimFloat:
+ __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
+ GenerateFPJumps(condition, true_target, false_target);
+ break;
+ case Primitive::kPrimDouble:
+ __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
+ GenerateFPJumps(condition, true_target, false_target);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected compare type " << type;
+ }
+
+ if (!falls_through) {
+ __ b(false_target);
+ }
+}
+
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
Label* true_target,
Label* false_target,
@@ -1033,25 +1167,27 @@
} else {
// Condition has not been materialized, use its inputs as the
// comparison and its condition as the branch condition.
+ Primitive::Type type =
+ cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
+ // Is this a long or FP comparison that has been folded into the HCondition?
+ if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
+ // Generate the comparison directly.
+ GenerateCompareTestAndBranch(instruction->AsIf(), cond->AsCondition(),
+ true_target, false_target, always_true_target);
+ return;
+ }
+
LocationSummary* locations = cond->GetLocations();
DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
Register left = locations->InAt(0).AsRegister<Register>();
- if (locations->InAt(1).IsRegister()) {
- __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
+ Location right = locations->InAt(1);
+ if (right.IsRegister()) {
+ __ cmp(left, ShifterOperand(right.AsRegister<Register>()));
} else {
- DCHECK(locations->InAt(1).IsConstant());
- HConstant* constant = locations->InAt(1).GetConstant();
- int32_t value = CodeGenerator::GetInt32ValueOf(constant);
- ShifterOperand operand;
- if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
- __ cmp(left, operand);
- } else {
- Register temp = IP;
- __ LoadImmediate(temp, value);
- __ cmp(left, ShifterOperand(temp));
- }
+ DCHECK(right.IsConstant());
+ GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
- __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
+ __ b(true_target, ARMSignedOrFPCondition(cond->AsCondition()->GetCondition()));
}
}
if (false_target != nullptr) {
@@ -1104,37 +1240,88 @@
void LocationsBuilderARM::VisitCondition(HCondition* cond) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
- if (cond->NeedsMaterialization()) {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ // Handle the long/FP comparisons made in instruction simplification.
+ switch (cond->InputAt(0)->GetType()) {
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ if (cond->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ if (cond->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ break;
+
+ default:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ if (cond->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
}
}
void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
- if (!cond->NeedsMaterialization()) return;
- LocationSummary* locations = cond->GetLocations();
- Register left = locations->InAt(0).AsRegister<Register>();
-
- if (locations->InAt(1).IsRegister()) {
- __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
- } else {
- DCHECK(locations->InAt(1).IsConstant());
- int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
- ShifterOperand operand;
- if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
- __ cmp(left, operand);
- } else {
- Register temp = IP;
- __ LoadImmediate(temp, value);
- __ cmp(left, ShifterOperand(temp));
- }
+ if (!cond->NeedsMaterialization()) {
+ return;
}
- __ it(ARMCondition(cond->GetCondition()), kItElse);
- __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
- ARMCondition(cond->GetCondition()));
- __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
- ARMOppositeCondition(cond->GetCondition()));
+
+ LocationSummary* locations = cond->GetLocations();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+ Register out = locations->Out().AsRegister<Register>();
+ Label true_label, false_label;
+
+ switch (cond->InputAt(0)->GetType()) {
+ default: {
+ // Integer case.
+ if (right.IsRegister()) {
+ __ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
+ } else {
+ DCHECK(right.IsConstant());
+ GenerateCompareWithImmediate(left.AsRegister<Register>(),
+ CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+ }
+ __ it(ARMSignedOrFPCondition(cond->GetCondition()), kItElse);
+ __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
+ ARMSignedOrFPCondition(cond->GetCondition()));
+ __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
+ ARMSignedOrFPCondition(cond->GetOppositeCondition()));
+ return;
+ }
+ case Primitive::kPrimLong:
+ GenerateLongComparesAndJumps(cond, &true_label, &false_label);
+ break;
+ case Primitive::kPrimFloat:
+ __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
+ GenerateFPJumps(cond, &true_label, &false_label);
+ break;
+ case Primitive::kPrimDouble:
+ __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
+ GenerateFPJumps(cond, &true_label, &false_label);
+ break;
+ }
+
+ // Convert the jumps into the result.
+ Label done_label;
+
+ // False case: result = 0.
+ __ Bind(&false_label);
+ __ LoadImmediate(out, 0);
+ __ b(&done_label);
+
+ // True case: result = 1.
+ __ Bind(&true_label);
+ __ LoadImmediate(out, 1);
+ __ Bind(&done_label);
}
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
@@ -2913,7 +3100,7 @@
ShifterOperand(right.AsRegisterPairHigh<Register>())); // Signed compare.
__ b(&less, LT);
__ b(&greater, GT);
- // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
+ // Do LoadImmediate before the last `cmp`, as LoadImmediate might affect the status flags.
__ LoadImmediate(out, 0);
__ cmp(left.AsRegisterPairLow<Register>(),
ShifterOperand(right.AsRegisterPairLow<Register>())); // Unsigned compare.
@@ -2936,7 +3123,7 @@
LOG(FATAL) << "Unexpected compare type " << type;
}
__ b(&done, EQ);
- __ b(&less, CC); // CC is for both: unsigned compare for longs and 'less than' for floats.
+ __ b(&less, LO); // LO is for both: unsigned compare for longs and 'less than' for floats.
__ Bind(&greater);
__ LoadImmediate(out, 1);
@@ -3710,7 +3897,7 @@
Register length = locations->InAt(1).AsRegister<Register>();
__ cmp(index, ShifterOperand(length));
- __ b(slow_path->GetEntryLabel(), CS);
+ __ b(slow_path->GetEntryLabel(), HS);
}
void CodeGeneratorARM::MarkGCCard(Register temp,
@@ -4365,6 +4552,18 @@
LOG(FATAL) << "Unreachable";
}
+void LocationsBuilderARM::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorARM::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
#undef __
#undef QUICK_ENTRY_POINT
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 1d10293..53bd766 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -207,6 +207,14 @@
Label* true_target,
Label* false_target,
Label* always_true_target);
+ void GenerateCompareWithImmediate(Register left, int32_t right);
+ void GenerateCompareTestAndBranch(HIf* if_instr,
+ HCondition* condition,
+ Label* true_target,
+ Label* false_target,
+ Label* always_true_target);
+ void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
+ void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivRemByPowerOfTwo(HBinaryOperation* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f64e801..069c9e1 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -657,6 +657,13 @@
Primitive::Type type = instruction->GetType();
DCHECK_NE(type, Primitive::kPrimVoid);
+ if (instruction->IsFakeString()) {
+ // The fake string is an alias for null.
+ DCHECK(IsBaseline());
+ instruction = locations->Out().GetConstant();
+ DCHECK(instruction->IsNullConstant()) << instruction->DebugName();
+ }
+
if (instruction->IsCurrentMethod()) {
MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset));
} else if (locations != nullptr && locations->Out().Equals(location)) {
@@ -905,7 +912,7 @@
(source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
__ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
} else if (source.IsConstant()) {
- DCHECK(unspecified_type || CoherentConstantAndType(source, type));
+ DCHECK(unspecified_type || CoherentConstantAndType(source, type)) << source << " " << type;
UseScratchRegisterScope temps(GetVIXLAssembler());
HConstant* src_cst = source.GetConstant();
CPURegister temp;
@@ -3039,6 +3046,18 @@
LOG(FATAL) << "Unreachable";
}
+void LocationsBuilderARM64::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorARM64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
#undef __
#undef QUICK_ENTRY_POINT
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index aa4fd26..e7d2ec6 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3292,5 +3292,17 @@
VisitCondition(comp);
}
+void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
} // namespace mips64
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index be71443..e15eff9 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -344,7 +344,7 @@
#undef __
#define __ down_cast<X86Assembler*>(GetAssembler())->
-inline Condition X86Condition(IfCondition cond) {
+inline Condition X86SignedCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
case kCondNE: return kNotEqual;
@@ -352,10 +352,22 @@
case kCondLE: return kLessEqual;
case kCondGT: return kGreater;
case kCondGE: return kGreaterEqual;
- default:
- LOG(FATAL) << "Unknown if condition";
}
- return kEqual;
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+inline Condition X86UnsignedOrFPCondition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return kEqual;
+ case kCondNE: return kNotEqual;
+ case kCondLT: return kBelow;
+ case kCondLE: return kBelowEqual;
+ case kCondGT: return kAbove;
+ case kCondGE: return kAboveEqual;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
}
void CodeGeneratorX86::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -892,46 +904,12 @@
void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
- bool gt_bias = cond->IsGtBias();
- IfCondition if_cond = cond->GetCondition();
- Condition ccode = X86Condition(if_cond);
- switch (if_cond) {
- case kCondEQ:
- if (!gt_bias) {
- __ j(kParityEven, false_label);
- }
- break;
- case kCondNE:
- if (!gt_bias) {
- __ j(kParityEven, true_label);
- }
- break;
- case kCondLT:
- if (gt_bias) {
- __ j(kParityEven, false_label);
- }
- ccode = kBelow;
- break;
- case kCondLE:
- if (gt_bias) {
- __ j(kParityEven, false_label);
- }
- ccode = kBelowEqual;
- break;
- case kCondGT:
- if (gt_bias) {
- __ j(kParityEven, true_label);
- }
- ccode = kAbove;
- break;
- case kCondGE:
- if (gt_bias) {
- __ j(kParityEven, true_label);
- }
- ccode = kAboveEqual;
- break;
+ if (cond->IsFPConditionTrueIfNaN()) {
+ __ j(kUnordered, true_label);
+ } else if (cond->IsFPConditionFalseIfNaN()) {
+ __ j(kUnordered, false_label);
}
- __ j(ccode, true_label);
+ __ j(X86UnsignedOrFPCondition(cond->GetCondition()), true_label);
}
void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
@@ -942,43 +920,37 @@
Location right = locations->InAt(1);
IfCondition if_cond = cond->GetCondition();
- Register left_low = left.AsRegisterPairLow<Register>();
Register left_high = left.AsRegisterPairHigh<Register>();
+ Register left_low = left.AsRegisterPairLow<Register>();
IfCondition true_high_cond = if_cond;
IfCondition false_high_cond = cond->GetOppositeCondition();
- Condition final_condition = X86Condition(if_cond);
+ Condition final_condition = X86UnsignedOrFPCondition(if_cond);
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
switch (if_cond) {
case kCondEQ:
- false_high_cond = kCondNE;
- break;
case kCondNE:
- false_high_cond = kCondEQ;
+ // Nothing to do.
break;
case kCondLT:
false_high_cond = kCondGT;
- final_condition = kBelow;
break;
case kCondLE:
true_high_cond = kCondLT;
- final_condition = kBelowEqual;
break;
case kCondGT:
false_high_cond = kCondLT;
- final_condition = kAbove;
break;
case kCondGE:
true_high_cond = kCondGT;
- final_condition = kAboveEqual;
break;
}
if (right.IsConstant()) {
int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
- int32_t val_low = Low32Bits(value);
int32_t val_high = High32Bits(value);
+ int32_t val_low = Low32Bits(value);
if (val_high == 0) {
__ testl(left_high, left_high);
@@ -986,12 +958,12 @@
__ cmpl(left_high, Immediate(val_high));
}
if (if_cond == kCondNE) {
- __ j(X86Condition(true_high_cond), true_label);
+ __ j(X86SignedCondition(true_high_cond), true_label);
} else if (if_cond == kCondEQ) {
- __ j(X86Condition(false_high_cond), false_label);
+ __ j(X86SignedCondition(false_high_cond), false_label);
} else {
- __ j(X86Condition(true_high_cond), true_label);
- __ j(X86Condition(false_high_cond), false_label);
+ __ j(X86SignedCondition(true_high_cond), true_label);
+ __ j(X86SignedCondition(false_high_cond), false_label);
}
// Must be equal high, so compare the lows.
if (val_low == 0) {
@@ -1000,17 +972,17 @@
__ cmpl(left_low, Immediate(val_low));
}
} else {
- Register right_low = right.AsRegisterPairLow<Register>();
Register right_high = right.AsRegisterPairHigh<Register>();
+ Register right_low = right.AsRegisterPairLow<Register>();
__ cmpl(left_high, right_high);
if (if_cond == kCondNE) {
- __ j(X86Condition(true_high_cond), true_label);
+ __ j(X86SignedCondition(true_high_cond), true_label);
} else if (if_cond == kCondEQ) {
- __ j(X86Condition(false_high_cond), false_label);
+ __ j(X86SignedCondition(false_high_cond), false_label);
} else {
- __ j(X86Condition(true_high_cond), true_label);
- __ j(X86Condition(false_high_cond), false_label);
+ __ j(X86SignedCondition(true_high_cond), true_label);
+ __ j(X86SignedCondition(false_high_cond), false_label);
}
// Must be equal high, so compare the lows.
__ cmpl(left_low, right_low);
@@ -1045,12 +1017,10 @@
GenerateLongComparesAndJumps(condition, true_target, false_target);
break;
case Primitive::kPrimFloat:
- DCHECK(right.IsFpuRegister());
__ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
GenerateFPJumps(condition, true_target, false_target);
break;
case Primitive::kPrimDouble:
- DCHECK(right.IsFpuRegister());
__ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
GenerateFPJumps(condition, true_target, false_target);
break;
@@ -1080,7 +1050,7 @@
DCHECK_EQ(cond_value, 0);
}
} else {
- bool materialized =
+ bool is_materialized =
!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
// Moves do not affect the eflags register, so if the condition is
// evaluated just before the if, we don't need to evaluate it
@@ -1089,8 +1059,8 @@
Primitive::Type type = cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
bool eflags_set = cond->IsCondition()
&& cond->AsCondition()->IsBeforeWhenDisregardMoves(instruction)
- && type == Primitive::kPrimInt;
- if (materialized) {
+ && (type != Primitive::kPrimLong && !Primitive::IsFloatingPointType(type));
+ if (is_materialized) {
if (!eflags_set) {
// Materialized condition, compare against 0.
Location lhs = instruction->GetLocations()->InAt(0);
@@ -1101,9 +1071,12 @@
}
__ j(kNotEqual, true_target);
} else {
- __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target);
}
} else {
+ // Condition has not been materialized, use its inputs as the
+ // comparison and its condition as the branch condition.
+
// Is this a long or FP comparison that has been folded into the HCondition?
if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
// Generate the comparison directly.
@@ -1114,6 +1087,7 @@
always_true_target);
return;
}
+
Location lhs = cond->GetLocations()->InAt(0);
Location rhs = cond->GetLocations()->InAt(1);
// LHS is guaranteed to be in a register (see
@@ -1130,7 +1104,7 @@
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target);
}
}
if (false_target != nullptr) {
@@ -1288,7 +1262,7 @@
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ setb(X86Condition(cond->GetCondition()), reg);
+ __ setb(X86SignedCondition(cond->GetCondition()), reg);
return;
}
case Primitive::kPrimLong:
@@ -1307,12 +1281,12 @@
// Convert the jumps into the result.
Label done_label;
- // false case: result = 0;
+ // False case: result = 0.
__ Bind(&false_label);
__ xorl(reg, reg);
__ jmp(&done_label);
- // True case: result = 1
+ // True case: result = 1.
__ Bind(&true_label);
__ movl(reg, Immediate(1));
__ Bind(&done_label);
@@ -4990,6 +4964,18 @@
LOG(FATAL) << "Unreachable";
}
+void LocationsBuilderX86::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorX86::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
#undef __
} // namespace x86
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ddaa60d..a95ce68 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -363,7 +363,7 @@
#undef __
#define __ down_cast<X86_64Assembler*>(GetAssembler())->
-inline Condition X86_64Condition(IfCondition cond) {
+inline Condition X86_64IntegerCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
case kCondNE: return kNotEqual;
@@ -371,10 +371,22 @@
case kCondLE: return kLessEqual;
case kCondGT: return kGreater;
case kCondGE: return kGreaterEqual;
- default:
- LOG(FATAL) << "Unknown if condition";
}
- return kEqual;
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+inline Condition X86_64FPCondition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return kEqual;
+ case kCondNE: return kNotEqual;
+ case kCondLT: return kBelow;
+ case kCondLE: return kBelowEqual;
+ case kCondGT: return kAbove;
+ case kCondGE: return kAboveEqual;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
}
void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
@@ -836,46 +848,12 @@
void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
- bool gt_bias = cond->IsGtBias();
- IfCondition if_cond = cond->GetCondition();
- Condition ccode = X86_64Condition(if_cond);
- switch (if_cond) {
- case kCondEQ:
- if (!gt_bias) {
- __ j(kParityEven, false_label);
- }
- break;
- case kCondNE:
- if (!gt_bias) {
- __ j(kParityEven, true_label);
- }
- break;
- case kCondLT:
- if (gt_bias) {
- __ j(kParityEven, false_label);
- }
- ccode = kBelow;
- break;
- case kCondLE:
- if (gt_bias) {
- __ j(kParityEven, false_label);
- }
- ccode = kBelowEqual;
- break;
- case kCondGT:
- if (gt_bias) {
- __ j(kParityEven, true_label);
- }
- ccode = kAbove;
- break;
- case kCondGE:
- if (gt_bias) {
- __ j(kParityEven, true_label);
- }
- ccode = kAboveEqual;
- break;
+ if (cond->IsFPConditionTrueIfNaN()) {
+ __ j(kUnordered, true_label);
+ } else if (cond->IsFPConditionFalseIfNaN()) {
+ __ j(kUnordered, false_label);
}
- __ j(ccode, true_label);
+ __ j(X86_64FPCondition(cond->GetCondition()), true_label);
}
void InstructionCodeGeneratorX86_64::GenerateCompareTestAndBranch(HIf* if_instr,
@@ -911,7 +889,7 @@
__ cmpq(left_reg, Immediate(static_cast<int32_t>(value)));
}
} else {
- // Value won't fit in an 32-bit integer.
+ // Value won't fit in a 32-bit integer.
__ cmpq(left_reg, codegen_->LiteralInt64Address(value));
}
} else if (right.IsDoubleStackSlot()) {
@@ -919,7 +897,7 @@
} else {
__ cmpq(left_reg, right.AsRegister<CpuRegister>());
}
- __ j(X86_64Condition(condition->GetCondition()), true_target);
+ __ j(X86_64IntegerCondition(condition->GetCondition()), true_target);
break;
}
case Primitive::kPrimFloat: {
@@ -978,7 +956,7 @@
DCHECK_EQ(cond_value, 0);
}
} else {
- bool materialized =
+ bool is_materialized =
!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
// Moves do not affect the eflags register, so if the condition is
// evaluated just before the if, we don't need to evaluate it
@@ -989,7 +967,7 @@
&& cond->AsCondition()->IsBeforeWhenDisregardMoves(instruction)
&& !Primitive::IsFloatingPointType(type);
- if (materialized) {
+ if (is_materialized) {
if (!eflags_set) {
// Materialized condition, compare against 0.
Location lhs = instruction->GetLocations()->InAt(0);
@@ -1001,16 +979,20 @@
}
__ j(kNotEqual, true_target);
} else {
- __ j(X86_64Condition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86_64IntegerCondition(cond->AsCondition()->GetCondition()), true_target);
}
} else {
+ // Condition has not been materialized, use its inputs as the
+ // comparison and its condition as the branch condition.
+
// Is this a long or FP comparison that has been folded into the HCondition?
if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
- // Generate the comparison directly
+ // Generate the comparison directly.
GenerateCompareTestAndBranch(instruction->AsIf(), cond->AsCondition(),
true_target, false_target, always_true_target);
return;
}
+
Location lhs = cond->GetLocations()->InAt(0);
Location rhs = cond->GetLocations()->InAt(1);
if (rhs.IsRegister()) {
@@ -1026,7 +1008,7 @@
__ cmpl(lhs.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), rhs.GetStackIndex()));
}
- __ j(X86_64Condition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86_64IntegerCondition(cond->AsCondition()->GetCondition()), true_target);
}
}
if (false_target != nullptr) {
@@ -1175,7 +1157,7 @@
} else {
__ cmpl(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
}
- __ setcc(X86_64Condition(cond->GetCondition()), reg);
+ __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg);
return;
case Primitive::kPrimLong:
// Clear output register: setcc only sets the low byte.
@@ -1198,7 +1180,7 @@
} else {
__ cmpq(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
}
- __ setcc(X86_64Condition(cond->GetCondition()), reg);
+ __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg);
return;
case Primitive::kPrimFloat: {
XmmRegister lhs_reg = lhs.AsFpuRegister<XmmRegister>();
@@ -1231,12 +1213,12 @@
// Convert the jumps into the result.
Label done_label;
- // false case: result = 0;
+ // False case: result = 0.
__ Bind(&false_label);
__ xorl(reg, reg);
__ jmp(&done_label);
- // True case: result = 1
+ // True case: result = 1.
__ Bind(&true_label);
__ movl(reg, Immediate(1));
__ Bind(&done_label);
@@ -4792,6 +4774,18 @@
LOG(FATAL) << "Unreachable";
}
+void LocationsBuilderX86_64::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorX86_64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
void CodeGeneratorX86_64::Load64BitValue(CpuRegister dest, int64_t value) {
if (value == 0) {
__ xorl(dest, dest);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 337cf5b..c86d797 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -70,6 +70,7 @@
void VisitUShr(HUShr* instruction) OVERRIDE;
void VisitXor(HXor* instruction) OVERRIDE;
void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
+ void VisitFakeString(HFakeString* fake_string) OVERRIDE;
bool IsDominatedByInputNullCheck(HInstruction* instr);
OptimizingCompilerStats* stats_;
@@ -517,10 +518,10 @@
void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) {
// Try to fold an HCompare into this HCondition.
- // This simplification is currently only supported on x86 and x86_64.
- // TODO: Implement it for ARM, ARM64 and MIPS64.
+ // This simplification is currently only supported on x86, x86_64 and ARM.
+ // TODO: Implement it for ARM64 and MIPS64.
InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- if (instruction_set != kX86 && instruction_set != kX86_64) {
+ if (instruction_set != kX86 && instruction_set != kX86_64 && instruction_set != kThumb2) {
return;
}
@@ -903,4 +904,46 @@
}
}
+void InstructionSimplifierVisitor::VisitFakeString(HFakeString* instruction) {
+ HInstruction* actual_string = nullptr;
+
+ // Find the string we need to replace this instruction with. The actual string is
+ // the return value of a StringFactory call.
+ for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
+ HInstruction* use = it.Current()->GetUser();
+ if (use->IsInvokeStaticOrDirect()
+ && use->AsInvokeStaticOrDirect()->IsStringFactoryFor(instruction)) {
+ use->AsInvokeStaticOrDirect()->RemoveFakeStringArgumentAsLastInput();
+ actual_string = use;
+ break;
+ }
+ }
+
+ // Check that there is no other instruction that thinks it is the factory for that string.
+ if (kIsDebugBuild) {
+ CHECK(actual_string != nullptr);
+ for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
+ HInstruction* use = it.Current()->GetUser();
+ if (use->IsInvokeStaticOrDirect()) {
+ CHECK(!use->AsInvokeStaticOrDirect()->IsStringFactoryFor(instruction));
+ }
+ }
+ }
+
+ // We need to remove any environment uses of the fake string that are not dominated by
+ // `actual_string` to null.
+ for (HUseIterator<HEnvironment*> it(instruction->GetEnvUses()); !it.Done(); it.Advance()) {
+ HEnvironment* environment = it.Current()->GetUser();
+ if (!actual_string->StrictlyDominates(environment->GetHolder())) {
+ environment->RemoveAsUserOfInput(it.Current()->GetIndex());
+ environment->SetRawEnvAt(it.Current()->GetIndex(), nullptr);
+ }
+ }
+
+ // Only uses dominated by `actual_string` must remain. We can safely replace and remove
+ // `instruction`.
+ instruction->ReplaceWith(actual_string);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+}
+
} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index faee2dd..cc4b6f6 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -31,7 +31,7 @@
InstructionSimplifier(HGraph* graph,
OptimizingCompilerStats* stats = nullptr,
const char* name = kInstructionSimplifierPassName)
- : HOptimization(graph, name, stats) {}
+ : HOptimization(graph, name, stats) {}
static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 59255d1..8546a10 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -38,6 +38,7 @@
class HCurrentMethod;
class HDoubleConstant;
class HEnvironment;
+class HFakeString;
class HFloatConstant;
class HGraphBuilder;
class HGraphVisitor;
@@ -914,6 +915,7 @@
M(DoubleConstant, Constant) \
M(Equal, Condition) \
M(Exit, Instruction) \
+ M(FakeString, Instruction) \
M(FloatConstant, Constant) \
M(Goto, Instruction) \
M(GreaterThan, Condition) \
@@ -1339,8 +1341,7 @@
const uint32_t dex_pc_;
const InvokeType invoke_type_;
- // The instruction that holds this environment. Only used in debug mode
- // to ensure the graph is consistent.
+ // The instruction that holds this environment.
HInstruction* const holder_;
friend class HInstruction;
@@ -2149,7 +2150,7 @@
// The comparison bias applies for floating point operations and indicates how NaN
// comparisons are treated:
-enum ComparisonBias {
+enum class ComparisonBias {
kNoBias, // bias is not applicable (i.e. for long operation)
kGtBias, // return 1 for NaN comparisons
kLtBias, // return -1 for NaN comparisons
@@ -2160,7 +2161,7 @@
HCondition(HInstruction* first, HInstruction* second)
: HBinaryOperation(Primitive::kPrimBoolean, first, second),
needs_materialization_(true),
- bias_(kNoBias) {}
+ bias_(ComparisonBias::kNoBias) {}
bool NeedsMaterialization() const { return needs_materialization_; }
void ClearNeedsMaterialization() { needs_materialization_ = false; }
@@ -2175,7 +2176,7 @@
virtual IfCondition GetOppositeCondition() const = 0;
- bool IsGtBias() { return bias_ == kGtBias; }
+ bool IsGtBias() const { return bias_ == ComparisonBias::kGtBias; }
void SetBias(ComparisonBias bias) { bias_ = bias; }
@@ -2183,6 +2184,18 @@
return bias_ == other->AsCondition()->bias_;
}
+ bool IsFPConditionTrueIfNaN() const {
+ DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType()));
+ IfCondition if_cond = GetCondition();
+ return IsGtBias() ? ((if_cond == kCondGT) || (if_cond == kCondGE)) : (if_cond == kCondNE);
+ }
+
+ bool IsFPConditionFalseIfNaN() const {
+ DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType()));
+ IfCondition if_cond = GetCondition();
+ return IsGtBias() ? ((if_cond == kCondLT) || (if_cond == kCondLE)) : (if_cond == kCondEQ);
+ }
+
private:
// For register allocation purposes, returns whether this instruction needs to be
// materialized (that is, not just be in the processor flags).
@@ -2390,7 +2403,7 @@
ComparisonBias GetBias() const { return bias_; }
- bool IsGtBias() { return bias_ == kGtBias; }
+ bool IsGtBias() { return bias_ == ComparisonBias::kGtBias; }
uint32_t GetDexPc() const { return dex_pc_; }
@@ -2730,9 +2743,11 @@
ClinitCheckRequirement clinit_check_requirement)
: HInvoke(arena,
number_of_arguments,
- // There is one extra argument for the HCurrentMethod node, and
- // potentially one other if the clinit check is explicit.
- clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 2u : 1u,
+ // There is one extra argument for the HCurrentMethod node, and
+ // potentially one other if the clinit check is explicit, and one other
+ // if the method is a string factory.
+ 1u + (clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u)
+ + (string_init_offset ? 1u : 0u),
return_type,
dex_pc,
dex_method_index,
@@ -2777,6 +2792,23 @@
DCHECK(IsStaticWithImplicitClinitCheck());
}
+ bool IsStringFactoryFor(HFakeString* str) const {
+ if (!IsStringInit()) return false;
+ // +1 for the current method.
+ if (InputCount() == (number_of_arguments_ + 1)) return false;
+ return InputAt(InputCount() - 1)->AsFakeString() == str;
+ }
+
+ void RemoveFakeStringArgumentAsLastInput() {
+ DCHECK(IsStringInit());
+ size_t last_input_index = InputCount() - 1;
+ HInstruction* last_input = InputAt(last_input_index);
+ DCHECK(last_input != nullptr);
+ DCHECK(last_input->IsFakeString()) << last_input->DebugName();
+ RemoveAsUserOfInput(last_input_index);
+ inputs_.DeleteAt(last_input_index);
+ }
+
// Is this a call to a static method whose declaring class has an
// explicit intialization check in the graph?
bool IsStaticWithExplicitClinitCheck() const {
@@ -4159,6 +4191,25 @@
DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
};
+/**
+ * A HInstruction used as a marker for the replacement of new + <init>
+ * of a String to a call to a StringFactory. Only baseline will see
+ * the node at code generation, where it will be treated as null.
+ * When compiling non-baseline, `HFakeString` instructions are being removed
+ * in the instruction simplifier.
+ */
+class HFakeString : public HTemplateInstruction<0> {
+ public:
+ HFakeString() : HTemplateInstruction(SideEffects::None()) {}
+
+ Primitive::Type GetType() const OVERRIDE { return Primitive::kPrimNot; }
+
+ DECLARE_INSTRUCTION(FakeString);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HFakeString);
+};
+
class MoveOperands : public ArenaObject<kArenaAllocMisc> {
public:
MoveOperands(Location source,
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1e51530..4568a46 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -627,7 +627,7 @@
// `run_optimizations_` is set explicitly (either through a compiler filter
// or the debuggable flag). If it is set, we can run baseline. Otherwise, we fall back
// to Quick.
- bool can_use_baseline = !run_optimizations_;
+ bool can_use_baseline = !run_optimizations_ && builder.CanUseBaselineForStringInit();
if (run_optimizations_ && can_optimize && can_allocate_registers) {
VLOG(compiler) << "Optimizing " << method_name;
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 1513296..6b4daed 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -32,8 +32,9 @@
// Defines constants and accessor classes to assemble, disassemble and
// simulate ARM instructions.
//
-// Section references in the code refer to the "ARM Architecture Reference
-// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf)
+// Section references in the code refer to the "ARM Architecture
+// Reference Manual ARMv7-A and ARMv7-R edition", issue C.b (24 July
+// 2012).
//
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
@@ -97,26 +98,32 @@
std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
-// Values for the condition field as defined in section A3.2.
+// Values for the condition field as defined in Table A8-1 "Condition
+// codes" (refer to Section A8.3 "Conditional execution").
enum Condition { // private marker to avoid generate-operator-out.py from processing.
kNoCondition = -1,
- EQ = 0, // equal
- NE = 1, // not equal
- CS = 2, // carry set/unsigned higher or same
- CC = 3, // carry clear/unsigned lower
- MI = 4, // minus/negative
- PL = 5, // plus/positive or zero
- VS = 6, // overflow
- VC = 7, // no overflow
- HI = 8, // unsigned higher
- LS = 9, // unsigned lower or same
- GE = 10, // signed greater than or equal
- LT = 11, // signed less than
- GT = 12, // signed greater than
- LE = 13, // signed less than or equal
- AL = 14, // always (unconditional)
- kSpecialCondition = 15, // special condition (refer to section A3.2.1)
+ // Meaning (integer) | Meaning (floating-point)
+ // ---------------------------------------+-----------------------------------------
+ EQ = 0, // Equal | Equal
+ NE = 1, // Not equal | Not equal, or unordered
+ CS = 2, // Carry set | Greater than, equal, or unordered
+ CC = 3, // Carry clear | Less than
+ MI = 4, // Minus, negative | Less than
+ PL = 5, // Plus, positive or zero | Greater than, equal, or unordered
+ VS = 6, // Overflow | Unordered (i.e. at least one NaN operand)
+ VC = 7, // No overflow | Not unordered
+ HI = 8, // Unsigned higher | Greater than, or unordered
+ LS = 9, // Unsigned lower or same | Less than or equal
+ GE = 10, // Signed greater than or equal | Greater than or equal
+ LT = 11, // Signed less than | Less than, or unordered
+ GT = 12, // Signed greater than | Greater than
+ LE = 13, // Signed less than or equal | Less than, equal, or unordered
+ AL = 14, // Always (unconditional) | Always (unconditional)
+ kSpecialCondition = 15, // Special condition (refer to Section A8.3 "Conditional execution").
kMaxCondition = 16,
+
+ HS = CS, // HS (unsigned higher or same) is a synonym for CS.
+ LO = CC // LO (unsigned lower) is a synonym for CC.
};
std::ostream& operator<<(std::ostream& os, const Condition& rhs);
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index f55dccd..84c465f 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -52,7 +52,7 @@
struct Options gOptions;
/*
- * Output file. Defaults to stdout, but tests can modify.
+ * Output file. Defaults to stdout.
*/
FILE* gOutFile = stdout;
@@ -63,8 +63,6 @@
typedef uint16_t u2;
typedef uint32_t u4;
typedef uint64_t u8;
-typedef int8_t s1;
-typedef int16_t s2;
typedef int32_t s4;
typedef int64_t s8;
@@ -1274,23 +1272,14 @@
return -1;
}
- // Determine if opening file yielded a single dex file. On failure,
- // the parse error message of the original dexdump utility is shown.
- //
- // TODO(ajcbik): this restriction is not really needed, but kept
- // for now to stay close to original dexdump; we can
- // later relax this!
- //
- if (dex_files.size() != 1) {
- fprintf(stderr, "ERROR: DEX parse failed\n");
- return -1;
- }
-
- // Success. Either report checksum verification or process dex file.
+ // Success. Either report checksum verification or process
+ // all dex files found in given file.
if (gOptions.checksumOnly) {
fprintf(gOutFile, "Checksum verified\n");
} else {
- processDexFile(fileName, dex_files[0].get());
+ for (size_t i = 0; i < dex_files.size(); i++) {
+ processDexFile(fileName, dex_files[i].get());
+ }
}
return 0;
}
diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc
index 756f879..9be0922 100644
--- a/dexdump/dexdump_main.cc
+++ b/dexdump/dexdump_main.cc
@@ -108,8 +108,8 @@
default:
wantUsage = true;
break;
- }
- }
+ } // switch
+ } // while
// Detect early problems.
if (optind == argc) {
@@ -138,7 +138,7 @@
int result = 0;
while (optind < argc) {
result |= processFile(argv[optind++]);
- }
+ } // while
return result != 0;
}
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index d7c0e4c..d8fd242 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -51,11 +51,8 @@
* Data types that match the definitions in the VM specification.
*/
typedef uint8_t u1;
-typedef uint16_t u2;
typedef uint32_t u4;
typedef uint64_t u8;
-typedef int32_t s4;
-typedef int64_t s8;
/*
* Returns a newly-allocated string for the "dot version" of the class
@@ -193,23 +190,15 @@
return -1;
}
- // Determine if opening file yielded a single dex file.
- //
- // TODO(ajcbik): this restriction is not really needed, but kept
- // for now to stay close to original dexlist; we can
- // later relax this!
- //
- if (dex_files.size() != 1) {
- fprintf(stderr, "ERROR: DEX parse failed\n");
- return -1;
- }
- const DexFile* pDexFile = dex_files[0].get();
-
- // Success. Iterate over all classes.
+ // Success. Iterate over all dex files found in given file.
fprintf(gOutFile, "#%s\n", fileName);
- const u4 classDefsSize = pDexFile->GetHeader().class_defs_size_;
- for (u4 idx = 0; idx < classDefsSize; idx++) {
- dumpClass(pDexFile, idx);
+ for (size_t i = 0; i < dex_files.size(); i++) {
+ // Iterate over all classes in one dex file.
+ const DexFile* pDexFile = dex_files[i].get();
+ const u4 classDefsSize = pDexFile->GetHeader().class_defs_size_;
+ for (u4 idx = 0; idx < classDefsSize; idx++) {
+ dumpClass(pDexFile, idx);
+ }
}
return 0;
}
@@ -246,7 +235,7 @@
gOptions.outputFileName = optarg;
break;
case 'm':
- // If -m X.Y.Z is given, then find all instances of the
+ // If -m x.y.z is given, then find all instances of the
// fully-qualified method name. This isn't really what
// dexlist is for, but it's easy to do it here.
{
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 8dde547..82452ba 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -599,6 +599,9 @@
os << std::flush;
return false;
}
+
+ VariableIndentationOutputStream vios(&os);
+ ScopedIndentation indent1(&vios);
for (size_t class_def_index = 0;
class_def_index < dex_file->NumClassDefs();
class_def_index++) {
@@ -617,10 +620,8 @@
<< " (" << oat_class.GetStatus() << ")"
<< " (" << oat_class.GetType() << ")\n";
// TODO: include bitmap here if type is kOatClassSomeCompiled?
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indented_os(&indent_filter);
if (options_.list_classes_) continue;
- if (!DumpOatClass(indented_os, oat_class, *(dex_file.get()), class_def, &stop_analysis)) {
+ if (!DumpOatClass(&vios, oat_class, *(dex_file.get()), class_def, &stop_analysis)) {
success = false;
}
if (stop_analysis) {
@@ -720,20 +721,21 @@
}
}
- bool DumpOatClass(std::ostream& os, const OatFile::OatClass& oat_class, const DexFile& dex_file,
+ bool DumpOatClass(VariableIndentationOutputStream* vios,
+ const OatFile::OatClass& oat_class, const DexFile& dex_file,
const DexFile::ClassDef& class_def, bool* stop_analysis) {
bool success = true;
bool addr_found = false;
const uint8_t* class_data = dex_file.GetClassData(class_def);
if (class_data == nullptr) { // empty class such as a marker interface?
- os << std::flush;
+ vios->Stream() << std::flush;
return success;
}
ClassDataItemIterator it(dex_file, class_data);
SkipAllFields(it);
uint32_t class_method_index = 0;
while (it.HasNextDirectMethod()) {
- if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file,
+ if (!DumpOatMethod(vios, class_def, class_method_index, oat_class, dex_file,
it.GetMemberIndex(), it.GetMethodCodeItem(),
it.GetRawMemberAccessFlags(), &addr_found)) {
success = false;
@@ -746,7 +748,7 @@
it.Next();
}
while (it.HasNextVirtualMethod()) {
- if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file,
+ if (!DumpOatMethod(vios, class_def, class_method_index, oat_class, dex_file,
it.GetMemberIndex(), it.GetMethodCodeItem(),
it.GetRawMemberAccessFlags(), &addr_found)) {
success = false;
@@ -759,7 +761,7 @@
it.Next();
}
DCHECK(!it.HasNext());
- os << std::flush;
+ vios->Stream() << std::flush;
return success;
}
@@ -768,7 +770,8 @@
// When this was picked, the largest arm method was 55,256 bytes and arm64 was 50,412 bytes.
static constexpr uint32_t kMaxCodeSize = 100 * 1000;
- bool DumpOatMethod(std::ostream& os, const DexFile::ClassDef& class_def,
+ bool DumpOatMethod(VariableIndentationOutputStream* vios,
+ const DexFile::ClassDef& class_def,
uint32_t class_method_index,
const OatFile::OatClass& oat_class, const DexFile& dex_file,
uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
@@ -782,16 +785,11 @@
}
std::string pretty_method = PrettyMethod(dex_method_idx, dex_file, true);
- os << StringPrintf("%d: %s (dex_method_idx=%d)\n",
- class_method_index, pretty_method.c_str(),
- dex_method_idx);
+ vios->Stream() << StringPrintf("%d: %s (dex_method_idx=%d)\n",
+ class_method_index, pretty_method.c_str(),
+ dex_method_idx);
if (options_.list_methods_) return success;
- Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::unique_ptr<std::ostream> indent1_os(new std::ostream(&indent1_filter));
- Indenter indent2_filter(indent1_os->rdbuf(), kIndentChar, kIndentBy1Count);
- std::unique_ptr<std::ostream> indent2_os(new std::ostream(&indent2_filter));
-
uint32_t oat_method_offsets_offset = oat_class.GetOatMethodOffsetsOffset(class_method_index);
const OatMethodOffsets* oat_method_offsets = oat_class.GetOatMethodOffsets(class_method_index);
const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_index);
@@ -805,137 +803,147 @@
}
}
+ // Everything below is indented at least once.
+ ScopedIndentation indent1(vios);
+
{
- *indent1_os << "DEX CODE:\n";
- DumpDexCode(*indent2_os, dex_file, code_item);
+ vios->Stream() << "DEX CODE:\n";
+ ScopedIndentation indent2(vios);
+ DumpDexCode(vios->Stream(), dex_file, code_item);
}
std::unique_ptr<verifier::MethodVerifier> verifier;
if (Runtime::Current() != nullptr) {
- *indent1_os << "VERIFIER TYPE ANALYSIS:\n";
- verifier.reset(DumpVerifier(*indent2_os, dex_method_idx, &dex_file, class_def, code_item,
+ vios->Stream() << "VERIFIER TYPE ANALYSIS:\n";
+ ScopedIndentation indent2(vios);
+ verifier.reset(DumpVerifier(vios,
+ dex_method_idx, &dex_file, class_def, code_item,
method_access_flags));
}
{
- *indent1_os << "OatMethodOffsets ";
+ vios->Stream() << "OatMethodOffsets ";
if (options_.absolute_addresses_) {
- *indent1_os << StringPrintf("%p ", oat_method_offsets);
+ vios->Stream() << StringPrintf("%p ", oat_method_offsets);
}
- *indent1_os << StringPrintf("(offset=0x%08x)\n", oat_method_offsets_offset);
+ vios->Stream() << StringPrintf("(offset=0x%08x)\n", oat_method_offsets_offset);
if (oat_method_offsets_offset > oat_file_.Size()) {
- *indent1_os << StringPrintf(
+ vios->Stream() << StringPrintf(
"WARNING: oat method offsets offset 0x%08x is past end of file 0x%08zx.\n",
oat_method_offsets_offset, oat_file_.Size());
// If we can't read OatMethodOffsets, the rest of the data is dangerous to read.
- os << std::flush;
+ vios->Stream() << std::flush;
return false;
}
- *indent2_os << StringPrintf("code_offset: 0x%08x ", code_offset);
+ ScopedIndentation indent2(vios);
+ vios->Stream() << StringPrintf("code_offset: 0x%08x ", code_offset);
uint32_t aligned_code_begin = AlignCodeOffset(oat_method.GetCodeOffset());
if (aligned_code_begin > oat_file_.Size()) {
- *indent2_os << StringPrintf("WARNING: "
- "code offset 0x%08x is past end of file 0x%08zx.\n",
- aligned_code_begin, oat_file_.Size());
+ vios->Stream() << StringPrintf("WARNING: "
+ "code offset 0x%08x is past end of file 0x%08zx.\n",
+ aligned_code_begin, oat_file_.Size());
success = false;
}
- *indent2_os << "\n";
+ vios->Stream() << "\n";
- *indent2_os << "gc_map: ";
+ vios->Stream() << "gc_map: ";
if (options_.absolute_addresses_) {
- *indent2_os << StringPrintf("%p ", oat_method.GetGcMap());
+ vios->Stream() << StringPrintf("%p ", oat_method.GetGcMap());
}
uint32_t gc_map_offset = oat_method.GetGcMapOffset();
- *indent2_os << StringPrintf("(offset=0x%08x)\n", gc_map_offset);
+ vios->Stream() << StringPrintf("(offset=0x%08x)\n", gc_map_offset);
if (gc_map_offset > oat_file_.Size()) {
- *indent2_os << StringPrintf("WARNING: "
- "gc map table offset 0x%08x is past end of file 0x%08zx.\n",
- gc_map_offset, oat_file_.Size());
+ vios->Stream() << StringPrintf("WARNING: "
+ "gc map table offset 0x%08x is past end of file 0x%08zx.\n",
+ gc_map_offset, oat_file_.Size());
success = false;
} else if (options_.dump_raw_gc_map_) {
- Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent3_os(&indent3_filter);
- DumpGcMap(indent3_os, oat_method, code_item);
+ ScopedIndentation indent3(vios);
+ DumpGcMap(vios->Stream(), oat_method, code_item);
}
}
{
- *indent1_os << "OatQuickMethodHeader ";
+ vios->Stream() << "OatQuickMethodHeader ";
uint32_t method_header_offset = oat_method.GetOatQuickMethodHeaderOffset();
const OatQuickMethodHeader* method_header = oat_method.GetOatQuickMethodHeader();
if (options_.absolute_addresses_) {
- *indent1_os << StringPrintf("%p ", method_header);
+ vios->Stream() << StringPrintf("%p ", method_header);
}
- *indent1_os << StringPrintf("(offset=0x%08x)\n", method_header_offset);
+ vios->Stream() << StringPrintf("(offset=0x%08x)\n", method_header_offset);
if (method_header_offset > oat_file_.Size()) {
- *indent1_os << StringPrintf(
+ vios->Stream() << StringPrintf(
"WARNING: oat quick method header offset 0x%08x is past end of file 0x%08zx.\n",
method_header_offset, oat_file_.Size());
// If we can't read the OatQuickMethodHeader, the rest of the data is dangerous to read.
- os << std::flush;
+ vios->Stream() << std::flush;
return false;
}
- *indent2_os << "mapping_table: ";
+ ScopedIndentation indent2(vios);
+ vios->Stream() << "mapping_table: ";
if (options_.absolute_addresses_) {
- *indent2_os << StringPrintf("%p ", oat_method.GetMappingTable());
+ vios->Stream() << StringPrintf("%p ", oat_method.GetMappingTable());
}
uint32_t mapping_table_offset = oat_method.GetMappingTableOffset();
- *indent2_os << StringPrintf("(offset=0x%08x)\n", oat_method.GetMappingTableOffset());
+ vios->Stream() << StringPrintf("(offset=0x%08x)\n", oat_method.GetMappingTableOffset());
if (mapping_table_offset > oat_file_.Size()) {
- *indent2_os << StringPrintf("WARNING: "
- "mapping table offset 0x%08x is past end of file 0x%08zx. "
- "mapping table offset was loaded from offset 0x%08x.\n",
- mapping_table_offset, oat_file_.Size(),
- oat_method.GetMappingTableOffsetOffset());
+ vios->Stream() << StringPrintf("WARNING: "
+ "mapping table offset 0x%08x is past end of file 0x%08zx. "
+ "mapping table offset was loaded from offset 0x%08x.\n",
+ mapping_table_offset, oat_file_.Size(),
+ oat_method.GetMappingTableOffsetOffset());
success = false;
} else if (options_.dump_raw_mapping_table_) {
- Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent3_os(&indent3_filter);
- DumpMappingTable(indent3_os, oat_method);
+ ScopedIndentation indent3(vios);
+ DumpMappingTable(vios, oat_method);
}
- *indent2_os << "vmap_table: ";
+ vios->Stream() << "vmap_table: ";
if (options_.absolute_addresses_) {
- *indent2_os << StringPrintf("%p ", oat_method.GetVmapTable());
+ vios->Stream() << StringPrintf("%p ", oat_method.GetVmapTable());
}
uint32_t vmap_table_offset = oat_method.GetVmapTableOffset();
- *indent2_os << StringPrintf("(offset=0x%08x)\n", vmap_table_offset);
+ vios->Stream() << StringPrintf("(offset=0x%08x)\n", vmap_table_offset);
if (vmap_table_offset > oat_file_.Size()) {
- *indent2_os << StringPrintf("WARNING: "
- "vmap table offset 0x%08x is past end of file 0x%08zx. "
- "vmap table offset was loaded from offset 0x%08x.\n",
- vmap_table_offset, oat_file_.Size(),
- oat_method.GetVmapTableOffsetOffset());
+ vios->Stream() << StringPrintf("WARNING: "
+ "vmap table offset 0x%08x is past end of file 0x%08zx. "
+ "vmap table offset was loaded from offset 0x%08x.\n",
+ vmap_table_offset, oat_file_.Size(),
+ oat_method.GetVmapTableOffsetOffset());
success = false;
} else if (options_.dump_vmap_) {
- DumpVmapData(*indent2_os, oat_method, code_item);
+ DumpVmapData(vios, oat_method, code_item);
}
}
{
- *indent1_os << "QuickMethodFrameInfo\n";
+ vios->Stream() << "QuickMethodFrameInfo\n";
- *indent2_os << StringPrintf("frame_size_in_bytes: %zd\n", oat_method.GetFrameSizeInBytes());
- *indent2_os << StringPrintf("core_spill_mask: 0x%08x ", oat_method.GetCoreSpillMask());
- DumpSpillMask(*indent2_os, oat_method.GetCoreSpillMask(), false);
- *indent2_os << "\n";
- *indent2_os << StringPrintf("fp_spill_mask: 0x%08x ", oat_method.GetFpSpillMask());
- DumpSpillMask(*indent2_os, oat_method.GetFpSpillMask(), true);
- *indent2_os << "\n";
+ ScopedIndentation indent2(vios);
+ vios->Stream()
+ << StringPrintf("frame_size_in_bytes: %zd\n", oat_method.GetFrameSizeInBytes());
+ vios->Stream() << StringPrintf("core_spill_mask: 0x%08x ", oat_method.GetCoreSpillMask());
+ DumpSpillMask(vios->Stream(), oat_method.GetCoreSpillMask(), false);
+ vios->Stream() << "\n";
+ vios->Stream() << StringPrintf("fp_spill_mask: 0x%08x ", oat_method.GetFpSpillMask());
+ DumpSpillMask(vios->Stream(), oat_method.GetFpSpillMask(), true);
+ vios->Stream() << "\n";
}
{
- // Based on spill masks from QuickMethodFrameInfo so placed
- // after it is dumped, but useful for understanding quick
- // code, so dumped here.
- DumpVregLocations(*indent2_os, oat_method, code_item);
+ // Based on spill masks from QuickMethodFrameInfo so placed
+ // after it is dumped, but useful for understanding quick
+ // code, so dumped here.
+ ScopedIndentation indent2(vios);
+ DumpVregLocations(vios->Stream(), oat_method, code_item);
}
{
- *indent1_os << "CODE: ";
+ vios->Stream() << "CODE: ";
uint32_t code_size_offset = oat_method.GetQuickCodeSizeOffset();
if (code_size_offset > oat_file_.Size()) {
- *indent2_os << StringPrintf("WARNING: "
- "code size offset 0x%08x is past end of file 0x%08zx.",
- code_size_offset, oat_file_.Size());
+ ScopedIndentation indent2(vios);
+ vios->Stream() << StringPrintf("WARNING: "
+ "code size offset 0x%08x is past end of file 0x%08zx.",
+ code_size_offset, oat_file_.Size());
success = false;
} else {
const void* code = oat_method.GetQuickCode();
@@ -943,49 +951,52 @@
uint64_t aligned_code_end = aligned_code_begin + code_size;
if (options_.absolute_addresses_) {
- *indent1_os << StringPrintf("%p ", code);
+ vios->Stream() << StringPrintf("%p ", code);
}
- *indent1_os << StringPrintf("(code_offset=0x%08x size_offset=0x%08x size=%u)%s\n",
- code_offset,
- code_size_offset,
- code_size,
- code != nullptr ? "..." : "");
+ vios->Stream() << StringPrintf("(code_offset=0x%08x size_offset=0x%08x size=%u)%s\n",
+ code_offset,
+ code_size_offset,
+ code_size,
+ code != nullptr ? "..." : "");
+ ScopedIndentation indent2(vios);
if (aligned_code_begin > oat_file_.Size()) {
- *indent2_os << StringPrintf("WARNING: "
- "start of code at 0x%08x is past end of file 0x%08zx.",
- aligned_code_begin, oat_file_.Size());
+ vios->Stream() << StringPrintf("WARNING: "
+ "start of code at 0x%08x is past end of file 0x%08zx.",
+ aligned_code_begin, oat_file_.Size());
success = false;
} else if (aligned_code_end > oat_file_.Size()) {
- *indent2_os << StringPrintf("WARNING: "
- "end of code at 0x%08" PRIx64 " is past end of file 0x%08zx. "
- "code size is 0x%08x loaded from offset 0x%08x.\n",
- aligned_code_end, oat_file_.Size(),
- code_size, code_size_offset);
+ vios->Stream() << StringPrintf(
+ "WARNING: "
+ "end of code at 0x%08" PRIx64 " is past end of file 0x%08zx. "
+ "code size is 0x%08x loaded from offset 0x%08x.\n",
+ aligned_code_end, oat_file_.Size(),
+ code_size, code_size_offset);
success = false;
if (options_.disassemble_code_) {
if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
- DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes);
+ DumpCode(vios, verifier.get(), oat_method, code_item, true, kPrologueBytes);
}
}
} else if (code_size > kMaxCodeSize) {
- *indent2_os << StringPrintf("WARNING: "
- "code size %d is bigger than max expected threshold of %d. "
- "code size is 0x%08x loaded from offset 0x%08x.\n",
- code_size, kMaxCodeSize,
- code_size, code_size_offset);
+ vios->Stream() << StringPrintf(
+ "WARNING: "
+ "code size %d is bigger than max expected threshold of %d. "
+ "code size is 0x%08x loaded from offset 0x%08x.\n",
+ code_size, kMaxCodeSize,
+ code_size, code_size_offset);
success = false;
if (options_.disassemble_code_) {
if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
- DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes);
+ DumpCode(vios, verifier.get(), oat_method, code_item, true, kPrologueBytes);
}
}
} else if (options_.disassemble_code_) {
- DumpCode(*indent2_os, verifier.get(), oat_method, code_item, !success, 0);
+ DumpCode(vios, verifier.get(), oat_method, code_item, !success, 0);
}
}
}
- os << std::flush;
+ vios->Stream() << std::flush;
return success;
}
@@ -1013,7 +1024,7 @@
}
// Display data stored at the the vmap offset of an oat method.
- void DumpVmapData(std::ostream& os,
+ void DumpVmapData(VariableIndentationOutputStream* vios,
const OatFile::OatMethod& oat_method,
const DexFile::CodeItem* code_item) {
if (IsMethodGeneratedByOptimizingCompiler(oat_method, code_item)) {
@@ -1022,24 +1033,25 @@
if (raw_code_info != nullptr) {
CodeInfo code_info(raw_code_info);
DCHECK(code_item != nullptr);
- DumpCodeInfo(os, code_info, oat_method, *code_item);
+ ScopedIndentation indent1(vios);
+ DumpCodeInfo(vios, code_info, oat_method, *code_item);
}
} else {
// Otherwise, display the vmap table.
const uint8_t* raw_table = oat_method.GetVmapTable();
if (raw_table != nullptr) {
VmapTable vmap_table(raw_table);
- DumpVmapTable(os, oat_method, vmap_table);
+ DumpVmapTable(vios->Stream(), oat_method, vmap_table);
}
}
}
// Display a CodeInfo object emitted by the optimizing compiler.
- void DumpCodeInfo(std::ostream& os,
+ void DumpCodeInfo(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
const OatFile::OatMethod& oat_method,
const DexFile::CodeItem& code_item) {
- code_info.Dump(os,
+ code_info.Dump(vios,
oat_method.GetCodeOffset(),
code_item.registers_size_,
options_.dump_code_info_stack_maps_);
@@ -1177,48 +1189,50 @@
}
}
- void DumpMappingTable(std::ostream& os, const OatFile::OatMethod& oat_method) {
+ void DumpMappingTable(VariableIndentationOutputStream* vios,
+ const OatFile::OatMethod& oat_method) {
const void* quick_code = oat_method.GetQuickCode();
if (quick_code == nullptr) {
return;
}
MappingTable table(oat_method.GetMappingTable());
if (table.TotalSize() != 0) {
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent_os(&indent_filter);
if (table.PcToDexSize() != 0) {
typedef MappingTable::PcToDexIterator It;
- os << "suspend point mappings {\n";
+ vios->Stream() << "suspend point mappings {\n";
for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- indent_os << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc());
+ ScopedIndentation indent1(vios);
+ vios->Stream() << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc());
}
- os << "}\n";
+ vios->Stream() << "}\n";
}
if (table.DexToPcSize() != 0) {
typedef MappingTable::DexToPcIterator It;
- os << "catch entry mappings {\n";
+ vios->Stream() << "catch entry mappings {\n";
for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- indent_os << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc());
+ ScopedIndentation indent1(vios);
+ vios->Stream() << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc());
}
- os << "}\n";
+ vios->Stream() << "}\n";
}
}
}
- uint32_t DumpInformationAtOffset(std::ostream& os,
+ uint32_t DumpInformationAtOffset(VariableIndentationOutputStream* vios,
const OatFile::OatMethod& oat_method,
const DexFile::CodeItem* code_item,
size_t offset,
bool suspend_point_mapping) {
if (IsMethodGeneratedByOptimizingCompiler(oat_method, code_item)) {
if (suspend_point_mapping) {
- DumpDexRegisterMapAtOffset(os, oat_method, code_item, offset);
+ ScopedIndentation indent1(vios);
+ DumpDexRegisterMapAtOffset(vios, oat_method, code_item, offset);
}
// The return value is not used in the case of a method compiled
// with the optimizing compiler.
return DexFile::kDexNoIndex;
} else {
- return DumpMappingAtOffset(os, oat_method, offset, suspend_point_mapping);
+ return DumpMappingAtOffset(vios->Stream(), oat_method, offset, suspend_point_mapping);
}
}
@@ -1334,7 +1348,7 @@
return oat_method.GetGcMap() == nullptr && code_item != nullptr;
}
- void DumpDexRegisterMapAtOffset(std::ostream& os,
+ void DumpDexRegisterMapAtOffset(VariableIndentationOutputStream* vios,
const OatFile::OatMethod& oat_method,
const DexFile::CodeItem* code_item,
size_t offset) {
@@ -1349,13 +1363,14 @@
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(offset, encoding);
if (stack_map.IsValid()) {
- stack_map.Dump(
- os, code_info, encoding, oat_method.GetCodeOffset(), code_item->registers_size_);
+ stack_map.Dump(vios, code_info, encoding, oat_method.GetCodeOffset(),
+ code_item->registers_size_);
}
}
}
- verifier::MethodVerifier* DumpVerifier(std::ostream& os, uint32_t dex_method_idx,
+ verifier::MethodVerifier* DumpVerifier(VariableIndentationOutputStream* vios,
+ uint32_t dex_method_idx,
const DexFile* dex_file,
const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item,
@@ -1367,14 +1382,15 @@
hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file)));
DCHECK(options_.class_loader_ != nullptr);
return verifier::MethodVerifier::VerifyMethodAndDump(
- soa.Self(), os, dex_method_idx, dex_file, dex_cache, *options_.class_loader_, &class_def,
- code_item, nullptr, method_access_flags);
+ soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
+ &class_def, code_item, nullptr, method_access_flags);
}
return nullptr;
}
- void DumpCode(std::ostream& os, verifier::MethodVerifier* verifier,
+ void DumpCode(VariableIndentationOutputStream* vios,
+ verifier::MethodVerifier* verifier,
const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item,
bool bad_input, size_t code_size) {
const void* quick_code = oat_method.GetQuickCode();
@@ -1383,22 +1399,23 @@
code_size = oat_method.GetQuickCodeSize();
}
if (code_size == 0 || quick_code == nullptr) {
- os << "NO CODE!\n";
+ vios->Stream() << "NO CODE!\n";
return;
} else {
const uint8_t* quick_native_pc = reinterpret_cast<const uint8_t*>(quick_code);
size_t offset = 0;
while (offset < code_size) {
if (!bad_input) {
- DumpInformationAtOffset(os, oat_method, code_item, offset, false);
+ DumpInformationAtOffset(vios, oat_method, code_item, offset, false);
}
- offset += disassembler_->Dump(os, quick_native_pc + offset);
+ offset += disassembler_->Dump(vios->Stream(), quick_native_pc + offset);
if (!bad_input) {
- uint32_t dex_pc = DumpInformationAtOffset(os, oat_method, code_item, offset, true);
+ uint32_t dex_pc =
+ DumpInformationAtOffset(vios, oat_method, code_item, offset, true);
if (dex_pc != DexFile::kDexNoIndex) {
- DumpGcMapAtNativePcOffset(os, oat_method, code_item, offset);
+ DumpGcMapAtNativePcOffset(vios->Stream(), oat_method, code_item, offset);
if (verifier != nullptr) {
- DumpVRegsAtDexPc(os, verifier, oat_method, code_item, dex_pc);
+ DumpVRegsAtDexPc(vios->Stream(), verifier, oat_method, code_item, dex_pc);
}
}
}
@@ -1420,12 +1437,16 @@
explicit ImageDumper(std::ostream* os, gc::space::ImageSpace& image_space,
const ImageHeader& image_header, OatDumperOptions* oat_dumper_options)
: os_(os),
+ vios_(os),
+ indent1_(&vios_),
image_space_(image_space),
image_header_(image_header),
oat_dumper_options_(oat_dumper_options) {}
bool Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostream& os = *os_;
+ std::ostream& indent_os = vios_.Stream();
+
os << "MAGIC: " << image_header_.GetMagic() << "\n\n";
os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";
@@ -1453,20 +1474,17 @@
{
os << "ROOTS: " << reinterpret_cast<void*>(image_header_.GetImageRoots()) << "\n";
- Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent1_os(&indent1_filter);
static_assert(arraysize(image_roots_descriptions_) ==
static_cast<size_t>(ImageHeader::kImageRootsMax), "sizes must match");
for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
ImageHeader::ImageRoot image_root = static_cast<ImageHeader::ImageRoot>(i);
const char* image_root_description = image_roots_descriptions_[i];
mirror::Object* image_root_object = image_header_.GetImageRoot(image_root);
- indent1_os << StringPrintf("%s: %p\n", image_root_description, image_root_object);
+ indent_os << StringPrintf("%s: %p\n", image_root_description, image_root_object);
if (image_root_object->IsObjectArray()) {
- Indenter indent2_filter(indent1_os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent2_os(&indent2_filter);
mirror::ObjectArray<mirror::Object>* image_root_object_array
= image_root_object->AsObjectArray<mirror::Object>();
+ ScopedIndentation indent2(&vios_);
for (int j = 0; j < image_root_object_array->GetLength(); j++) {
mirror::Object* value = image_root_object_array->Get(j);
size_t run = 0;
@@ -1478,20 +1496,22 @@
}
}
if (run == 0) {
- indent2_os << StringPrintf("%d: ", j);
+ indent_os << StringPrintf("%d: ", j);
} else {
- indent2_os << StringPrintf("%d to %zd: ", j, j + run);
+ indent_os << StringPrintf("%d to %zd: ", j, j + run);
j = j + run;
}
if (value != nullptr) {
- PrettyObjectValue(indent2_os, value->GetClass(), value);
+ PrettyObjectValue(indent_os, value->GetClass(), value);
} else {
- indent2_os << j << ": null\n";
+ indent_os << j << ": null\n";
}
}
}
}
+ }
+ {
os << "METHOD ROOTS\n";
static_assert(arraysize(image_methods_descriptions_) ==
static_cast<size_t>(ImageHeader::kImageMethodsCount), "sizes must match");
@@ -1499,7 +1519,7 @@
auto image_root = static_cast<ImageHeader::ImageMethod>(i);
const char* description = image_methods_descriptions_[i];
auto* image_method = image_header_.GetImageMethod(image_root);
- indent1_os << StringPrintf("%s: %p\n", description, image_method);
+ indent_os << StringPrintf("%s: %p\n", description, image_method);
}
}
os << "\n";
@@ -1556,11 +1576,6 @@
}
}
{
- std::ostream* saved_os = os_;
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent_os(&indent_filter);
- os_ = &indent_os;
-
// Mark dex caches.
dex_cache_arrays_.clear();
{
@@ -1596,7 +1611,6 @@
// Dump the large objects separately.
heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
indent_os << "\n";
- os_ = saved_os;
}
os << "STATS:\n" << std::flush;
std::unique_ptr<File> file(OS::OpenFileForReading(image_filename.c_str()));
@@ -1621,7 +1635,7 @@
// RoundUp to 8 bytes to match the intern table alignment expectation.
stats_.art_method_bytes += RoundUp(method_section.Size(), sizeof(uint64_t));
stats_.interned_strings_bytes += intern_section.Size();
- stats_.Dump(os);
+ stats_.Dump(os, indent_os);
os << "\n";
os << std::flush;
@@ -1760,7 +1774,8 @@
state->stats_.object_bytes += object_bytes;
state->stats_.alignment_bytes += alignment_bytes;
- std::ostream& os = *state->os_;
+ std::ostream& os = state->vios_.Stream();
+
mirror::Class* obj_class = obj->GetClass();
if (obj_class->IsArrayClass()) {
os << StringPrintf("%p: %s length:%d\n", obj, PrettyDescriptor(obj_class).c_str(),
@@ -1775,9 +1790,8 @@
} else {
os << StringPrintf("%p: %s\n", obj, PrettyDescriptor(obj_class).c_str());
}
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent_os(&indent_filter);
- DumpFields(indent_os, obj, obj_class);
+ ScopedIndentation indent1(&state->vios_);
+ DumpFields(os, obj, obj_class);
const auto image_pointer_size =
InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet());
if (obj->IsObjectArray()) {
@@ -1793,25 +1807,24 @@
}
}
if (run == 0) {
- indent_os << StringPrintf("%d: ", i);
+ os << StringPrintf("%d: ", i);
} else {
- indent_os << StringPrintf("%d to %zd: ", i, i + run);
+ os << StringPrintf("%d to %zd: ", i, i + run);
i = i + run;
}
mirror::Class* value_class =
(value == nullptr) ? obj_class->GetComponentType() : value->GetClass();
- PrettyObjectValue(indent_os, value_class, value);
+ PrettyObjectValue(os, value_class, value);
}
} else if (obj->IsClass()) {
mirror::Class* klass = obj->AsClass();
ArtField* sfields = klass->GetSFields();
const size_t num_fields = klass->NumStaticFields();
if (num_fields != 0) {
- indent_os << "STATICS:\n";
- Indenter indent2_filter(indent_os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent2_os(&indent2_filter);
+ os << "STATICS:\n";
+ ScopedIndentation indent2(&state->vios_);
for (size_t i = 0; i < num_fields; i++) {
- PrintField(indent2_os, &sfields[i], sfields[i].GetDeclaringClass());
+ PrintField(os, &sfields[i], sfields[i].GetDeclaringClass());
}
}
} else {
@@ -1827,9 +1840,9 @@
for (int32_t j = i + 1; j < length &&
elem == arr->GetElementPtrSize<void*>(j, image_pointer_size); j++, run++) { }
if (run == 0) {
- indent_os << StringPrintf("%d: ", i);
+ os << StringPrintf("%d: ", i);
} else {
- indent_os << StringPrintf("%d to %zd: ", i, i + run);
+ os << StringPrintf("%d to %zd: ", i, i + run);
i = i + run;
}
auto offset = reinterpret_cast<uint8_t*>(elem) - state->image_space_.Begin();
@@ -1841,7 +1854,7 @@
} else {
msg = "Unknown type";
}
- indent_os << StringPrintf("%p %s\n", elem, msg.c_str());
+ os << StringPrintf("%p %s\n", elem, msg.c_str());
}
}
}
@@ -1920,7 +1933,7 @@
indent_os << StringPrintf("OAT CODE: %p-%p\n", quick_oat_code_begin, quick_oat_code_end);
indent_os << StringPrintf("SIZE: Dex Instructions=%zd GC=%zd Mapping=%zd\n",
- dex_instruction_bytes, gc_map_bytes, pc_mapping_table_bytes);
+ dex_instruction_bytes, gc_map_bytes, pc_mapping_table_bytes);
size_t total_size = dex_instruction_bytes + gc_map_bytes + pc_mapping_table_bytes +
vmap_table_bytes + quick_oat_code_size + ArtMethod::ObjectSize(image_pointer_size);
@@ -2135,12 +2148,11 @@
os << "\n" << std::flush;
}
- void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void Dump(std::ostream& os, std::ostream& indent_os)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
{
os << "art_file_bytes = " << PrettySize(file_bytes) << "\n\n"
<< "art_file_bytes = header_bytes + object_bytes + alignment_bytes\n";
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent_os(&indent_filter);
indent_os << StringPrintf("header_bytes = %8zd (%2.0f%% of art file bytes)\n"
"object_bytes = %8zd (%2.0f%% of art file bytes)\n"
"art_field_bytes = %8zd (%2.0f%% of art file bytes)\n"
@@ -2233,7 +2245,13 @@
// threshold, we assume 2 bytes per instruction and 2 instructions per block.
kLargeMethodDexBytes = 16000
};
+
+ // For performance, use the *os_ directly for anything that doesn't need indentation
+ // and prepare an indentation stream with default indentation 1.
std::ostream* os_;
+ VariableIndentationOutputStream vios_;
+ ScopedIndentation indent1_;
+
gc::space::ImageSpace& image_space_;
const ImageHeader& image_header_;
std::unique_ptr<OatDumper> oat_dumper_;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index ca3ca1d..a7826a74 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -331,9 +331,7 @@
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case allocation triggers GC
mov r2, r9 @ pass Thread::Current
mov r3, sp
- .cfi_adjust_cfa_offset 16
bl \cxx_name @ (method_idx, this, caller, Thread*, SP)
- .cfi_adjust_cfa_offset -16
mov r12, r1 @ save Method*->code_
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
cbz r0, 1f @ did we find the target? if not go to exception delivery
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index ff04106..09a018e 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -267,6 +267,10 @@
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
static_assert(!IsDirectEntrypoint(kQuickThrowStackOverflow), "Non-direct C stub marked direct.");
+ // Deoptimize
+ qpoints->pDeoptimize = art_quick_deoptimize;
+ static_assert(!IsDirectEntrypoint(kQuickDeoptimize), "Non-direct C stub marked direct.");
+
// Atomic 64-bit load/store
qpoints->pA64Load = QuasiAtomic::Read64;
static_assert(IsDirectEntrypoint(kQuickA64Load), "Non-direct C stub marked direct.");
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 321c27b..4904af9 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -176,6 +176,9 @@
qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+ // Deoptimize
+ qpoints->pDeoptimize = art_quick_deoptimize;
+
// TODO - use lld/scd instructions for Mips64
// Atomic 64-bit load/store
qpoints->pA64Load = QuasiAtomic::Read64;
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
index 5d27e47..aca5a37 100644
--- a/runtime/arch/x86/jni_entrypoints_x86.S
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -23,6 +23,7 @@
subl LITERAL(8), %esp // align stack
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artFindNativeMethod) // (Thread*)
addl LITERAL(12), %esp // remove argument & padding
CFI_ADJUST_CFA_OFFSET(-12)
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index c9bc977..f6c7649 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1111,7 +1111,7 @@
POP eax // pop arguments
POP ecx
addl LITERAL(4), %esp
- CFI_ADJUST_CFA_OFFSET(-12)
+ CFI_ADJUST_CFA_OFFSET(-4)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
PUSH eax // alignment padding
@@ -1183,8 +1183,8 @@
PUSH eax
#else
pushl MIRROR_OBJECT_CLASS_OFFSET(%edx) // pass arg2 - type of the value to be stored
-#endif
CFI_ADJUST_CFA_OFFSET(4)
+#endif
PUSH ebx // pass arg1 - component type of the array
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
addl LITERAL(16), %esp // pop arguments
@@ -1429,6 +1429,7 @@
call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
movl %eax, %edi // remember code pointer in EDI
addl LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
test %eax, %eax // if code pointer is null goto deliver pending exception
jz 1f
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME_AND_JUMP
@@ -1559,6 +1560,7 @@
PUSH eax // Pass Method*.
call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, LR)
addl LITERAL(28), %esp // Pop arguments upto saved Method*.
+ CFI_ADJUST_CFA_OFFSET(-28)
movl 60(%esp), %edi // Restore edi.
movl %eax, 60(%esp) // Place code* over edi, just under return pc.
movl SYMBOL(art_quick_instrumentation_exit)@GOT(%ebx), %ebx
@@ -1578,11 +1580,13 @@
movl 52(%esp), %ebp // Restore ebp.
movl 56(%esp), %esi // Restore esi.
addl LITERAL(60), %esp // Wind stack back upto code*.
+ CFI_ADJUST_CFA_OFFSET(-60)
ret // Call method (and pop).
END_FUNCTION art_quick_instrumentation_entry
DEFINE_FUNCTION art_quick_instrumentation_exit
pushl LITERAL(0) // Push a fake return PC as there will be none on the stack.
+ CFI_ADJUST_CFA_OFFSET(4)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx
mov %esp, %ecx // Remember SP
subl LITERAL(8), %esp // Save float return value.
@@ -1611,6 +1615,7 @@
CFI_ADJUST_CFA_OFFSET(-8)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
addl LITERAL(4), %esp // Remove fake return pc.
+ CFI_ADJUST_CFA_OFFSET(-4)
jmp *%ecx // Return.
END_FUNCTION art_quick_instrumentation_exit
@@ -1619,7 +1624,7 @@
* will long jump to the upcall with a special exception of -1.
*/
DEFINE_FUNCTION art_quick_deoptimize
- pushl %ebx // Entry point for a jump. Fake that we were called.
+ PUSH ebx // Entry point for a jump. Fake that we were called.
.globl SYMBOL(art_quick_deoptimize_from_compiled_slow_path) // Entry point for real calls
// from compiled slow paths.
SYMBOL(art_quick_deoptimize_from_compiled_slow_path):
@@ -1682,8 +1687,8 @@
DEFINE_FUNCTION art_nested_signal_return
SETUP_GOT_NOSAVE ebx // sets %ebx for call into PLT
movl LITERAL(1), %ecx
- pushl %ecx // second arg to longjmp (1)
- pushl %eax // first arg to longjmp (jmp_buf)
+ PUSH ecx // second arg to longjmp (1)
+ PUSH eax // first arg to longjmp (jmp_buf)
call PLT_SYMBOL(longjmp)
int3 // won't get here.
END_FUNCTION art_nested_signal_return
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 35b50d1..93d4edc 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -17,7 +17,6 @@
#ifndef ART_RUNTIME_BASE_LOGGING_H_
#define ART_RUNTIME_BASE_LOGGING_H_
-#include <memory>
#include <ostream>
#include "base/macros.h"
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d9935cb..e4f7b7a 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -17,7 +17,6 @@
#ifndef ART_RUNTIME_CLASS_LINKER_H_
#define ART_RUNTIME_CLASS_LINKER_H_
-#include <deque>
#include <string>
#include <utility>
#include <vector>
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 90b8fdb..eec4983 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -794,13 +794,13 @@
}
const DexFile::TryItem* try_items = DexFile::GetTryItems(*code_item, 0);
- ptr_ = DexFile::GetCatchHandlerData(*code_item, 0);
- uint32_t handlers_size = DecodeUnsignedLeb128(&ptr_);
-
if (!CheckListSize(try_items, try_items_size, sizeof(DexFile::TryItem), "try_items size")) {
return false;
}
+ ptr_ = DexFile::GetCatchHandlerData(*code_item, 0);
+ uint32_t handlers_size = DecodeUnsignedLeb128(&ptr_);
+
if (UNLIKELY((handlers_size == 0) || (handlers_size >= 65536))) {
ErrorStringPrintf("Invalid handlers_size: %ud", handlers_size);
return false;
diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h
index f9a2ff6..185a9b7 100644
--- a/runtime/gc/allocator_type.h
+++ b/runtime/gc/allocator_type.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
#define ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
-#include <ostream>
+#include <iosfwd>
namespace art {
namespace gc {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index ac4fc69..9316b27 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -20,6 +20,7 @@
#include "base/stl_util.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
diff --git a/runtime/gc/collector/gc_type.h b/runtime/gc/collector/gc_type.h
index f18e40f..401444a 100644
--- a/runtime/gc/collector/gc_type.h
+++ b/runtime/gc/collector/gc_type.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_GC_COLLECTOR_GC_TYPE_H_
#define ART_RUNTIME_GC_COLLECTOR_GC_TYPE_H_
-#include <ostream>
+#include <iosfwd>
namespace art {
namespace gc {
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 9275e6d..95ba380 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_GC_COLLECTOR_TYPE_H_
#define ART_RUNTIME_GC_COLLECTOR_TYPE_H_
-#include <ostream>
+#include <iosfwd>
namespace art {
namespace gc {
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 1f2643a..0536f32d 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_GC_GC_CAUSE_H_
#define ART_RUNTIME_GC_GC_CAUSE_H_
-#include <ostream>
+#include <iosfwd>
namespace art {
namespace gc {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a782fc8..0ae9cdf 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -37,13 +37,12 @@
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
-#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_compact.h"
-#include "gc/collector/mark_sweep-inl.h"
+#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
@@ -62,7 +61,6 @@
#include "image.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
-#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
@@ -467,6 +465,7 @@
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
task_processor_.reset(new TaskProcessor());
+ reference_processor_.reset(new ReferenceProcessor());
pending_task_lock_ = new Mutex("Pending task lock");
if (ignore_max_footprint_) {
SetIdealFootprint(std::numeric_limits<size_t>::max());
@@ -1705,11 +1704,12 @@
mirror::Class* instance_class = obj->GetClass();
CHECK(instance_class != nullptr);
for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
+ mirror::Class* klass = instance_counter->classes_[i];
if (instance_counter->use_is_assignable_from_) {
- if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
+ if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
++instance_counter->counts_[i];
}
- } else if (instance_class == instance_counter->classes_[i]) {
+ } else if (instance_class == klass) {
++instance_counter->counts_[i];
}
}
@@ -1865,7 +1865,7 @@
static_cast<double>(space_size_before_compaction);
tl->ResumeAll();
// Finish GC.
- reference_processor_.EnqueueClearedReferences(self);
+ reference_processor_->EnqueueClearedReferences(self);
GrowForUtilization(semi_space_collector_);
LogGC(kGcCauseHomogeneousSpaceCompact, collector);
FinishGC(self, collector::kGcTypeFull);
@@ -1998,7 +1998,7 @@
ChangeCollector(collector_type);
tl->ResumeAll();
// Can't call into java code with all threads suspended.
- reference_processor_.EnqueueClearedReferences(self);
+ reference_processor_->EnqueueClearedReferences(self);
uint64_t duration = NanoTime() - start_time;
GrowForUtilization(semi_space_collector_);
DCHECK(collector != nullptr);
@@ -2472,7 +2472,7 @@
total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
RequestTrim(self);
// Enqueue cleared references.
- reference_processor_.EnqueueClearedReferences(self);
+ reference_processor_->EnqueueClearedReferences(self);
// Grow the heap so that we know when to perform the next GC.
GrowForUtilization(collector, bytes_allocated_before_gc);
LogGC(gc_cause, collector);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index cfb6e06..d0040f2 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -26,22 +26,17 @@
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/time_utils.h"
-#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/gc_cause.h"
-#include "gc/collector/garbage_collector.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "globals.h"
-#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
-#include "reference_processor.h"
#include "safe_map.h"
-#include "thread_pool.h"
#include "verify_object.h"
namespace art {
@@ -50,6 +45,7 @@
class Mutex;
class StackVisitor;
class Thread;
+class ThreadPool;
class TimingLogger;
namespace mirror {
@@ -631,7 +627,7 @@
bool HasImageSpace() const;
ReferenceProcessor* GetReferenceProcessor() {
- return &reference_processor_;
+ return reference_processor_.get();
}
TaskProcessor* GetTaskProcessor() {
return task_processor_.get();
@@ -1015,7 +1011,7 @@
std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
// Reference processor;
- ReferenceProcessor reference_processor_;
+ std::unique_ptr<ReferenceProcessor> reference_processor_;
// Task processor, proxies heap trim requests to the daemon threads.
std::unique_ptr<TaskProcessor> task_processor_;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 52192e2..2b567fe 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -16,7 +16,9 @@
#include "large_object_space.h"
+#include <valgrind.h>
#include <memory>
+#include <memcheck/memcheck.h>
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 5f3a1db..9495864 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -20,8 +20,6 @@
#include "space.h"
#include <ostream>
-#include <valgrind.h>
-#include <memcheck/memcheck.h>
namespace art {
namespace gc {
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index 25d4445..f94ec23 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_
+#include <valgrind.h>
+
#include "gc/allocator/rosalloc-inl.h"
#include "gc/space/valgrind_settings.h"
#include "rosalloc_space.h"
diff --git a/runtime/indenter.h b/runtime/indenter.h
index 38b398d..78b18f6 100644
--- a/runtime/indenter.h
+++ b/runtime/indenter.h
@@ -19,10 +19,13 @@
#include "base/logging.h"
#include "base/macros.h"
+#include <ostream>
#include <streambuf>
-const char kIndentChar =' ';
-const size_t kIndentBy1Count = 2;
+namespace art {
+
+constexpr char kIndentChar =' ';
+constexpr size_t kIndentBy1Count = 2;
class Indenter : public std::streambuf {
public:
@@ -99,9 +102,60 @@
const char text_[8];
// Number of times text is output.
- const size_t count_;
+ size_t count_;
+
+ friend class VariableIndentationOutputStream;
DISALLOW_COPY_AND_ASSIGN(Indenter);
};
+class VariableIndentationOutputStream {
+ public:
+ explicit VariableIndentationOutputStream(std::ostream* os, char text = kIndentChar)
+ : indenter_(os->rdbuf(), text, 0u),
+ indented_os_(&indenter_) {
+ }
+
+ std::ostream& Stream() {
+ return indented_os_;
+ }
+
+ void IncreaseIndentation(size_t adjustment) {
+ indenter_.count_ += adjustment;
+ }
+
+ void DecreaseIndentation(size_t adjustment) {
+ DCHECK_GE(indenter_.count_, adjustment);
+ indenter_.count_ -= adjustment;
+ }
+
+ private:
+ Indenter indenter_;
+ std::ostream indented_os_;
+
+ DISALLOW_COPY_AND_ASSIGN(VariableIndentationOutputStream);
+};
+
+class ScopedIndentation {
+ public:
+ explicit ScopedIndentation(VariableIndentationOutputStream* vios,
+ size_t adjustment = kIndentBy1Count)
+ : vios_(vios),
+ adjustment_(adjustment) {
+ vios_->IncreaseIndentation(adjustment_);
+ }
+
+ ~ScopedIndentation() {
+ vios_->DecreaseIndentation(adjustment_);
+ }
+
+ private:
+ VariableIndentationOutputStream* const vios_;
+ const size_t adjustment_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedIndentation);
+};
+
+} // namespace art
+
#endif // ART_RUNTIME_INDENTER_H_
diff --git a/runtime/indenter_test.cc b/runtime/indenter_test.cc
index 1919e3d..1a26d7b 100644
--- a/runtime/indenter_test.cc
+++ b/runtime/indenter_test.cc
@@ -17,6 +17,8 @@
#include "gtest/gtest.h"
#include "indenter.h"
+namespace art {
+
TEST(IndenterTest, MultiLineTest) {
std::ostringstream output;
Indenter indent_filter(output.rdbuf(), '\t', 2);
@@ -33,3 +35,5 @@
input << "\n";
EXPECT_EQ(output.str(), "\t\thello\n\t\thello again\n");
}
+
+} // namespace art
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 7f89b1d..fc27315 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -832,11 +832,22 @@
f->VisitRoots(visitor);
}
}
- for (auto& m : GetDirectMethods(pointer_size)) {
- m.VisitRoots(visitor);
+  // We may see GetDirectMethodsPtr() == null with NumDirectMethods() != 0 if the root marking
+  // thread reads a null DirectMethodsPtr() but a non-null NumDirectMethods() due to a race with
+  // SetDirectMethodsPtr from class linking. Same for virtual methods.
+ // In this case, it is safe to avoid marking the roots since we must be either the CC or CMS. If
+ // we are CMS then the roots are already marked through other sources, otherwise the roots are
+ // already marked due to the to-space invariant.
+ // Unchecked versions since we may visit roots of classes that aren't yet loaded.
+ if (GetDirectMethodsPtrUnchecked() != nullptr) {
+ for (auto& m : GetDirectMethods(pointer_size)) {
+ m.VisitRoots(visitor);
+ }
}
- for (auto& m : GetVirtualMethods(pointer_size)) {
- m.VisitRoots(visitor);
+ if (GetVirtualMethodsPtrUnchecked() != nullptr) {
+ for (auto& m : GetVirtualMethods(pointer_size)) {
+ m.VisitRoots(visitor);
+ }
}
}
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f0b7bfd..5bd6583 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -471,7 +471,8 @@
ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size) {
for (auto& method : GetVirtualMethods(pointer_size)) {
- if (name == method.GetName() && method.GetSignature() == signature) {
+ ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
+ if (name == np_method->GetName() && np_method->GetSignature() == signature) {
return &method;
}
}
@@ -481,7 +482,8 @@
ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
size_t pointer_size) {
for (auto& method : GetVirtualMethods(pointer_size)) {
- if (name == method.GetName() && signature == method.GetSignature()) {
+ ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
+ if (name == np_method->GetName() && signature == np_method->GetSignature()) {
return &method;
}
}
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 1078492..8febb62 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -257,21 +257,45 @@
static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass,
jboolean countAssignable) {
ScopedObjectAccess soa(env);
- gc::Heap* heap = Runtime::Current()->GetHeap();
- // We only want reachable instances, so do a GC. Heap::VisitObjects visits all of the heap
- // objects in the all spaces and the allocation stack.
- heap->CollectGarbage(false);
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ // Caller's responsibility to do GC if desired.
mirror::Class* c = soa.Decode<mirror::Class*>(javaClass);
if (c == nullptr) {
return 0;
}
- std::vector<mirror::Class*> classes;
- classes.push_back(c);
+ std::vector<mirror::Class*> classes {c};
uint64_t count = 0;
heap->CountInstances(classes, countAssignable, &count);
return count;
}
+static jlongArray VMDebug_countInstancesOfClasses(JNIEnv* env, jclass, jobjectArray javaClasses,
+ jboolean countAssignable) {
+ ScopedObjectAccess soa(env);
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ // Caller's responsibility to do GC if desired.
+ auto* decoded_classes = soa.Decode<mirror::ObjectArray<mirror::Class>*>(javaClasses);
+ if (decoded_classes == nullptr) {
+ return nullptr;
+ }
+ std::vector<mirror::Class*> classes;
+ for (size_t i = 0, count = decoded_classes->GetLength(); i < count; ++i) {
+ classes.push_back(decoded_classes->Get(i));
+ }
+ std::vector<uint64_t> counts(classes.size(), 0u);
+ // Heap::CountInstances can handle null and will put 0 for these classes.
+ heap->CountInstances(classes, countAssignable, &counts[0]);
+ auto* long_counts = mirror::LongArray::Alloc(soa.Self(), counts.size());
+ if (long_counts == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+ for (size_t i = 0; i < counts.size(); ++i) {
+ long_counts->Set(i, counts[i]);
+ }
+ return soa.AddLocalReference<jlongArray>(long_counts);
+}
+
// We export the VM internal per-heap-space size/alloc/free metrics
// for the zygote space, alloc space (application heap), and the large
// object space for dumpsys meminfo. The other memory region data such
@@ -452,6 +476,7 @@
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"),
+ NATIVE_METHOD(VMDebug, countInstancesOfClasses, "([Ljava/lang/Class;Z)[J"),
NATIVE_METHOD(VMDebug, crash, "()V"),
NATIVE_METHOD(VMDebug, dumpHprofData, "(Ljava/lang/String;Ljava/io/FileDescriptor;)V"),
NATIVE_METHOD(VMDebug, dumpHprofDataDdms, "()V"),
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index b28adf9..df0cf45 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -698,18 +698,13 @@
return false;
}
- ClassLinker* linker = runtime->GetClassLinker();
- CHECK(linker != nullptr) << "ClassLinker is not created yet";
- const OatFile* primary_oat_file = linker->GetPrimaryOatFile();
- const bool debuggable = primary_oat_file != nullptr && primary_oat_file->IsDebuggable();
-
std::vector<std::string> argv;
argv.push_back(runtime->GetCompilerExecutable());
argv.push_back("--runtime-arg");
argv.push_back("-classpath");
argv.push_back("--runtime-arg");
argv.push_back(runtime->GetClassPathString());
- if (debuggable) {
+ if (runtime->IsDebuggable()) {
argv.push_back("--debuggable");
}
runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 8e99dbb..df34ce7 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -17,13 +17,6 @@
#ifndef ART_RUNTIME_OBJECT_CALLBACKS_H_
#define ART_RUNTIME_OBJECT_CALLBACKS_H_
-// For ostream.
-#include <ostream>
-// For uint32_t.
-#include <stdint.h>
-// For size_t.
-#include <stdlib.h>
-
#include "base/macros.h"
namespace art {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a2a745e..1f954ca 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -680,6 +680,11 @@
return IsShuttingDownLocked();
}
+bool Runtime::IsDebuggable() const {
+ const OatFile* oat_file = GetClassLinker()->GetPrimaryOatFile();
+ return oat_file != nullptr && oat_file->IsDebuggable();
+}
+
void Runtime::StartDaemonThreads() {
VLOG(startup) << "Runtime::StartDaemonThreads entering";
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 13cccc0..f2f3c97 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -553,6 +553,8 @@
return method_ref_string_init_reg_map_;
}
+ bool IsDebuggable() const;
+
private:
static void InitPlatformSignalHandlers();
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 741cd90..962132b 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -95,40 +95,37 @@
DexRegisterLocation location,
const std::string& prefix = "v",
const std::string& suffix = "") {
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indented_os(&indent_filter);
- indented_os << prefix << dex_register_num << ": "
- << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind())
- << " (" << location.GetValue() << ")" << suffix << '\n';
+ os << prefix << dex_register_num << ": "
+ << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind())
+ << " (" << location.GetValue() << ")" << suffix << '\n';
}
-void CodeInfo::Dump(std::ostream& os,
+void CodeInfo::Dump(VariableIndentationOutputStream* vios,
uint32_t code_offset,
uint16_t number_of_dex_registers,
bool dump_stack_maps) const {
StackMapEncoding encoding = ExtractEncoding();
uint32_t code_info_size = GetOverallSize();
size_t number_of_stack_maps = GetNumberOfStackMaps();
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indented_os(&indent_filter);
- indented_os << "Optimized CodeInfo (size=" << code_info_size
- << ", number_of_dex_registers=" << number_of_dex_registers
- << ", number_of_stack_maps=" << number_of_stack_maps
- << ", has_inline_info=" << encoding.HasInlineInfo()
- << ", number_of_bytes_for_inline_info=" << encoding.NumberOfBytesForInlineInfo()
- << ", number_of_bytes_for_dex_register_map="
- << encoding.NumberOfBytesForDexRegisterMap()
- << ", number_of_bytes_for_dex_pc=" << encoding.NumberOfBytesForDexPc()
- << ", number_of_bytes_for_native_pc=" << encoding.NumberOfBytesForNativePc()
- << ", number_of_bytes_for_register_mask=" << encoding.NumberOfBytesForRegisterMask()
- << ")\n";
+ vios->Stream()
+ << "Optimized CodeInfo (size=" << code_info_size
+ << ", number_of_dex_registers=" << number_of_dex_registers
+ << ", number_of_stack_maps=" << number_of_stack_maps
+ << ", has_inline_info=" << encoding.HasInlineInfo()
+ << ", number_of_bytes_for_inline_info=" << encoding.NumberOfBytesForInlineInfo()
+ << ", number_of_bytes_for_dex_register_map=" << encoding.NumberOfBytesForDexRegisterMap()
+ << ", number_of_bytes_for_dex_pc=" << encoding.NumberOfBytesForDexPc()
+ << ", number_of_bytes_for_native_pc=" << encoding.NumberOfBytesForNativePc()
+ << ", number_of_bytes_for_register_mask=" << encoding.NumberOfBytesForRegisterMask()
+ << ")\n";
+ ScopedIndentation indent1(vios);
// Display the Dex register location catalog.
- GetDexRegisterLocationCatalog(encoding).Dump(indented_os, *this);
+ GetDexRegisterLocationCatalog(encoding).Dump(vios, *this);
// Display stack maps along with (live) Dex register maps.
if (dump_stack_maps) {
for (size_t i = 0; i < number_of_stack_maps; ++i) {
StackMap stack_map = GetStackMapAt(i, encoding);
- stack_map.Dump(indented_os,
+ stack_map.Dump(vios,
*this,
encoding,
code_offset,
@@ -140,30 +137,28 @@
// we need to know the number of dex registers for each inlined method.
}
-void DexRegisterLocationCatalog::Dump(std::ostream& os, const CodeInfo& code_info) {
+void DexRegisterLocationCatalog::Dump(VariableIndentationOutputStream* vios,
+ const CodeInfo& code_info) {
StackMapEncoding encoding = code_info.ExtractEncoding();
size_t number_of_location_catalog_entries =
code_info.GetNumberOfDexRegisterLocationCatalogEntries();
size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize(encoding);
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indented_os(&indent_filter);
- indented_os
+ vios->Stream()
<< "DexRegisterLocationCatalog (number_of_entries=" << number_of_location_catalog_entries
<< ", size_in_bytes=" << location_catalog_size_in_bytes << ")\n";
for (size_t i = 0; i < number_of_location_catalog_entries; ++i) {
DexRegisterLocation location = GetDexRegisterLocation(i);
- DumpRegisterMapping(indented_os, i, location, "entry ");
+ ScopedIndentation indent1(vios);
+ DumpRegisterMapping(vios->Stream(), i, location, "entry ");
}
}
-void DexRegisterMap::Dump(std::ostream& os,
+void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
uint16_t number_of_dex_registers) const {
StackMapEncoding encoding = code_info.ExtractEncoding();
size_t number_of_location_catalog_entries =
code_info.GetNumberOfDexRegisterLocationCatalogEntries();
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indented_os(&indent_filter);
// TODO: Display the bit mask of live Dex registers.
for (size_t j = 0; j < number_of_dex_registers; ++j) {
if (IsDexRegisterLive(j)) {
@@ -173,70 +168,70 @@
number_of_dex_registers,
code_info,
encoding);
+ ScopedIndentation indent1(vios);
DumpRegisterMapping(
- indented_os, j, location, "v",
+ vios->Stream(), j, location, "v",
"\t[entry " + std::to_string(static_cast<int>(location_catalog_entry_index)) + "]");
}
}
}
-void StackMap::Dump(std::ostream& os,
+void StackMap::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
const StackMapEncoding& encoding,
uint32_t code_offset,
uint16_t number_of_dex_registers,
const std::string& header_suffix) const {
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indented_os(&indent_filter);
- indented_os << "StackMap" << header_suffix
- << std::hex
- << " [native_pc=0x" << code_offset + GetNativePcOffset(encoding) << "]"
- << " (dex_pc=0x" << GetDexPc(encoding)
- << ", native_pc_offset=0x" << GetNativePcOffset(encoding)
- << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(encoding)
- << ", inline_info_offset=0x" << GetInlineDescriptorOffset(encoding)
- << ", register_mask=0x" << GetRegisterMask(encoding)
- << std::dec
- << ", stack_mask=0b";
+ vios->Stream()
+ << "StackMap" << header_suffix
+ << std::hex
+ << " [native_pc=0x" << code_offset + GetNativePcOffset(encoding) << "]"
+ << " (dex_pc=0x" << GetDexPc(encoding)
+ << ", native_pc_offset=0x" << GetNativePcOffset(encoding)
+ << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(encoding)
+ << ", inline_info_offset=0x" << GetInlineDescriptorOffset(encoding)
+ << ", register_mask=0x" << GetRegisterMask(encoding)
+ << std::dec
+ << ", stack_mask=0b";
MemoryRegion stack_mask = GetStackMask(encoding);
for (size_t i = 0, e = stack_mask.size_in_bits(); i < e; ++i) {
- indented_os << stack_mask.LoadBit(e - i - 1);
+ vios->Stream() << stack_mask.LoadBit(e - i - 1);
}
- indented_os << ")\n";
+ vios->Stream() << ")\n";
if (HasDexRegisterMap(encoding)) {
DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(
*this, encoding, number_of_dex_registers);
- dex_register_map.Dump(os, code_info, number_of_dex_registers);
+ dex_register_map.Dump(vios, code_info, number_of_dex_registers);
}
if (HasInlineInfo(encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(*this, encoding);
// We do not know the length of the dex register maps of inlined frames
// at this level, so we just pass null to `InlineInfo::Dump` to tell
// it not to look at these maps.
- inline_info.Dump(os, code_info, nullptr);
+ inline_info.Dump(vios, code_info, nullptr);
}
}
-void InlineInfo::Dump(std::ostream& os,
+void InlineInfo::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
uint16_t number_of_dex_registers[]) const {
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indented_os(&indent_filter);
- indented_os << "InlineInfo with depth " << static_cast<uint32_t>(GetDepth()) << "\n";
+ vios->Stream() << "InlineInfo with depth " << static_cast<uint32_t>(GetDepth()) << "\n";
for (size_t i = 0; i < GetDepth(); ++i) {
- indented_os << " At depth " << i
- << std::hex
- << " (dex_pc=0x" << GetDexPcAtDepth(i)
- << std::dec
- << ", method_index=" << GetMethodIndexAtDepth(i)
- << ", invoke_type=" << static_cast<InvokeType>(GetInvokeTypeAtDepth(i))
- << ")\n";
+ vios->Stream()
+ << " At depth " << i
+ << std::hex
+ << " (dex_pc=0x" << GetDexPcAtDepth(i)
+ << std::dec
+ << ", method_index=" << GetMethodIndexAtDepth(i)
+ << ", invoke_type=" << static_cast<InvokeType>(GetInvokeTypeAtDepth(i))
+ << ")\n";
if (HasDexRegisterMapAtDepth(i) && (number_of_dex_registers != nullptr)) {
StackMapEncoding encoding = code_info.ExtractEncoding();
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapAtDepth(i, *this, encoding, number_of_dex_registers[i]);
- dex_register_map.Dump(indented_os, code_info, number_of_dex_registers[i]);
+ ScopedIndentation indent1(vios);
+ dex_register_map.Dump(vios, code_info, number_of_dex_registers[i]);
}
}
}
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 4e42008..e8769f9 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -23,6 +23,8 @@
namespace art {
+class VariableIndentationOutputStream;
+
// Size of a frame slot, in bytes. This constant is a signed value,
// to please the compiler in arithmetic operations involving int32_t
// (signed) values.
@@ -357,7 +359,7 @@
return region_.size();
}
- void Dump(std::ostream& os, const CodeInfo& code_info);
+ void Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info);
// Special (invalid) Dex register location catalog entry index meaning
// that there is no location for a given Dex register (i.e., it is
@@ -610,7 +612,8 @@
return region_.size();
}
- void Dump(std::ostream& o, const CodeInfo& code_info, uint16_t number_of_dex_registers) const;
+ void Dump(VariableIndentationOutputStream* vios,
+ const CodeInfo& code_info, uint16_t number_of_dex_registers) const;
private:
// Return the index in the Dex register map corresponding to the Dex
@@ -837,7 +840,7 @@
&& region_.size() == other.region_.size();
}
- void Dump(std::ostream& os,
+ void Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
const StackMapEncoding& encoding,
uint32_t code_offset,
@@ -931,7 +934,8 @@
return kFixedEntrySize;
}
- void Dump(std::ostream& os, const CodeInfo& info, uint16_t* number_of_dex_registers) const;
+ void Dump(VariableIndentationOutputStream* vios,
+ const CodeInfo& info, uint16_t* number_of_dex_registers) const;
private:
// TODO: Instead of plain types such as "uint8_t", introduce
@@ -1120,7 +1124,7 @@
// number of Dex virtual registers used in this method. If
// `dump_stack_maps` is true, also dump the stack maps and the
// associated Dex register maps.
- void Dump(std::ostream& os,
+ void Dump(VariableIndentationOutputStream* vios,
uint32_t code_offset,
uint16_t number_of_dex_registers,
bool dump_stack_maps) const;
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index f7ef894..5f965f1 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -25,6 +25,7 @@
#include "base/mutex-inl.h"
#include "gc/heap.h"
#include "jni_env_ext.h"
+#include "thread_pool.h"
namespace art {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7ee0ff1..e0d65ad 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -388,6 +388,24 @@
void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
CHECK(java_peer != nullptr);
Thread* self = static_cast<JNIEnvExt*>(env)->self;
+
+ if (VLOG_IS_ON(threads)) {
+ ScopedObjectAccess soa(env);
+
+ ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
+ mirror::String* java_name = reinterpret_cast<mirror::String*>(f->GetObject(
+ soa.Decode<mirror::Object*>(java_peer)));
+ std::string thread_name;
+ if (java_name != nullptr) {
+ thread_name = java_name->ToModifiedUtf8();
+ } else {
+ thread_name = "(Unnamed)";
+ }
+
+ VLOG(threads) << "Creating native thread for " << thread_name;
+ self->Dump(LOG(INFO));
+ }
+
Runtime* runtime = Runtime::Current();
// Atomically start the birth of the thread ensuring the runtime isn't shutting down.
@@ -556,6 +574,16 @@
}
}
+ if (VLOG_IS_ON(threads)) {
+ if (thread_name != nullptr) {
+ VLOG(threads) << "Attaching thread " << thread_name;
+ } else {
+ VLOG(threads) << "Attaching unnamed thread.";
+ }
+ ScopedObjectAccess soa(self);
+ self->Dump(LOG(INFO));
+ }
+
{
ScopedObjectAccess soa(self);
Dbg::PostThreadStart(self);
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 47e5b52..9d907eb 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -28,7 +28,6 @@
#include <sstream>
#include "base/histogram-inl.h"
-#include "base/mutex.h"
#include "base/mutex-inl.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 1e84c9d..d8f80fa 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -201,112 +201,4 @@
return tasks_.size();
}
-WorkStealingWorker::WorkStealingWorker(ThreadPool* thread_pool, const std::string& name,
- size_t stack_size)
- : ThreadPoolWorker(thread_pool, name, stack_size), task_(nullptr) {}
-
-void WorkStealingWorker::Run() {
- Thread* self = Thread::Current();
- Task* task = nullptr;
- WorkStealingThreadPool* thread_pool = down_cast<WorkStealingThreadPool*>(thread_pool_);
- while ((task = thread_pool_->GetTask(self)) != nullptr) {
- WorkStealingTask* stealing_task = down_cast<WorkStealingTask*>(task);
-
- {
- CHECK(task_ == nullptr);
- MutexLock mu(self, thread_pool->work_steal_lock_);
- // Register that we are running the task
- ++stealing_task->ref_count_;
- task_ = stealing_task;
- }
- stealing_task->Run(self);
- // Mark ourselves as not running a task so that nobody tries to steal from us.
- // There is a race condition that someone starts stealing from us at this point. This is okay
- // due to the reference counting.
- task_ = nullptr;
-
- bool finalize;
-
- // Steal work from tasks until there is none left to steal. Note: There is a race, but
- // all that happens when the race occurs is that we steal some work instead of processing a
- // task from the queue.
- while (thread_pool->GetTaskCount(self) == 0) {
- WorkStealingTask* steal_from_task = nullptr;
-
- {
- MutexLock mu(self, thread_pool->work_steal_lock_);
- // Try finding a task to steal from.
- steal_from_task = thread_pool->FindTaskToStealFrom();
- if (steal_from_task != nullptr) {
- CHECK_NE(stealing_task, steal_from_task)
- << "Attempting to steal from completed self task";
- steal_from_task->ref_count_++;
- } else {
- break;
- }
- }
-
- if (steal_from_task != nullptr) {
- // Task which completed earlier is going to steal some work.
- stealing_task->StealFrom(self, steal_from_task);
-
- {
- // We are done stealing from the task, lets decrement its reference count.
- MutexLock mu(self, thread_pool->work_steal_lock_);
- finalize = !--steal_from_task->ref_count_;
- }
-
- if (finalize) {
- steal_from_task->Finalize();
- }
- }
- }
-
- {
- MutexLock mu(self, thread_pool->work_steal_lock_);
- // If nobody is still referencing task_ we can finalize it.
- finalize = !--stealing_task->ref_count_;
- }
-
- if (finalize) {
- stealing_task->Finalize();
- }
- }
-}
-
-WorkStealingWorker::~WorkStealingWorker() {}
-
-WorkStealingThreadPool::WorkStealingThreadPool(const char* name, size_t num_threads)
- : ThreadPool(name, 0),
- work_steal_lock_("work stealing lock"),
- steal_index_(0) {
- while (GetThreadCount() < num_threads) {
- const std::string worker_name = StringPrintf("Work stealing worker %zu", GetThreadCount());
- threads_.push_back(new WorkStealingWorker(this, worker_name,
- ThreadPoolWorker::kDefaultStackSize));
- }
-}
-
-WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom() {
- const size_t thread_count = GetThreadCount();
- for (size_t i = 0; i < thread_count; ++i) {
- // TODO: Use CAS instead of lock.
- ++steal_index_;
- if (steal_index_ >= thread_count) {
- steal_index_-= thread_count;
- }
-
- WorkStealingWorker* worker = down_cast<WorkStealingWorker*>(threads_[steal_index_]);
- WorkStealingTask* task = worker->task_;
- if (task) {
- // Not null, we can probably steal from this worker.
- return task;
- }
- }
- // Couldn't find something to steal.
- return nullptr;
-}
-
-WorkStealingThreadPool::~WorkStealingThreadPool() {}
-
} // namespace art
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 0557708..88700e6 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -144,58 +144,6 @@
DISALLOW_COPY_AND_ASSIGN(ThreadPool);
};
-class WorkStealingTask : public Task {
- public:
- WorkStealingTask() : ref_count_(0) {}
-
- size_t GetRefCount() const {
- return ref_count_;
- }
-
- virtual void StealFrom(Thread* self, WorkStealingTask* source) = 0;
-
- private:
- // How many people are referencing this task.
- size_t ref_count_;
-
- friend class WorkStealingWorker;
-};
-
-class WorkStealingWorker : public ThreadPoolWorker {
- public:
- virtual ~WorkStealingWorker();
-
- bool IsRunningTask() const {
- return task_ != nullptr;
- }
-
- protected:
- WorkStealingTask* task_;
-
- WorkStealingWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size);
- virtual void Run();
-
- private:
- friend class WorkStealingThreadPool;
- DISALLOW_COPY_AND_ASSIGN(WorkStealingWorker);
-};
-
-class WorkStealingThreadPool : public ThreadPool {
- public:
- explicit WorkStealingThreadPool(const char* name, size_t num_threads);
- virtual ~WorkStealingThreadPool();
-
- private:
- Mutex work_steal_lock_;
- // Which thread we are stealing from (round robin).
- size_t steal_index_;
-
- // Find a task to steal from
- WorkStealingTask* FindTaskToStealFrom() EXCLUSIVE_LOCKS_REQUIRED(work_steal_lock_);
-
- friend class WorkStealingWorker;
-};
-
} // namespace art
#endif // ART_RUNTIME_THREAD_POOL_H_
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 09db7cd..11c3e65 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -349,27 +349,29 @@
return result;
}
-MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self, std::ostream& os, uint32_t dex_method_idx,
- const DexFile* dex_file,
- Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
- const DexFile::CodeItem* code_item,
- ArtMethod* method,
- uint32_t method_access_flags) {
+MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self,
+ VariableIndentationOutputStream* vios,
+ uint32_t dex_method_idx,
+ const DexFile* dex_file,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile::ClassDef* class_def,
+ const DexFile::CodeItem* code_item,
+ ArtMethod* method,
+ uint32_t method_access_flags) {
MethodVerifier* verifier = new MethodVerifier(self, dex_file, dex_cache, class_loader,
class_def, code_item, dex_method_idx, method,
method_access_flags, true, true, true, true);
verifier->Verify();
- verifier->DumpFailures(os);
- os << verifier->info_messages_.str();
+ verifier->DumpFailures(vios->Stream());
+ vios->Stream() << verifier->info_messages_.str();
// Only dump and return if no hard failures. Otherwise the verifier may be not fully initialized
// and querying any info is dangerous/can abort.
if (verifier->have_pending_hard_failure_) {
delete verifier;
return nullptr;
} else {
- verifier->Dump(os);
+ verifier->Dump(vios);
return verifier;
}
}
@@ -1280,32 +1282,36 @@
}
void MethodVerifier::Dump(std::ostream& os) {
+ VariableIndentationOutputStream vios(&os);
+ Dump(&vios);
+}
+
+void MethodVerifier::Dump(VariableIndentationOutputStream* vios) {
if (code_item_ == nullptr) {
- os << "Native method\n";
+ vios->Stream() << "Native method\n";
return;
}
{
- os << "Register Types:\n";
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent_os(&indent_filter);
- reg_types_.Dump(indent_os);
+ vios->Stream() << "Register Types:\n";
+ ScopedIndentation indent1(vios);
+ reg_types_.Dump(vios->Stream());
}
- os << "Dumping instructions and register lines:\n";
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent_os(&indent_filter);
+ vios->Stream() << "Dumping instructions and register lines:\n";
+ ScopedIndentation indent1(vios);
const Instruction* inst = Instruction::At(code_item_->insns_);
for (size_t dex_pc = 0; dex_pc < code_item_->insns_size_in_code_units_;
dex_pc += inst->SizeInCodeUnits()) {
RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
if (reg_line != nullptr) {
- indent_os << reg_line->Dump(this) << "\n";
+ vios->Stream() << reg_line->Dump(this) << "\n";
}
- indent_os << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " ";
+ vios->Stream()
+ << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " ";
const bool kDumpHexOfInstruction = false;
if (kDumpHexOfInstruction) {
- indent_os << inst->DumpHex(5) << " ";
+ vios->Stream() << inst->DumpHex(5) << " ";
}
- indent_os << inst->DumpString(dex_file_) << "\n";
+ vios->Stream() << inst->DumpString(dex_file_) << "\n";
inst = inst->Next();
}
}
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 2550694..d933448 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -32,6 +32,7 @@
class Instruction;
struct ReferenceMap2Visitor;
class Thread;
+class VariableIndentationOutputStream;
namespace verifier {
@@ -157,7 +158,9 @@
bool allow_soft_failures, std::string* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static MethodVerifier* VerifyMethodAndDump(Thread* self, std::ostream& os, uint32_t method_idx,
+ static MethodVerifier* VerifyMethodAndDump(Thread* self,
+ VariableIndentationOutputStream* vios,
+ uint32_t method_idx,
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
@@ -191,6 +194,7 @@
// Dump the state of the verifier, namely each instruction, what flags are set on it, register
// information
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Dump(VariableIndentationOutputStream* vios) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding
// to the locks held at 'dex_pc' in method 'm'.
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index c8aa4fd..1435607 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -585,7 +585,15 @@
DCHECK(!Equals(incoming_type)); // Trivial equality handled by caller
// Perform pointer equality tests for conflict to avoid virtual method dispatch.
const ConflictType& conflict = reg_types->Conflict();
- if (this == &conflict) {
+ if (IsUndefined() || incoming_type.IsUndefined()) {
+ // There is a difference between undefined and conflict. Conflicts may be copied around, but
+ // not used. Undefined registers must not be copied. So any merge with undefined should return
+ // undefined.
+ if (IsUndefined()) {
+ return *this;
+ }
+ return incoming_type;
+ } else if (this == &conflict) {
DCHECK(IsConflict());
return *this; // Conflict MERGE * => Conflict
} else if (&incoming_type == &conflict) {
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 244deed..9cd2bdf 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -38,10 +38,9 @@
verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
<< new_type << "'";
return false;
- } else if (new_type.IsConflict()) { // should only be set during a merge
- verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Set register to unknown type " << new_type;
- return false;
} else {
+ // Note: previously we failed when asked to set a conflict. However, conflicts are OK as long
+ // as they are not accessed, and our backends can handle this nowadays.
line_[vdst] = new_type.GetId();
}
// Clear the monitor entry bits for this register.
@@ -93,8 +92,9 @@
if (!SetRegisterType(verifier, vdst, type)) {
return;
}
- if ((cat == kTypeCategory1nr && !type.IsCategory1Types()) ||
- (cat == kTypeCategoryRef && !type.IsReferenceTypes())) {
+ if (!type.IsConflict() && // Allow conflicts to be copied around.
+ ((cat == kTypeCategory1nr && !type.IsCategory1Types()) ||
+ (cat == kTypeCategoryRef && !type.IsReferenceTypes()))) {
verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy1 v" << vdst << "<-v" << vsrc << " type=" << type
<< " cat=" << static_cast<int>(cat);
} else if (cat == kTypeCategoryRef) {
diff --git a/test/003-omnibus-opcodes/build b/test/003-omnibus-opcodes/build
index f909fb2..faa2983 100644
--- a/test/003-omnibus-opcodes/build
+++ b/test/003-omnibus-opcodes/build
@@ -22,5 +22,10 @@
rm classes/UnresClass.class
${JAVAC} -d classes `find src2 -name '*.java'`
-${DX} -JXmx256m --debug --dex --output=classes.dex classes
+if [ ${USE_JACK} = "true" ]; then
+ ${JILL} classes --output classes.jack
+ ${JACK} --import classes.jack --output-dex .
+else
+ ${DX} -JXmx256m --debug --dex --output=classes.dex classes
+fi
zip $TEST_NAME.jar classes.dex
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index ca256ec..db0dd32 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -626,3 +626,7 @@
assert(strcmp(test_array, chars6) == 0);
env->ReleaseStringUTFChars(s6, chars6);
}
+
+extern "C" JNIEXPORT jlong JNICALL Java_Main_testGetMethodID(JNIEnv* env, jclass, jclass c) {
+ return reinterpret_cast<jlong>(env->GetMethodID(c, "a", "()V"));
+}
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index ac20417..810dda0 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -14,7 +14,9 @@
* limitations under the License.
*/
+import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
public class Main {
public static void main(String[] args) {
@@ -35,6 +37,7 @@
testCallNonvirtual();
testNewStringObject();
testRemoveLocalObject();
+ testProxyGetMethodID();
}
private static native void testFindClassOnAttachedNativeThread();
@@ -194,6 +197,31 @@
private static native void testCallNonvirtual();
private static native void testNewStringObject();
+
+ private interface SimpleInterface {
+ void a();
+ }
+
+ private static class DummyInvocationHandler implements InvocationHandler {
+ public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+ return null;
+ }
+ }
+
+ private static void testProxyGetMethodID() {
+ InvocationHandler handler = new DummyInvocationHandler();
+ SimpleInterface proxy =
+ (SimpleInterface) Proxy.newProxyInstance(SimpleInterface.class.getClassLoader(),
+ new Class[] {SimpleInterface.class}, handler);
+ if (testGetMethodID(SimpleInterface.class) == 0) {
+ throw new AssertionError();
+ }
+ if (testGetMethodID(proxy.getClass()) == 0) {
+ throw new AssertionError();
+ }
+ }
+
+ private static native long testGetMethodID(Class<?> c);
}
class JniCallNonvirtualTest {
diff --git a/test/004-ReferenceMap/build b/test/004-ReferenceMap/build
new file mode 100644
index 0000000..08987b5
--- /dev/null
+++ b/test/004-ReferenceMap/build
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# The test relies on DEX file produced by javac+dx so keep building with them for now
+# (see b/19467889)
+mkdir classes
+${JAVAC} -d classes `find src -name '*.java'`
+${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
+ --dump-width=1000 ${DX_FLAGS} classes
+zip $TEST_NAME.jar classes.dex
diff --git a/test/004-StackWalk/build b/test/004-StackWalk/build
new file mode 100644
index 0000000..08987b5
--- /dev/null
+++ b/test/004-StackWalk/build
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# The test relies on DEX file produced by javac+dx so keep building with them for now
+# (see b/19467889)
+mkdir classes
+${JAVAC} -d classes `find src -name '*.java'`
+${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
+ --dump-width=1000 ${DX_FLAGS} classes
+zip $TEST_NAME.jar classes.dex
diff --git a/test/005-annotations/build b/test/005-annotations/build
index 2474055..3f00a1a 100644
--- a/test/005-annotations/build
+++ b/test/005-annotations/build
@@ -25,4 +25,12 @@
# ...but not at run time.
rm 'classes/android/test/anno/MissingAnnotation.class'
rm 'classes/android/test/anno/ClassWithInnerAnnotationClass$MissingInnerAnnotationClass.class'
-${DX} -JXmx256m --debug --dex --output=$TEST_NAME.jar classes
+
+if [ ${USE_JACK} = "true" ]; then
+ ${JILL} classes --output classes.jack
+ ${JACK} --import classes.jack --output-dex .
+else
+ ${DX} -JXmx256m --debug --dex --output=classes.dex classes
+fi
+
+zip $TEST_NAME.jar classes.dex
diff --git a/test/022-interface/build b/test/022-interface/build
index c86b1dc..3f8915c 100644
--- a/test/022-interface/build
+++ b/test/022-interface/build
@@ -19,5 +19,11 @@
# Use classes that are compiled with ecj that exposes an invokeinterface
# issue when interfaces override methods in Object
-${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes
+if [ ${USE_JACK} = "true" ]; then
+ ${JILL} classes --output classes.jack
+ ${JACK} --import classes.jack --output-dex .
+else
+ ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes
+fi
+
zip $TEST_NAME.jar classes.dex
diff --git a/test/023-many-interfaces/build b/test/023-many-interfaces/build
index ad42a2d..3bb6747 100644
--- a/test/023-many-interfaces/build
+++ b/test/023-many-interfaces/build
@@ -21,8 +21,14 @@
gcc -Wall -Werror -o iface-gen iface-gen.c
./iface-gen
-mkdir classes
-${JAVAC} -d classes src/*.java
+if [ ${USE_JACK} = "true" ]; then
+ # Use the default Jack commands
+ ./default-build
+else
+ mkdir classes
+ ${JAVAC} -d classes src/*.java
-${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes
-zip $TEST_NAME.jar classes.dex
+ # dx needs more memory for that test so do not pass Xmx option here.
+ ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes
+ zip $TEST_NAME.jar classes.dex
+fi
diff --git a/test/036-finalizer/src/Main.java b/test/036-finalizer/src/Main.java
index 8c7c27d..0de56f9 100644
--- a/test/036-finalizer/src/Main.java
+++ b/test/036-finalizer/src/Main.java
@@ -68,14 +68,17 @@
return s[0];
}
+ private static void printWeakReference(WeakReference<FinalizerTest> wimp) {
+ // Reference ft so we are sure the WeakReference cannot be cleared.
+ FinalizerTest keepLive = wimp.get();
+ System.out.println("wimp: " + wimpString(wimp));
+ }
+
public static void main(String[] args) {
WeakReference<FinalizerTest> wimp = makeRef();
- FinalizerTest keepLive = wimp.get();
-
- System.out.println("wimp: " + wimpString(wimp));
+ printWeakReference(wimp);
/* this will try to collect and finalize ft */
- keepLive = null;
System.out.println("gc");
Runtime.getRuntime().gc();
diff --git a/test/056-const-string-jumbo/build b/test/056-const-string-jumbo/build
index ef286d1..ae42519 100644
--- a/test/056-const-string-jumbo/build
+++ b/test/056-const-string-jumbo/build
@@ -39,8 +39,13 @@
printf("}\n") > fileName;
}'
-mkdir classes
-${JAVAC} -d classes src/*.java
+if [ ${USE_JACK} = "true" ]; then
+ ${JACK} --output-dex . src
+else
+ mkdir classes
+ ${JAVAC} -d classes src/*.java
-${DX} -JXmx500m --debug --dex --no-optimize --positions=none --no-locals --output=classes.dex classes
+ ${DX} -JXmx500m --debug --dex --no-optimize --positions=none --no-locals --output=classes.dex classes
+fi
+
zip $TEST_NAME.jar classes.dex
diff --git a/test/074-gc-thrash/src/Main.java b/test/074-gc-thrash/src/Main.java
index 238e73a..f947d0b 100644
--- a/test/074-gc-thrash/src/Main.java
+++ b/test/074-gc-thrash/src/Main.java
@@ -218,17 +218,7 @@
return;
}
- /*
- * Check the results of the last trip through. Everything in
- * "weak" should be matched in "strong", and the two should be
- * equivalent (object-wise, not just string-equality-wise).
- */
- for (int i = 0; i < MAX_DEPTH; i++) {
- if (strong[i] != weak[i].get()) {
- System.err.println("Deep: " + i + " strong=" + strong[i] +
- ", weak=" + weak[i].get());
- }
- }
+ checkStringReferences();
/*
* Wipe "strong", do a GC, see if "weak" got collected.
@@ -248,6 +238,26 @@
System.out.println("Deep: iters=" + iter / MAX_DEPTH);
}
+
+ /**
+ * Check the results of the last trip through. Everything in
+ * "weak" should be matched in "strong", and the two should be
+ * equivalent (object-wise, not just string-equality-wise).
+ *
+ * We do that check in a separate method to avoid retaining these
+ * String references in local DEX registers. In interpreter mode,
+ * they would retain these references until the end of the method
+ * or until they are updated to another value.
+ */
+ private static void checkStringReferences() {
+ for (int i = 0; i < MAX_DEPTH; i++) {
+ if (strong[i] != weak[i].get()) {
+ System.err.println("Deep: " + i + " strong=" + strong[i] +
+ ", weak=" + weak[i].get());
+ }
+ }
+ }
+
/**
* Recursively dive down, setting one or more local variables.
*
diff --git a/test/085-old-style-inner-class/build b/test/085-old-style-inner-class/build
index 963d6b3..6f50a76 100644
--- a/test/085-old-style-inner-class/build
+++ b/test/085-old-style-inner-class/build
@@ -22,7 +22,12 @@
mkdir classes
${JAVAC} -source 1.4 -target 1.4 -d classes `find src -name '*.java'`
-# Suppress stderr to keep the inner class warnings out of the expected output.
-${DX} --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes 2>/dev/null
+if [ ${USE_JACK} = "true" ]; then
+ ${JILL} classes --output classes.jack
+ ${JACK} --import classes.jack --output-dex .
+else
+ # Suppress stderr to keep the inner class warnings out of the expected output.
+ ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes 2>/dev/null
+fi
zip $TEST_NAME.jar classes.dex
diff --git a/test/089-many-methods/build b/test/089-many-methods/build
index 7ede759..ff77c60 100644
--- a/test/089-many-methods/build
+++ b/test/089-many-methods/build
@@ -43,7 +43,8 @@
printf("}\n") > fileName;
}'
+# The test relies on the error message produced by dx, not jack, so keep building with dx for now
+# (b/19467889).
mkdir classes
${JAVAC} -d classes `find src -name '*.java'`
${DX} -JXmx1024m --dex --no-optimize classes
-
diff --git a/test/097-duplicate-method/build b/test/097-duplicate-method/build
index 6576779..a855873 100644
--- a/test/097-duplicate-method/build
+++ b/test/097-duplicate-method/build
@@ -18,8 +18,19 @@
set -e
mkdir classes
-${JAVAC} -d classes src/*.java
-${JASMIN} -d classes src/*.j
-${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes
+if [ ${USE_JACK} = "true" ]; then
+ ${JACK} --output-jack src.jack src
+
+ ${JASMIN} -d classes src/*.j
+ ${JILL} classes --output jasmin.jack
+
+ # We set jack.import.type.policy=keep-first to consider class definitions from jasmin first.
+ ${JACK} --import jasmin.jack --import src.jack -D jack.import.type.policy=keep-first --output-dex .
+else
+ ${JAVAC} -d classes src/*.java
+ ${JASMIN} -d classes src/*.j
+
+ ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes
+fi
zip $TEST_NAME.jar classes.dex
diff --git a/test/099-vmdebug/expected.txt b/test/099-vmdebug/expected.txt
index 579f98f..b8d72f6 100644
--- a/test/099-vmdebug/expected.txt
+++ b/test/099-vmdebug/expected.txt
@@ -17,3 +17,9 @@
Got expected exception
Test sampling with bogus (<= 0) interval
Got expected exception
+Instances of ClassA 2
+Instances of ClassB 1
+Instances of null 0
+Instances of ClassA assignable 3
+Array counts [2, 1, 0]
+Array counts assignable [3, 1, 0]
diff --git a/test/099-vmdebug/src/Main.java b/test/099-vmdebug/src/Main.java
index add2ff6..1be5765 100644
--- a/test/099-vmdebug/src/Main.java
+++ b/test/099-vmdebug/src/Main.java
@@ -17,6 +17,8 @@
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.ArrayList;
import java.util.Map;
public class Main {
@@ -30,7 +32,9 @@
return;
}
testMethodTracing();
+ testCountInstances();
testRuntimeStat();
+ testRuntimeStats();
}
private static File createTempFile() throws Exception {
@@ -220,12 +224,39 @@
checkHistogram(blocking_gc_count_rate_histogram);
}
+ static class ClassA { }
+ static class ClassB { }
+ static class ClassC extends ClassA { }
+
+ private static void testCountInstances() throws Exception {
+ ArrayList<Object> l = new ArrayList<Object>();
+ l.add(new ClassA());
+ l.add(new ClassB());
+ l.add(new ClassA());
+ l.add(new ClassC());
+ Runtime.getRuntime().gc();
+ System.out.println("Instances of ClassA " +
+ VMDebug.countInstancesofClass(ClassA.class, false));
+ System.out.println("Instances of ClassB " +
+ VMDebug.countInstancesofClass(ClassB.class, false));
+ System.out.println("Instances of null " + VMDebug.countInstancesofClass(null, false));
+ System.out.println("Instances of ClassA assignable " +
+ VMDebug.countInstancesofClass(ClassA.class, true));
+ Class[] classes = new Class[]{ClassA.class, ClassB.class, null};
+ long[] counts = VMDebug.countInstancesofClasses(classes, false);
+ System.out.println("Array counts " + Arrays.toString(counts));
+ counts = VMDebug.countInstancesofClasses(classes, true);
+ System.out.println("Array counts assignable " + Arrays.toString(counts));
+ }
+
private static class VMDebug {
private static final Method startMethodTracingMethod;
private static final Method stopMethodTracingMethod;
private static final Method getMethodTracingModeMethod;
private static final Method getRuntimeStatMethod;
private static final Method getRuntimeStatsMethod;
+ private static final Method countInstancesOfClassMethod;
+ private static final Method countInstancesOfClassesMethod;
static {
try {
Class c = Class.forName("dalvik.system.VMDebug");
@@ -235,6 +266,10 @@
getMethodTracingModeMethod = c.getDeclaredMethod("getMethodTracingMode");
getRuntimeStatMethod = c.getDeclaredMethod("getRuntimeStat", String.class);
getRuntimeStatsMethod = c.getDeclaredMethod("getRuntimeStats");
+ countInstancesOfClassMethod = c.getDeclaredMethod("countInstancesOfClass",
+ Class.class, Boolean.TYPE);
+ countInstancesOfClassesMethod = c.getDeclaredMethod("countInstancesOfClasses",
+ Class[].class, Boolean.TYPE);
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -257,5 +292,13 @@
public static Map<String, String> getRuntimeStats() throws Exception {
return (Map<String, String>) getRuntimeStatsMethod.invoke(null);
}
+ public static long countInstancesofClass(Class c, boolean assignable) throws Exception {
+ return (long) countInstancesOfClassMethod.invoke(null, new Object[]{c, assignable});
+ }
+ public static long[] countInstancesofClasses(Class[] classes, boolean assignable)
+ throws Exception {
+ return (long[]) countInstancesOfClassesMethod.invoke(
+ null, new Object[]{classes, assignable});
+ }
}
}
diff --git a/test/111-unresolvable-exception/build b/test/111-unresolvable-exception/build
index c21a9ef..e772fb8 100644
--- a/test/111-unresolvable-exception/build
+++ b/test/111-unresolvable-exception/build
@@ -21,5 +21,10 @@
${JAVAC} -d classes `find src -name '*.java'`
rm classes/TestException.class
-${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
+if [ ${USE_JACK} = "true" ]; then
+ ${JILL} classes --output classes.jack
+ ${JACK} --import classes.jack --output-dex .
+else
+ ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
+fi
zip $TEST_NAME.jar classes.dex
diff --git a/test/113-multidex/build b/test/113-multidex/build
index ec8706e..8ef5c0e 100644
--- a/test/113-multidex/build
+++ b/test/113-multidex/build
@@ -17,16 +17,32 @@
# Stop if something fails.
set -e
-mkdir classes
-
# All except Main
+mkdir classes
${JAVAC} -d classes `find src -name '*.java'`
rm classes/Main.class
-${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
# Only Main
-${JAVAC} -d classes `find src -name '*.java'`
-rm classes/Second.class classes/FillerA.class classes/FillerB.class classes/Inf*.class
-${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes
+mkdir classes2
+${JAVAC} -d classes2 `find src -name '*.java'`
+rm classes2/Second.class classes2/FillerA.class classes2/FillerB.class classes2/Inf*.class
+if [ ${USE_JACK} = "true" ]; then
+ # Create .jack files from classes generated with javac.
+ ${JILL} classes --output classes.jack
+ ${JILL} classes2 --output classes2.jack
+
+ # Create DEX files from .jack files.
+ ${JACK} --import classes.jack --output-dex .
+ mv classes.dex classes-1.dex
+ ${JACK} --import classes2.jack --output-dex .
+ mv classes.dex classes2.dex
+ mv classes-1.dex classes.dex
+else
+ # All except Main
+ ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
+
+ # Only Main
+ ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes2
+fi
zip $TEST_NAME.jar classes.dex classes2.dex
diff --git a/test/114-ParallelGC/src/Main.java b/test/114-ParallelGC/src/Main.java
index 46029cf..159dd5c 100644
--- a/test/114-ParallelGC/src/Main.java
+++ b/test/114-ParallelGC/src/Main.java
@@ -53,20 +53,21 @@
}
// Allocate objects to definitely run GC before quitting.
- ArrayList<Object> l = new ArrayList<Object>();
- try {
- for (int i = 0; i < 100000; i++) {
- l.add(new ArrayList<Object>(i));
- }
- } catch (OutOfMemoryError oom) {
- }
- // Make the (outer) ArrayList unreachable. Note it may still
- // be reachable under an interpreter or a compiler without a
- // liveness analysis.
- l = null;
+ allocateObjectsToRunGc();
+
new ArrayList<Object>(50);
}
+ private static void allocateObjectsToRunGc() {
+ ArrayList<Object> l = new ArrayList<Object>();
+ try {
+ for (int i = 0; i < 100000; i++) {
+ l.add(new ArrayList<Object>(i));
+ }
+ } catch (OutOfMemoryError oom) {
+ }
+ }
+
private Main(CyclicBarrier startBarrier) {
this.startBarrier = startBarrier;
}
diff --git a/test/121-modifiers/build b/test/121-modifiers/build
index d73be86..85b69e9 100644
--- a/test/121-modifiers/build
+++ b/test/121-modifiers/build
@@ -30,5 +30,11 @@
# mv NonInf.out classes/NonInf.class
# mv Main.class A.class A\$B.class A\$C.class classes/
-${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes
+if [ ${USE_JACK} = "true" ]; then
+ ${JILL} classes --output classes.jack
+ # Workaround b/19561685: disable sanity checks to produce a DEX file with invalid modifiers.
+ ${JACK} --sanity-checks off --import classes.jack --output-dex .
+else
+ ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes
+fi
zip $TEST_NAME.jar classes.dex
diff --git a/test/124-missing-classes/build b/test/124-missing-classes/build
index 62e57c8..b92ecf9 100644
--- a/test/124-missing-classes/build
+++ b/test/124-missing-classes/build
@@ -25,4 +25,11 @@
# ...but not at run time.
rm 'classes/MissingClass.class'
rm 'classes/Main$MissingInnerClass.class'
-${DX} -JXmx256m --debug --dex --output=$TEST_NAME.jar classes
+
+if [ ${USE_JACK} = "true" ]; then
+ ${JILL} classes --output classes.jack
+ ${JACK} --import classes.jack --output-dex .
+else
+ ${DX} -JXmx256m --debug --dex --output=classes.dex classes
+fi
+zip $TEST_NAME.jar classes.dex
diff --git a/test/126-miranda-multidex/build b/test/126-miranda-multidex/build
index 4c30f3f..b7f2118 100644
--- a/test/126-miranda-multidex/build
+++ b/test/126-miranda-multidex/build
@@ -17,16 +17,32 @@
# Stop if something fails.
set -e
+# All except MirandaInterface
mkdir classes
-
-# All except Main
${JAVAC} -d classes `find src -name '*.java'`
rm classes/MirandaInterface.class
-${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
-# Only Main
-${JAVAC} -d classes `find src -name '*.java'`
-rm classes/Main.class classes/MirandaAbstract.class classes/MirandaClass*.class classes/MirandaInterface2*.class
-${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes
+# Only MirandaInterface
+mkdir classes2
+${JAVAC} -d classes2 `find src -name '*.java'`
+rm classes2/Main.class classes2/MirandaAbstract.class classes2/MirandaClass*.class classes2/MirandaInterface2*.class
+if [ ${USE_JACK} = "true" ]; then
+ # Create .jack files from classes generated with javac.
+ ${JILL} classes --output classes.jack
+ ${JILL} classes2 --output classes2.jack
+
+ # Create DEX files from .jack files.
+ ${JACK} --import classes.jack --output-dex .
+ mv classes.dex classes-1.dex
+ ${JACK} --import classes2.jack --output-dex .
+ mv classes.dex classes2.dex
+ mv classes-1.dex classes.dex
+else
+ # All except Main
+ ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
+
+ # Only Main
+ ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes2
+fi
zip $TEST_NAME.jar classes.dex classes2.dex
diff --git a/test/127-secondarydex/build b/test/127-secondarydex/build
index 712774f..0d9f4d6 100755
--- a/test/127-secondarydex/build
+++ b/test/127-secondarydex/build
@@ -23,9 +23,21 @@
mkdir classes-ex
mv classes/Super.class classes-ex
-if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
+if [ ${USE_JACK} = "true" ]; then
+ # Create .jack files from classes generated with javac.
+ ${JILL} classes --output classes.jack
+ ${JILL} classes-ex --output classes-ex.jack
+
+ # Create DEX files from .jack files.
+ ${JACK} --import classes.jack --output-dex .
zip $TEST_NAME.jar classes.dex
- ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
+ ${JACK} --import classes-ex.jack --output-dex .
zip ${TEST_NAME}-ex.jar classes.dex
+else
+ if [ ${NEED_DEX} = "true" ]; then
+ ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
+ zip $TEST_NAME.jar classes.dex
+ ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
+ zip ${TEST_NAME}-ex.jar classes.dex
+ fi
fi
diff --git a/test/131-structural-change/build b/test/131-structural-change/build
index 7ddc81d..ff0da20 100755
--- a/test/131-structural-change/build
+++ b/test/131-structural-change/build
@@ -17,15 +17,23 @@
# Stop if something fails.
set -e
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-
-mkdir classes-ex
-${JAVAC} -d classes-ex `find src-ex -name '*.java'`
-
-if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
+if [ ${USE_JACK} = "true" ]; then
+ ${JACK} --output-dex . src
zip $TEST_NAME.jar classes.dex
- ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
+
+ ${JACK} --output-dex . src-ex
zip ${TEST_NAME}-ex.jar classes.dex
+else
+ mkdir classes
+ ${JAVAC} -d classes `find src -name '*.java'`
+
+ mkdir classes-ex
+ ${JAVAC} -d classes-ex `find src-ex -name '*.java'`
+
+ if [ ${NEED_DEX} = "true" ]; then
+ ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
+ zip $TEST_NAME.jar classes.dex
+ ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
+ zip ${TEST_NAME}-ex.jar classes.dex
+ fi
fi
diff --git a/test/303-verification-stress/build b/test/303-verification-stress/build
index 789d38e..5ff73ec 100644
--- a/test/303-verification-stress/build
+++ b/test/303-verification-stress/build
@@ -21,8 +21,14 @@
gcc -Wall -Werror -o classes-gen classes-gen.c
./classes-gen
-mkdir classes
-${JAVAC} -d classes src/*.java
+if [ ${USE_JACK} = "true" ]; then
+ # Use the default Jack commands
+ ./default-build
+else
+ mkdir classes
+ ${JAVAC} -d classes src/*.java
-${DX} --debug --dex --output=classes.dex classes
-zip $TEST_NAME.jar classes.dex
+  # dx needs more memory for this test, so do not pass the Xmx option here.
+ ${DX} --debug --dex --output=classes.dex classes
+ zip $TEST_NAME.jar classes.dex
+fi
diff --git a/test/454-get-vreg/build b/test/454-get-vreg/build
new file mode 100644
index 0000000..08987b5
--- /dev/null
+++ b/test/454-get-vreg/build
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# The test relies on the DEX file produced by javac+dx, so keep building with them for now
+# (see b/19467889)
+mkdir classes
+${JAVAC} -d classes `find src -name '*.java'`
+${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
+ --dump-width=1000 ${DX_FLAGS} classes
+zip $TEST_NAME.jar classes.dex
diff --git a/test/510-checker-try-catch/smali/Builder.smali b/test/510-checker-try-catch/smali/Builder.smali
index 87e4064..2274ba4 100644
--- a/test/510-checker-try-catch/smali/Builder.smali
+++ b/test/510-checker-try-catch/smali/Builder.smali
@@ -914,6 +914,14 @@
## CHECK: name "<<BReturn:B\d+>>"
## CHECK: predecessors "<<BExitTry2>>"
+## CHECK: name "{{B\d+}}"
+## CHECK: Exit
+
+## CHECK: name "<<BTry2:B\d+>>"
+## CHECK: predecessors "<<BEnterTry2>>"
+## CHECK: successors "<<BExitTry2>>"
+## CHECK: Div
+
## CHECK: name "<<BEnterTry1>>"
## CHECK: predecessors "B0"
## CHECK: successors "<<BTry1>>"
@@ -926,11 +934,6 @@
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
-## CHECK: name "<<BTry2:B\d+>>"
-## CHECK: predecessors "<<BEnterTry2>>"
-## CHECK: successors "<<BExitTry2>>"
-## CHECK: Div
-
## CHECK: name "<<BEnterTry2>>"
## CHECK: predecessors "<<BCatch>>"
## CHECK: successors "<<BTry2>>"
@@ -987,6 +990,11 @@
## CHECK: successors "<<BExitTry1>>"
## CHECK: Div
+## CHECK: name "<<BTry2:B\d+>>"
+## CHECK: predecessors "<<BEnterTry2>>"
+## CHECK: successors "<<BExitTry2>>"
+## CHECK: Div
+
## CHECK: name "<<BEnterTry1>>"
## CHECK: predecessors "<<BCatch1>>"
## CHECK: successors "<<BTry1>>"
@@ -999,11 +1007,6 @@
## CHECK: xhandlers "<<BCatch2>>"
## CHECK: TryBoundary kind:exit
-## CHECK: name "<<BTry2:B\d+>>"
-## CHECK: predecessors "<<BEnterTry2>>"
-## CHECK: successors "<<BExitTry2>>"
-## CHECK: Div
-
## CHECK: name "<<BEnterTry2>>"
## CHECK: predecessors "<<BCatch2>>"
## CHECK: successors "<<BTry2>>"
diff --git a/test/522-checker-regression-monitor-exit/expected.txt b/test/522-checker-regression-monitor-exit/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/522-checker-regression-monitor-exit/expected.txt
diff --git a/test/522-checker-regression-monitor-exit/info.txt b/test/522-checker-regression-monitor-exit/info.txt
new file mode 100644
index 0000000..7cfc963
--- /dev/null
+++ b/test/522-checker-regression-monitor-exit/info.txt
@@ -0,0 +1,3 @@
+Regression test for removal of monitor-exit due to lack of specified side-effects.
+The test invokes a synchronized version of Object.hashCode in multiple threads.
+If monitor-exit is removed, the following threads will get stuck and time out.
\ No newline at end of file
diff --git a/test/522-checker-regression-monitor-exit/smali/Test.smali b/test/522-checker-regression-monitor-exit/smali/Test.smali
new file mode 100644
index 0000000..c8e9198
--- /dev/null
+++ b/test/522-checker-regression-monitor-exit/smali/Test.smali
@@ -0,0 +1,40 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+
+.super Ljava/lang/Object;
+
+## CHECK-START: int Test.synchronizedHashCode(java.lang.Object) dead_code_elimination (before)
+## CHECK: MonitorOperation [<<Param:l\d+>>] kind:enter
+## CHECK: MonitorOperation [<<Param>>] kind:exit
+
+## CHECK-START: int Test.synchronizedHashCode(java.lang.Object) dead_code_elimination (after)
+## CHECK: MonitorOperation [<<Param:l\d+>>] kind:enter
+## CHECK: MonitorOperation [<<Param>>] kind:exit
+
+.method public static synchronizedHashCode(Ljava/lang/Object;)I
+ .registers 2
+
+ monitor-enter p0
+ invoke-virtual {p0}, Ljava/lang/Object;->hashCode()I
+ move-result v0
+
+ # Must not get removed by DCE.
+ monitor-exit p0
+
+ return v0
+
+.end method
diff --git a/test/522-checker-regression-monitor-exit/src/Main.java b/test/522-checker-regression-monitor-exit/src/Main.java
new file mode 100644
index 0000000..c85ac96
--- /dev/null
+++ b/test/522-checker-regression-monitor-exit/src/Main.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.TimeoutException;
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ private static class HashCodeQuery implements Callable<Integer> {
+ public HashCodeQuery(Object obj) {
+ m_obj = obj;
+ }
+
+ public Integer call() {
+ Integer result;
+ try {
+ Class<?> c = Class.forName("Test");
+ Method m = c.getMethod("synchronizedHashCode", new Class[] { Object.class });
+ result = (Integer) m.invoke(null, m_obj);
+ } catch (Exception e) {
+ System.err.println("Hash code query exception");
+ e.printStackTrace();
+ result = -1;
+ }
+ return result;
+ }
+
+ private Object m_obj;
+ private int m_index;
+ }
+
+ public static void main(String args[]) throws Exception {
+ Object obj = new Object();
+ int numThreads = 10;
+
+ ExecutorService pool = Executors.newFixedThreadPool(numThreads);
+
+ List<HashCodeQuery> queries = new ArrayList<HashCodeQuery>(numThreads);
+ for (int i = 0; i < numThreads; ++i) {
+ queries.add(new HashCodeQuery(obj));
+ }
+
+ try {
+ List<Future<Integer>> results = pool.invokeAll(queries, 5, TimeUnit.SECONDS);
+
+ int hash = obj.hashCode();
+ for (int i = 0; i < numThreads; ++i) {
+ int result = results.get(i).get();
+ if (hash != result) {
+ throw new Error("Query #" + i + " wrong. Expected " + hash + ", got " + result);
+ }
+ }
+ pool.shutdown();
+ } catch (CancellationException ex) {
+ System.err.println("Job timeout");
+ System.exit(1);
+ }
+ }
+}
diff --git a/test/523-checker-can-throw-regression/expected.txt b/test/523-checker-can-throw-regression/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/523-checker-can-throw-regression/expected.txt
diff --git a/test/523-checker-can-throw-regression/info.txt b/test/523-checker-can-throw-regression/info.txt
new file mode 100644
index 0000000..720dc85
--- /dev/null
+++ b/test/523-checker-can-throw-regression/info.txt
@@ -0,0 +1,2 @@
+Regression test for the HGraphBuilder which would split a throwing catch block
+but would not update information about which blocks throw.
\ No newline at end of file
diff --git a/test/523-checker-can-throw-regression/smali/Test.smali b/test/523-checker-can-throw-regression/smali/Test.smali
new file mode 100644
index 0000000..87192ea
--- /dev/null
+++ b/test/523-checker-can-throw-regression/smali/Test.smali
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+
+.super Ljava/lang/Object;
+
+## CHECK-START: int Test.testCase(int, int, int) builder (after)
+## CHECK: TryBoundary kind:entry
+## CHECK: TryBoundary kind:entry
+## CHECK-NOT: TryBoundary kind:entry
+
+## CHECK-START: int Test.testCase(int, int, int) builder (after)
+## CHECK: TryBoundary kind:exit
+## CHECK: TryBoundary kind:exit
+## CHECK-NOT: TryBoundary kind:exit
+
+.method public static testCase(III)I
+ .registers 4
+
+ :try_start_1
+ div-int/2addr p0, p1
+ return p0
+ :try_end_1
+ .catchall {:try_start_1 .. :try_end_1} :catchall
+
+ :catchall
+ :try_start_2
+ move-exception v0
+ # Block would be split here but second part not marked as throwing.
+ div-int/2addr p0, p1
+ if-eqz p2, :else
+
+ div-int/2addr p0, p1
+ :else
+ div-int/2addr p0, p2
+ return p0
+ :try_end_2
+ .catchall {:try_start_2 .. :try_end_2} :catchall
+
+.end method
diff --git a/test/523-checker-can-throw-regression/src/Main.java b/test/523-checker-can-throw-regression/src/Main.java
new file mode 100644
index 0000000..3ff48f3
--- /dev/null
+++ b/test/523-checker-can-throw-regression/src/Main.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.TimeoutException;
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String args[]) {}
+}
diff --git a/test/701-easy-div-rem/build b/test/701-easy-div-rem/build
index 1dc8452..666fe89 100644
--- a/test/701-easy-div-rem/build
+++ b/test/701-easy-div-rem/build
@@ -23,6 +23,10 @@
# Increase the file size limitation for classes.lst as the machine generated
# source file contains a lot of methods and is quite large.
-ulimit -S 4096
+
+# Jack generates big temp files so only apply ulimit for dx.
+if [ ${USE_JACK} = "false" ]; then
+ ulimit -S 4096
+fi
./default-build
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index fe68c5b..4c17240 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -28,4 +28,7 @@
b/22080519
b/21645819
b/22244733
+b/22331663
+b/22331663 (pass)
+b/22331663 (fail)
Done!
diff --git a/test/800-smali/smali/b_22331663.smali b/test/800-smali/smali/b_22331663.smali
new file mode 100644
index 0000000..057fc7f
--- /dev/null
+++ b/test/800-smali/smali/b_22331663.smali
@@ -0,0 +1,38 @@
+.class public LB22331663;
+.super Ljava/lang/Object;
+
+
+.method public static run(Z)V
+.registers 6
+ # Make v4 defined, just use null.
+ const v4, 0
+
+ if-eqz v5, :Label2
+
+:Label1
+ # Construct a java.lang.Object completely, and throw a new exception.
+ new-instance v4, Ljava/lang/Object;
+ invoke-direct {v4}, Ljava/lang/Object;-><init>()V
+
+ new-instance v3, Ljava/lang/RuntimeException;
+ invoke-direct {v3}, Ljava/lang/RuntimeException;-><init>()V
+ throw v3
+
+:Label2
+ # Allocate a java.lang.Object (do not initialize), and throw a new exception.
+ new-instance v4, Ljava/lang/Object;
+
+ new-instance v3, Ljava/lang/RuntimeException;
+ invoke-direct {v3}, Ljava/lang/RuntimeException;-><init>()V
+ throw v3
+
+:Label3
+ # Catch handler. Here we had to merge the uninitialized with the initialized reference,
+ # which creates a conflict. Copy the conflict, and then return. This should not make the
+ # verifier fail the method.
+ move-object v0, v4
+
+ return-void
+
+.catchall {:Label1 .. :Label3} :Label3
+.end method
diff --git a/test/800-smali/smali/b_22331663_fail.smali b/test/800-smali/smali/b_22331663_fail.smali
new file mode 100644
index 0000000..0c25e30
--- /dev/null
+++ b/test/800-smali/smali/b_22331663_fail.smali
@@ -0,0 +1,20 @@
+.class public LB22331663Fail;
+.super Ljava/lang/Object;
+
+
+.method public static run(Z)V
+.registers 6
+ if-eqz v5, :Label1
+
+ # Construct a java.lang.Object completely. This makes v4 of reference type.
+ new-instance v4, Ljava/lang/Object;
+ invoke-direct {v4}, Ljava/lang/Object;-><init>()V
+
+:Label1
+ # At this point, v4 is the merge of Undefined and ReferenceType. The verifier should
+ # reject any use of this, even a copy. Previously this was a conflict. Conflicts must
+ # be movable now, so ensure that we do not get a conflict (and then allow the move).
+ move-object v0, v4
+
+ return-void
+.end method
diff --git a/test/800-smali/smali/b_22331663_pass.smali b/test/800-smali/smali/b_22331663_pass.smali
new file mode 100644
index 0000000..1b54180
--- /dev/null
+++ b/test/800-smali/smali/b_22331663_pass.smali
@@ -0,0 +1,22 @@
+.class public LB22331663Pass;
+.super Ljava/lang/Object;
+
+
+.method public static run(Z)V
+.registers 6
+ if-eqz v5, :Label1
+
+ # Construct a java.lang.Object completely. This makes v4 of reference type.
+ new-instance v4, Ljava/lang/Object;
+ invoke-direct {v4}, Ljava/lang/Object;-><init>()V
+
+:Label1
+ # At this point, v4 is the merge of Undefined and ReferenceType. The verifier should not
+ # reject this if it is unused.
+
+ # Do an allocation here. This will force heap checking in gcstress mode.
+ new-instance v0, Ljava/lang/Object;
+ invoke-direct {v0}, Ljava/lang/Object;-><init>()V
+
+ return-void
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 61f0d7b..8be6418 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -103,6 +103,12 @@
null, null));
testCases.add(new TestCase("b/22244733", "B22244733", "run", new Object[] { "abc" },
null, "abc"));
+ testCases.add(new TestCase("b/22331663", "B22331663", "run", new Object[] { false },
+ null, null));
+ testCases.add(new TestCase("b/22331663 (pass)", "B22331663Pass", "run",
+ new Object[] { false }, null, null));
+ testCases.add(new TestCase("b/22331663 (fail)", "B22331663Fail", "run",
+ new Object[] { false }, new VerifyError(), null));
}
public void runTests() {
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index c4111f6..38973f7 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -33,22 +33,46 @@
TEST_ART_RUN_TEST_BUILD_RULES :=
# Dependencies for actually running a run-test.
-TEST_ART_RUN_TEST_DEPENDENCIES := $(DX) $(HOST_OUT_EXECUTABLES)/jasmin $(HOST_OUT_EXECUTABLES)/smali $(HOST_OUT_EXECUTABLES)/dexmerger
+TEST_ART_RUN_TEST_DEPENDENCIES := \
+ $(DX) \
+ $(HOST_OUT_EXECUTABLES)/jasmin \
+ $(HOST_OUT_EXECUTABLES)/smali \
+ $(HOST_OUT_EXECUTABLES)/dexmerger
+
+ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
+ TEST_ART_RUN_TEST_DEPENDENCIES += \
+ $(JACK_JAR) \
+ $(JACK_LAUNCHER_JAR) \
+ $(JILL_JAR)
+endif
# Helper to create individual build targets for tests. Must be called with $(eval).
# $(1): the test number
define define-build-art-run-test
dmart_target := $(art_run_tests_dir)/art-run-tests/$(1)/touch
+ run_test_options = --build-only
+ ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
+ run_test_options += --build-with-jack
+ else
+ run_test_options += --build-with-javac-dx
+ endif
+$$(dmart_target): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES)
$(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@)
$(hide) DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
- $(LOCAL_PATH)/run-test --build-only --output-path $$(abspath $$(dir $$@)) $(1)
+ JACK=$(abspath $(JACK)) \
+ JACK_VM_COMMAND="$(JACK_VM) $(DEFAULT_JACK_VM_ARGS) $(JAVA_TMPDIR_ARG) -jar $(abspath $(JACK_LAUNCHER_JAR)) " \
+ JACK_CLASSPATH=$(TARGET_JACK_CLASSPATH) \
+ JACK_JAR=$(abspath $(JACK_JAR)) \
+ JILL_JAR=$(abspath $(JILL_JAR)) \
+ $(LOCAL_PATH)/run-test $$(PRIVATE_RUN_TEST_OPTIONS) --output-path $$(abspath $$(dir $$@)) $(1)
$(hide) touch $$@
TEST_ART_RUN_TEST_BUILD_RULES += $$(dmart_target)
dmart_target :=
+ run_test_options :=
endef
$(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call define-build-art-run-test,$(test))))
@@ -410,20 +434,6 @@
TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS :=
-# Known broken tests for the MIPS64 optimizing compiler backend in 64-bit mode. b/21555893
-TEST_ART_BROKEN_OPTIMIZING_MIPS64_64BIT_RUN_TESTS := \
- 449-checker-bce
-
-ifeq ($(TARGET_ARCH),mips64)
- ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
- optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_MIPS64_64BIT_RUN_TESTS),64)
- endif
-endif
-
-TEST_ART_BROKEN_OPTIMIZING_MIPS64_64BIT_RUN_TESTS :=
-
# Known broken tests for the optimizing compiler.
TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS :=
@@ -599,6 +609,12 @@
prereq_rule :=
test_groups :=
uc_host_or_target :=
+ jack_classpath :=
+ ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
+ run_test_options += --build-with-jack
+ else
+ run_test_options += --build-with-javac-dx
+ endif
ifeq ($(ART_TEST_RUN_TEST_ALWAYS_CLEAN),true)
run_test_options += --always-clean
endif
@@ -607,11 +623,13 @@
test_groups := ART_RUN_TEST_HOST_RULES
run_test_options += --host
prereq_rule := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
+ jack_classpath := $(HOST_JACK_CLASSPATH)
else
ifeq ($(1),target)
uc_host_or_target := TARGET
test_groups := ART_RUN_TEST_TARGET_RULES
prereq_rule := test-art-target-sync
+ jack_classpath := $(TARGET_JACK_CLASSPATH)
else
$$(error found $(1) expected $(TARGET_TYPES))
endif
@@ -806,12 +824,19 @@
run_test_options := --android-root $(ART_TEST_ANDROID_ROOT) $$(run_test_options)
endif
$$(run_test_rule_name): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
+$$(run_test_rule_name): PRIVATE_JACK_CLASSPATH := $$(jack_classpath)
.PHONY: $$(run_test_rule_name)
-$$(run_test_rule_name): $(DX) $(HOST_OUT_EXECUTABLES)/jasmin $(HOST_OUT_EXECUTABLES)/smali $(HOST_OUT_EXECUTABLES)/dexmerger $(HOST_OUT_EXECUTABLES)/hprof-conv $$(prereq_rule)
+$$(run_test_rule_name): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(HOST_OUT_EXECUTABLES)/hprof-conv $$(prereq_rule)
$(hide) $$(call ART_TEST_SKIP,$$@) && \
- DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
+ DX=$(abspath $(DX)) \
+ JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
+ JACK=$(abspath $(JACK)) \
+ JACK_VM_COMMAND="$(JACK_VM) $(DEFAULT_JACK_VM_ARGS) $(JAVA_TMPDIR_ARG) -jar $(abspath $(JACK_LAUNCHER_JAR)) " \
+ JACK_CLASSPATH=$$(PRIVATE_JACK_CLASSPATH) \
+ JACK_JAR=$(abspath $(JACK_JAR)) \
+ JILL_JAR=$(abspath $(JILL_JAR)) \
art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(12) \
&& $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
$$(hide) (echo $(MAKECMDGOALS) | grep -q $$@ && \
@@ -826,6 +851,7 @@
run_test_options :=
run_test_rule_name :=
prereq_rule :=
+ jack_classpath :=
endef # define-test-art-run-test
$(foreach target, $(TARGET_TYPES), \
diff --git a/test/etc/default-build b/test/etc/default-build
index 92954a9..c281bca 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -39,24 +39,57 @@
exit 0
fi
-if [ -d src ]; then
- mkdir classes
- ${JAVAC} -implicit:none -classpath src-multidex -d classes `find src -name '*.java'`
-fi
-
-if [ -d src2 ]; then
- mkdir -p classes
- ${JAVAC} -d classes `find src2 -name '*.java'`
-fi
-
if ! [ -d src ] && ! [ -d src2 ]; then
# No src directory? Then forget about trying to run dx.
SKIP_DX_MERGER="true"
fi
-if [ ${NEED_DEX} = "true" -a ${SKIP_DX_MERGER} = "false" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
- --dump-width=1000 ${DX_FLAGS} classes
+if [ -d src-multidex ]; then
+ # Jack does not support this configuration unless we specify how to partition the DEX file
+ # with a .jpp file.
+ USE_JACK="false"
+fi
+
+if [ ${USE_JACK} = "true" ]; then
+ # Jack toolchain
+ if [ -d src ]; then
+ ${JACK} --output-jack src.jack src
+ imported_jack_files="--import src.jack"
+ fi
+
+ if [ -d src2 ]; then
+ ${JACK} --output-jack src2.jack src2
+ imported_jack_files="--import src2.jack ${imported_jack_files}"
+ fi
+
+ # Compile jack files into a DEX file. We set jack.import.type.policy=keep-first to consider
+ # class definitions from src2 first.
+ ${JACK} ${imported_jack_files} -D jack.import.type.policy=keep-first --output-dex .
+else
+ # Legacy toolchain with javac+dx
+ if [ -d src ]; then
+ mkdir classes
+ ${JAVAC} -implicit:none -classpath src-multidex -d classes `find src -name '*.java'`
+ fi
+
+ if [ -d src-multidex ]; then
+ mkdir classes2
+ ${JAVAC} -implicit:none -classpath src -d classes2 `find src-multidex -name '*.java'`
+ if [ ${NEED_DEX} = "true" ]; then
+ ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex \
+ --dump-width=1000 ${DX_FLAGS} classes2
+ fi
+ fi
+
+ if [ -d src2 ]; then
+ mkdir -p classes
+ ${JAVAC} -d classes `find src2 -name '*.java'`
+ fi
+
+ if [ ${NEED_DEX} = "true" -a ${SKIP_DX_MERGER} = "false" ]; then
+ ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
+ --dump-width=1000 ${DX_FLAGS} classes
+ fi
fi
if [ -d smali ]; then
@@ -72,30 +105,34 @@
fi
if [ -d src-ex ]; then
- mkdir classes-ex
- ${JAVAC} -d classes-ex -cp classes `find src-ex -name '*.java'`
- if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes-ex.dex \
- --dump-width=1000 ${DX_FLAGS} classes-ex
+ if [ ${USE_JACK} = "true" ]; then
+ # Rename previous "classes.dex" so it is not overwritten.
+ mv classes.dex classes-1.dex
+    # TODO: find another way to append src.jack to the jack classpath
+ ${JACK}:src.jack --output-dex . src-ex
+ zip $TEST_NAME-ex.jar classes.dex
+ # Restore previous "classes.dex" so it can be zipped.
+ mv classes-1.dex classes.dex
+ else
+ mkdir classes-ex
+ ${JAVAC} -d classes-ex -cp classes `find src-ex -name '*.java'`
+ if [ ${NEED_DEX} = "true" ]; then
+ ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes-ex.dex \
+ --dump-width=1000 ${DX_FLAGS} classes-ex
- # quick shuffle so that the stored name is "classes.dex"
- mv classes.dex classes-1.dex
- mv classes-ex.dex classes.dex
- zip $TEST_NAME-ex.jar classes.dex
- mv classes.dex classes-ex.dex
- mv classes-1.dex classes.dex
+ # quick shuffle so that the stored name is "classes.dex"
+ mv classes.dex classes-1.dex
+ mv classes-ex.dex classes.dex
+ zip $TEST_NAME-ex.jar classes.dex
+ mv classes.dex classes-ex.dex
+ mv classes-1.dex classes.dex
+ fi
fi
fi
# Create a single jar with two dex files for multidex.
if [ -d src-multidex ]; then
- mkdir classes2
- ${JAVAC} -implicit:none -classpath src -d classes2 `find src-multidex -name '*.java'`
- if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex \
- --dump-width=1000 ${DX_FLAGS} classes2
- zip $TEST_NAME.jar classes.dex classes2.dex
- fi
+ zip $TEST_NAME.jar classes.dex classes2.dex
elif [ ${NEED_DEX} = "true" ]; then
zip $TEST_NAME.jar classes.dex
fi
diff --git a/test/run-test b/test/run-test
index ffa25eb..f5fff09a 100755
--- a/test/run-test
+++ b/test/run-test
@@ -46,6 +46,7 @@
export RUN="${progdir}/etc/run-test-jar"
export DEX_LOCATION=/data/run-test/${test_dir}
export NEED_DEX="true"
+export USE_JACK="false"
# If dx was not set by the environment variable, assume it is in the path.
if [ -z "$DX" ]; then
@@ -67,6 +68,46 @@
export DXMERGER="dexmerger"
fi
+# If jack was not set by the environment variable, assume it is in the path.
+if [ -z "$JACK" ]; then
+ export JACK="jack"
+fi
+
+# If the tree is compiled with Jack, build test with Jack by default.
+if [ "$ANDROID_COMPILE_WITH_JACK" = "true" ]; then
+ USE_JACK="true"
+fi
+
+# ANDROID_BUILD_TOP is not set in a build environment.
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+ export ANDROID_BUILD_TOP=$oldwd
+fi
+
+# If JACK_VM_COMMAND is not set, assume it launches the prebuilt jack-launcher.
+if [ -z "$JACK_VM_COMMAND" ]; then
+ if [ ! -z "$TMPDIR" ]; then
+ jack_temp_dir="-Djava.io.tmpdir=$TMPDIR"
+ fi
+ export JACK_VM_COMMAND="java -Dfile.encoding=UTF-8 -Xms2560m -XX:+TieredCompilation $jack_temp_dir -jar $ANDROID_BUILD_TOP/prebuilts/sdk/tools/jack-launcher.jar"
+fi
+
+# If JACK_CLASSPATH is not set, assume it only contains core-libart.
+if [ -z "$JACK_CLASSPATH" ]; then
+ export JACK_CLASSPATH="$ANDROID_BUILD_TOP/out/host/common/obj/JAVA_LIBRARIES/core-libart-hostdex_intermediates/classes.jack"
+fi
+
+# If JACK_JAR is not set, assume it is located in the prebuilts directory.
+if [ -z "$JACK_JAR" ]; then
+ export JACK_JAR="$ANDROID_BUILD_TOP/prebuilts/sdk/tools/jack.jar"
+fi
+
+# If JILL_JAR is not set, assume it is located in the prebuilts directory.
+if [ -z "$JILL_JAR" ]; then
+ export JILL_JAR="$ANDROID_BUILD_TOP/prebuilts/sdk/tools/jill.jar"
+fi
+
+export JACK="$JACK -g -cp $JACK_CLASSPATH"
+export JILL="java -jar $JILL_JAR"
info="info.txt"
build="build"
@@ -116,6 +157,7 @@
runtime="jvm"
prebuild_mode="no"
NEED_DEX="false"
+ USE_JACK="false"
run_args="${run_args} --jvm"
shift
elif [ "x$1" = "x-O" ]; then
@@ -237,6 +279,12 @@
elif [ "x$1" = "x--build-only" ]; then
build_only="yes"
shift
+ elif [ "x$1" = "x--build-with-javac-dx" ]; then
+ USE_JACK="false"
+ shift
+ elif [ "x$1" = "x--build-with-jack" ]; then
+ USE_JACK="true"
+ shift
elif [ "x$1" = "x--output-path" ]; then
shift
tmp_dir=$1
@@ -369,10 +417,7 @@
fi
elif [ "$runtime" = "art" ]; then
if [ "$target_mode" = "no" ]; then
- # ANDROID_BUILD_TOP and ANDROID_HOST_OUT are not set in a build environment.
- if [ -z "$ANDROID_BUILD_TOP" ]; then
- export ANDROID_BUILD_TOP=$oldwd
- fi
+ # ANDROID_HOST_OUT is not set in a build environment.
if [ -z "$ANDROID_HOST_OUT" ]; then
export ANDROID_HOST_OUT=$ANDROID_BUILD_TOP/out/host/linux-x86
fi
@@ -462,6 +507,8 @@
echo " --debuggable Whether to compile Java code for a debugger."
echo " --gdb Run under gdb; incompatible with some tests."
echo " --build-only Build test files only (off by default)."
+ echo " --build-with-javac-dx Build test files with javac and dx (on by default)."
+ echo " --build-with-jack Build test files with jack and jill (off by default)."
echo " --interpreter Enable interpreter only mode (off by default)."
echo " --jit Enable jit (off by default)."
echo " --optimizing Enable optimizing compiler (default)."
@@ -556,6 +603,10 @@
# if Checker is not invoked and the test only runs the program.
build_args="${build_args} --dx-option --no-optimize"
+  # Jack does not necessarily generate the same DEX output as dx. Because these tests depend
+ # on a particular DEX output, keep building them with dx for now (b/19467889).
+ USE_JACK="false"
+
if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$target_mode" = "no" -a "$debuggable" = "no" ]; then
run_checker="yes"
run_args="${run_args} -Xcompiler-option --dump-cfg=$tmp_dir/$cfg_output \
@@ -564,14 +615,20 @@
fi
# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and ART output to 2MB.
-file_size_limit=2048
+build_file_size_limit=2048
+run_file_size_limit=2048
if echo "$test_dir" | grep 089; then
- file_size_limit=5120
+ build_file_size_limit=5120
+ run_file_size_limit=5120
elif echo "$test_dir" | grep 083; then
- file_size_limit=5120
+ build_file_size_limit=5120
+ run_file_size_limit=5120
fi
-if ! ulimit -S "$file_size_limit"; then
- echo "ulimit file size setting failed"
+if [ ${USE_JACK} = "false" ]; then
+  # Set ulimit if we build with dx only; Jack can generate big temp files.
+ if ! ulimit -S "$build_file_size_limit"; then
+ echo "ulimit file size setting failed"
+ fi
fi
good="no"
@@ -582,6 +639,9 @@
build_exit="$?"
echo "build exit status: $build_exit" 1>&2
if [ "$build_exit" = '0' ]; then
+ if ! ulimit -S "$run_file_size_limit"; then
+ echo "ulimit file size setting failed"
+ fi
echo "${test_dir}: running..." 1>&2
"./${run}" $run_args "$@" 2>&1
run_exit="$?"
@@ -604,6 +664,9 @@
"./${build}" $build_args >"$build_output" 2>&1
build_exit="$?"
if [ "$build_exit" = '0' ]; then
+ if ! ulimit -S "$run_file_size_limit"; then
+ echo "ulimit file size setting failed"
+ fi
echo "${test_dir}: running..." 1>&2
"./${run}" $run_args "$@" >"$output" 2>&1
if [ "$run_checker" = "yes" ]; then
@@ -635,6 +698,9 @@
"./${build}" $build_args >"$build_output" 2>&1
build_exit="$?"
if [ "$build_exit" = '0' ]; then
+ if ! ulimit -S "$run_file_size_limit"; then
+ echo "ulimit file size setting failed"
+ fi
echo "${test_dir}: running..." 1>&2
"./${run}" $run_args "$@" >"$output" 2>&1
run_exit="$?"
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 7135dba..116a611 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -33,7 +33,7 @@
# We use Quick's image on target because optimizing's image is not compiled debuggable.
image="-Ximage:/data/art-test/core.art"
args=$@
-debuggee_args="-Xcompiler-option --compiler-backend=Optimizing -Xcompiler-option --debuggable"
+debuggee_args="-Xcompiler-option --debuggable"
device_dir="--device-dir=/data/local/tmp"
# We use the art script on target to ensure the runner and the debuggee share the same
# image.
@@ -92,6 +92,5 @@
--vm-arg -Djpda.settings.transportAddress=127.0.0.1:55107 \
--vm-arg -Djpda.settings.debuggeeJavaPath="\"$art_debugee $image $debuggee_args\"" \
--classpath $test_jar \
- --vm-arg -Xcompiler-option --vm-arg --compiler-backend=Optimizing \
--vm-arg -Xcompiler-option --vm-arg --debuggable \
org.apache.harmony.jpda.tests.share.AllTests
diff --git a/tools/symbolize.sh b/tools/symbolize.sh
index 7365a9b..f5686e6 100755
--- a/tools/symbolize.sh
+++ b/tools/symbolize.sh
@@ -22,6 +22,7 @@
INTERACTIVE="no"
if [ "x$1" = "x--interactive" ] ; then
INTERACTIVE="yes"
+ shift
fi
# Pull the file from the device and symbolize it.
@@ -57,4 +58,15 @@
done
}
-all
+if [ "x$1" = "x" ] ; then
+ # No further arguments, iterate over all oat files on device.
+ all
+else
+ # Take the parameters as a list of paths on device.
+ while (($#)); do
+ DIR=$(dirname $1)
+ NAME=$(basename $1)
+ one $DIR $NAME
+ shift
+ done
+fi