Merge "x86_64: Fix neg_double"
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index b8d190a..c02d089 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -897,8 +897,8 @@
// Check early if we should skip this compilation if the profiler is enabled.
if (cu.compiler_driver->ProfilePresent()) {
std::string methodname = PrettyMethod(method_idx, dex_file);
- if (cu.mir_graph->SkipCompilation(methodname)) {
- return NULL;
+ if (cu.mir_graph->SkipCompilationByName(methodname)) {
+ return nullptr;
}
}
@@ -908,13 +908,16 @@
// TODO(Arm64): Remove this when we are able to compile everything.
if (!CanCompileMethod(method_idx, dex_file, cu)) {
- VLOG(compiler) << "Cannot compile method : " << PrettyMethod(method_idx, dex_file);
+ VLOG(compiler) << cu.instruction_set << ": Cannot compile method : "
+ << PrettyMethod(method_idx, dex_file);
return nullptr;
}
cu.NewTimingSplit("MIROpt:CheckFilters");
- if (cu.mir_graph->SkipCompilation()) {
- VLOG(compiler) << "Skipping method : " << PrettyMethod(method_idx, dex_file);
+ std::string skip_message;
+ if (cu.mir_graph->SkipCompilation(&skip_message)) {
+ VLOG(compiler) << cu.instruction_set << ": Skipping method : "
+ << PrettyMethod(method_idx, dex_file) << " Reason = " << skip_message;
return nullptr;
}
@@ -945,7 +948,9 @@
CompiledMethod* result = NULL;
if (cu.mir_graph->PuntToInterpreter()) {
- return NULL;
+ VLOG(compiler) << cu.instruction_set << ": Punted method to interpreter: "
+ << PrettyMethod(method_idx, dex_file);
+ return nullptr;
}
cu.cg->Materialize();
@@ -955,9 +960,9 @@
cu.NewTimingSplit("Cleanup");
if (result) {
- VLOG(compiler) << "Compiled " << PrettyMethod(method_idx, dex_file);
+ VLOG(compiler) << cu.instruction_set << ": Compiled " << PrettyMethod(method_idx, dex_file);
} else {
- VLOG(compiler) << "Deferred " << PrettyMethod(method_idx, dex_file);
+ VLOG(compiler) << cu.instruction_set << ": Deferred " << PrettyMethod(method_idx, dex_file);
}
if (cu.enable_debug & (1 << kDebugShowMemoryUsage)) {
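For reference, the skip_message out-parameter pattern introduced above, in isolation. A minimal standalone sketch; the cutoff value and the caller are hypothetical, only the shape of SkipCompilation(std::string*) is taken from the patch:

// Minimal sketch of the skip-reason out-parameter pattern (hypothetical).
#include <iostream>
#include <string>

static bool SkipCompilation(std::string* skip_message, size_t num_insns) {
  constexpr size_t kDefaultCutoff = 1000;  // assumed value for illustration
  if (num_insns >= kDefaultCutoff) {
    *skip_message = "#Insns >= default_cutoff: " + std::to_string(num_insns);
    return true;
  }
  return false;
}

int main() {
  std::string skip_message;
  if (SkipCompilation(&skip_message, 4096)) {
    std::cout << "Skipping method. Reason = " << skip_message << std::endl;
  }
  return 0;
}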
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 1350665..e372206 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -941,7 +941,8 @@
}
}
-bool MIRGraph::ComputeSkipCompilation(MethodStats* stats, bool skip_default) {
+bool MIRGraph::ComputeSkipCompilation(MethodStats* stats, bool skip_default,
+ std::string* skip_message) {
float count = stats->dex_instructions;
stats->math_ratio = stats->math_ops / count;
stats->fp_ratio = stats->fp_ops / count;
@@ -994,6 +995,8 @@
// If significant in size and high proportion of expensive operations, skip.
if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
(stats->heavyweight_ratio > 0.3)) {
+ *skip_message = "Is a small method with heavyweight ratio " +
+ std::to_string(stats->heavyweight_ratio);
return true;
}
@@ -1003,7 +1006,7 @@
/*
* Will eventually want this to be a bit more sophisticated and happen at verification time.
*/
-bool MIRGraph::SkipCompilation() {
+bool MIRGraph::SkipCompilation(std::string* skip_message) {
const CompilerOptions& compiler_options = cu_->compiler_driver->GetCompilerOptions();
CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
if (compiler_filter == CompilerOptions::kEverything) {
@@ -1012,10 +1015,12 @@
// Contains a pattern we don't want to compile?
if (PuntToInterpreter()) {
+ *skip_message = "Punt to interpreter set";
return true;
}
if (!compiler_options.IsCompilationEnabled()) {
+ *skip_message = "Compilation disabled";
return true;
}
@@ -1041,6 +1046,9 @@
// If size < cutoff, assume we'll compile - but allow removal.
bool skip_compilation = (GetNumDalvikInsns() >= default_cutoff);
+ if (skip_compilation) {
+ *skip_message = "#Insns >= default_cutoff: " + std::to_string(GetNumDalvikInsns());
+ }
/*
* Filter 1: Huge methods are likely to be machine generated, but some aren't.
@@ -1048,6 +1056,7 @@
*/
if (compiler_options.IsHugeMethod(GetNumDalvikInsns())) {
skip_compilation = true;
+ *skip_message = "Huge method: " + std::to_string(GetNumDalvikInsns());
// If we've got a huge number of basic blocks, don't bother with further analysis.
if (static_cast<size_t>(num_blocks_) > (compiler_options.GetHugeMethodThreshold() / 2)) {
return true;
@@ -1055,6 +1064,7 @@
} else if (compiler_options.IsLargeMethod(GetNumDalvikInsns()) &&
/* If it's large and contains no branches, it's likely to be machine generated initialization */
(GetBranchCount() == 0)) {
+ *skip_message = "Large method with no branches";
return true;
} else if (compiler_filter == CompilerOptions::kSpeed) {
// If not huge, compile.
@@ -1063,6 +1073,7 @@
// Filter 2: Skip class initializers.
if (((cu_->access_flags & kAccConstructor) != 0) && ((cu_->access_flags & kAccStatic) != 0)) {
+ *skip_message = "Class initializer";
return true;
}
@@ -1092,7 +1103,7 @@
AnalyzeBlock(bb, &stats);
}
- return ComputeSkipCompilation(&stats, skip_compilation);
+ return ComputeSkipCompilation(&stats, skip_compilation, skip_message);
}
void MIRGraph::DoCacheFieldLoweringInfo() {
@@ -1285,7 +1296,7 @@
method_lowering_infos_.GetRawStorage(), count);
}
-bool MIRGraph::SkipCompilation(const std::string& methodname) {
+bool MIRGraph::SkipCompilationByName(const std::string& methodname) {
return cu_->compiler_driver->SkipCompilation(methodname);
}
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 15c0aa4..0ff340e 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -559,12 +559,12 @@
* Examine the graph to determine whether it's worthwhile to spend the time compiling
* this method.
*/
- bool SkipCompilation();
+ bool SkipCompilation(std::string* skip_message);
/*
* Should we skip the compilation of this method based on its name?
*/
- bool SkipCompilation(const std::string& methodname);
+ bool SkipCompilationByName(const std::string& methodname);
/*
* Parse dex method and add MIR at current insert point. Returns id (which is
@@ -1127,7 +1127,8 @@
void CountChecks(BasicBlock* bb);
void AnalyzeBlock(BasicBlock* bb, struct MethodStats* stats);
- bool ComputeSkipCompilation(struct MethodStats* stats, bool skip_default);
+ bool ComputeSkipCompilation(struct MethodStats* stats, bool skip_default,
+ std::string* skip_message);
CompilationUnit* const cu_;
GrowableArray<int>* ssa_base_vregs_;
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 2bdf3e4..590c767 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -190,7 +190,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
- if (Runtime::Current()->ExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
}
}
@@ -261,7 +261,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
- if (Runtime::Current()->ExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
}
}
@@ -362,7 +362,7 @@
Thread::kStackOverflowSignalReservedBytes;
bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes);
if (!skip_overflow_check) {
- if (Runtime::Current()->ExplicitStackOverflowChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
if (!large_frame) {
/* Load stack limit */
LockTemp(rs_r12);
@@ -401,7 +401,7 @@
const int spill_size = spill_count * 4;
const int frame_size_without_spills = frame_size_ - spill_size;
if (!skip_overflow_check) {
- if (Runtime::Current()->ExplicitStackOverflowChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
class StackOverflowSlowPath : public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace)
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 3a6012e..3f32c51 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -220,7 +220,7 @@
kA64Adc3rrr = kA64First, // adc [00011010000] rm[20-16] [000000] rn[9-5] rd[4-0].
kA64Add4RRdT, // add [s001000100] imm_12[21-10] rn[9-5] rd[4-0].
kA64Add4rrro, // add [00001011000] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
- kA64Add4rrre, // add [00001011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0].
+ kA64Add4RRre, // add [00001011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0].
kA64Adr2xd, // adr [0] immlo[30-29] [10000] immhi[23-5] rd[4-0].
kA64And3Rrl, // and [00010010] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
kA64And4rrro, // and [00001010] shift[23-22] [N=0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
@@ -330,7 +330,7 @@
kA64Stlxr3wrX, // stlxr[11001000000] rs[20-16] [111111] rn[9-5] rt[4-0].
kA64Sub4RRdT, // sub [s101000100] imm_12[21-10] rn[9-5] rd[4-0].
kA64Sub4rrro, // sub [s1001011000] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
- kA64Sub4rrre, // sub [s1001011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0].
+ kA64Sub4RRre, // sub [s1001011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0].
kA64Subs3rRd, // subs[s111000100] imm_12[21-10] rn[9-5] rd[4-0].
kA64Tst3rro, // tst alias of "ands rzr, arg1, arg2, arg3".
kA64Ubfm4rrdd, // ubfm[s10100110] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index fe3bd6a..2a8da24 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -115,10 +115,10 @@
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE1,
"add", "!0r, !1r, !2r!3o", kFixupNone),
- ENCODING_MAP(WIDE(kA64Add4rrre), SF_VARIANTS(0x0b200000),
+ ENCODING_MAP(WIDE(kA64Add4RRre), SF_VARIANTS(0x0b200000),
kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtRegR, 20, 16,
kFmtExtend, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
- "add", "!0r, !1r, !2r!3o", kFixupNone),
+ "add", "!0r, !1r, !2r!3e", kFixupNone),
// Note: adr is binary, but declared as tertiary. The third argument is used while doing the
// fixups and contains information to identify the adr label.
ENCODING_MAP(kA64Adr2xd, NO_VARIANTS(0x10000000),
@@ -562,10 +562,10 @@
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
"sub", "!0r, !1r, !2r!3o", kFixupNone),
- ENCODING_MAP(WIDE(kA64Sub4rrre), SF_VARIANTS(0x4b200000),
+ ENCODING_MAP(WIDE(kA64Sub4RRre), SF_VARIANTS(0x4b200000),
kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtRegR, 20, 16,
kFmtExtend, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
- "sub", "!0r, !1r, !2r!3o", kFixupNone),
+ "sub", "!0r, !1r, !2r!3e", kFixupNone),
ENCODING_MAP(WIDE(kA64Subs3rRd), SF_VARIANTS(0x71000000),
kFmtRegR, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 35263ea..1df576b 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -95,8 +95,7 @@
tab_rec->anchor = switch_label;
// Add displacement to base branch address and go!
- // TODO(Arm64): generate "add x1, x1, w3, sxtw" rather than "add x1, x1, x3"?
- OpRegRegRegShift(kOpAdd, r_base, r_base, As64BitReg(r_disp), ENCODE_NO_SHIFT);
+ OpRegRegRegExtend(kOpAdd, r_base, r_base, As64BitReg(r_disp), kA64Sxtw, 0U);
NewLIR1(kA64Br1x, r_base.GetReg());
// Loop exit label.
@@ -141,7 +140,6 @@
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
- // TODO(Arm64): generate "ldr w3, [x1,w2,sxtw #2]" rather than "ldr w3, [x1,x2,lsl #2]"?
LoadBaseIndexed(table_base, As64BitReg(key_reg), As64BitReg(disp_reg), 2, k32);
// Get base branch address.
@@ -150,8 +148,7 @@
tab_rec->anchor = switch_label;
// Add displacement to base branch address and go!
- // TODO(Arm64): generate "add x4, x4, w3, sxtw" rather than "add x4, x4, x3"?
- OpRegRegRegShift(kOpAdd, branch_reg, branch_reg, As64BitReg(disp_reg), ENCODE_NO_SHIFT);
+ OpRegRegRegExtend(kOpAdd, branch_reg, branch_reg, As64BitReg(disp_reg), kA64Sxtw, 0U);
NewLIR1(kA64Br1x, branch_reg.GetReg());
// branch_over target here
@@ -213,7 +210,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
- if (Runtime::Current()->ExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
}
}
@@ -261,7 +258,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
- if (Runtime::Current()->ExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
}
}
@@ -349,7 +346,7 @@
const int frame_size_without_spills = frame_size_ - spill_size;
if (!skip_overflow_check) {
- if (Runtime::Current()->ExplicitStackOverflowChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
if (!large_frame) {
// Load stack limit
LoadWordDisp(rs_rA64_SELF, Thread::StackEndOffset<8>().Int32Value(), rs_x9);
@@ -382,7 +379,7 @@
}
if (!skip_overflow_check) {
- if (Runtime::Current()->ExplicitStackOverflowChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
class StackOverflowSlowPath: public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) :
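The two switch-table hunks above replace a plain 64-bit add with an extending add ("add xN, xN, wM, sxtw"), so a negative 32-bit displacement loaded from the switch table is sign-extended before being added to the 64-bit branch base. In C++ terms, a sketch of the difference (illustrative only):

// What the extending add computes, versus the old plain add, for a 32-bit
// displacement held in a 64-bit register.
#include <cstdint>

static uint64_t AddPlain(uint64_t base, uint64_t disp_reg) {
  return base + disp_reg;                         // old: high bits of disp_reg leak in
}

static uint64_t AddSxtw(uint64_t base, uint64_t disp_reg) {
  int64_t disp = static_cast<int32_t>(disp_reg);  // sign-extend the low 32 bits
  return base + static_cast<uint64_t>(disp);      // new: correct for negative disp
}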
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 0fa7f2b..f1270ec 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -241,6 +241,8 @@
LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
int shift);
+ LIR* OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+ A64RegExtEncodings ext, uint8_t amount);
LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
static const ArmEncodingMap EncodingMap[kA64Last];
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index fba368a..06e1cda 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -1163,7 +1163,7 @@
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
- if (Runtime::Current()->ExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
*pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
} else {
*pcrLabel = nullptr;
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index b3ca7c4..672aa88 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -644,6 +644,44 @@
}
}
+LIR* Arm64Mir2Lir::OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1,
+ RegStorage r_src2, A64RegExtEncodings ext, uint8_t amount) {
+ ArmOpcode opcode = kA64Brk1d;
+
+ switch (op) {
+ case kOpAdd:
+ opcode = kA64Add4RRre;
+ break;
+ case kOpSub:
+ opcode = kA64Sub4RRre;
+ break;
+ default:
+ LOG(FATAL) << "Unimplemented opcode: " << op;
+ break;
+ }
+ ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
+
+ if (r_dest.Is64Bit()) {
+ CHECK(r_src1.Is64Bit());
+
+ // dest determines whether the op is wide or not. Up-convert src2 when necessary.
+ // Note: this does not follow the AArch64 specification; it is our own encoding convention.
+ if (!r_src2.Is64Bit()) {
+ r_src2 = As64BitReg(r_src2);
+ }
+ } else {
+ CHECK(!r_src1.Is64Bit());
+ CHECK(!r_src2.Is64Bit());
+ }
+
+ // Sanity checks.
+ // 1) Amount is in the range 0..4
+ CHECK_LE(amount, 4);
+
+ return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(),
+ EncodeExtend(ext, amount));
+}
+
LIR* Arm64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
return OpRegRegRegShift(op, r_dest, r_src1, r_src2, ENCODE_NO_SHIFT);
}
@@ -694,14 +732,8 @@
return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value >> 12, 1);
} else {
log_imm = -1;
- alt_opcode = (neg) ? kA64Add4rrre : kA64Sub4rrre;
- // To make it correct, we need:
- // 23..21 = 001 = extend
- // 15..13 = 01x = LSL/UXTW/X / x defines wide or not
- // 12..10 = 000 = no shift (in case of SP)
- // => info = 00101x000
- // => =0x 0 5 (0/8)
- info = (is_wide ? 8 : 0) | 0x50;
+ alt_opcode = (neg) ? kA64Add4RRre : kA64Sub4RRre;
+ info = EncodeExtend(is_wide ? kA64Uxtx : kA64Uxtw, 0);
}
break;
// case kOpRsub:
@@ -744,7 +776,7 @@
return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
} else {
RegStorage r_scratch;
- if (IS_WIDE(wide)) {
+ if (is_wide) {
r_scratch = AllocTempWide();
LoadConstantWide(r_scratch, value);
} else {
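EncodeExtend(...) now builds the operand info that the deleted hand-computed constant encoded (info = (is_wide ? 8 : 0) | 0x50). Assuming the conventional AArch64 extend-type numbering (UXTW = 2, UXTX = 3, SXTW = 6, ...), a plausible sketch of its shape, consistent with that constant; the real definition lives elsewhere in the tree:

// Hypothetical sketch of EncodeExtend, inferred from the deleted comment:
// bit 6 flags the extended-register form, bits 5..3 carry the extend type,
// bits 2..0 the shift amount (0..4).
static int EncodeExtend(int extend_type, int amount) {
  return (1 << 6) | (extend_type << 3) | (amount & 0x7);
}
// EncodeExtend(kA64Uxtw /*2*/, 0) == 0x50 and EncodeExtend(kA64Uxtx /*3*/, 0)
// == 0x58, matching the old (is_wide ? 8 : 0) | 0x50.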
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 3b99421..e36b592 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -173,7 +173,7 @@
/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
- if (Runtime::Current()->ExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
return GenExplicitNullCheck(m_reg, opt_flags);
}
return nullptr;
@@ -188,7 +188,7 @@
}
void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
- if (!Runtime::Current()->ExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
}
@@ -197,13 +197,13 @@
}
void Mir2Lir::MarkPossibleStackOverflowException() {
- if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
MarkSafepointPC(last_lir_insn_);
}
}
void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
- if (!Runtime::Current()->ExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
}
@@ -2171,7 +2171,7 @@
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
- if (Runtime::Current()->ExplicitSuspendChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) {
if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
return;
}
@@ -2191,7 +2191,7 @@
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
- if (Runtime::Current()->ExplicitSuspendChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) {
if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
OpUnconditionalBranch(target);
return;
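All of the Runtime::Current()->Explicit*Checks() call sites in this file (and in the backends above) now read the per-compilation CompilerOptions instead. The practical difference: the host runtime's settings describe the machine dex2oat runs on, while CompilerOptions can carry per-target values (see CheckExplicitCheckOptions in dex2oat.cc below), so cross-compiles no longer inherit the host's implicit-check configuration.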
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 641579f..b3fac77 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -977,7 +977,7 @@
type, skip_this);
if (pcrLabel) {
- if (Runtime::Current()->ExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
*pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
} else {
*pcrLabel = nullptr;
@@ -1204,7 +1204,7 @@
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
- if (Runtime::Current()->ExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
*pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
} else {
*pcrLabel = nullptr;
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 92753e4..078dd5a 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -2202,7 +2202,7 @@
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
- if (Runtime::Current()->ExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
*pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
} else {
*pcrLabel = nullptr;
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 5d1c5da..fb3341b 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -53,7 +53,10 @@
num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
generate_gdb_information_(false),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
- include_debug_symbols_(kDefaultIncludeDebugSymbols)
+ include_debug_symbols_(kDefaultIncludeDebugSymbols),
+ explicit_null_checks_(true),
+ explicit_so_checks_(true),
+ explicit_suspend_checks_(true)
#ifdef ART_SEA_IR_MODE
, sea_ir_mode_(false)
#endif
@@ -67,7 +70,10 @@
size_t num_dex_methods_threshold,
bool generate_gdb_information,
double top_k_profile_threshold,
- bool include_debug_symbols
+ bool include_debug_symbols,
+ bool explicit_null_checks,
+ bool explicit_so_checks,
+ bool explicit_suspend_checks
#ifdef ART_SEA_IR_MODE
, bool sea_ir_mode
#endif
@@ -80,7 +86,10 @@
num_dex_methods_threshold_(num_dex_methods_threshold),
generate_gdb_information_(generate_gdb_information),
top_k_profile_threshold_(top_k_profile_threshold),
- include_debug_symbols_(include_debug_symbols)
+ include_debug_symbols_(include_debug_symbols),
+ explicit_null_checks_(explicit_null_checks),
+ explicit_so_checks_(explicit_so_checks),
+ explicit_suspend_checks_(explicit_suspend_checks)
#ifdef ART_SEA_IR_MODE
, sea_ir_mode_(sea_ir_mode)
#endif
@@ -147,6 +156,30 @@
return include_debug_symbols_;
}
+ bool GetExplicitNullChecks() const {
+ return explicit_null_checks_;
+ }
+
+ void SetExplicitNullChecks(bool new_val) {
+ explicit_null_checks_ = new_val;
+ }
+
+ bool GetExplicitStackOverflowChecks() const {
+ return explicit_so_checks_;
+ }
+
+ void SetExplicitStackOverflowChecks(bool new_val) {
+ explicit_so_checks_ = new_val;
+ }
+
+ bool GetExplicitSuspendChecks() const {
+ return explicit_suspend_checks_;
+ }
+
+ void SetExplicitSuspendChecks(bool new_val) {
+ explicit_suspend_checks_ = new_val;
+ }
+
#ifdef ART_SEA_IR_MODE
bool GetSeaIrMode();
#endif
@@ -166,6 +199,9 @@
// When using a profile file only the top K% of the profiled samples will be compiled.
double top_k_profile_threshold_;
bool include_debug_symbols_;
+ bool explicit_null_checks_;
+ bool explicit_so_checks_;
+ bool explicit_suspend_checks_;
#ifdef ART_SEA_IR_MODE
bool sea_ir_mode_;
#endif
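A standalone sketch of how the three new flags travel end to end: dex2oat computes per-ISA values, passes them through the CompilerOptions constructor, and backends read the getters. Mock class below, for illustration only, not the real one:

// Mock of the new CompilerOptions surface (hypothetical stand-in).
#include <iostream>

class MockCompilerOptions {
 public:
  MockCompilerOptions(bool null_checks, bool so_checks, bool suspend_checks)
      : explicit_null_checks_(null_checks),
        explicit_so_checks_(so_checks),
        explicit_suspend_checks_(suspend_checks) {}
  bool GetExplicitNullChecks() const { return explicit_null_checks_; }
  bool GetExplicitStackOverflowChecks() const { return explicit_so_checks_; }
  bool GetExplicitSuspendChecks() const { return explicit_suspend_checks_; }

 private:
  bool explicit_null_checks_;
  bool explicit_so_checks_;
  bool explicit_suspend_checks_;
};

int main() {
  // e.g. an ARM target using implicit null and stack-overflow checks:
  MockCompilerOptions opts(/*null*/ false, /*so*/ false, /*suspend*/ true);
  std::cout << std::boolalpha << opts.GetExplicitNullChecks() << "\n";  // false
  std::cout << opts.GetExplicitSuspendChecks() << "\n";                 // true
  return 0;
}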
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index c3f2082..b6b5313 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -744,6 +744,19 @@
*parsed_value = value;
}
+void CheckExplicitCheckOptions(InstructionSet isa, bool* explicit_null_checks,
+ bool* explicit_so_checks, bool* explicit_suspend_checks) {
+ switch (isa) {
+ case kArm:
+ break; // All checks implemented, leave as is.
+
+ default: // No checks implemented, reset all to explicit checks.
+ *explicit_null_checks = true;
+ *explicit_so_checks = true;
+ *explicit_suspend_checks = true;
+ }
+}
+
static int dex2oat(int argc, char** argv) {
#if defined(__linux__) && defined(__arm__)
int major, minor;
@@ -825,6 +838,11 @@
bool watch_dog_enabled = !kIsTargetBuild;
bool generate_gdb_information = kIsDebugBuild;
+ bool explicit_null_checks = true;
+ bool explicit_so_checks = true;
+ bool explicit_suspend_checks = true;
+ bool has_explicit_checks_options = false;
+
for (int i = 0; i < argc; i++) {
const StringPiece option(argv[i]);
const bool log_options = false;
@@ -998,6 +1016,31 @@
} else if (option.starts_with("--dump-cfg-passes=")) {
std::string dump_passes = option.substr(strlen("--dump-cfg-passes=")).data();
PassDriverMEOpts::SetDumpPassList(dump_passes);
+ } else if (option.starts_with("--implicit-checks=")) {
+ std::string checks = option.substr(strlen("--implicit-checks=")).data();
+ std::vector<std::string> checkvec;
+ Split(checks, ',', checkvec);
+ for (auto& str : checkvec) {
+ std::string val = Trim(str);
+ if (val == "none") {
+ explicit_null_checks = true;
+ explicit_so_checks = true;
+ explicit_suspend_checks = true;
+ } else if (val == "null") {
+ explicit_null_checks = false;
+ } else if (val == "suspend") {
+ explicit_suspend_checks = false;
+ } else if (val == "stack") {
+ explicit_so_checks = false;
+ } else if (val == "all") {
+ explicit_null_checks = false;
+ explicit_so_checks = false;
+ explicit_suspend_checks = false;
+ } else {
+ Usage("--implicit-checks passed non-recognized value %s", val.c_str());
+ }
+ has_explicit_checks_options = true;
+ }
} else {
Usage("Unknown argument %s", option.data());
}
@@ -1093,8 +1136,7 @@
}
if (compiler_filter_string == nullptr) {
- if ((instruction_set == kX86_64 && image) ||
- instruction_set == kArm64 ||
+ if (instruction_set == kArm64 ||
instruction_set == kMips) {
// TODO: implement/fix compilers for these architectures.
compiler_filter_string = "interpret-only";
@@ -1126,6 +1168,9 @@
Usage("Unknown --compiler-filter value %s", compiler_filter_string);
}
+ CheckExplicitCheckOptions(instruction_set, &explicit_null_checks, &explicit_so_checks,
+ &explicit_suspend_checks);
+
CompilerOptions compiler_options(compiler_filter,
huge_method_threshold,
large_method_threshold,
@@ -1134,7 +1179,10 @@
num_dex_methods_threshold,
generate_gdb_information,
top_k_profile_threshold,
- include_debug_symbols
+ include_debug_symbols,
+ explicit_null_checks,
+ explicit_so_checks,
+ explicit_suspend_checks
#ifdef ART_SEA_IR_MODE
, compiler_options.sea_ir_ = true;
#endif
@@ -1205,6 +1253,18 @@
return EXIT_FAILURE;
}
std::unique_ptr<Dex2Oat> dex2oat(p_dex2oat);
+
+ // TODO: Not sure whether it's a good idea to allow anything other than the runtime option in
+ // this case at all, as we'll have to throw away produced code for a mismatch.
+ if (!has_explicit_checks_options) {
+ if (instruction_set == kRuntimeISA) {
+ Runtime* runtime = Runtime::Current();
+ compiler_options.SetExplicitNullChecks(runtime->ExplicitNullChecks());
+ compiler_options.SetExplicitStackOverflowChecks(runtime->ExplicitStackOverflowChecks());
+ compiler_options.SetExplicitSuspendChecks(runtime->ExplicitSuspendChecks());
+ }
+ }
+
// Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
// give it away now so that we don't starve GC.
Thread* self = Thread::Current();
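Example invocations for the new flag, per the parser above ("none" forces all checks explicit, "all" makes all three implicit, and CheckExplicitCheckOptions() overrides the result back to explicit for any ISA other than ARM):

dex2oat --implicit-checks=null,stack ...   # implicit null + stack-overflow, explicit suspend
dex2oat --implicit-checks=all ...          # all three implicit (effective on ARM only)
dex2oat --implicit-checks=none ...         # everything explicit (the default)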
diff --git a/runtime/Android.mk b/runtime/Android.mk
index c40ae7a8..992202a 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -54,6 +54,7 @@
gc/collector/concurrent_copying.cc \
gc/collector/garbage_collector.cc \
gc/collector/immune_region.cc \
+ gc/collector/mark_compact.cc \
gc/collector/mark_sweep.cc \
gc/collector/partial_mark_sweep.cc \
gc/collector/semi_space.cc \
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 4ede453..2e60b93 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1615,14 +1615,14 @@
// we would need to fully restore it. As there are a lot of callee-save registers, it seems
// easier to have an extra small stack area.
- str x19, [sp, #-16]! // Save integer result.
+ str x0, [sp, #-16]! // Save integer result.
.cfi_adjust_cfa_offset 16
str d0, [sp, #8] // Save floating-point result.
- mov x0, xSELF // Pass Thread.
add x1, sp, #16 // Pass SP.
mov x2, x0 // Pass integer result.
fmov x3, d0 // Pass floating-point result.
+ mov x0, xSELF // Pass Thread.
bl artInstrumentationMethodExitFromCode // (Thread*, SP, gpr_res, fpr_res)
mov x9, x0 // Return address from instrumentation call.
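Two bugs fall out of this hunk's ordering: the old code saved x19 rather than the x0 integer result, and it loaded xSELF into x0 before "mov x2, x0", so artInstrumentationMethodExitFromCode received the Thread* where the integer result belonged. Saving x0 and deferring the xSELF load until after the result has been copied into x2 fixes both.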
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index f745088..16e0ec3 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_CLASS_LINKER_INL_H_
#include "class_linker.h"
+#include "gc/heap-inl.h"
#include "mirror/art_field.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
@@ -186,13 +187,19 @@
inline mirror::IfTable* ClassLinker::AllocIfTable(Thread* self, size_t ifcount) {
return down_cast<mirror::IfTable*>(
- mirror::IfTable::Alloc(self, GetClassRoot(kObjectArrayClass), ifcount * mirror::IfTable::kMax));
+ mirror::IfTable::Alloc(self, GetClassRoot(kObjectArrayClass),
+ ifcount * mirror::IfTable::kMax));
}
inline mirror::ObjectArray<mirror::ArtField>* ClassLinker::AllocArtFieldArray(Thread* self,
size_t length) {
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ // Can't have movable field arrays for mark compact since we need these arrays to always be valid
+ // so that we can do Object::VisitReferences in the case where the fields don't fit in the
+ // reference offsets word.
return mirror::ObjectArray<mirror::ArtField>::Alloc(
- self, GetClassRoot(kJavaLangReflectArtFieldArrayClass), length);
+ self, GetClassRoot(kJavaLangReflectArtFieldArrayClass), length,
+ kMoveFieldArrays ? heap->GetCurrentAllocator() : heap->GetCurrentNonMovingAllocator());
}
inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d684a50..d68aca9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1061,6 +1061,42 @@
VLOG(startup) << "ClassLinker::InitFromImage exiting";
}
+void ClassLinker::VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ if ((flags & kVisitRootFlagAllRoots) != 0) {
+ for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
+ callback(reinterpret_cast<mirror::Object**>(&it.second), arg, 0, kRootStickyClass);
+ }
+ } else if ((flags & kVisitRootFlagNewRoots) != 0) {
+ for (auto& pair : new_class_roots_) {
+ mirror::Object* old_ref = pair.second;
+ callback(reinterpret_cast<mirror::Object**>(&pair.second), arg, 0, kRootStickyClass);
+ if (UNLIKELY(pair.second != old_ref)) {
+ // Uh oh: the GC moved a root in the log. Need to search the class_table_ and update the
+ // corresponding object. This is slow, but luckily for us, this may only happen with a
+ // concurrent moving GC.
+ for (auto it = class_table_.lower_bound(pair.first), end = class_table_.end();
+ it != end && it->first == pair.first; ++it) {
+ // If the class stored matches the old class, update it to the new value.
+ if (old_ref == it->second) {
+ it->second = pair.second;
+ }
+ }
+ }
+ }
+ }
+ if ((flags & kVisitRootFlagClearRootLog) != 0) {
+ new_class_roots_.clear();
+ }
+ if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
+ log_new_class_table_roots_ = true;
+ } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
+ log_new_class_table_roots_ = false;
+ }
+ // We deliberately ignore the class roots in the image since we
+ // handle image roots by using the MS/CMS rescanning of dirty cards.
+}
+
// Keep in sync with InitCallback. Anything we visit, we need to
// reinit references to when reinitializing a ClassLinker from a
// mapped image.
@@ -1087,41 +1123,7 @@
log_new_dex_caches_roots_ = false;
}
}
- {
- WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- if ((flags & kVisitRootFlagAllRoots) != 0) {
- for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
- callback(reinterpret_cast<mirror::Object**>(&it.second), arg, 0, kRootStickyClass);
- }
- } else if ((flags & kVisitRootFlagNewRoots) != 0) {
- for (auto& pair : new_class_roots_) {
- mirror::Object* old_ref = pair.second;
- callback(reinterpret_cast<mirror::Object**>(&pair.second), arg, 0, kRootStickyClass);
- if (UNLIKELY(pair.second != old_ref)) {
- // Uh ohes, GC moved a root in the log. Need to search the class_table and update the
- // corresponding object. This is slow, but luckily for us, this may only happen with a
- // concurrent moving GC.
- for (auto it = class_table_.lower_bound(pair.first), end = class_table_.end();
- it != end && it->first == pair.first; ++it) {
- // If the class stored matches the old class, update it to the new value.
- if (old_ref == it->second) {
- it->second = pair.second;
- }
- }
- }
- }
- }
- if ((flags & kVisitRootFlagClearRootLog) != 0) {
- new_class_roots_.clear();
- }
- if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
- log_new_class_table_roots_ = true;
- } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
- log_new_class_table_roots_ = false;
- }
- // We deliberately ignore the class roots in the image since we
- // handle image roots by using the MS/CMS rescanning of dirty cards.
- }
+ VisitClassRoots(callback, arg, flags);
callback(reinterpret_cast<mirror::Object**>(&array_iftable_), arg, 0, kRootVMInternal);
DCHECK(array_iftable_ != nullptr);
for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
@@ -1252,7 +1254,7 @@
DCHECK_GE(class_size, sizeof(mirror::Class));
gc::Heap* heap = Runtime::Current()->GetHeap();
InitializeClassVisitor visitor(class_size);
- mirror::Object* k = (kMovingClasses) ?
+ mirror::Object* k = kMovingClasses ?
heap->AllocObject<true>(self, java_lang_Class, class_size, visitor) :
heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size, visitor);
if (UNLIKELY(k == nullptr)) {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 6d96aa2..62b5ea8 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -247,8 +247,10 @@
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
+ LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_, dex_lock_);
+ LOCKS_EXCLUDED(dex_lock_);
mirror::DexCache* FindDexCache(const DexFile& dex_file) const
LOCKS_EXCLUDED(dex_lock_)
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 224b33e..c0aa43e 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -52,7 +52,7 @@
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
- PROT_READ | PROT_WRITE, false, &error_msg));
+ PROT_READ | PROT_WRITE, false, &error_msg));
if (UNLIKELY(mem_map.get() == nullptr)) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
return nullptr;
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 0849171..27fb087 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -42,7 +42,6 @@
class SpaceBitmap {
public:
typedef void ScanCallback(mirror::Object* obj, void* finger, void* arg);
-
typedef void SweepCallback(size_t ptr_count, mirror::Object** ptrs, void* arg);
// Initialize a space bitmap so that it points to a bitmap large enough to cover a heap at
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
new file mode 100644
index 0000000..595dc8f
--- /dev/null
+++ b/runtime/gc/collector/mark_compact.cc
@@ -0,0 +1,634 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mark_compact.h"
+
+#include "base/logging.h"
+#include "base/mutex-inl.h"
+#include "base/timing_logger.h"
+#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/accounting/mod_union_table.h"
+#include "gc/accounting/remembered_set.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
+#include "gc/reference_processor.h"
+#include "gc/space/bump_pointer_space.h"
+#include "gc/space/bump_pointer_space-inl.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
+#include "indirect_reference_table.h"
+#include "intern_table.h"
+#include "jni_internal.h"
+#include "mark_sweep-inl.h"
+#include "monitor.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/reference-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array.h"
+#include "mirror/object_array-inl.h"
+#include "runtime.h"
+#include "stack.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+using ::art::mirror::Class;
+using ::art::mirror::Object;
+
+namespace art {
+namespace gc {
+namespace collector {
+
+void MarkCompact::BindBitmaps() {
+ timings_.StartSplit("BindBitmaps");
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ // Mark all of the spaces we never collect as immune.
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
+ space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+ CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
+ }
+ }
+ timings_.EndSplit();
+}
+
+MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
+ : GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
+ space_(nullptr), collector_name_(name_) {
+}
+
+void MarkCompact::RunPhases() {
+ Thread* self = Thread::Current();
+ InitializePhase();
+ CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
+ {
+ ScopedPause pause(this);
+ GetHeap()->PreGcVerificationPaused(this);
+ GetHeap()->PrePauseRosAllocVerification(this);
+ MarkingPhase();
+ ReclaimPhase();
+ }
+ GetHeap()->PostGcVerification(this);
+ FinishPhase();
+}
+
+void MarkCompact::ForwardObject(mirror::Object* obj) {
+ const size_t alloc_size = RoundUp(obj->SizeOf(), space::BumpPointerSpace::kAlignment);
+ LockWord lock_word = obj->GetLockWord(false);
+ // If we have a non empty lock word, store it and restore it later.
+ if (lock_word.GetValue() != LockWord().GetValue()) {
+ // Set the bit in the bitmap so that we know to restore it later.
+ objects_with_lockword_->Set(obj);
+ lock_words_to_restore_.push_back(lock_word);
+ }
+ obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(bump_pointer_)),
+ false);
+ bump_pointer_ += alloc_size;
+ ++live_objects_in_space_;
+}
+
+class CalculateObjectForwardingAddressVisitor {
+ public:
+ explicit CalculateObjectForwardingAddressVisitor(MarkCompact* collector)
+ : collector_(collector) {}
+ void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
+ Locks::heap_bitmap_lock_) {
+ DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
+ DCHECK(collector_->IsMarked(obj));
+ collector_->ForwardObject(obj);
+ }
+
+ private:
+ MarkCompact* const collector_;
+};
+
+void MarkCompact::CalculateObjectForwardingAddresses() {
+ timings_.NewSplit(__FUNCTION__);
+ // The bump pointer in the space where the next forwarding address will be.
+ bump_pointer_ = reinterpret_cast<byte*>(space_->Begin());
+ // Visit all the marked objects in the bitmap.
+ CalculateObjectForwardingAddressVisitor visitor(this);
+ objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
+ reinterpret_cast<uintptr_t>(space_->End()),
+ visitor);
+}
+
+void MarkCompact::InitializePhase() {
+ TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+ mark_stack_ = heap_->GetMarkStack();
+ DCHECK(mark_stack_ != nullptr);
+ immune_region_.Reset();
+ CHECK(space_->CanMoveObjects()) << "Attempting compact non-movable space from " << *space_;
+ // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ mark_bitmap_ = heap_->GetMarkBitmap();
+ live_objects_in_space_ = 0;
+}
+
+void MarkCompact::ProcessReferences(Thread* self) {
+ TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ heap_->GetReferenceProcessor()->ProcessReferences(
+ false, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback, &MarkObjectCallback,
+ &ProcessMarkStackCallback, this);
+}
+
+class BitmapSetSlowPathVisitor {
+ public:
+ void operator()(const mirror::Object* obj) const {
+ // Marking a large object; make sure it's aligned as a sanity check.
+ if (!IsAligned<kPageSize>(obj)) {
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ LOG(FATAL) << obj;
+ }
+ }
+};
+
+inline void MarkCompact::MarkObject(mirror::Object* obj) {
+ if (obj == nullptr) {
+ return;
+ }
+ if (kUseBakerOrBrooksReadBarrier) {
+ // Verify all the objects have the correct forward pointer installed.
+ obj->AssertReadBarrierPointer();
+ }
+ if (immune_region_.ContainsObject(obj)) {
+ return;
+ }
+ if (objects_before_forwarding_->HasAddress(obj)) {
+ if (!objects_before_forwarding_->Set(obj)) {
+ MarkStackPush(obj); // This object was not previously marked.
+ }
+ } else {
+ DCHECK(!space_->HasAddress(obj));
+ BitmapSetSlowPathVisitor visitor;
+ if (!mark_bitmap_->Set(obj, visitor)) {
+ // This object was not previously marked.
+ MarkStackPush(obj);
+ }
+ }
+}
+
+void MarkCompact::MarkingPhase() {
+ Thread* self = Thread::Current();
+ // Bitmap which describes which objects we have to move.
+ objects_before_forwarding_.reset(accounting::ContinuousSpaceBitmap::Create(
+ "objects before forwarding", space_->Begin(), space_->Size()));
+ // Bitmap which describes which lock words we need to restore.
+ objects_with_lockword_.reset(accounting::ContinuousSpaceBitmap::Create(
+ "objects with lock words", space_->Begin(), space_->Size()));
+ CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
+ TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+ // Assume the cleared space is already empty.
+ BindBitmaps();
+ // Process dirty cards and add dirty cards to mod-union tables.
+ heap_->ProcessCards(timings_, false);
+ // Clear the whole card table since we cannot get any additional dirty cards during the
+ // paused GC. This saves memory but only works for pause-the-world collectors.
+ timings_.NewSplit("ClearCardTable");
+ heap_->GetCardTable()->ClearCardTable();
+ // Need to do this before the checkpoint since we don't want any threads to add references to
+ // the live stack during the recursive mark.
+ timings_.NewSplit("SwapStacks");
+ if (kUseThreadLocalAllocationStack) {
+ heap_->RevokeAllThreadLocalAllocationStacks(self);
+ }
+ heap_->SwapStacks(self);
+ {
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ MarkRoots();
+ // Mark roots of immune spaces.
+ UpdateAndMarkModUnion();
+ // Recursively mark remaining objects.
+ MarkReachableObjects();
+ }
+ ProcessReferences(self);
+ {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ SweepSystemWeaks();
+ }
+ // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
+ // before they are properly counted.
+ RevokeAllThreadLocalBuffers();
+ timings_.StartSplit("PreSweepingGcVerification");
+ // Disabled due to an issue where we have objects in the bump pointer space which reference dead
+ // objects.
+ // heap_->PreSweepingGcVerification(this);
+ timings_.EndSplit();
+}
+
+void MarkCompact::UpdateAndMarkModUnion() {
+ for (auto& space : heap_->GetContinuousSpaces()) {
+ // If the space is immune then we need to mark the references to other spaces.
+ if (immune_region_.ContainsSpace(space)) {
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ if (table != nullptr) {
+ // TODO: Improve naming.
+ TimingLogger::ScopedSplit split(
+ space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+ "UpdateAndMarkImageModUnionTable",
+ &timings_);
+ table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+ }
+ }
+ }
+}
+
+void MarkCompact::MarkReachableObjects() {
+ timings_.StartSplit("MarkStackAsLive");
+ accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+ heap_->MarkAllocStackAsLive(live_stack);
+ live_stack->Reset();
+ // Recursively process the mark stack.
+ ProcessMarkStack();
+}
+
+void MarkCompact::ReclaimPhase() {
+ TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ // Reclaim unmarked objects.
+ Sweep(false);
+ // Swap the live and mark bitmaps for each space we modified. This is an
+ // optimization that lets us avoid clearing live bits during the sweep. Only swaps unbound
+ // bitmaps.
+ timings_.StartSplit("SwapBitmapsAndUnBindBitmaps");
+ SwapBitmaps();
+ GetHeap()->UnBindBitmaps(); // Unbind the live and mark bitmaps.
+ Compact();
+ timings_.EndSplit();
+}
+
+void MarkCompact::ResizeMarkStack(size_t new_size) {
+ std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
+ CHECK_LE(mark_stack_->Size(), new_size);
+ mark_stack_->Resize(new_size);
+ for (const auto& obj : temp) {
+ mark_stack_->PushBack(obj);
+ }
+}
+
+inline void MarkCompact::MarkStackPush(Object* obj) {
+ if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+ ResizeMarkStack(mark_stack_->Capacity() * 2);
+ }
+ // The object must be pushed onto the mark stack.
+ mark_stack_->PushBack(obj);
+}
+
+void MarkCompact::ProcessMarkStackCallback(void* arg) {
+ reinterpret_cast<MarkCompact*>(arg)->ProcessMarkStack();
+}
+
+mirror::Object* MarkCompact::MarkObjectCallback(mirror::Object* root, void* arg) {
+ reinterpret_cast<MarkCompact*>(arg)->MarkObject(root);
+ return root;
+}
+
+void MarkCompact::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
+ void* arg) {
+ reinterpret_cast<MarkCompact*>(arg)->MarkObject(obj_ptr->AsMirrorPtr());
+}
+
+void MarkCompact::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
+ void* arg) {
+ reinterpret_cast<MarkCompact*>(arg)->DelayReferenceReferent(klass, ref);
+}
+
+void MarkCompact::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
+ RootType /*root_type*/) {
+ reinterpret_cast<MarkCompact*>(arg)->MarkObject(*root);
+}
+
+void MarkCompact::UpdateRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
+ RootType /*root_type*/) {
+ mirror::Object* obj = *root;
+ mirror::Object* new_obj = reinterpret_cast<MarkCompact*>(arg)->GetMarkedForwardAddress(obj);
+ if (obj != new_obj) {
+ *root = new_obj;
+ DCHECK(new_obj != nullptr);
+ }
+}
+
+class UpdateObjectReferencesVisitor {
+ public:
+ explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {
+ }
+ void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ collector_->UpdateObjectReferences(obj);
+ }
+
+ private:
+ MarkCompact* const collector_;
+};
+
+void MarkCompact::UpdateReferences() {
+ timings_.NewSplit(__FUNCTION__);
+ Runtime* runtime = Runtime::Current();
+ // Update roots.
+ runtime->VisitRoots(UpdateRootCallback, this);
+ // Update object references in mod union tables and spaces.
+ for (const auto& space : heap_->GetContinuousSpaces()) {
+ // If the space has a mod union table, use it to update the references it tracks.
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ if (table != nullptr) {
+ // TODO: Improve naming.
+ TimingLogger::ScopedSplit split(
+ space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
+ "UpdateImageModUnionTableReferences",
+ &timings_);
+ table->UpdateAndMarkReferences(&UpdateHeapReferenceCallback, this);
+ } else {
+ // No mod union table, so scan the space with a bitmap visit.
+ accounting::ContinuousSpaceBitmap* bitmap = space->GetLiveBitmap();
+ if (bitmap != nullptr) {
+ UpdateObjectReferencesVisitor visitor(this);
+ bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+ reinterpret_cast<uintptr_t>(space->End()),
+ visitor);
+ }
+ }
+ }
+ CHECK(!kMovingClasses)
+ << "Didn't update large object classes since they are assumed to not move.";
+ // Update the system weaks, these should already have been swept.
+ runtime->SweepSystemWeaks(&MarkedForwardingAddressCallback, this);
+ // Update the objects in the bump pointer space last, these objects don't have a bitmap.
+ UpdateObjectReferencesVisitor visitor(this);
+ objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
+ reinterpret_cast<uintptr_t>(space_->End()),
+ visitor);
+ // Update the reference processor cleared list.
+ heap_->GetReferenceProcessor()->UpdateRoots(&MarkedForwardingAddressCallback, this);
+}
+
+void MarkCompact::Compact() {
+ timings_.NewSplit(__FUNCTION__);
+ CalculateObjectForwardingAddresses();
+ UpdateReferences();
+ MoveObjects();
+ // Account for what compaction freed in the space.
+ int64_t objects_freed = space_->GetObjectsAllocated() - live_objects_in_space_;
+ int64_t bytes_freed = reinterpret_cast<int64_t>(space_->End()) -
+ reinterpret_cast<int64_t>(bump_pointer_);
+ timings_.NewSplit("RecordFree");
+ space_->RecordFree(objects_freed, bytes_freed);
+ RecordFree(objects_freed, bytes_freed);
+ space_->SetEnd(bump_pointer_);
+ // Need to zero out the memory we freed. TODO: Use madvise for pages.
+ memset(bump_pointer_, 0, bytes_freed);
+}
+
+// Marks all objects in the root set.
+void MarkCompact::MarkRoots() {
+ timings_.NewSplit("MarkRoots");
+ Runtime::Current()->VisitRoots(MarkRootCallback, this);
+}
+
+mirror::Object* MarkCompact::MarkedForwardingAddressCallback(mirror::Object* obj, void* arg) {
+ return reinterpret_cast<MarkCompact*>(arg)->GetMarkedForwardAddress(obj);
+}
+
+inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference) {
+ mirror::Object* obj = reference->AsMirrorPtr();
+ if (obj != nullptr) {
+ mirror::Object* new_obj = GetMarkedForwardAddress(obj);
+ if (obj != new_obj) {
+ DCHECK(new_obj != nullptr);
+ reference->Assign(new_obj);
+ }
+ }
+}
+
+void MarkCompact::UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
+ void* arg) {
+ reinterpret_cast<MarkCompact*>(arg)->UpdateHeapReference(reference);
+}
+
+class UpdateReferenceVisitor {
+ public:
+ explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {
+ }
+
+ void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ ALWAYS_INLINE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
+ }
+
+ void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ collector_->UpdateHeapReference(
+ ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset()));
+ }
+
+ private:
+ MarkCompact* const collector_;
+};
+
+void MarkCompact::UpdateObjectReferences(mirror::Object* obj) {
+ UpdateReferenceVisitor visitor(this);
+ obj->VisitReferences<kMovingClasses>(visitor, visitor);
+}
+
+inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) const {
+ DCHECK(obj != nullptr);
+ if (objects_before_forwarding_->HasAddress(obj)) {
+ DCHECK(objects_before_forwarding_->Test(obj));
+ mirror::Object* ret =
+ reinterpret_cast<mirror::Object*>(obj->GetLockWord(false).ForwardingAddress());
+ DCHECK(ret != nullptr);
+ return ret;
+ }
+ DCHECK(!space_->HasAddress(obj));
+ DCHECK(IsMarked(obj));
+ return obj;
+}
+
+inline bool MarkCompact::IsMarked(const Object* object) const {
+ if (immune_region_.ContainsObject(object)) {
+ return true;
+ }
+ if (objects_before_forwarding_->HasAddress(object)) {
+ return objects_before_forwarding_->Test(object);
+ }
+ return mark_bitmap_->Test(object);
+}
+
+mirror::Object* MarkCompact::IsMarkedCallback(mirror::Object* object, void* arg) {
+ return reinterpret_cast<MarkCompact*>(arg)->IsMarked(object) ? object : nullptr;
+}
+
+bool MarkCompact::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr,
+ void* arg) {
+ // Side effect free since we call this before ever moving objects.
+ return reinterpret_cast<MarkCompact*>(arg)->IsMarked(ref_ptr->AsMirrorPtr());
+}
+
+void MarkCompact::SweepSystemWeaks() {
+ timings_.StartSplit("SweepSystemWeaks");
+ Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+ timings_.EndSplit();
+}
+
+bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
+ return space != space_ && !immune_region_.ContainsSpace(space);
+}
+
+class MoveObjectVisitor {
+ public:
+ explicit MoveObjectVisitor(MarkCompact* collector) : collector_(collector) {
+ }
+ void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ collector_->MoveObject(obj, obj->SizeOf());
+ }
+
+ private:
+ MarkCompact* const collector_;
+};
+
+void MarkCompact::MoveObject(mirror::Object* obj, size_t len) {
+ // Look at the forwarding address stored in the lock word to know where to copy.
+ DCHECK(space_->HasAddress(obj)) << obj;
+ uintptr_t dest_addr = obj->GetLockWord(false).ForwardingAddress();
+ mirror::Object* dest_obj = reinterpret_cast<mirror::Object*>(dest_addr);
+ DCHECK(space_->HasAddress(dest_obj)) << dest_obj;
+ // Use memmove since there may be overlap.
+ memmove(reinterpret_cast<void*>(dest_addr), reinterpret_cast<const void*>(obj), len);
+ // Restore the saved lock word if needed.
+ LockWord lock_word;
+ if (UNLIKELY(objects_with_lockword_->Test(obj))) {
+ lock_word = lock_words_to_restore_.front();
+ lock_words_to_restore_.pop_front();
+ }
+ dest_obj->SetLockWord(lock_word, false);
+}
+
+void MarkCompact::MoveObjects() {
+ timings_.NewSplit(__FUNCTION__);
+ // Move the objects in the before-forwarding bitmap.
+ MoveObjectVisitor visitor(this);
+ objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
+ reinterpret_cast<uintptr_t>(space_->End()),
+ visitor);
+ CHECK(lock_words_to_restore_.empty());
+}
+
+void MarkCompact::Sweep(bool swap_bitmaps) {
+ DCHECK(mark_stack_->IsEmpty());
+ TimingLogger::ScopedSplit split("Sweep", &timings_);
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (space->IsContinuousMemMapAllocSpace()) {
+ space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
+ if (!ShouldSweepSpace(alloc_space)) {
+ continue;
+ }
+ TimingLogger::ScopedSplit split(
+ alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
+ size_t freed_objects = 0;
+ size_t freed_bytes = 0;
+ alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+ RecordFree(freed_objects, freed_bytes);
+ }
+ }
+ SweepLargeObjects(swap_bitmaps);
+}
+
+void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
+ TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
+ size_t freed_objects = 0;
+ size_t freed_bytes = 0;
+ heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+ RecordFreeLargeObjects(freed_objects, freed_bytes);
+}
+
+// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
+ &HeapReferenceMarkedCallback, this);
+}
+
+class MarkCompactMarkObjectVisitor {
+ public:
+ explicit MarkCompactMarkObjectVisitor(MarkCompact* collector) : collector_(collector) {
+ }
+
+ void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ // Object was already verified when we scanned it.
+ collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
+ }
+
+ void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ collector_->DelayReferenceReferent(klass, ref);
+ }
+
+ private:
+ MarkCompact* const collector_;
+};
+
+// Visit all of the references of an object and update.
+void MarkCompact::ScanObject(Object* obj) {
+ MarkCompactMarkObjectVisitor visitor(this);
+ obj->VisitReferences<kMovingClasses>(visitor, visitor);
+}
+
+// Scan anything that's on the mark stack.
+void MarkCompact::ProcessMarkStack() {
+ timings_.StartSplit("ProcessMarkStack");
+ while (!mark_stack_->IsEmpty()) {
+ Object* obj = mark_stack_->PopBack();
+ DCHECK(obj != nullptr);
+ ScanObject(obj);
+ }
+ timings_.EndSplit();
+}
+
+void MarkCompact::SetSpace(space::BumpPointerSpace* space) {
+ DCHECK(space != nullptr);
+ space_ = space;
+}
+
+void MarkCompact::FinishPhase() {
+ TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+ space_ = nullptr;
+ CHECK(mark_stack_->IsEmpty());
+ mark_stack_->Reset();
+ // Clear all of the spaces' mark bitmaps.
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ heap_->ClearMarkedObjects();
+ // Release our bitmaps.
+ objects_before_forwarding_.reset(nullptr);
+ objects_with_lockword_.reset(nullptr);
+}
+
+void MarkCompact::RevokeAllThreadLocalBuffers() {
+ timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+ GetHeap()->RevokeAllThreadLocalBuffers();
+ timings_.EndSplit();
+}
+
+} // namespace collector
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
new file mode 100644
index 0000000..25cfe0f
--- /dev/null
+++ b/runtime/gc/collector/mark_compact.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
+#define ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
+
+#include <deque>
+#include <memory> // For unique_ptr.
+
+#include "atomic.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "garbage_collector.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "immune_region.h"
+#include "lock_word.h"
+#include "object_callbacks.h"
+#include "offsets.h"
+
+namespace art {
+
+class Thread;
+
+namespace mirror {
+ class Class;
+ class Object;
+} // namespace mirror
+
+namespace gc {
+
+class Heap;
+
+namespace accounting {
+ template <typename T> class AtomicStack;
+ typedef AtomicStack<mirror::Object*> ObjectStack;
+} // namespace accounting
+
+namespace space {
+ class ContinuousMemMapAllocSpace;
+ class ContinuousSpace;
+} // namespace space
+
+namespace collector {
+
+class MarkCompact : public GarbageCollector {
+ public:
+ explicit MarkCompact(Heap* heap, const std::string& name_prefix = "");
+ ~MarkCompact() {}
+
+ virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+ void InitializePhase();
+ void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MarkReachableObjects()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ virtual GcType GetGcType() const OVERRIDE {
+ return kGcTypePartial;
+ }
+ virtual CollectorType GetCollectorType() const OVERRIDE {
+ return kCollectorTypeMC;
+ }
+
+ // Sets which space we will be copying objects in.
+ void SetSpace(space::BumpPointerSpace* space);
+
+ // Initializes internal structures.
+ void Init();
+
+ // Find the default mark bitmap.
+ void FindDefaultMarkBitmap();
+
+ void ScanObject(mirror::Object* obj)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Marks the root set at the start of a garbage collection.
+ void MarkRoots()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
+ // the image. Mark that portion of the heap as immune.
+ void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+
+ void UnBindBitmaps()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void ProcessReferences(Thread* self)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Sweeps unmarked objects to complete the garbage collection.
+ void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Sweeps unmarked objects to complete the garbage collection.
+ void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ void SweepSystemWeaks()
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t /*tid*/,
+ RootType /*root_type*/)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr,
+ void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ static void ProcessMarkStackCallback(void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
+ void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Schedules an unmarked object for reference processing.
+ void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ protected:
+ // Returns null if the object is not marked, otherwise returns the forwarding address (same as
+  // object for non-movable things).
+ mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Marks a large object, returning true if the object was previously unmarked.
+ bool MarkLargeObject(const mirror::Object* obj)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Resize the mark stack to the given new size.
+ void ResizeMarkStack(size_t new_size);
+
+ // Returns true if we should sweep the space.
+ bool ShouldSweepSpace(space::ContinuousSpace* space) const;
+
+ // Push an object onto the mark stack.
+ void MarkStackPush(mirror::Object* obj);
+
+ void UpdateAndMarkModUnion()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Recursively blackens objects on the mark stack.
+ void ProcessMarkStack()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  // 3 pass mark compact approach: calculate object forwarding addresses, update references,
+  // then move the objects.
+ void Compact() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ // Calculate the forwarding address of objects marked as "live" in the objects_before_forwarding
+ // bitmap.
+ void CalculateObjectForwardingAddresses()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ // Update the references of objects by using the forwarding addresses.
+ void UpdateReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ static void UpdateRootCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
+ RootType /*root_type*/)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ // Move objects and restore lock words.
+ void MoveObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Move a single object to its forward address.
+ void MoveObject(mirror::Object* obj, size_t len) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Mark a single object.
+ void MarkObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+ Locks::mutator_lock_);
+ bool IsMarked(const mirror::Object* obj) const
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void ForwardObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+ Locks::mutator_lock_);
+ // Update a single heap reference.
+ void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
+ void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Update all of the references of a single object.
+ void UpdateObjectReferences(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Revoke all the thread-local buffers.
+ void RevokeAllThreadLocalBuffers();
+
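+  // Mark stack of live objects that still need to be scanned.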
+ accounting::ObjectStack* mark_stack_;
+
+  // Immune region; every object inside it is assumed to be marked.
+ ImmuneRegion immune_region_;
+
+ // Bump pointer space which we are collecting.
+ space::BumpPointerSpace* space_;
+ // Cached mark bitmap as an optimization.
+ accounting::HeapBitmap* mark_bitmap_;
+
+ // The name of the collector.
+ std::string collector_name_;
+
+ // The bump pointer in the space where the next forwarding address will be.
+ byte* bump_pointer_;
+ // How many live objects we have in the space.
+ size_t live_objects_in_space_;
+
+  // Bitmap which describes which objects we have to move; its granularity must be fine enough
+  // to handle objects which are only 8 bytes.
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> objects_before_forwarding_;
+ // Bitmap which describes which lock words we need to restore.
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> objects_with_lockword_;
+ // Which lock words we need to restore as we are moving objects.
+ std::deque<LockWord> lock_words_to_restore_;
+
+ private:
+ friend class BitmapSetSlowPathVisitor;
+ friend class CalculateObjectForwardingAddressVisitor;
+ friend class MarkCompactMarkObjectVisitor;
+ friend class MoveObjectVisitor;
+ friend class UpdateObjectReferencesVisitor;
+ friend class UpdateReferenceVisitor;
+ DISALLOW_COPY_AND_ASSIGN(MarkCompact);
+};
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index c72913a..fbb349e 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -43,10 +43,7 @@
#include "thread-inl.h"
#include "thread_list.h"
-using ::art::mirror::ArtField;
-using ::art::mirror::Class;
using ::art::mirror::Object;
-using ::art::mirror::ObjectArray;
namespace art {
namespace gc {
@@ -1272,9 +1269,7 @@
timings_.EndSplit();
}
-inline bool MarkSweep::IsMarked(const Object* object) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- DCHECK(object != nullptr);
+inline bool MarkSweep::IsMarked(const Object* object) const {
if (immune_region_.ContainsObject(object)) {
return true;
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index a44d8a1..2780099 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -215,7 +215,8 @@
protected:
// Returns true if the object has its bit set in the mark bitmap.
- bool IsMarked(const mirror::Object* object) const;
+ bool IsMarked(const mirror::Object* object) const
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index badf8b3..54e77a7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -41,22 +41,12 @@
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
-#include "mirror/art_field.h"
-#include "mirror/art_field-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/class_loader.h"
-#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
-#include "mirror/object_array.h"
-#include "mirror/object_array-inl.h"
#include "runtime.h"
-#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
-#include "verifier/method_verifier.h"
-using ::art::mirror::Class;
using ::art::mirror::Object;
namespace art {
@@ -788,7 +778,7 @@
// Already forwarded, must be marked.
return obj;
}
- return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
+ return mark_bitmap_->Test(obj) ? obj : nullptr;
}
void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index c0a6b6a..530a3c9 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -34,6 +34,8 @@
kCollectorTypeSS,
// A generational variant of kCollectorTypeSS.
kCollectorTypeGSS,
+  // Mark compact collector.
+ kCollectorTypeMC,
// Heap trimming collector, doesn't do any actual collecting.
kCollectorTypeHeapTrim,
// A (mostly) concurrent copying collector.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index e6a5380..1c94d6f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -36,6 +36,7 @@
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
+#include "gc/collector/mark_compact.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
@@ -331,9 +332,10 @@
semi_space_collector_ = new collector::SemiSpace(this, generational,
generational ? "generational" : "");
garbage_collectors_.push_back(semi_space_collector_);
-
concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
garbage_collectors_.push_back(concurrent_copying_collector_);
+ mark_compact_collector_ = new collector::MarkCompact(this);
+ garbage_collectors_.push_back(mark_compact_collector_);
}
if (GetImageSpace() != nullptr && main_space_ != nullptr) {
@@ -1341,8 +1343,9 @@
<< " -> " << static_cast<int>(collector_type);
uint64_t start_time = NanoTime();
uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
- ThreadList* tl = Runtime::Current()->GetThreadList();
- Thread* self = Thread::Current();
+ Runtime* const runtime = Runtime::Current();
+ ThreadList* const tl = runtime->GetThreadList();
+ Thread* const self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Locks::mutator_lock_->AssertNotHeld(self);
const bool copying_transition =
@@ -1371,7 +1374,7 @@
}
usleep(1000);
}
- if (Runtime::Current()->IsShuttingDown(self)) {
+ if (runtime->IsShuttingDown(self)) {
// Don't allow heap transitions to happen if the runtime is shutting down since these can
// cause objects to get finalized.
FinishGC(self, collector::kGcTypeNone);
@@ -1432,10 +1435,15 @@
void Heap::ChangeCollector(CollectorType collector_type) {
// TODO: Only do this with all mutators suspended to avoid races.
if (collector_type != collector_type_) {
+ if (collector_type == kCollectorTypeMC) {
+ // Don't allow mark compact unless support is compiled in.
+ CHECK(kMarkCompactSupport);
+ }
collector_type_ = collector_type;
gc_plan_.clear();
switch (collector_type_) {
case kCollectorTypeCC: // Fall-through.
+ case kCollectorTypeMC: // Fall-through.
case kCollectorTypeSS: // Fall-through.
case kCollectorTypeGSS: {
gc_plan_.push_back(collector::kGcTypeFull);
@@ -1722,13 +1730,17 @@
void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
space::ContinuousMemMapAllocSpace* source_space) {
CHECK(kMovingCollector);
- CHECK_NE(target_space, source_space) << "In-place compaction currently unsupported";
if (target_space != source_space) {
// Don't swap spaces since this isn't a typical semi space collection.
semi_space_collector_->SetSwapSemiSpaces(false);
semi_space_collector_->SetFromSpace(source_space);
semi_space_collector_->SetToSpace(target_space);
semi_space_collector_->Run(kGcCauseCollectorTransition, false);
+ } else {
+ CHECK(target_space->IsBumpPointerSpace())
+ << "In-place compaction is only supported for bump pointer spaces";
+ mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
+ mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
}
}
@@ -1792,21 +1804,30 @@
if (compacting_gc) {
DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
current_allocator_ == kAllocatorTypeTLAB);
- if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) {
- gc_type = semi_space_collector_->GetGcType();
- semi_space_collector_->SetFromSpace(bump_pointer_space_);
- semi_space_collector_->SetToSpace(temp_space_);
- collector = semi_space_collector_;
- semi_space_collector_->SetSwapSemiSpaces(true);
- } else if (collector_type_ == kCollectorTypeCC) {
- gc_type = concurrent_copying_collector_->GetGcType();
- collector = concurrent_copying_collector_;
- } else {
- LOG(FATAL) << "Unreachable - invalid collector type " << static_cast<size_t>(collector_type_);
+ switch (collector_type_) {
+ case kCollectorTypeSS:
+ // Fall-through.
+ case kCollectorTypeGSS:
+ semi_space_collector_->SetFromSpace(bump_pointer_space_);
+ semi_space_collector_->SetToSpace(temp_space_);
+ semi_space_collector_->SetSwapSemiSpaces(true);
+ collector = semi_space_collector_;
+ break;
+ case kCollectorTypeCC:
+ collector = concurrent_copying_collector_;
+ break;
+ case kCollectorTypeMC:
+ mark_compact_collector_->SetSpace(bump_pointer_space_);
+ collector = mark_compact_collector_;
+ break;
+ default:
+ LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
}
- temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
- CHECK(temp_space_->IsEmpty());
- gc_type = collector::kGcTypeFull;
+ if (collector != mark_compact_collector_) {
+ temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+ CHECK(temp_space_->IsEmpty());
+ }
+    gc_type = collector::kGcTypeFull;  // TODO: Don't hard code this.
} else if (current_allocator_ == kAllocatorTypeRosAlloc ||
current_allocator_ == kAllocatorTypeDlMalloc) {
collector = FindCollectorByGcType(gc_type);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9b49373..368a20c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -66,6 +66,7 @@
namespace collector {
class ConcurrentCopying;
class GarbageCollector;
+ class MarkCompact;
class MarkSweep;
class SemiSpace;
} // namespace collector
@@ -573,7 +574,7 @@
}
static bool IsMovingGc(CollectorType collector_type) {
return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
- collector_type == kCollectorTypeCC;
+ collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC;
}
bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -952,12 +953,14 @@
std::vector<collector::GarbageCollector*> garbage_collectors_;
collector::SemiSpace* semi_space_collector_;
+ collector::MarkCompact* mark_compact_collector_;
collector::ConcurrentCopying* concurrent_copying_collector_;
const bool running_on_valgrind_;
const bool use_tlab_;
friend class collector::GarbageCollector;
+ friend class collector::MarkCompact;
friend class collector::MarkSweep;
friend class collector::SemiSpace;
friend class ReferenceQueue;
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 3ff9889..292781e 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -205,6 +205,10 @@
}
}
+void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
+ cleared_references_.UpdateRoots(callback, arg);
+}
+
void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
Locks::mutator_lock_->AssertNotHeld(self);
if (!cleared_references_.IsEmpty()) {
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index ff7da52..2771ea8 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -59,6 +59,8 @@
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void UpdateRoots(IsMarkedCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
private:
class ProcessReferencesArgs {
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 19476e6..c3931e8 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -163,5 +163,11 @@
} while (LIKELY(ref != head));
}
+void ReferenceQueue::UpdateRoots(IsMarkedCallback* callback, void* arg) {
+ if (list_ != nullptr) {
+ list_ = down_cast<mirror::Reference*>(callback(list_, arg));
+ }
+}
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 8ef0d20..cd814bb 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -83,12 +83,16 @@
mirror::Reference* GetList() {
return list_;
}
+  // Visits list_ as a root; currently only used by the mark compact GC.
+ void UpdateRoots(IsMarkedCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
// Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
// calling AtomicEnqueueIfNotEnqueued.
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- // The actual reference list. Not a root since it will be nullptr when the GC is not running.
+ // The actual reference list. Only a root for the mark compact GC since it will be null for other
+ // GC types.
mirror::Reference* list_;
};
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 9e61f30..feee34f 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -145,6 +145,12 @@
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
+ // Record objects / bytes freed.
+ void RecordFree(int32_t objects, int32_t bytes) {
+ objects_allocated_.FetchAndSubSequentiallyConsistent(objects);
+ bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes);
+ }
+
// Object alignment within the space.
static constexpr size_t kAlignment = 8;
diff --git a/runtime/globals.h b/runtime/globals.h
index 58c2118..3a906f1 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -74,8 +74,11 @@
// Garbage collector constants.
static constexpr bool kMovingCollector = true && !kUsePortableCompiler;
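+// True if mark compact collector support is compiled in; kCollectorTypeMC requires this.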
+static constexpr bool kMarkCompactSupport = false && kMovingCollector;
+// True if we allow moving field arrays; this can cause complications with mark compact.
+static constexpr bool kMoveFieldArrays = !kMarkCompactSupport;
// True if we allow moving classes.
-static constexpr bool kMovingClasses = true;
+static constexpr bool kMovingClasses = !kMarkCompactSupport;
// True if we allow moving fields.
static constexpr bool kMovingFields = false;
// True if we allow moving methods.
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 512a66f..6205f70 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -505,8 +505,10 @@
template <bool kVisitClass, typename Visitor>
inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
- VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
+ // Visit the static fields first so that we don't overwrite the SFields / IFields instance
+ // fields.
VisitStaticFieldsReferences<kVisitClass>(this, visitor);
+ VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
}
inline bool Class::IsArtFieldClass() const {
diff --git a/runtime/mirror/iftable-inl.h b/runtime/mirror/iftable-inl.h
index ec3e514..3f20bf4 100644
--- a/runtime/mirror/iftable-inl.h
+++ b/runtime/mirror/iftable-inl.h
@@ -25,8 +25,9 @@
inline void IfTable::SetInterface(int32_t i, Class* interface) {
DCHECK(interface != NULL);
DCHECK(interface->IsInterface());
- DCHECK(Get((i * kMax) + kInterface) == NULL);
- Set<false>((i * kMax) + kInterface, interface);
+ const size_t idx = i * kMax + kInterface;
+ DCHECK_EQ(Get(idx), static_cast<Object*>(nullptr));
+ Set<false>(idx, interface);
}
} // namespace mirror
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 567ce3e..15ecd3c 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -26,6 +26,7 @@
#include "class.h"
#include "lock_word-inl.h"
#include "monitor.h"
+#include "object_array-inl.h"
#include "read_barrier-inl.h"
#include "runtime.h"
#include "reference.h"
@@ -667,10 +668,9 @@
mirror::ArtField* field = kIsStatic ? klass->GetStaticField(i) : klass->GetInstanceField(i);
MemberOffset field_offset = field->GetOffset();
// TODO: Do a simpler check?
- if (!kVisitClass && UNLIKELY(field_offset.Uint32Value() == ClassOffset().Uint32Value())) {
- continue;
+ if (kVisitClass || field_offset.Uint32Value() != ClassOffset().Uint32Value()) {
+ visitor(this, field_offset, kIsStatic);
}
- visitor(this, field_offset, kIsStatic);
}
}
}
@@ -693,18 +693,16 @@
inline void Object::VisitReferences(const Visitor& visitor,
const JavaLangRefVisitor& ref_visitor) {
mirror::Class* klass = GetClass<kVerifyFlags>();
- if (klass->IsVariableSize()) {
- if (klass->IsClassClass()) {
- AsClass<kVerifyNone>()->VisitReferences<kVisitClass>(klass, visitor);
- } else {
- DCHECK(klass->IsArrayClass<kVerifyFlags>());
- if (klass->IsObjectArrayClass<kVerifyNone>()) {
- AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences<kVisitClass>(visitor);
- } else if (kVisitClass) {
- visitor(this, ClassOffset(), false);
- }
+ if (klass == Class::GetJavaLangClass()) {
+ AsClass<kVerifyNone>()->VisitReferences<kVisitClass>(klass, visitor);
+ } else if (klass->IsArrayClass()) {
+ if (klass->IsObjectArrayClass<kVerifyNone>()) {
+ AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences<kVisitClass>(visitor);
+ } else if (kVisitClass) {
+ visitor(this, ClassOffset(), false);
}
} else {
+ DCHECK(!klass->IsVariableSize());
VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
ref_visitor(klass, AsReference());
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 87106d6..7cdd8f5 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -113,6 +113,8 @@
return gc::kCollectorTypeGSS;
} else if (option == "CC") {
return gc::kCollectorTypeCC;
+ } else if (option == "MC") {
+ return gc::kCollectorTypeMC;
} else {
return gc::kCollectorTypeNone;
}
@@ -563,6 +565,10 @@
if (!ParseDouble(option, ':', 0.0, 100.0, &profiler_options_.top_k_change_threshold_)) {
return false;
}
+ } else if (option == "-Xprofile-type:method") {
+ profiler_options_.profile_type_ = kProfilerMethod;
+ } else if (option == "-Xprofile-type:dexpc") {
+ profiler_options_.profile_type_ = kProfilerMethodAndDexPC;
} else if (StartsWith(option, "-implicit-checks:")) {
std::string checks;
if (!ParseStringAfterChar(option, ':', &checks)) {
@@ -806,6 +812,7 @@
UsageMessage(stream, " -Xprofile-start-immediately\n");
UsageMessage(stream, " -Xprofile-top-k-threshold:doublevalue\n");
UsageMessage(stream, " -Xprofile-top-k-change-threshold:doublevalue\n");
+ UsageMessage(stream, " -Xprofile-type:{method,dexpc}\n");
UsageMessage(stream, " -Xcompiler:filename\n");
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index 00bb501..2cd876a 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -63,7 +63,8 @@
static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
BackgroundMethodSamplingProfiler* profiler =
reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
- mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr);
+ uint32_t dex_pc;
+ mirror::ArtMethod* method = thread->GetCurrentMethod(&dex_pc);
if (false && method == nullptr) {
LOG(INFO) << "No current method available";
std::ostringstream os;
@@ -71,7 +72,7 @@
std::string data(os.str());
LOG(INFO) << data;
}
- profiler->RecordMethod(method);
+ profiler->RecordMethod(method, dex_pc);
}
// A closure that is called by the thread checkpoint code.
@@ -244,7 +245,7 @@
}
// Read the previous profile.
- profile_table_.ReadPrevious(fd);
+ profile_table_.ReadPrevious(fd, options_.GetProfileType());
// Move back to the start of the file.
lseek(fd, 0, SEEK_SET);
@@ -360,7 +361,7 @@
// A method has been hit, record its invocation in the method map.
// The mutator_lock must be held (shared) when this is called.
-void BackgroundMethodSamplingProfiler::RecordMethod(mirror::ArtMethod* method) {
+void BackgroundMethodSamplingProfiler::RecordMethod(mirror::ArtMethod* method, uint32_t dex_pc) {
if (method == nullptr) {
profile_table_.NullMethod();
// Don't record a nullptr method.
@@ -393,7 +394,11 @@
// Add to the profile table unless it is filtered out.
if (!is_filtered) {
- profile_table_.Put(method);
+ if (options_.GetProfileType() == kProfilerMethod) {
+ profile_table_.Put(method);
+ } else if (options_.GetProfileType() == kProfilerMethodAndDexPC) {
+ profile_table_.PutDexPC(method, dex_pc);
+ }
}
}
@@ -403,7 +408,7 @@
}
uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) {
- return profile_table_.Write(os);
+ return profile_table_.Write(os, options_.GetProfileType());
}
// Profile Table.
@@ -414,19 +419,18 @@
num_boot_methods_(0) {
for (int i = 0; i < kHashSize; i++) {
table[i] = nullptr;
+ dex_table[i] = nullptr;
}
}
ProfileSampleResults::~ProfileSampleResults() {
- for (int i = 0; i < kHashSize; i++) {
- delete table[i];
- }
+ Clear();
}
// Add a method to the profile table. If it's the first time the method
// has been seen, add it with count=1, otherwise increment the count.
void ProfileSampleResults::Put(mirror::ArtMethod* method) {
- lock_.Lock(Thread::Current());
+ MutexLock mu(Thread::Current(), lock_);
uint32_t index = Hash(method);
if (table[index] == nullptr) {
table[index] = new Map();
@@ -438,11 +442,34 @@
i->second++;
}
num_samples_++;
- lock_.Unlock(Thread::Current());
+}
+
+// Add a method with dex pc to the profile table.
+void ProfileSampleResults::PutDexPC(mirror::ArtMethod* method, uint32_t dex_pc) {
+ MutexLock mu(Thread::Current(), lock_);
+ uint32_t index = Hash(method);
+ if (dex_table[index] == nullptr) {
+ dex_table[index] = new MethodDexPCMap();
+ }
+ MethodDexPCMap::iterator i = dex_table[index]->find(method);
+ if (i == dex_table[index]->end()) {
+ DexPCCountMap* dex_pc_map = new DexPCCountMap();
+ (*dex_pc_map)[dex_pc] = 1;
+ (*dex_table[index])[method] = dex_pc_map;
+ } else {
+ DexPCCountMap* dex_pc_count = i->second;
+ DexPCCountMap::iterator dex_pc_i = dex_pc_count->find(dex_pc);
+ if (dex_pc_i == dex_pc_count->end()) {
+ (*dex_pc_count)[dex_pc] = 1;
+ } else {
+ dex_pc_i->second++;
+ }
+ }
+ num_samples_++;
}
// Write the profile table to the output stream. Also merge with the previous profile.
-uint32_t ProfileSampleResults::Write(std::ostream &os) {
+uint32_t ProfileSampleResults::Write(std::ostream& os, ProfileDataType type) {
ScopedObjectAccess soa(Thread::Current());
num_samples_ += previous_num_samples_;
num_null_methods_ += previous_num_null_methods_;
@@ -452,36 +479,101 @@
<< num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_;
os << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_ << "\n";
uint32_t num_methods = 0;
- for (int i = 0 ; i < kHashSize; i++) {
- Map *map = table[i];
- if (map != nullptr) {
- for (const auto &meth_iter : *map) {
- mirror::ArtMethod *method = meth_iter.first;
- std::string method_name = PrettyMethod(method);
+ if (type == kProfilerMethod) {
+    for (int i = 0; i < kHashSize; i++) {
+ Map *map = table[i];
+ if (map != nullptr) {
+ for (const auto &meth_iter : *map) {
+ mirror::ArtMethod *method = meth_iter.first;
+ std::string method_name = PrettyMethod(method);
- const DexFile::CodeItem* codeitem = method->GetCodeItem();
- uint32_t method_size = 0;
- if (codeitem != nullptr) {
- method_size = codeitem->insns_size_in_code_units_;
- }
- uint32_t count = meth_iter.second;
+ const DexFile::CodeItem* codeitem = method->GetCodeItem();
+ uint32_t method_size = 0;
+ if (codeitem != nullptr) {
+ method_size = codeitem->insns_size_in_code_units_;
+ }
+ uint32_t count = meth_iter.second;
- // Merge this profile entry with one from a previous run (if present). Also
- // remove the previous entry.
- PreviousProfile::iterator pi = previous_.find(method_name);
- if (pi != previous_.end()) {
- count += pi->second.count_;
- previous_.erase(pi);
+ // Merge this profile entry with one from a previous run (if present). Also
+ // remove the previous entry.
+ PreviousProfile::iterator pi = previous_.find(method_name);
+ if (pi != previous_.end()) {
+ count += pi->second.count_;
+ previous_.erase(pi);
+ }
+ os << StringPrintf("%s/%u/%u\n", method_name.c_str(), count, method_size);
+ ++num_methods;
}
- os << StringPrintf("%s/%u/%u\n", method_name.c_str(), count, method_size);
- ++num_methods;
+ }
+ }
+ } else if (type == kProfilerMethodAndDexPC) {
+    for (int i = 0; i < kHashSize; i++) {
+ MethodDexPCMap *dex_map = dex_table[i];
+ if (dex_map != nullptr) {
+ for (const auto &dex_pc_iter : *dex_map) {
+ mirror::ArtMethod *method = dex_pc_iter.first;
+ std::string method_name = PrettyMethod(method);
+
+ const DexFile::CodeItem* codeitem = method->GetCodeItem();
+ uint32_t method_size = 0;
+ if (codeitem != nullptr) {
+ method_size = codeitem->insns_size_in_code_units_;
+ }
+ DexPCCountMap* dex_pc_map = dex_pc_iter.second;
+ uint32_t total_count = 0;
+ for (const auto &dex_pc_i : *dex_pc_map) {
+ total_count += dex_pc_i.second;
+ }
+
+ PreviousProfile::iterator pi = previous_.find(method_name);
+ if (pi != previous_.end()) {
+ total_count += pi->second.count_;
+ DexPCCountMap* previous_dex_pc_map = pi->second.dex_pc_map_;
+ if (previous_dex_pc_map != nullptr) {
+ for (const auto &dex_pc_i : *previous_dex_pc_map) {
+ uint32_t dex_pc = dex_pc_i.first;
+ uint32_t count = dex_pc_i.second;
+ DexPCCountMap::iterator di = dex_pc_map->find(dex_pc);
+ if (di == dex_pc_map->end()) {
+ (*dex_pc_map)[dex_pc] = count;
+ } else {
+ di->second += count;
+ }
+ }
+ }
+ delete previous_dex_pc_map;
+ previous_.erase(pi);
+ }
+ std::vector<std::string> dex_pc_count_vector;
+ for (const auto &dex_pc_i : *dex_pc_map) {
+ dex_pc_count_vector.push_back(StringPrintf("%u:%u", dex_pc_i.first, dex_pc_i.second));
+ }
+ // We write out profile data with dex pc information in the following format:
+ // "method/total_count/size/[pc_1:count_1,pc_2:count_2,...]".
+ os << StringPrintf("%s/%u/%u/[%s]\n", method_name.c_str(), total_count,
+ method_size, Join(dex_pc_count_vector, ',').c_str());
+ ++num_methods;
+ }
}
}
}
// Now we write out the remaining previous methods.
- for (PreviousProfile::iterator pi = previous_.begin(); pi != previous_.end(); ++pi) {
- os << StringPrintf("%s/%u/%u\n", pi->first.c_str(), pi->second.count_, pi->second.method_size_);
+ for (const auto &pi : previous_) {
+ if (type == kProfilerMethod) {
+ os << StringPrintf("%s/%u/%u\n", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
+ } else if (type == kProfilerMethodAndDexPC) {
+ os << StringPrintf("%s/%u/%u/[", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
+ DexPCCountMap* previous_dex_pc_map = pi.second.dex_pc_map_;
+ if (previous_dex_pc_map != nullptr) {
+ std::vector<std::string> dex_pc_count_vector;
+ for (const auto &dex_pc_i : *previous_dex_pc_map) {
+ dex_pc_count_vector.push_back(StringPrintf("%u:%u", dex_pc_i.first, dex_pc_i.second));
+ }
+ os << Join(dex_pc_count_vector, ',');
+ }
+ os << "]\n";
+ }
++num_methods;
}
return num_methods;
@@ -492,8 +584,20 @@
num_null_methods_ = 0;
num_boot_methods_ = 0;
for (int i = 0; i < kHashSize; i++) {
- delete table[i];
- table[i] = nullptr;
+ delete table[i];
+ table[i] = nullptr;
+ if (dex_table[i] != nullptr) {
+ for (auto &di : *dex_table[i]) {
+ delete di.second;
+ di.second = nullptr;
+ }
+ }
+ delete dex_table[i];
+ dex_table[i] = nullptr;
+ }
+ for (auto &pi : previous_) {
+ delete pi.second.dex_pc_map_;
+ pi.second.dex_pc_map_ = nullptr;
}
previous_.clear();
}
@@ -520,7 +624,7 @@
return true;
}
-void ProfileSampleResults::ReadPrevious(int fd) {
+void ProfileSampleResults::ReadPrevious(int fd, ProfileDataType type) {
// Reset counters.
previous_num_samples_ = previous_num_null_methods_ = previous_num_boot_methods_ = 0;
@@ -540,21 +644,35 @@
previous_num_null_methods_ = atoi(summary_info[1].c_str());
previous_num_boot_methods_ = atoi(summary_info[2].c_str());
- // Now read each line until the end of file. Each line consists of 3 fields separated by /
+  // Now read each line until the end of file. Each line consists of 3 or 4 fields separated by '/'.
while (true) {
if (!ReadProfileLine(fd, line)) {
break;
}
std::vector<std::string> info;
Split(line, '/', info);
- if (info.size() != 3) {
+ if (info.size() != 3 && info.size() != 4) {
// Malformed.
break;
}
std::string methodname = info[0];
- uint32_t count = atoi(info[1].c_str());
+ uint32_t total_count = atoi(info[1].c_str());
uint32_t size = atoi(info[2].c_str());
- previous_[methodname] = PreviousValue(count, size);
+ DexPCCountMap* dex_pc_map = nullptr;
+ if (type == kProfilerMethodAndDexPC && info.size() == 4) {
+ dex_pc_map = new DexPCCountMap();
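+      // info[3] looks like "[pc_1:count_1,pc_2:count_2,...]"; strip the brackets before splitting.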
+ std::string dex_pc_counts_str = info[3].substr(1, info[3].size() - 2);
+ std::vector<std::string> dex_pc_count_pairs;
+ Split(dex_pc_counts_str, ',', dex_pc_count_pairs);
+ for (uint32_t i = 0; i < dex_pc_count_pairs.size(); ++i) {
+ std::vector<std::string> dex_pc_count;
+ Split(dex_pc_count_pairs[i], ':', dex_pc_count);
+ uint32_t dex_pc = atoi(dex_pc_count[0].c_str());
+ uint32_t count = atoi(dex_pc_count[1].c_str());
+ (*dex_pc_map)[dex_pc] = count;
+ }
+ }
+ previous_[methodname] = PreviousValue(total_count, size, dex_pc_map);
}
}
@@ -604,7 +722,7 @@
}
std::vector<std::string> info;
Split(line, '/', info);
- if (info.size() != 3) {
+ if (info.size() != 3 && info.size() != 4) {
// Malformed.
return false;
}
diff --git a/runtime/profiler.h b/runtime/profiler.h
index 0b18dbb..396dd23 100644
--- a/runtime/profiler.h
+++ b/runtime/profiler.h
@@ -53,8 +53,9 @@
~ProfileSampleResults();
void Put(mirror::ArtMethod* method);
- uint32_t Write(std::ostream &os);
- void ReadPrevious(int fd);
+ void PutDexPC(mirror::ArtMethod* method, uint32_t pc);
+ uint32_t Write(std::ostream &os, ProfileDataType type);
+ void ReadPrevious(int fd, ProfileDataType type);
void Clear();
uint32_t GetNumSamples() { return num_samples_; }
void NullMethod() { ++num_null_methods_; }
@@ -68,15 +69,21 @@
uint32_t num_null_methods_; // Number of samples where we don't know the method.
uint32_t num_boot_methods_; // Number of samples in the boot path.
- typedef std::map<mirror::ArtMethod*, uint32_t> Map; // Map of method vs its count.
+ typedef std::map<mirror::ArtMethod*, uint32_t> Map; // Map of method vs its count.
Map *table[kHashSize];
+ typedef std::map<uint32_t, uint32_t> DexPCCountMap; // Map of dex pc vs its count
+ // Map of method vs dex pc counts in the method.
+ typedef std::map<mirror::ArtMethod*, DexPCCountMap*> MethodDexPCMap;
+ MethodDexPCMap *dex_table[kHashSize];
+
struct PreviousValue {
- PreviousValue() : count_(0), method_size_(0) {}
- PreviousValue(uint32_t count, uint32_t method_size)
- : count_(count), method_size_(method_size) {}
+ PreviousValue() : count_(0), method_size_(0), dex_pc_map_(nullptr) {}
+ PreviousValue(uint32_t count, uint32_t method_size, DexPCCountMap* dex_pc_map)
+ : count_(count), method_size_(method_size), dex_pc_map_(dex_pc_map) {}
uint32_t count_;
uint32_t method_size_;
+ DexPCCountMap* dex_pc_map_;
};
typedef std::map<std::string, PreviousValue> PreviousProfile;
@@ -114,7 +121,7 @@
static void Stop() LOCKS_EXCLUDED(Locks::profiler_lock_, wait_lock_);
static void Shutdown() LOCKS_EXCLUDED(Locks::profiler_lock_);
- void RecordMethod(mirror::ArtMethod *method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void RecordMethod(mirror::ArtMethod* method, uint32_t pc)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Barrier& GetBarrier() {
return *profiler_barrier_;
diff --git a/runtime/profiler_options.h b/runtime/profiler_options.h
index 08e32cc..0b63003 100644
--- a/runtime/profiler_options.h
+++ b/runtime/profiler_options.h
@@ -22,6 +22,11 @@
namespace art {
+enum ProfileDataType {
+ kProfilerMethod, // Method only
+ kProfilerMethodAndDexPC, // Method with Dex PC
+};
+
class ProfilerOptions {
public:
static constexpr bool kDefaultEnabled = false;
@@ -32,6 +37,7 @@
static constexpr bool kDefaultStartImmediately = false;
static constexpr double kDefaultTopKThreshold = 90.0;
static constexpr double kDefaultChangeInTopKThreshold = 10.0;
+ static constexpr ProfileDataType kDefaultProfileData = kProfilerMethod;
ProfilerOptions() :
enabled_(kDefaultEnabled),
@@ -41,7 +47,8 @@
backoff_coefficient_(kDefaultBackoffCoefficient),
start_immediately_(kDefaultStartImmediately),
top_k_threshold_(kDefaultTopKThreshold),
- top_k_change_threshold_(kDefaultChangeInTopKThreshold) {}
+ top_k_change_threshold_(kDefaultChangeInTopKThreshold),
+ profile_type_(kDefaultProfileData) {}
ProfilerOptions(bool enabled,
uint32_t period_s,
@@ -50,7 +57,8 @@
double backoff_coefficient,
bool start_immediately,
double top_k_threshold,
- double top_k_change_threshold):
+ double top_k_change_threshold,
+ ProfileDataType profile_type):
enabled_(enabled),
period_s_(period_s),
duration_s_(duration_s),
@@ -58,7 +66,8 @@
backoff_coefficient_(backoff_coefficient),
start_immediately_(start_immediately),
top_k_threshold_(top_k_threshold),
- top_k_change_threshold_(top_k_change_threshold) {}
+ top_k_change_threshold_(top_k_change_threshold),
+ profile_type_(profile_type) {}
bool IsEnabled() const {
return enabled_;
@@ -92,6 +101,10 @@
return top_k_change_threshold_;
}
+ ProfileDataType GetProfileType() const {
+ return profile_type_;
+ }
+
private:
friend std::ostream & operator<<(std::ostream &os, const ProfilerOptions& po) {
os << "enabled=" << po.enabled_
@@ -101,7 +114,8 @@
<< ", backoff_coefficient=" << po.backoff_coefficient_
<< ", start_immediately=" << po.start_immediately_
<< ", top_k_threshold=" << po.top_k_threshold_
- << ", top_k_change_threshold=" << po.top_k_change_threshold_;
+ << ", top_k_change_threshold=" << po.top_k_change_threshold_
+ << ", profile_type=" << po.profile_type_;
return os;
}
@@ -123,6 +137,8 @@
double top_k_threshold_;
// How much the top K% samples needs to change in order for the app to be recompiled.
double top_k_change_threshold_;
+ // The type of profile data dumped to the disk.
+ ProfileDataType profile_type_;
};
} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 717381c..8aa7ea1 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -421,6 +421,9 @@
int fd = open(profile_output_filename_.c_str(), O_RDWR|O_CREAT|O_EXCL, 0660);
if (fd >= 0) {
close(fd);
+ } else if (errno != EEXIST) {
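+    // open(2) failed for some reason other than the file already existing; give up on profiling.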
+ LOG(INFO) << "Failed to access the profile file. Profiler disabled.";
+ return true;
}
StartProfiler(profile_output_filename_.c_str());
}