Merge "Rename openDexFileNative to openDexFile." into lmp-dev
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 3a19c40..dd04837 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -237,7 +237,7 @@
.PHONY: $$(gtest_rule)
$$(gtest_rule): $$(gtest_exe) $$(ART_GTEST_$(1)_HOST_DEPS) $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) $$(gtest_deps)
- $(hide) ($$(call ART_TEST_SKIP,$$@) && $$< && $$(call ART_TEST_PASSED,$$@)) \
+ $(hide) ($$(call ART_TEST_SKIP,$$@) && LD_PRELOAD=libsigchain$$(ART_HOST_SHLIB_EXTENSION) $$< && $$(call ART_TEST_PASSED,$$@)) \
|| $$(call ART_TEST_FAILED,$$@)
ART_TEST_HOST_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule)
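Note: the LD_PRELOAD of libsigchain matters because the implicit checks introduced below rely on SIGSEGV. The preloaded library interposes the signal-registration calls so ART's fault handler stays first in line and can chain to handlers the test environment installs. A minimal sketch of the interposition idea, not the actual libsigchain source; names and the SIGSEGV-only special-casing are illustrative:

    #include <dlfcn.h>
    #include <signal.h>

    static struct sigaction chained_segv;  // handler the application tried to install

    extern "C" int sigaction(int signum, const struct sigaction* act, struct sigaction* oldact) {
      using real_sigaction_t = int (*)(int, const struct sigaction*, struct sigaction*);
      static auto real = reinterpret_cast<real_sigaction_t>(dlsym(RTLD_NEXT, "sigaction"));
      if (signum == SIGSEGV && act != nullptr) {
        // Record the caller's handler instead of installing it; the fault
        // manager can invoke it later for faults ART did not generate.
        if (oldact != nullptr) {
          *oldact = chained_segv;
        }
        chained_segv = *act;
        return 0;
      }
      return real(signum, act, oldact);
    }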
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 61a2cde..10936a4 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -48,11 +48,6 @@
$(eval $(call create-core-oat-host-rules,2ND_))
endif
-IMPLICIT_CHECKS_arm := null,stack
-IMPLICIT_CHECKS_arm64 := none
-IMPLICIT_CHECKS_x86 := none
-IMPLICIT_CHECKS_x86_64 := none
-IMPLICIT_CHECKS_mips := none
define create-core-oat-target-rules
$$($(1)TARGET_CORE_IMG_OUT): $$($(1)TARGET_CORE_DEX_FILES) $$(DEX2OATD_DEPENDENCY)
@echo "target dex2oat: $$@ ($$?)"
@@ -63,7 +58,6 @@
--oat-location=$$($(1)TARGET_CORE_OAT) --image=$$($(1)TARGET_CORE_IMG_OUT) \
--base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(1)TARGET_ARCH) \
--instruction-set-features=$$($(1)TARGET_INSTRUCTION_SET_FEATURES) \
- --implicit-checks=$(IMPLICIT_CHECKS_$($(1)TARGET_ARCH)) \
--android-root=$$(PRODUCT_OUT)/system --include-patch-information
# This "renaming" eases declaration in art/Android.mk
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index 40bd983..18adbab 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -2090,4 +2090,28 @@
EXPECT_EQ(value_names_[3], value_names_[14]);
}
+TEST_F(GlobalValueNumberingTest, NormalPathToCatchEntry) {
+ // When there's an empty catch block, all the exception paths lead to the next block in
+ // the normal path and we can also have normal "taken" or "fall-through" branches to that
+ // path. Check that LocalValueNumbering::PruneNonAliasingRefsForCatch() can handle it.
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(4)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
+ };
+ static const MIRDef mirs[] = {
+ DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 100u),
+ };
+ PrepareBasicBlocks(bbs);
+ BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(5u);
+ catch_handler->catch_entry = true;
+ BasicBlock* merge_block = cu_.mir_graph->GetBasicBlock(4u);
+ std::swap(merge_block->taken, merge_block->fall_through);
+ PrepareMIRs(mirs);
+ PerformGVN();
+}
+
} // namespace art
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index d5fd6fe..ef893fe 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -445,6 +445,11 @@
void LocalValueNumbering::PruneNonAliasingRefsForCatch() {
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
const BasicBlock* bb = gvn_->GetBasicBlock(lvn->Id());
+ if (UNLIKELY(bb->taken == id_) || UNLIKELY(bb->fall_through == id_)) {
+ // Non-exceptional path to a catch handler means that the catch block was actually
+ // empty and all exceptional paths lead to the shared path after that empty block.
+ continue;
+ }
DCHECK_EQ(bb->taken, kNullBlock);
DCHECK_NE(bb->fall_through, kNullBlock);
const BasicBlock* fall_through_bb = gvn_->GetBasicBlock(bb->fall_through);
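A sketch of the shape being handled, using the block ids from the NormalPathToCatchEntry test above:

    // CFG of the empty-catch case:
    //
    //   3 --normal--> 4 --normal--> 5 --> exit
    //   3 --throw--------------------^
    //
    // Block 5 is a catch entry, yet predecessor 4 reaches it through a
    // normal taken/fall-through edge because the catch block itself was
    // empty; the DCHECKs below only hold for purely exceptional edges.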
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 01e17bf..6b96e92 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -190,7 +190,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null check fails, it's handled by the slow path to reduce exception-related metadata.
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
}
}
@@ -261,7 +261,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null check fails, it's handled by the slow path to reduce exception-related metadata.
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
}
}
@@ -362,7 +362,7 @@
Thread::kStackOverflowSignalReservedBytes;
bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes);
if (!skip_overflow_check) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
if (!large_frame) {
/* Load stack limit */
LockTemp(rs_r12);
@@ -401,7 +401,7 @@
const int spill_size = spill_count * 4;
const int frame_size_without_spills = frame_size_ - spill_size;
if (!skip_overflow_check) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
class StackOverflowSlowPath : public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace)
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 8117c62..d946ee3 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -202,7 +202,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null check fails, it's handled by the slow path to reduce exception-related metadata.
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
}
}
@@ -250,7 +250,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null check fails, it's handled by the slow path to reduce exception-related metadata.
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
}
}
@@ -338,7 +338,7 @@
const int frame_size_without_spills = frame_size_ - spill_size;
if (!skip_overflow_check) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
if (!large_frame) {
// Load stack limit
LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_x9);
@@ -371,7 +371,7 @@
}
if (!skip_overflow_check) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
class StackOverflowSlowPath: public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) :
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index e4eeeaf..07002f5 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -100,7 +100,7 @@
RegStorage r_src, OpSize size) OVERRIDE;
void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target) OVERRIDE;
+ int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
// Required for target - register utilities.
RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 2650892..9bb1001 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -285,7 +285,8 @@
LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
RegStorage base_reg, int offset, int check_value,
- LIR* target) {
+ LIR* target, LIR** compare) {
+ DCHECK(compare == nullptr);
// It is possible that temp register is 64-bit. (ArgReg or RefReg)
// Always compare 32-bit value no matter what temp_reg is.
if (temp_reg.Is64Bit()) {
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 2212380..0fc8700 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -1193,8 +1193,8 @@
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
- *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
+ *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
} else {
*pcrLabel = nullptr;
// In lieu of generating a check for kArg1 being null, we need to
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 60d2589..463f277 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1172,9 +1172,12 @@
}
LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target) {
+ int offset, int check_value, LIR* target, LIR** compare) {
// Handle this for architectures that can't compare to memory.
- Load32Disp(base_reg, offset, temp_reg);
+ LIR* inst = Load32Disp(base_reg, offset, temp_reg);
+ if (compare != nullptr) {
+ *compare = inst;
+ }
LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
return branch;
}
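The new out-parameter exists so callers can find the instruction that actually touches memory and mark it as a potential fault site for implicit null checks. A fragment mirroring the call site added in GenInlinedCharAt further down (constant_index stands in for mir_graph_->ConstantValue(...)):

    LIR* comparison = nullptr;
    LIR* branch = OpCmpMemImmBranch(kCondUlt, RegStorage::InvalidReg(), rl_obj.reg,
                                    count_offset, constant_index, nullptr, &comparison);
    // The compare/load, not the branch, is what faults on a null rl_obj.reg.
    MarkPossibleNullPointerExceptionAfter(0, comparison);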
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 45dd7f0..0e46c96 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -48,6 +48,7 @@
true, // kIntrinsicMinMaxFloat
true, // kIntrinsicMinMaxDouble
true, // kIntrinsicSqrt
+ false, // kIntrinsicGet
false, // kIntrinsicCharAt
false, // kIntrinsicCompareTo
false, // kIntrinsicIsEmptyOrLength
@@ -74,6 +75,7 @@
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], MinMaxFloat_must_be_static);
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], MinMaxDouble_must_be_static);
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSqrt], Sqrt_must_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicGet], Get_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], IsEmptyOrLength_must_not_be_static);
@@ -126,6 +128,7 @@
"D", // kClassCacheDouble
"V", // kClassCacheVoid
"Ljava/lang/Object;", // kClassCacheJavaLangObject
+ "Ljava/lang/ref/Reference;", // kClassCacheJavaLangRefReference
"Ljava/lang/String;", // kClassCacheJavaLangString
"Ljava/lang/Double;", // kClassCacheJavaLangDouble
"Ljava/lang/Float;", // kClassCacheJavaLangFloat
@@ -152,6 +155,7 @@
"max", // kNameCacheMax
"min", // kNameCacheMin
"sqrt", // kNameCacheSqrt
+ "get", // kNameCacheGet
"charAt", // kNameCacheCharAt
"compareTo", // kNameCacheCompareTo
"isEmpty", // kNameCacheIsEmpty
@@ -220,6 +224,8 @@
{ kClassCacheBoolean, 0, { } },
// kProtoCache_I
{ kClassCacheInt, 0, { } },
+ // kProtoCache_Object
+ { kClassCacheJavaLangObject, 0, { } },
// kProtoCache_Thread
{ kClassCacheJavaLangThread, 0, { } },
// kProtoCacheJ_B
@@ -308,6 +314,8 @@
INTRINSIC(JavaLangMath, Sqrt, D_D, kIntrinsicSqrt, 0),
INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0),
+ INTRINSIC(JavaLangRefReference, Get, _Object, kIntrinsicGet, 0),
+
INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty),
@@ -428,6 +436,8 @@
return backend->GenInlinedMinMaxFP(info, intrinsic.d.data & kIntrinsicFlagMin, true /* is_double */);
case kIntrinsicSqrt:
return backend->GenInlinedSqrt(info);
+ case kIntrinsicGet:
+ return backend->GenInlinedGet(info);
case kIntrinsicCharAt:
return backend->GenInlinedCharAt(info);
case kIntrinsicCompareTo:
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 5b3b104..cb8c165 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -107,6 +107,7 @@
kClassCacheDouble,
kClassCacheVoid,
kClassCacheJavaLangObject,
+ kClassCacheJavaLangRefReference,
kClassCacheJavaLangString,
kClassCacheJavaLangDouble,
kClassCacheJavaLangFloat,
@@ -140,6 +141,7 @@
kNameCacheMax,
kNameCacheMin,
kNameCacheSqrt,
+ kNameCacheGet,
kNameCacheCharAt,
kNameCacheCompareTo,
kNameCacheIsEmpty,
@@ -199,6 +201,7 @@
kProtoCacheString_I,
kProtoCache_Z,
kProtoCache_I,
+ kProtoCache_Object,
kProtoCache_Thread,
kProtoCacheJ_B,
kProtoCacheJ_I,
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 1fc0cff..cf80ee7 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -176,7 +176,7 @@
/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
return GenExplicitNullCheck(m_reg, opt_flags);
}
return nullptr;
@@ -191,16 +191,17 @@
}
void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
}
+ // Insert after last instruction.
MarkSafepointPC(last_lir_insn_);
}
}
void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
}
@@ -209,13 +210,13 @@
}
void Mir2Lir::MarkPossibleStackOverflowException() {
- if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
MarkSafepointPC(last_lir_insn_);
}
}
void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
}
@@ -623,7 +624,7 @@
LockTemp(r_tmp);
LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
mirror::Class::StatusOffset().Int32Value(),
- mirror::Class::kStatusInitialized, NULL);
+ mirror::Class::kStatusInitialized, nullptr, nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
@@ -720,7 +721,7 @@
LockTemp(r_tmp);
LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
mirror::Class::StatusOffset().Int32Value(),
- mirror::Class::kStatusInitialized, NULL);
+ mirror::Class::kStatusInitialized, nullptr, nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
@@ -2198,7 +2199,7 @@
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
return;
}
@@ -2218,7 +2219,7 @@
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
OpUnconditionalBranch(target);
return;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 79065b3..fdb3d83 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -23,9 +23,12 @@
#include "invoke_type.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
+#include "mirror/reference-inl.h"
#include "mirror/string.h"
#include "mir_to_lir-inl.h"
+#include "scoped_thread_state_change.h"
#include "x86/codegen_x86.h"
namespace art {
@@ -954,21 +957,35 @@
type, skip_this);
if (pcrLabel) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
*pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
} else {
*pcrLabel = nullptr;
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
+ (info->opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return call_state;
+ }
// In lieu of generating a check for kArg1 being null, we need to
// perform a load when doing implicit checks.
- RegStorage tmp = AllocTemp();
- Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
- MarkPossibleNullPointerException(info->opt_flags);
- FreeTemp(tmp);
+ GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
}
}
return call_state;
}
+// Default implementation of implicit null pointer check.
+// Overridden by arch specific as necessary.
+void Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ RegStorage tmp = AllocTemp();
+ Load32Disp(reg, 0, tmp);
+ MarkPossibleNullPointerException(opt_flags);
+ FreeTemp(tmp);
+}
+
+
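In rough assembly, the default implementation above turns the null check into a plain load that faults when the base is null; the fault handler (see fault_handler_x86.cc below for the x86 side) rewrites the signal context so execution resumes in the NPE-throwing entrypoint. A sketch, with register names illustrative:

    // Emitted shape of the default implicit null check:
    //   mov r_tmp, [r_obj + 0]   ; SIGSEGV here when r_obj == null
    // The safepoint recorded at this LIR lets the fault handler map the
    // faulting PC back to the method and throw NullPointerException.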
/*
* May have 0+ arguments (also used for jumbo). Note that
* source virtual registers may be in physical registers, so may
@@ -1183,16 +1200,17 @@
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
*pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
} else {
*pcrLabel = nullptr;
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
+ (info->opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return call_state;
+ }
// In lieu of generating a check for kArg1 being null, we need to
// perform a load when doing implicit checks.
- RegStorage tmp = AllocTemp();
- Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
- MarkPossibleNullPointerException(info->opt_flags);
- FreeTemp(tmp);
+ GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
}
}
return call_state;
@@ -1218,6 +1236,98 @@
return res;
}
+bool Mir2Lir::GenInlinedGet(CallInfo* info) {
+ if (cu_->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+
+ // The Reference class is stored in the image dex file, which might not be the same as the cu's
+ // dex file. Query the Reference class from the image dex file, then reset to the starting dex
+ // file after loading the class type.
+ uint16_t type_idx = 0;
+ const DexFile* ref_dex_file = nullptr;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ type_idx = mirror::Reference::GetJavaLangRefReference()->GetDexTypeIndex();
+ ref_dex_file = mirror::Reference::GetJavaLangRefReference()->GetDexCache()->GetDexFile();
+ }
+ CHECK(LIKELY(ref_dex_file != nullptr));
+
+ // The address is either static within the image file, or needs to be patched up after compilation.
+ bool unused_type_initialized;
+ bool use_direct_type_ptr;
+ uintptr_t direct_type_ptr;
+ bool is_finalizable;
+ const DexFile* old_dex = cu_->dex_file;
+ cu_->dex_file = ref_dex_file;
+ RegStorage reg_class = TargetReg(kArg1, kRef);
+ Clobber(reg_class);
+ LockTemp(reg_class);
+ if (!cu_->compiler_driver->CanEmbedTypeInCode(*ref_dex_file, type_idx, &unused_type_initialized,
+ &use_direct_type_ptr, &direct_type_ptr,
+ &is_finalizable) || is_finalizable) {
+ cu_->dex_file = old_dex;
+ // The address is not known and a post-compile patch is not possible; we cannot insert the intrinsic.
+ return false;
+ }
+ if (use_direct_type_ptr) {
+ LoadConstant(reg_class, direct_type_ptr);
+ } else {
+ LoadClassType(type_idx, kArg1);
+ }
+ cu_->dex_file = old_dex;
+
+ // Get the offsets of the flags in the Reference class.
+ uint32_t slow_path_flag_offset = 0;
+ uint32_t disable_flag_offset = 0;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
+ slow_path_flag_offset = reference_class->GetSlowPathFlagOffset().Uint32Value();
+ disable_flag_offset = reference_class->GetDisableIntrinsicFlagOffset().Uint32Value();
+ }
+ CHECK(slow_path_flag_offset && disable_flag_offset &&
+ (slow_path_flag_offset != disable_flag_offset));
+
+ // Intrinsic logic start.
+ RegLocation rl_obj = info->args[0];
+ rl_obj = LoadValue(rl_obj);
+
+ RegStorage reg_slow_path = AllocTemp();
+ RegStorage reg_disabled = AllocTemp();
+ Load32Disp(reg_class, slow_path_flag_offset, reg_slow_path);
+ Load32Disp(reg_class, disable_flag_offset, reg_disabled);
+ FreeTemp(reg_class);
+ LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
+ FreeTemp(reg_disabled);
+
+ // If the slow path is enabled, jump to the JNI path target.
+ LIR* slow_path_branch;
+ if (or_inst->u.m.def_mask->HasBit(ResourceMask::kCCode)) {
+ // Generate a conditional branch only, as the OR sets the condition flags (we are interested in the 'Z' flag).
+ slow_path_branch = OpCondBranch(kCondNe, nullptr);
+ } else {
+ // Generate compare and branch.
+ slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
+ }
+ FreeTemp(reg_slow_path);
+
+ // Slow path not enabled: simply load the referent of the reference object.
+ RegLocation rl_dest = InlineTarget(info);
+ RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
+ GenNullCheck(rl_obj.reg, info->opt_flags);
+ LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
+ kNotVolatile);
+ MarkPossibleNullPointerException(info->opt_flags);
+ StoreValue(rl_dest, rl_result);
+
+ LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
+ AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
+
+ return true;
+}
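Restated in plain C++, the fast path the intrinsic emits is roughly the following; field and helper names are illustrative, not ART's actual accessors:

    // if ((ref_class->slow_path_flag | ref_class->disable_intrinsic_flag) != 0)
    //   return CallReferenceGetThroughJni(obj);   // taken branch -> slow path
    // return obj->referent;                       // single reference load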
+
bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
if (cu_->instruction_set == kMips) {
// TODO - add Mips implementation
@@ -1268,11 +1378,14 @@
// On x86, we can compare to memory directly
// Set up a launch pad to allow retry in case of bounds violation */
if (rl_idx.is_const) {
+ LIR* comparison;
range_check_branch = OpCmpMemImmBranch(
kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
- mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr);
- } else {
+ mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison);
+ MarkPossibleNullPointerExceptionAfter(0, comparison);
+ } else {
OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
+ MarkPossibleNullPointerException(0);
range_check_branch = OpCondBranch(kCondUge, nullptr);
}
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 634ab94..f1b7416 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -838,6 +838,7 @@
LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind);
LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
+ virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
RegLocation rl_src2, LIR* taken, LIR* fall_through);
void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
@@ -982,6 +983,7 @@
*/
RegLocation InlineTargetWide(CallInfo* info);
+ bool GenInlinedGet(CallInfo* info);
bool GenInlinedCharAt(CallInfo* info);
bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
@@ -1147,10 +1149,12 @@
* @param base_reg The register holding the base address.
* @param offset The offset from the base.
* @param check_value The immediate to compare to.
+ * @param target The branch target (or nullptr).
+ * @param compare Output parameter receiving the comparison LIR (or nullptr).
* @returns The branch instruction that was generated.
*/
virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target);
+ int offset, int check_value, LIR* target, LIR** compare);
// Required for target - codegen helpers.
virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 8df5b6d..ebe3f0a 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -271,21 +271,22 @@
{ kX86Shrd64RRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01 | SETS_CCODES, { REX_W, 0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd64RRI", "!0r,!1r,!2d" },
{ kX86Shrd64MRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { REX_W, 0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd64MRI", "[!0r+!1d],!2r,!3d" },
- { kX86Test8RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8RI", "!0r,!1d" },
- { kX86Test8MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8MI", "[!0r+!1d],!2d" },
- { kX86Test8AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" },
- { kX86Test16RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16RI", "!0r,!1d" },
- { kX86Test16MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16MI", "[!0r+!1d],!2d" },
- { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16AI", "[!0r+!1r<<!2d+!3d],!4d" },
- { kX86Test32RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32RI", "!0r,!1d" },
- { kX86Test32MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32MI", "[!0r+!1d],!2d" },
- { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Test8RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8RI", "!0r,!1d" },
+ { kX86Test8MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8MI", "[!0r+!1d],!2d" },
+ { kX86Test8AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Test16RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16RI", "!0r,!1d" },
+ { kX86Test16MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16MI", "[!0r+!1d],!2d" },
+ { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Test32RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32RI", "!0r,!1d" },
+ { kX86Test32MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32MI", "[!0r+!1d],!2d" },
+ { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" },
{ kX86Test64RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64RI", "!0r,!1d" },
{ kX86Test64MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64MI", "[!0r+!1d],!2d" },
{ kX86Test64AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64AI", "[!0r+!1r<<!2d+!3d],!4d" },
- { kX86Test32RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RR", "!0r,!1r" },
+ { kX86Test32RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RR", "!0r,!1r" },
{ kX86Test64RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { REX_W, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test64RR", "!0r,!1r" },
+ { kX86Test32RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RM", "!0r,[!1r+!2d]" },
#define UNARY_ENCODING_MAP(opname, modrm, is_store, sets_ccodes, \
reg, reg_kind, reg_flags, \
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index b7441d7..40dd9cc 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -222,15 +222,28 @@
LockTemp(rs_rX86_ARG1);
LockTemp(rs_rX86_ARG2);
- /* Build frame, return address already on stack */
- stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
-
/*
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- const bool skip_overflow_check = mir_graph_->MethodIsLeaf() &&
- !IsLargeFrame(frame_size_, cu_->target64 ? kX86_64 : kX86);
+ InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
+ const bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, isa);
+
+ // If we are doing an implicit stack overflow check, perform the load immediately
+ // before the stack pointer is decremented and anything is saved.
+ if (!skip_overflow_check &&
+ cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+ // Implicit stack overflow check.
+ // test eax,[esp + -overflow]
+ int overflow = GetStackOverflowReservedBytes(isa);
+ NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rX86_SP.GetReg(), -overflow);
+ MarkPossibleStackOverflowException();
+ }
+
+ /* Build frame, return address already on stack */
+ stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ -
+ GetInstructionSetPointerSize(cu_->instruction_set));
+
NewLIR0(kPseudoMethodEntry);
/* Spill core callee saves */
SpillCoreRegs();
@@ -260,25 +273,27 @@
private:
const size_t sp_displace_;
};
- // TODO: for large frames we should do something like:
- // spill ebp
- // lea ebp, [esp + frame_size]
- // cmp ebp, fs:[stack_end_]
- // jcc stack_overflow_exception
- // mov esp, ebp
- // in case a signal comes in that's not using an alternate signal stack and the large frame may
- // have moved us outside of the reserved area at the end of the stack.
- // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
- if (cu_->target64) {
- OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
- } else {
- OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
- }
- LIR* branch = OpCondBranch(kCondUlt, nullptr);
- AddSlowPath(
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+ // TODO: for large frames we should do something like:
+ // spill ebp
+ // lea ebp, [esp + frame_size]
+ // cmp ebp, fs:[stack_end_]
+ // jcc stack_overflow_exception
+ // mov esp, ebp
+ // in case a signal comes in that's not using an alternate signal stack and the large frame
+ // may have moved us outside of the reserved area at the end of the stack.
+ // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
+ if (cu_->target64) {
+ OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
+ } else {
+ OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
+ }
+ LIR* branch = OpCondBranch(kCondUlt, nullptr);
+ AddSlowPath(
new(arena_)StackOverflowSlowPath(this, branch,
frame_size_ -
GetInstructionSetPointerSize(cu_->instruction_set)));
+ }
}
FlushIns(ArgLocs, rl_method);
@@ -318,4 +333,14 @@
NewLIR0(kX86Ret);
}
+void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ // Implicit null pointer check.
+ // test eax,[arg1+0]
+ NewLIR3(kX86Test32RM, rs_rAX.GetReg(), reg.GetReg(), 0);
+ MarkPossibleNullPointerException(opt_flags);
+}
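Compared with the portable default in gen_invoke.cc above, the x86 override avoids allocating a scratch register by using test, which reads memory but writes only EFLAGS. Roughly:

    //   default:  mov  r_tmp, [r_obj + 0]   ; needs AllocTemp()/FreeTemp()
    //   x86:      test eax, [r_obj + 0]     ; no temp, only flags clobbered
    // Both forms fault identically when r_obj is null.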
+
} // namespace art
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index f4fa1b4..1f97429 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -85,6 +85,7 @@
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_src, OpSize size) OVERRIDE;
void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
+ void GenImplicitNullCheck(RegStorage reg, int opt_flags);
// Required for target - register utilities.
RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
@@ -805,9 +806,11 @@
* @param base_reg The register holding the base address.
* @param offset The offset from the base.
* @param check_value The immediate to compare to.
+ * @param target The branch target (or nullptr).
+ * @param compare Output parameter receiving the comparison LIR (or nullptr).
*/
LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target);
+ int offset, int check_value, LIR* target, LIR** compare);
/*
* Can this operation be using core registers without temporaries?
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 2f27482..3f1df18 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1098,6 +1098,7 @@
};
OpRegMem(kOpCmp, index, array_base, len_offset);
+ MarkPossibleNullPointerException(0);
LIR* branch = OpCondBranch(kCondUge, nullptr);
AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch,
index, array_base, len_offset));
@@ -1140,6 +1141,7 @@
};
NewLIR3(IS_SIMM8(index) ? kX86Cmp32MI8 : kX86Cmp32MI, array_base.GetReg(), len_offset, index);
+ MarkPossibleNullPointerException(0);
LIR* branch = OpCondBranch(kCondLs, nullptr);
AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch,
index, array_base, len_offset));
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index bb1f379..61a0474 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -888,8 +888,12 @@
}
LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
- LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
- return nullptr;
+ // First load the pointer from fs:[suspend-trigger] into eax,
+ // then use a test instruction to load through that address.
+ NewLIR2(kX86Mov32RT, rs_rAX.GetReg(), cu_->target64 ?
+ Thread::ThreadSuspendTriggerOffset<8>().Int32Value() :
+ Thread::ThreadSuspendTriggerOffset<4>().Int32Value());
+ return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0);
}
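This pairs with the implicit suspend-check scheme: the runtime arranges for the loaded trigger pointer to become null when a suspend is requested, so the test through it faults and the fault manager dispatches to art_quick_test_suspend. A sketch of the request side, assuming the trigger field behaves as it does for the ARM implicit checks; member names illustrative:

    void TriggerSuspend(Thread* self) {
      self->tlsPtr_.suspend_trigger = nullptr;  // next CheckSuspendUsingLoad faults
    }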
uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
@@ -1254,6 +1258,7 @@
// Is the string non-NULL?
LoadValueDirectFixed(rl_obj, rs_rDX);
GenNullCheck(rs_rDX, info->opt_flags);
+ // uint32_t opt_flags = info->opt_flags;
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
// Does the character fit in 16 bits?
@@ -1280,12 +1285,20 @@
// Character is in EAX.
// Object pointer is in EDX.
+ // Compute the number of words to search in to rCX.
+ Load32Disp(rs_rDX, count_offset, rs_rCX);
+
+ // Possible signal here due to null pointer dereference.
+ // Note that the signal handler will expect the top word of
+ // the stack to be the ArtMethod*. If the PUSH edi instruction
+ // below were ahead of the load above, this would not be true
+ // and the signal handler would not work.
+ MarkPossibleNullPointerException(0);
+
// We need to preserve EDI, but have no spare registers, so push it on the stack.
// We have to remember that all stack addresses after this are offset by sizeof(EDI).
NewLIR1(kX86Push32R, rs_rDI.GetReg());
- // Compute the number of words to search in to rCX.
- Load32Disp(rs_rDX, count_offset, rs_rCX);
LIR *length_compare = nullptr;
int start_value = 0;
bool is_index_on_stack = false;
@@ -2682,7 +2695,7 @@
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
*pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
} else {
*pcrLabel = nullptr;
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 045e58e..047a65d 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -684,9 +684,9 @@
} else {
DCHECK(!r_dest.IsFloat()); // Make sure we're not still using a pair here.
if (r_base == r_dest.GetLow()) {
- load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
+ load = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
displacement + HIWORD_OFFSET);
- load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
+ load2 = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
} else {
load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
@@ -712,16 +712,16 @@
if (r_dest.GetHigh() == r_index) {
// We can't use either register for the first load.
RegStorage temp = AllocTemp();
- load2 = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
+ load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + HIWORD_OFFSET);
- load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
+ load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + LOWORD_OFFSET);
OpRegCopy(r_dest.GetHigh(), temp);
FreeTemp(temp);
} else {
- load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
+ load = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + HIWORD_OFFSET);
- load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
+ load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + LOWORD_OFFSET);
}
} else {
@@ -744,6 +744,7 @@
}
}
+ // Always return the first load generated, as it is the one that might fault if base is nullptr.
return load;
}
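The load/load2 swaps above matter because callers mark the returned LIR as the instruction that may fault:

    // With implicit null checks, the marked LIR must be the first memory
    // access emitted, whichever word order was chosen:
    //   LIR* load = LoadBaseDisp(base, disp, dest, k64, kNotVolatile);
    //   MarkPossibleNullPointerExceptionAfter(opt_flags, load);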
@@ -878,9 +879,12 @@
}
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target) {
- NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(), offset,
- check_value);
+ int offset, int check_value, LIR* target, LIR** compare) {
+ LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
+ offset, check_value);
+ if (compare != nullptr) {
+ *compare = inst;
+ }
LIR* branch = OpCondBranch(cond, target);
return branch;
}
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 5657381..17f9b91 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -499,6 +499,7 @@
UnaryOpcode(kX86Test, RI, MI, AI),
kX86Test32RR,
kX86Test64RR,
+ kX86Test32RM,
UnaryOpcode(kX86Not, R, M, A),
UnaryOpcode(kX86Neg, R, M, A),
UnaryOpcode(kX86Mul, DaR, DaM, DaA),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 92b2fee..c0f91d16 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -56,9 +56,9 @@
include_patch_information_(kDefaultIncludePatchInformation),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
include_debug_symbols_(kDefaultIncludeDebugSymbols),
- explicit_null_checks_(true),
- explicit_so_checks_(true),
- explicit_suspend_checks_(true)
+ implicit_null_checks_(false),
+ implicit_so_checks_(false),
+ implicit_suspend_checks_(false)
#ifdef ART_SEA_IR_MODE
, sea_ir_mode_(false)
#endif
@@ -74,9 +74,9 @@
bool include_patch_information,
double top_k_profile_threshold,
bool include_debug_symbols,
- bool explicit_null_checks,
- bool explicit_so_checks,
- bool explicit_suspend_checks
+ bool implicit_null_checks,
+ bool implicit_so_checks,
+ bool implicit_suspend_checks
#ifdef ART_SEA_IR_MODE
, bool sea_ir_mode
#endif
@@ -91,9 +91,9 @@
include_patch_information_(include_patch_information),
top_k_profile_threshold_(top_k_profile_threshold),
include_debug_symbols_(include_debug_symbols),
- explicit_null_checks_(explicit_null_checks),
- explicit_so_checks_(explicit_so_checks),
- explicit_suspend_checks_(explicit_suspend_checks)
+ implicit_null_checks_(implicit_null_checks),
+ implicit_so_checks_(implicit_so_checks),
+ implicit_suspend_checks_(implicit_suspend_checks)
#ifdef ART_SEA_IR_MODE
, sea_ir_mode_(sea_ir_mode)
#endif
@@ -160,28 +160,28 @@
return include_debug_symbols_;
}
- bool GetExplicitNullChecks() const {
- return explicit_null_checks_;
+ bool GetImplicitNullChecks() const {
+ return implicit_null_checks_;
}
- void SetExplicitNullChecks(bool new_val) {
- explicit_null_checks_ = new_val;
+ void SetImplicitNullChecks(bool new_val) {
+ implicit_null_checks_ = new_val;
}
- bool GetExplicitStackOverflowChecks() const {
- return explicit_so_checks_;
+ bool GetImplicitStackOverflowChecks() const {
+ return implicit_so_checks_;
}
- void SetExplicitStackOverflowChecks(bool new_val) {
- explicit_so_checks_ = new_val;
+ void SetImplicitStackOverflowChecks(bool new_val) {
+ implicit_so_checks_ = new_val;
}
- bool GetExplicitSuspendChecks() const {
- return explicit_suspend_checks_;
+ bool GetImplicitSuspendChecks() const {
+ return implicit_suspend_checks_;
}
- void SetExplicitSuspendChecks(bool new_val) {
- explicit_suspend_checks_ = new_val;
+ void SetImplicitSuspendChecks(bool new_val) {
+ implicit_suspend_checks_ = new_val;
}
#ifdef ART_SEA_IR_MODE
@@ -208,9 +208,9 @@
// When using a profile file only the top K% of the profiled samples will be compiled.
double top_k_profile_threshold_;
bool include_debug_symbols_;
- bool explicit_null_checks_;
- bool explicit_so_checks_;
- bool explicit_suspend_checks_;
+ bool implicit_null_checks_;
+ bool implicit_so_checks_;
+ bool implicit_suspend_checks_;
#ifdef ART_SEA_IR_MODE
bool sea_ir_mode_;
#endif
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 6b26980..d7742e7 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -25,7 +25,7 @@
#include "compiler/image_writer.h"
#include "compiler/oat_writer.h"
#include "gc/space/image_space.h"
-#include "implicit_check_options.h"
+#include "image_writer.h"
#include "lock_word.h"
#include "mirror/object-inl.h"
#include "signal_catcher.h"
@@ -79,8 +79,6 @@
t.NewTiming("WriteElf");
ScopedObjectAccess soa(Thread::Current());
SafeMap<std::string, std::string> key_value_store;
- key_value_store.Put(ImplicitCheckOptions::kImplicitChecksOatHeaderKey,
- ImplicitCheckOptions::Serialize(true, true, true));
OatWriter oat_writer(class_linker->GetBootClassPath(), 0, 0, compiler_driver_.get(), &timings,
&key_value_store);
bool success = compiler_driver_->WriteElf(GetTestAndroidRoot(),
@@ -142,9 +140,6 @@
std::string image("-Ximage:");
image.append(image_location.GetFilename());
options.push_back(std::make_pair(image.c_str(), reinterpret_cast<void*>(NULL)));
- // Turn off implicit checks for this runtime, as we compiled the image with them off.
- std::string explicit_checks("-implicit-checks:none");
- options.push_back(std::make_pair(explicit_checks.c_str(), reinterpret_cast<void*>(NULL)));
if (!Runtime::Create(options, false)) {
LOG(FATAL) << "Failed to create runtime";
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index d2ee0ed..8b91ab2 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -18,7 +18,6 @@
#include "compiler/compiler.h"
#include "compiler/oat_writer.h"
#include "entrypoints/quick/quick_entrypoints.h"
-#include "implicit_check_options.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -114,8 +113,6 @@
ScratchFile tmp;
SafeMap<std::string, std::string> key_value_store;
key_value_store.Put(OatHeader::kImageLocationKey, "lue.art");
- key_value_store.Put(ImplicitCheckOptions::kImplicitChecksOatHeaderKey,
- ImplicitCheckOptions::Serialize(true, true, true));
OatWriter oat_writer(class_linker->GetBootClassPath(),
42U,
4096U,
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0d40c8d..f1b5d6b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -48,7 +48,6 @@
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "image_writer.h"
-#include "implicit_check_options.h"
#include "leb128.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -742,20 +741,6 @@
*parsed_value = value;
}
-void CheckExplicitCheckOptions(InstructionSet isa, bool* explicit_null_checks,
- bool* explicit_so_checks, bool* explicit_suspend_checks) {
- switch (isa) {
- case kArm:
- case kThumb2:
- break; // All checks implemented, leave as is.
-
- default: // No checks implemented, reset all to explicit checks.
- *explicit_null_checks = true;
- *explicit_so_checks = true;
- *explicit_suspend_checks = true;
- }
-}
-
static int dex2oat(int argc, char** argv) {
#if defined(__linux__) && defined(__arm__)
int major, minor;
@@ -839,10 +824,10 @@
bool watch_dog_enabled = !kIsTargetBuild;
bool generate_gdb_information = kIsDebugBuild;
- bool explicit_null_checks = true;
- bool explicit_so_checks = true;
- bool explicit_suspend_checks = true;
- bool has_explicit_checks_options = false;
+ // Checks are all explicit until we know the architecture.
+ bool implicit_null_checks = false;
+ bool implicit_so_checks = false;
+ bool implicit_suspend_checks = false;
for (int i = 0; i < argc; i++) {
const StringPiece option(argv[i]);
@@ -1019,31 +1004,6 @@
} else if (option.starts_with("--dump-cfg-passes=")) {
std::string dump_passes = option.substr(strlen("--dump-cfg-passes=")).data();
PassDriverMEOpts::SetDumpPassList(dump_passes);
- } else if (option.starts_with("--implicit-checks=")) {
- std::string checks = option.substr(strlen("--implicit-checks=")).data();
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_null_checks = true;
- explicit_so_checks = true;
- explicit_suspend_checks = true;
- } else if (val == "null") {
- explicit_null_checks = false;
- } else if (val == "suspend") {
- explicit_suspend_checks = false;
- } else if (val == "stack") {
- explicit_so_checks = false;
- } else if (val == "all") {
- explicit_null_checks = false;
- explicit_so_checks = false;
- explicit_suspend_checks = false;
- } else {
- Usage("--implicit-checks passed non-recognized value %s", val.c_str());
- }
- }
- has_explicit_checks_options = true;
} else if (option == "--include-patch-information") {
include_patch_information = true;
explicit_include_patch_information = true;
@@ -1176,14 +1136,25 @@
Usage("Unknown --compiler-filter value %s", compiler_filter_string);
}
- ImplicitCheckOptions::CheckISASupport(instruction_set, &explicit_null_checks, &explicit_so_checks,
- &explicit_suspend_checks);
-
if (!explicit_include_patch_information) {
include_patch_information =
(compiler_kind == Compiler::kQuick && CompilerOptions::kDefaultIncludePatchInformation);
}
+ // Set the compilation target's implicit checks options.
+ switch (instruction_set) {
+ case kArm:
+ case kThumb2:
+ case kX86:
+ implicit_null_checks = true;
+ implicit_so_checks = true;
+ break;
+
+ default:
+ // Defaults are correct.
+ break;
+ }
+
std::unique_ptr<CompilerOptions> compiler_options(new CompilerOptions(compiler_filter,
huge_method_threshold,
large_method_threshold,
@@ -1194,9 +1165,9 @@
include_patch_information,
top_k_profile_threshold,
include_debug_symbols,
- explicit_null_checks,
- explicit_so_checks,
- explicit_suspend_checks
+ implicit_null_checks,
+ implicit_so_checks,
+ implicit_suspend_checks
#ifdef ART_SEA_IR_MODE
, compiler_options.sea_ir_ =
true;
@@ -1247,7 +1218,7 @@
}
std::unique_ptr<VerificationResults> verification_results(new VerificationResults(
- compiler_options.get()));
+ compiler_options.get()));
DexFileToMethodInlinerMap method_inliner_map;
CompilerCallbacksImpl callbacks(verification_results.get(), &method_inliner_map);
runtime_options.push_back(std::make_pair("compilercallbacks", &callbacks));
@@ -1270,18 +1241,6 @@
}
std::unique_ptr<Dex2Oat> dex2oat(p_dex2oat);
- // TODO: Not sure whether it's a good idea to allow anything else but the runtime option in
- // this case at all, as we'll have to throw away produced code for a mismatch.
- if (!has_explicit_checks_options) {
- if (ImplicitCheckOptions::CheckForCompiling(kRuntimeISA, instruction_set, &explicit_null_checks,
- &explicit_so_checks, &explicit_suspend_checks)) {
- compiler_options->SetExplicitNullChecks(explicit_null_checks);
- compiler_options->SetExplicitStackOverflowChecks(explicit_so_checks);
- compiler_options->SetExplicitSuspendChecks(explicit_suspend_checks);
- }
- }
-
-
// Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
// give it away now so that we don't starve GC.
Thread* self = Thread::Current();
@@ -1384,14 +1343,6 @@
std::unique_ptr<SafeMap<std::string, std::string> > key_value_store(
new SafeMap<std::string, std::string>());
- // Insert implicit check options.
- key_value_store->Put(ImplicitCheckOptions::kImplicitChecksOatHeaderKey,
- ImplicitCheckOptions::Serialize(compiler_options->GetExplicitNullChecks(),
- compiler_options->
- GetExplicitStackOverflowChecks(),
- compiler_options->
- GetExplicitSuspendChecks()));
-
// Insert some compiler things.
std::ostringstream oss;
for (int i = 0; i < argc; ++i) {
diff --git a/runtime/Android.mk b/runtime/Android.mk
index b0c1a9c..c2a6f7b 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -97,6 +97,7 @@
mirror/class.cc \
mirror/dex_cache.cc \
mirror/object.cc \
+ mirror/reference.cc \
mirror/stack_trace_element.cc \
mirror/string.cc \
mirror/throwable.cc \
@@ -412,6 +413,7 @@
LOCAL_STATIC_LIBRARIES := libziparchive libz
else # host
LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils
+ LOCAL_SHARED_LIBRARIES += libsigchain
LOCAL_LDLIBS += -ldl -lpthread
ifeq ($$(HOST_OS),linux)
LOCAL_LDLIBS += -lrt
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 2a82129..e22c56e 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -46,9 +46,10 @@
return instr_size;
}
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
*out_sp = static_cast<uintptr_t>(sc->arm_sp);
VLOG(signals) << "sp: " << *out_sp;
@@ -114,7 +115,7 @@
uint32_t checkinst1 = 0xf8d90000 + Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
uint16_t checkinst2 = 0x6800;
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
uint8_t* ptr2 = reinterpret_cast<uint8_t*>(sc->arm_pc);
uint8_t* ptr1 = ptr2 - 4;
@@ -178,7 +179,7 @@
// to the overflow region below the protected region.
bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
VLOG(signals) << "sigcontext: " << std::hex << sc;
@@ -205,7 +206,7 @@
}
// We know this is a stack overflow. We need to move the sp to the overflow region
- // the exists below the protected region. Determine the address of the next
+ // that exists below the protected region. Determine the address of the next
// available valid address below the protected region.
uintptr_t prevsp = sp;
sp = pregion;
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 74c3023..34eede6 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -29,7 +29,8 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
}
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index 1ecd7d9..5a64a69 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -29,7 +29,8 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
}
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 7c1980e..435f280 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -21,7 +21,21 @@
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "thread.h"
+#include "thread-inl.h"
+#if defined(__APPLE__)
+#define ucontext __darwin_ucontext
+#define CTX_ESP uc_mcontext->__ss.__esp
+#define CTX_EIP uc_mcontext->__ss.__eip
+#define CTX_EAX uc_mcontext->__ss.__eax
+#else
+#define CTX_ESP uc_mcontext.gregs[REG_ESP]
+#define CTX_EIP uc_mcontext.gregs[REG_EIP]
+#define CTX_EAX uc_mcontext.gregs[REG_EAX]
+#endif
//
// X86 specific fault handler functions.
@@ -29,19 +43,292 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow_from_signal();
+extern "C" void art_quick_test_suspend();
+
+// From the x86 disassembler...
+enum SegmentPrefix {
+ kCs = 0x2e,
+ kSs = 0x36,
+ kDs = 0x3e,
+ kEs = 0x26,
+ kFs = 0x64,
+ kGs = 0x65,
+};
+
+// Get the size of an instruction in bytes.
+static uint32_t GetInstructionSize(uint8_t* pc) {
+ uint8_t* instruction_start = pc;
+ bool have_prefixes = true;
+ bool two_byte = false;
+
+ // Skip all the prefixes.
+ do {
+ switch (*pc) {
+ // Group 1 - lock and repeat prefixes:
+ case 0xF0:
+ case 0xF2:
+ case 0xF3:
+ // Group 2 - segment override prefixes:
+ case kCs:
+ case kSs:
+ case kDs:
+ case kEs:
+ case kFs:
+ case kGs:
+ // Group 3 - operand size override:
+ case 0x66:
+ // Group 4 - address size override:
+ case 0x67:
+ break;
+ default:
+ have_prefixes = false;
+ break;
+ }
+ if (have_prefixes) {
+ pc++;
+ }
+ } while (have_prefixes);
+
+#if defined(__x86_64__)
+ // Skip the REX prefix if present.
+ if (*pc >= 0x40 && *pc <= 0x4F) {
+ ++pc;
+ }
+#endif
+
+ // Check for known instructions.
+ uint32_t known_length = 0;
+ switch (*pc) {
+ case 0x83: // cmp [r + v], b: 4-byte instruction
+ known_length = 4;
+ break;
+ }
+
+ if (known_length > 0) {
+ VLOG(signals) << "known instruction with length " << known_length;
+ return known_length;
+ }
+
+ // Unknown instruction, work out length.
+
+ // Work out if we have a ModR/M byte.
+ uint8_t opcode = *pc++;
+ if (opcode == 0xf) {
+ two_byte = true;
+ opcode = *pc++;
+ }
+
+ bool has_modrm = false; // Is ModR/M byte present?
+ uint8_t hi = opcode >> 4; // Opcode high nybble.
+ uint8_t lo = opcode & 0b1111; // Opcode low nybble.
+
+ // From the Intel opcode tables.
+ if (two_byte) {
+ has_modrm = true; // TODO: all of these?
+ } else if (hi < 4) {
+ has_modrm = lo < 4 || (lo >= 8 && lo <= 0xb);
+ } else if (hi == 6) {
+ has_modrm = lo == 3 || lo == 9 || lo == 0xb;
+ } else if (hi == 8) {
+ has_modrm = lo != 0xd;
+ } else if (hi == 0xc) {
+ has_modrm = lo == 1 || lo == 2 || lo == 6 || lo == 7;
+ } else if (hi == 0xd) {
+ has_modrm = lo < 4;
+ } else if (hi == 0xf) {
+ has_modrm = lo == 6 || lo == 7;
+ }
+
+ if (has_modrm) {
+ uint8_t modrm = *pc++;
+ uint8_t mod = (modrm >> 6) & 0b11;
+ uint8_t reg = (modrm >> 3) & 0b111;
+ switch (mod) {
+ case 0:
+ break;
+ case 1:
+ if (reg == 4) {
+ // SIB + 1 byte displacement.
+ pc += 2;
+ } else {
+ pc += 1;
+ }
+ break;
+ case 2:
+ // SIB + 4 byte displacement.
+ pc += 5;
+ break;
+ case 3:
+ break;
+ }
+ }
+
+ VLOG(signals) << "calculated X86 instruction size is " << (pc - instruction_start);
+ return pc - instruction_start;
+}
+
+void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
+ *out_sp = static_cast<uintptr_t>(uc->CTX_ESP);
+ VLOG(signals) << "sp: " << std::hex << *out_sp;
+ if (*out_sp == 0) {
+ return;
+ }
+
+ // In the case of a stack overflow, the stack is not valid and we can't
+ // get the method from the top of the stack. However, it's in EAX.
+ uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr);
+ uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86));
+ if (overflow_addr == fault_addr) {
+ *out_method = reinterpret_cast<mirror::ArtMethod*>(uc->CTX_EAX);
+ } else {
+ // The method is at the top of the stack.
+ *out_method = reinterpret_cast<mirror::ArtMethod*>(reinterpret_cast<uintptr_t*>(*out_sp)[0]);
+ }
+
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
+ VLOG(signals) << HexDump(pc, 32, true, "PC ");
+
+ uint32_t instr_size = GetInstructionSize(pc);
+ *out_return_pc = reinterpret_cast<uintptr_t>(pc + instr_size);
}
bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
+ uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);
+
+ uint32_t instr_size = GetInstructionSize(pc);
+ // We need to arrange for the signal handler to return to the null pointer
+ // exception generator. The return address must be the address of the
+ // next instruction (this instruction + instruction size). The return address
+ // is on the stack at the top address of the current frame.
+
+ // Push the return address onto the stack.
+ uint32_t retaddr = reinterpret_cast<uint32_t>(pc + instr_size);
+ uint32_t* next_sp = reinterpret_cast<uint32_t*>(sp - 4);
+ *next_sp = retaddr;
+ uc->CTX_ESP = reinterpret_cast<uint32_t>(next_sp);
+
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ VLOG(signals) << "Generating null pointer exception";
+ return true;
+}
+
+// A suspend check is done using the following instruction sequence:
+// 0xf720f1df: 648B058C000000 mov eax, fs:[0x8c] ; suspend_trigger
+// .. some intervening instructions.
+// 0xf720f1e6: 8500 test eax, [eax]
+
+// The offset from fs is Thread::ThreadSuspendTriggerOffset().
+// To check for a suspend check, we examine the instructions that caused
+// the fault.
+bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ // These are the instructions to check for. The first one is the mov eax, fs:[xxx]
+ // where xxx is the offset of the suspend trigger.
+ uint32_t trigger = Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
+
+ VLOG(signals) << "Checking for suspension point";
+ uint8_t checkinst1[] = {0x64, 0x8b, 0x05, static_cast<uint8_t>(trigger & 0xff),
+ static_cast<uint8_t>((trigger >> 8) & 0xff), 0, 0};
+ uint8_t checkinst2[] = {0x85, 0x00};
+
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
+ uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);
+
+ if (pc[0] != checkinst2[0] || pc[1] != checkinst2[1]) {
+ // Second instruction is not correct (test eax,[eax]).
+ VLOG(signals) << "Not a suspension point";
+ return false;
+ }
+
+ // The first instruction can be a little further up the instruction stream due to
+ // load hoisting in the compiler.
+ uint8_t* limit = pc - 100; // Compiler will hoist to a max of 20 instructions.
+ uint8_t* ptr = pc - sizeof(checkinst1);
+ bool found = false;
+ while (ptr > limit) {
+ if (memcmp(ptr, checkinst1, sizeof(checkinst1)) == 0) {
+ found = true;
+ break;
+ }
+ ptr -= 1;
+ }
+
+ if (found) {
+ VLOG(signals) << "suspend check match";
+
+ // We need to arrange for the signal handler to return to the test suspend
+ // entrypoint. The return address must be the address of the
+ // next instruction (this instruction + 2). The return address
+ // is on the stack at the top address of the current frame.
+
+ // Push the return address onto the stack.
+ uint32_t retaddr = reinterpret_cast<uint32_t>(pc + 2);
+ uint32_t* next_sp = reinterpret_cast<uint32_t*>(sp - 4);
+ *next_sp = retaddr;
+ uc->CTX_ESP = reinterpret_cast<uint32_t>(next_sp);
+
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_test_suspend);
+
+ // Now remove the suspend trigger that caused this fault.
+ Thread::Current()->RemoveSuspendTrigger();
+ VLOG(signals) << "removed suspend trigger invoking test suspend";
+ return true;
+ }
+ VLOG(signals) << "Not a suspend check match, first instruction mismatch";
return false;
}
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
-}
+// The stack overflow check is done using the following instruction:
+// test eax, [esp+ -xxx]
+// where 'xxx' is the size of the overflow area.
+//
+// This is done before any frame is established in the method. The return
+// address for the previous method is on the stack at ESP.
bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uintptr_t sp = static_cast<uintptr_t>(uc->CTX_ESP);
+
+ uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr);
+ VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
+ VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
+ ", fault_addr: " << fault_addr;
+
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86);
+
+ Thread* self = Thread::Current();
+ uintptr_t pregion = reinterpret_cast<uintptr_t>(self->GetStackEnd()) -
+ Thread::kStackOverflowProtectedSize;
+
+ // Check that the fault address is the value expected for a stack overflow.
+ if (fault_addr != overflow_addr) {
+ VLOG(signals) << "Not a stack overflow";
+ return false;
+ }
+
+ // We know this is a stack overflow. We need to move the sp to the overflow region
+ // that exists below the protected region. Determine the address of the next
+ // available valid address below the protected region.
+ VLOG(signals) << "setting sp to overflow region at " << std::hex << pregion;
+
+ // Since the compiler puts the implicit overflow
+ // check before the callee save instructions, the SP is already pointing to
+ // the previous frame.
+
+ // Tell the stack overflow code where the new stack pointer should be.
+ uc->CTX_EAX = pregion;
+
+ // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from_signal.
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow_from_signal);
+
+ return true;
}
} // namespace art
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 24b9e46..68f46ad 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -173,6 +173,21 @@
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
+// On entry to this function, EAX contains the ESP value for the overflow region.
+DEFINE_FUNCTION art_quick_throw_stack_overflow_from_signal
+ // Here, the ESP is above the protected region. We need to create a
+ // callee save frame and then move ESP down to the overflow region.
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov %esp, %ecx // get current stack pointer
+ mov %eax, %esp // move ESP to the overflow region.
+ PUSH ecx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
+ call PLT_SYMBOL(artThrowStackOverflowFromCode) // artThrowStackOverflowFromCode(Thread*, SP)
+ int3 // unreached
+END_FUNCTION art_quick_throw_stack_overflow_from_signal
+
/*
* Called by managed code, saves callee saves and then calls artThrowException
* that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
diff --git a/runtime/arch/x86_64/fault_handler_x86_64.cc b/runtime/arch/x86_64/fault_handler_x86_64.cc
index 233d3c7..88ae7f3 100644
--- a/runtime/arch/x86_64/fault_handler_x86_64.cc
+++ b/runtime/arch/x86_64/fault_handler_x86_64.cc
@@ -29,7 +29,8 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
}
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index fe5a2ef..fae9271 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -176,6 +176,7 @@
#endif
#define PURE __attribute__ ((__pure__))
+#define WARN_UNUSED __attribute__((warn_unused_result))
template<typename T> void UNUSED(const T&) {}
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 7779547..f856ddc 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -41,6 +41,7 @@
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
+Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
@@ -149,7 +150,8 @@
for (int i = kLockLevelCount - 1; i >= 0; --i) {
if (i != level_) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
+ // We expect waits to happen while holding the thread list suspend thread lock.
+ if (held_mutex != NULL && i != kThreadListSuspendThreadLock) {
LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << ") while performing wait on "
<< "\"" << name_ << "\" (level " << level_ << ")";
@@ -841,6 +843,7 @@
DCHECK(logging_lock_ != nullptr);
DCHECK(mutator_lock_ != nullptr);
DCHECK(thread_list_lock_ != nullptr);
+ DCHECK(thread_list_suspend_thread_lock_ != nullptr);
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
DCHECK(profiler_lock_ != nullptr);
@@ -848,13 +851,18 @@
DCHECK(intern_table_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
- LockLevel current_lock_level = kMutatorLock;
- DCHECK(mutator_lock_ == nullptr);
- mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
+ LockLevel current_lock_level = kThreadListSuspendThreadLock;
+ DCHECK(thread_list_suspend_thread_lock_ == nullptr);
+ thread_list_suspend_thread_lock_ =
+ new Mutex("thread list suspend thread by .. lock", current_lock_level);
#define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
- DCHECK_LT(new_level, current_lock_level); \
- current_lock_level = new_level;
+ DCHECK_LT(new_level, current_lock_level); \
+ current_lock_level = new_level;
+
+ UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
+ DCHECK(mutator_lock_ == nullptr);
+ mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
DCHECK(heap_bitmap_lock_ == nullptr);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 8d2cd07..7adaa1b 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -93,6 +93,7 @@
kRuntimeShutdownLock,
kHeapBitmapLock,
kMutatorLock,
+ kThreadListSuspendThreadLock,
kZygoteCreationLock,
kLockLevelCount // Must come last.
@@ -474,6 +475,15 @@
public:
static void Init();
+ // There's a potential race for two threads to try to suspend each other and for both of them
+ // to succeed and get blocked becoming runnable. This lock ensures that only one thread is
+ // requesting suspension of another at any time. As the thread list suspend thread logic
+ // transitions to runnable, if another thread tried to suspend the current thread, this
+ // thread would block while holding this lock until it could safely request suspension of
+ // the other thread without that thread having a pending suspension request against it. This avoids a
+ // potential deadlock cycle.
+ static Mutex* thread_list_suspend_thread_lock_;
+
// The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
// mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
// a share on the mutator_lock_. The garbage collector may also execute with shared access but
@@ -532,7 +542,7 @@
// else | .. running ..
// Goto x | .. running ..
// .. running .. | .. running ..
- static ReaderWriterMutex* mutator_lock_;
+ static ReaderWriterMutex* mutator_lock_ ACQUIRED_AFTER(thread_list_suspend_thread_lock_);
// Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index fefb907..b70041c 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -209,7 +209,7 @@
// obj will be NULL. Otherwise, obj should always be non-NULL
// and valid.
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "field operation on invalid %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
return;
@@ -248,7 +248,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
if (o == nullptr || !Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "field operation on invalid %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
return;
@@ -628,7 +628,7 @@
mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "%s is an invalid %s: %p (%p)",
what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object, obj);
return false;
@@ -682,7 +682,7 @@
mirror::Array* a = soa_.Decode<mirror::Array*>(java_array);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(a)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(), java_array, a);
} else if (!a->IsArrayInstance()) {
@@ -703,7 +703,7 @@
}
mirror::ArtField* f = soa_.DecodeField(fid);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f) || !f->IsArtField()) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "invalid jfieldID: %p", fid);
return nullptr;
}
@@ -717,7 +717,7 @@
}
mirror::ArtMethod* m = soa_.DecodeMethod(mid);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m) || !m->IsArtMethod()) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "invalid jmethodID: %p", mid);
return nullptr;
}
@@ -738,7 +738,7 @@
mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
// TODO: when we remove work_around_app_jni_bugs, this should be impossible.
JniAbortF(function_name_, "native code passing in reference to invalid %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 1436810..c4855bd 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -54,6 +54,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/proxy.h"
+#include "mirror/reference-inl.h"
#include "mirror/stack_trace_element.h"
#include "mirror/string-inl.h"
#include "object_utils.h"
@@ -257,6 +258,13 @@
java_lang_String->SetObjectSize(mirror::String::InstanceSize());
java_lang_String->SetStatus(mirror::Class::kStatusResolved, self);
+ // Setup Reference.
+ Handle<mirror::Class> java_lang_ref_Reference(hs.NewHandle(
+ AllocClass(self, java_lang_Class.Get(), mirror::Reference::ClassSize())));
+ mirror::Reference::SetClass(java_lang_ref_Reference.Get());
+ java_lang_ref_Reference->SetObjectSize(mirror::Reference::InstanceSize());
+ java_lang_ref_Reference->SetStatus(mirror::Class::kStatusResolved, self);
+
// Create storage for root classes, save away our work so far (requires descriptors).
class_roots_ = mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.Get(),
kClassRootsMax);
@@ -267,6 +275,7 @@
SetClassRoot(kObjectArrayClass, object_array_class.Get());
SetClassRoot(kCharArrayClass, char_array_class.Get());
SetClassRoot(kJavaLangString, java_lang_String.Get());
+ SetClassRoot(kJavaLangRefReference, java_lang_ref_Reference.Get());
// Setup the primitive type classes.
SetClassRoot(kPrimitiveBoolean, CreatePrimitiveClass(self, Primitive::kPrimBoolean));
@@ -461,8 +470,12 @@
SetClassRoot(kJavaLangReflectProxy, java_lang_reflect_Proxy);
// java.lang.ref classes need to be specially flagged, but otherwise are normal classes
- mirror::Class* java_lang_ref_Reference = FindSystemClass(self, "Ljava/lang/ref/Reference;");
- SetClassRoot(kJavaLangRefReference, java_lang_ref_Reference);
+ // finish initializing Reference class
+ java_lang_ref_Reference->SetStatus(mirror::Class::kStatusNotReady, self);
+ mirror::Class* Reference_class = FindSystemClass(self, "Ljava/lang/ref/Reference;");
+ CHECK_EQ(java_lang_ref_Reference.Get(), Reference_class);
+ CHECK_EQ(java_lang_ref_Reference->GetObjectSize(), mirror::Reference::InstanceSize());
+ CHECK_EQ(java_lang_ref_Reference->GetClassSize(), mirror::Reference::ClassSize());
mirror::Class* java_lang_ref_FinalizerReference =
FindSystemClass(self, "Ljava/lang/ref/FinalizerReference;");
java_lang_ref_FinalizerReference->SetAccessFlags(
@@ -811,9 +824,20 @@
}
} else {
// TODO: What to lock here?
+ bool obsolete_file_cleanup_failed;
open_oat_file.reset(FindOatFileContainingDexFileFromDexLocation(dex_location,
dex_location_checksum_pointer,
- kRuntimeISA, error_msgs));
+ kRuntimeISA, error_msgs,
+ &obsolete_file_cleanup_failed));
+ // There's no point in going forward and eventually trying to regenerate the
+ // file if we couldn't remove the obsolete one. Most likely we will fail
+ // with the same error when trying to write the new file.
+ // If the cleanup failure is due to permission issues it's *mandatory*
+ // to stop, to avoid regenerating under the wrong user.
+ // TODO: should we maybe do this only when we get permission issues (i.e. EACCES)?
+ if (obsolete_file_cleanup_failed) {
+ return false;
+ }
}
needs_registering = true;
}
@@ -1071,7 +1095,9 @@
const char* dex_location,
const uint32_t* const dex_location_checksum,
InstructionSet isa,
- std::vector<std::string>* error_msgs) {
+ std::vector<std::string>* error_msgs,
+ bool* obsolete_file_cleanup_failed) {
+ *obsolete_file_cleanup_failed = false;
// Look for an existing file next to dex. for example, for
// /foo/bar/baz.jar, look for /foo/bar/<isa>/baz.odex.
std::string odex_filename(DexFilenameToOdexFilename(dex_location, isa));
@@ -1098,9 +1124,18 @@
if (oat_file != nullptr) {
return oat_file;
}
+
if (!open_failed && TEMP_FAILURE_RETRY(unlink(cache_location.c_str())) != 0) {
- PLOG(FATAL) << "Failed to remove obsolete oat file from " << cache_location;
+ std::string error_msg = StringPrintf("Failed to remove obsolete file from %s when searching "
+ "for dex file %s: %s",
+ cache_location.c_str(), dex_location, strerror(errno));
+ error_msgs->push_back(error_msg);
+ VLOG(class_linker) << error_msg;
+ // Let the caller know that we couldn't remove the obsolete file.
+ // This is a good indication that further writes may fail as well.
+ *obsolete_file_cleanup_failed = true;
}
+
std::string compound_msg = StringPrintf("Failed to open oat file from %s (error '%s') or %s "
"(error '%s').", odex_filename.c_str(), error_msg.c_str(),
cache_location.c_str(), cache_error_msg.c_str());
@@ -1231,6 +1266,7 @@
array_iftable_ = GetClassRoot(kObjectArrayClass)->GetIfTable();
DCHECK(array_iftable_ == GetClassRoot(kBooleanArrayClass)->GetIfTable());
// String class root was set above
+ mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
mirror::ArtField::SetClass(GetClassRoot(kJavaLangReflectArtField));
mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
@@ -1354,6 +1390,7 @@
ClassLinker::~ClassLinker() {
mirror::Class::ResetClass();
mirror::String::ResetClass();
+ mirror::Reference::ResetClass();
mirror::ArtField::ResetClass();
mirror::ArtMethod::ResetClass();
mirror::BooleanArray::ResetArrayClass();
@@ -1600,6 +1637,8 @@
klass.Assign(GetClassRoot(kJavaLangClass));
} else if (strcmp(descriptor, "Ljava/lang/String;") == 0) {
klass.Assign(GetClassRoot(kJavaLangString));
+ } else if (strcmp(descriptor, "Ljava/lang/ref/Reference;") == 0) {
+ klass.Assign(GetClassRoot(kJavaLangRefReference));
} else if (strcmp(descriptor, "Ljava/lang/DexCache;") == 0) {
klass.Assign(GetClassRoot(kJavaLangDexCache));
} else if (strcmp(descriptor, "Ljava/lang/reflect/ArtField;") == 0) {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d9b3d25..c17f88d 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -571,7 +571,8 @@
const OatFile* FindOatFileContainingDexFileFromDexLocation(const char* location,
const uint32_t* const location_checksum,
InstructionSet isa,
- std::vector<std::string>* error_msgs)
+ std::vector<std::string>* error_msgs,
+ bool* obsolete_file_cleanup_failed)
LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
// Find and verify an oat file with the given dex file. Will return nullptr when the oat file
@@ -634,7 +635,7 @@
// retire a class, the version of the class in the table is returned and this may differ from
// the class passed in.
mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass)
- __attribute__((warn_unused_result)) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c95be01..c7739fe 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2249,15 +2249,18 @@
}
JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
- ScopedLocalRef<jobject> peer(Thread::Current()->GetJniEnv(), NULL);
+ Thread* self = Thread::Current();
+ ScopedLocalRef<jobject> peer(self->GetJniEnv(), NULL);
{
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccess soa(self);
peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
}
if (peer.get() == NULL) {
return JDWP::ERR_THREAD_NOT_ALIVE;
}
- // Suspend thread to build stack trace.
+ // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
+ // trying to suspend this one.
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
bool timed_out;
Thread* thread = ThreadList::SuspendThreadByPeer(peer.get(), request_suspension, true,
&timed_out);
@@ -3143,7 +3146,7 @@
ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
- thread_(NULL),
+ thread_(nullptr),
error_(JDWP::ERR_NONE),
self_suspend_(false),
other_suspend_(false) {
@@ -3159,10 +3162,15 @@
soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
jobject thread_peer = gRegistry->GetJObject(thread_id);
bool timed_out;
- Thread* suspended_thread = ThreadList::SuspendThreadByPeer(thread_peer, true, true,
- &timed_out);
+ Thread* suspended_thread;
+ {
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
+ suspended_thread = ThreadList::SuspendThreadByPeer(thread_peer, true, true,
+ &timed_out);
+ }
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
- if (suspended_thread == NULL) {
+ if (suspended_thread == nullptr) {
// Thread terminated from under us while suspending.
error_ = JDWP::ERR_INVALID_THREAD;
} else {
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 291e2d0..48dcdca 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -170,13 +170,29 @@
return true;
}
-bool DexFileVerifier::CheckPointerRange(const void* start, const void* end, const char* label) {
+bool DexFileVerifier::CheckListSize(const void* start, size_t count, size_t elem_size,
+ const char* label) {
+ // Check that elem_size is not 0.
+ CHECK_NE(elem_size, 0U);
+
const byte* range_start = reinterpret_cast<const byte*>(start);
- const byte* range_end = reinterpret_cast<const byte*>(end);
const byte* file_start = reinterpret_cast<const byte*>(begin_);
+
+ // Check for overflow.
+ uintptr_t max = 0 - 1;
+ size_t available_bytes_till_end_of_mem = max - reinterpret_cast<uintptr_t>(start);
+ size_t max_count = available_bytes_till_end_of_mem / elem_size;
+ if (max_count < count) {
+ ErrorStringPrintf("Overflow in range for %s: %zx for %zu@%zu", label,
+ static_cast<size_t>(range_start - file_start),
+ count, elem_size);
+ return false;
+ }
+
+ const byte* range_end = range_start + count * elem_size;
const byte* file_end = file_start + size_;
- if (UNLIKELY((range_start < file_start) || (range_start > file_end) ||
- (range_end < file_start) || (range_end > file_end))) {
+ if (UNLIKELY((range_start < file_start) || (range_end > file_end))) {
+ // Note: these two tests are enough as we make sure above that there's no overflow.
ErrorStringPrintf("Bad range for %s: %zx to %zx", label,
static_cast<size_t>(range_start - file_start),
static_cast<size_t>(range_end - file_start));
@@ -185,12 +201,6 @@
return true;
}
-bool DexFileVerifier::CheckListSize(const void* start, uint32_t count,
- uint32_t element_size, const char* label) {
- const byte* list_start = reinterpret_cast<const byte*>(start);
- return CheckPointerRange(list_start, list_start + (count * element_size), label);
-}
-
bool DexFileVerifier::CheckIndex(uint32_t field, uint32_t limit, const char* label) {
if (UNLIKELY(field >= limit)) {
ErrorStringPrintf("Bad index for %s: %x >= %x", label, field, limit);
@@ -329,7 +339,7 @@
uint32_t DexFileVerifier::ReadUnsignedLittleEndian(uint32_t size) {
uint32_t result = 0;
- if (LIKELY(CheckPointerRange(ptr_, ptr_ + size, "encoded_value"))) {
+ if (LIKELY(CheckListSize(ptr_, size, sizeof(byte), "encoded_value"))) {
for (uint32_t i = 0; i < size; i++) {
result |= ((uint32_t) *(ptr_++)) << (i * 8);
}
@@ -447,7 +457,7 @@
bool DexFileVerifier::CheckPadding(size_t offset, uint32_t aligned_offset) {
if (offset < aligned_offset) {
- if (!CheckPointerRange(begin_ + offset, begin_ + aligned_offset, "section")) {
+ if (!CheckListSize(begin_ + offset, aligned_offset - offset, sizeof(byte), "section")) {
return false;
}
while (offset < aligned_offset) {
@@ -463,7 +473,7 @@
}
bool DexFileVerifier::CheckEncodedValue() {
- if (!CheckPointerRange(ptr_, ptr_ + 1, "encoded_value header")) {
+ if (!CheckListSize(ptr_, 1, sizeof(byte), "encoded_value header")) {
return false;
}
@@ -656,7 +666,7 @@
bool DexFileVerifier::CheckIntraCodeItem() {
const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(ptr_);
- if (!CheckPointerRange(code_item, code_item + 1, "code")) {
+ if (!CheckListSize(code_item, 1, sizeof(DexFile::CodeItem), "code")) {
return false;
}
@@ -945,7 +955,7 @@
}
bool DexFileVerifier::CheckIntraAnnotationItem() {
- if (!CheckPointerRange(ptr_, ptr_ + 1, "annotation visibility")) {
+ if (!CheckListSize(ptr_, 1, sizeof(byte), "annotation visibility")) {
return false;
}
@@ -970,7 +980,7 @@
bool DexFileVerifier::CheckIntraAnnotationsDirectoryItem() {
const DexFile::AnnotationsDirectoryItem* item =
reinterpret_cast<const DexFile::AnnotationsDirectoryItem*>(ptr_);
- if (!CheckPointerRange(item, item + 1, "annotations_directory")) {
+ if (!CheckListSize(item, 1, sizeof(DexFile::AnnotationsDirectoryItem), "annotations_directory")) {
return false;
}
@@ -1064,42 +1074,42 @@
// Check depending on the section type.
switch (type) {
case DexFile::kDexTypeStringIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::StringId), "string_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::StringId), "string_ids")) {
return false;
}
ptr_ += sizeof(DexFile::StringId);
break;
}
case DexFile::kDexTypeTypeIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::TypeId), "type_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::TypeId), "type_ids")) {
return false;
}
ptr_ += sizeof(DexFile::TypeId);
break;
}
case DexFile::kDexTypeProtoIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::ProtoId), "proto_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::ProtoId), "proto_ids")) {
return false;
}
ptr_ += sizeof(DexFile::ProtoId);
break;
}
case DexFile::kDexTypeFieldIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::FieldId), "field_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::FieldId), "field_ids")) {
return false;
}
ptr_ += sizeof(DexFile::FieldId);
break;
}
case DexFile::kDexTypeMethodIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::MethodId), "method_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::MethodId), "method_ids")) {
return false;
}
ptr_ += sizeof(DexFile::MethodId);
break;
}
case DexFile::kDexTypeClassDefItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::ClassDef), "class_defs")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::ClassDef), "class_defs")) {
return false;
}
ptr_ += sizeof(DexFile::ClassDef);
@@ -1110,7 +1120,7 @@
const DexFile::TypeItem* item = &list->GetTypeItem(0);
uint32_t count = list->Size();
- if (!CheckPointerRange(list, list + 1, "type_list") ||
+ if (!CheckListSize(list, 1, sizeof(DexFile::TypeList), "type_list") ||
!CheckListSize(item, count, sizeof(DexFile::TypeItem), "type_list size")) {
return false;
}
@@ -1123,7 +1133,8 @@
const DexFile::AnnotationSetRefItem* item = list->list_;
uint32_t count = list->size_;
- if (!CheckPointerRange(list, list + 1, "annotation_set_ref_list") ||
+ if (!CheckListSize(list, 1, sizeof(DexFile::AnnotationSetRefList),
+ "annotation_set_ref_list") ||
!CheckListSize(item, count, sizeof(DexFile::AnnotationSetRefItem),
"annotation_set_ref_list size")) {
return false;
@@ -1137,7 +1148,7 @@
const uint32_t* item = set->entries_;
uint32_t count = set->size_;
- if (!CheckPointerRange(set, set + 1, "annotation_set_item") ||
+ if (!CheckListSize(set, 1, sizeof(DexFile::AnnotationSetItem), "annotation_set_item") ||
!CheckListSize(item, count, sizeof(uint32_t), "annotation_set_item size")) {
return false;
}
@@ -1650,6 +1661,12 @@
return false;
}
+ // Only allow non-runtime modifiers.
+ if ((item->access_flags_ & ~kAccJavaFlagsMask) != 0) {
+ ErrorStringPrintf("Invalid class flags: '%d'", item->access_flags_);
+ return false;
+ }
+
if (item->interfaces_off_ != 0 &&
!CheckOffsetToTypeMap(item->interfaces_off_, DexFile::kDexTypeTypeList)) {
return false;
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index f845993..cae1063 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -40,8 +40,7 @@
bool Verify();
bool CheckShortyDescriptorMatch(char shorty_char, const char* descriptor, bool is_return_type);
- bool CheckPointerRange(const void* start, const void* end, const char* label);
- bool CheckListSize(const void* start, uint32_t count, uint32_t element_size, const char* label);
+ bool CheckListSize(const void* start, size_t count, size_t element_size, const char* label);
bool CheckIndex(uint32_t field, uint32_t limit, const char* label);
bool CheckHeader();
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 3112bc0..de43b85 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -17,21 +17,10 @@
#include "fault_handler.h"
#include <sys/mman.h>
#include <sys/ucontext.h>
-#include "base/macros.h"
-#include "globals.h"
-#include "base/logging.h"
-#include "base/hex_dump.h"
-#include "thread.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/dex_cache.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
-#include "object_utils.h"
-#include "scoped_thread_state_change.h"
-#ifdef HAVE_ANDROID_OS
+#include "mirror/art_method.h"
+#include "mirror/class.h"
#include "sigchain.h"
-#endif
+#include "thread-inl.h"
#include "verify_object-inl.h"
namespace art {
@@ -47,6 +36,7 @@
// Signal handler called on SIGSEGV.
static void art_fault_handler(int sig, siginfo_t* info, void* context) {
+ // std::cout << "handling fault in ART handler\n";
fault_manager.HandleFault(sig, info, context);
}
@@ -55,10 +45,6 @@
}
FaultManager::~FaultManager() {
-#ifdef HAVE_ANDROID_OS
- UnclaimSignalChain(SIGSEGV);
-#endif
- sigaction(SIGSEGV, &oldaction_, nullptr); // Restore old handler.
}
@@ -72,11 +58,12 @@
#endif
// Set our signal handler now.
- sigaction(SIGSEGV, &action, &oldaction_);
-#ifdef HAVE_ANDROID_OS
+ int e = sigaction(SIGSEGV, &action, &oldaction_);
+ if (e != 0) {
+ VLOG(signals) << "Failed to claim SEGV: " << strerror(errno);
+ }
// Make sure our signal handler is called before any user handlers.
ClaimSignalChain(SIGSEGV, &oldaction_);
-#endif
}
void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
@@ -84,8 +71,12 @@
//
// If malloc calls abort, it will be holding its lock.
// If the handler tries to call malloc, it will deadlock.
+
+ // Also, there is only an 8K stack available here, so logging can cause memory
+ // overwrite issues if you are unlucky. If you want to enable logging and
+ // are getting crashes, allocate more space for the alternate signal stack.
VLOG(signals) << "Handling fault";
- if (IsInGeneratedCode(context, true)) {
+ if (IsInGeneratedCode(info, context, true)) {
VLOG(signals) << "in generated code, looking for handler";
for (const auto& handler : generated_code_handlers_) {
VLOG(signals) << "invoking Action on handler " << handler;
@@ -101,11 +92,8 @@
}
art_sigsegv_fault();
-#ifdef HAVE_ANDROID_OS
+ // Pass this on to the next handler in the chain, or the default if none.
InvokeUserSignalHandler(sig, info, context);
-#else
- oldaction_.sa_sigaction(sig, info, context);
-#endif
}
void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) {
@@ -132,7 +120,7 @@
// This function is called within the signal handler. It checks that
// the mutator_lock is held (shared). No annotalysis is done.
-bool FaultManager::IsInGeneratedCode(void* context, bool check_dex_pc) {
+bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool check_dex_pc) {
// We can only be running Java code in the current thread if it
// is in Runnable state.
VLOG(signals) << "Checking for generated code";
@@ -161,7 +149,7 @@
// Get the architecture specific method address and return address. These
// are in architecture specific files in arch/<arch>/fault_handler_<arch>.
- GetMethodAndReturnPCAndSP(context, &method_obj, &return_pc, &sp);
+ GetMethodAndReturnPCAndSP(siginfo, context, &method_obj, &return_pc, &sp);
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
@@ -242,12 +230,12 @@
bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
- if (manager_->IsInGeneratedCode(context, false)) {
+ if (manager_->IsInGeneratedCode(siginfo, context, false)) {
LOG(ERROR) << "Dumping java stack trace for crash in generated code";
mirror::ArtMethod* method = nullptr;
uintptr_t return_pc = 0;
uintptr_t sp = 0;
- manager_->GetMethodAndReturnPCAndSP(context, &method, &return_pc, &sp);
+ manager_->GetMethodAndReturnPCAndSP(siginfo, context, &method, &return_pc, &sp);
Thread* self = Thread::Current();
// Inside of generated code, sp[0] is the method, so sp is the frame.
StackReference<mirror::ArtMethod>* frame =
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 026f5b9..71c9977 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -43,9 +43,10 @@
void HandleFault(int sig, siginfo_t* info, void* context);
void AddHandler(FaultHandler* handler, bool generated_code);
void RemoveHandler(FaultHandler* handler);
- void GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+ void GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context, mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp);
- bool IsInGeneratedCode(void *context, bool check_dex_pc) NO_THREAD_SAFETY_ANALYSIS;
+ bool IsInGeneratedCode(siginfo_t* siginfo, void *context, bool check_dex_pc)
+ NO_THREAD_SAFETY_ANALYSIS;
private:
std::vector<FaultHandler*> generated_code_handlers_;
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 228d1dc..2686af0 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -185,7 +185,7 @@
<< from_space->GetGcRetentionPolicy();
LOG(INFO) << "ToSpace " << to_space->GetName() << " type "
<< to_space->GetGcRetentionPolicy();
- heap->DumpSpaces();
+ heap->DumpSpaces(LOG(INFO));
LOG(FATAL) << "FATAL ERROR";
}
}
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 974952d..104ed36 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -32,10 +32,7 @@
template<typename MarkVisitor, typename ReferenceVisitor>
inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
const ReferenceVisitor& ref_visitor) {
- if (kIsDebugBuild && !IsMarked(obj)) {
- heap_->DumpSpaces();
- LOG(FATAL) << "Scanning unmarked object " << obj;
- }
+ DCHECK(IsMarked(obj)) << "Scanning unmarked object " << obj << "\n" << heap_->DumpSpaces();
obj->VisitReferences<false>(visitor, ref_visitor);
if (kCountScannedTypes) {
mirror::Class* klass = obj->GetClass<kVerifyNone>();
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 7e97b3b..95530be 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -313,10 +313,8 @@
}
}
}
- if (current_space_bitmap_ == nullptr) {
- heap_->DumpSpaces();
- LOG(FATAL) << "Could not find a default mark bitmap";
- }
+ CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
+ << heap_->DumpSpaces();
}
void MarkSweep::ExpandMarkStack() {
@@ -943,12 +941,9 @@
void MarkSweep::VerifyIsLive(const Object* obj) {
if (!heap_->GetLiveBitmap()->Test(obj)) {
- if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
- heap_->allocation_stack_->End()) {
- // Object not found!
- heap_->DumpSpaces();
- LOG(FATAL) << "Found dead object " << obj;
- }
+ accounting::ObjectStack* allocation_stack = heap_->allocation_stack_.get();
+ CHECK(std::find(allocation_stack->Begin(), allocation_stack->End(), obj) !=
+ allocation_stack->End()) << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
}
}
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 47682cc..922a71c 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -64,34 +64,25 @@
// Verify all the objects have the correct forward pointer installed.
obj->AssertReadBarrierPointer();
}
- if (!immune_region_.ContainsObject(obj)) {
- if (from_space_->HasAddress(obj)) {
- mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
- // If the object has already been moved, return the new forward address.
- if (UNLIKELY(forward_address == nullptr)) {
- forward_address = MarkNonForwardedObject(obj);
- DCHECK(forward_address != nullptr);
- // Make sure to only update the forwarding address AFTER you copy the object so that the
- // monitor word doesn't Get stomped over.
- obj->SetLockWord(
- LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
- // Push the object onto the mark stack for later processing.
- MarkStackPush(forward_address);
- }
- obj_ptr->Assign(forward_address);
- } else {
- BitmapSetSlowPathVisitor visitor(this);
- if (kIsDebugBuild && mark_bitmap_->GetContinuousSpaceBitmap(obj) != nullptr) {
- // If a bump pointer space only collection, we should not
- // reach here as we don't/won't mark the objects in the
- // non-moving space (except for the promoted objects.) Note
- // the non-moving space is added to the immune space.
- DCHECK(!generational_ || whole_heap_collection_);
- }
- if (!mark_bitmap_->Set(obj, visitor)) {
- // This object was not previously marked.
- MarkStackPush(obj);
- }
+ if (from_space_->HasAddress(obj)) {
+ mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
+ // If the object has already been moved, return the new forward address.
+ if (UNLIKELY(forward_address == nullptr)) {
+ forward_address = MarkNonForwardedObject(obj);
+ DCHECK(forward_address != nullptr);
+ // Make sure to only update the forwarding address AFTER you copy the object so that the
+ // monitor word doesn't Get stomped over.
+ obj->SetLockWord(
+ LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
+ // Push the object onto the mark stack for later processing.
+ MarkStackPush(forward_address);
+ }
+ obj_ptr->Assign(forward_address);
+ } else if (!collect_from_space_only_ && !immune_region_.ContainsObject(obj)) {
+ BitmapSetSlowPathVisitor visitor(this);
+ if (!mark_bitmap_->Set(obj, visitor)) {
+ // This object was not previously marked.
+ MarkStackPush(obj);
}
}
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index cabfe21..c7c567f 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -63,23 +63,23 @@
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (space->GetLiveBitmap() != nullptr) {
- if (space == to_space_) {
- CHECK(to_space_->IsContinuousMemMapAllocSpace());
- to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
- } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
- || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
- // Add the main free list space and the non-moving
- // space to the immune space if a bump pointer space
- // only collection.
- || (generational_ && !whole_heap_collection_ &&
- (space == GetHeap()->GetNonMovingSpace() ||
- space == GetHeap()->GetPrimaryFreeListSpace()))) {
- CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
+ space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+ CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
+ } else if (space->GetLiveBitmap() != nullptr) {
+ if (space == to_space_ || collect_from_space_only_) {
+ if (collect_from_space_only_) {
+ // Bind the main free list space and the non-moving space to the immune space if a bump
+ // pointer space only collection.
+ CHECK(space == to_space_ || space == GetHeap()->GetPrimaryFreeListSpace() ||
+ space == GetHeap()->GetNonMovingSpace());
+ }
+ CHECK(space->IsContinuousMemMapAllocSpace());
+ space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
}
}
}
- if (generational_ && !whole_heap_collection_) {
+ if (collect_from_space_only_) {
// We won't collect the large object space if a bump pointer space only collection.
is_large_object_space_immune_ = true;
}
@@ -95,7 +95,7 @@
bytes_promoted_(0),
bytes_promoted_since_last_whole_heap_collection_(0),
large_object_bytes_allocated_at_last_whole_heap_collection_(0),
- whole_heap_collection_(true),
+ collect_from_space_only_(generational),
collector_name_(name_),
swap_semi_spaces_(true) {
}
@@ -147,6 +147,10 @@
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
+ if (generational_) {
+ promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
+ }
+ fallback_space_ = GetHeap()->GetNonMovingSpace();
}
void SemiSpace::ProcessReferences(Thread* self) {
@@ -180,9 +184,9 @@
GetCurrentIteration()->GetClearSoftReferences()) {
// If an explicit, native allocation-triggered, or last attempt
// collection, collect the whole heap.
- whole_heap_collection_ = true;
+ collect_from_space_only_ = false;
}
- if (whole_heap_collection_) {
+ if (!collect_from_space_only_) {
VLOG(heap) << "Whole heap collection";
name_ = collector_name_ + " whole";
} else {
@@ -191,7 +195,7 @@
}
}
- if (!generational_ || whole_heap_collection_) {
+ if (!collect_from_space_only_) {
// If non-generational, always clear soft references.
// If generational, clear soft references if a whole heap collection.
GetCurrentIteration()->SetClearSoftReferences(true);
@@ -227,8 +231,6 @@
{
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
MarkRoots();
- // Mark roots of immune spaces.
- UpdateAndMarkModUnion();
// Recursively mark remaining objects.
MarkReachableObjects();
}
@@ -259,46 +261,6 @@
}
}
-void SemiSpace::UpdateAndMarkModUnion() {
- for (auto& space : heap_->GetContinuousSpaces()) {
- // If the space is immune then we need to mark the references to other spaces.
- if (immune_region_.ContainsSpace(space)) {
- accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
- if (table != nullptr) {
- // TODO: Improve naming.
- TimingLogger::ScopedTiming t(
- space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
- "UpdateAndMarkImageModUnionTable",
- GetTimings());
- table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
- } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
- DCHECK(kUseRememberedSet);
- // If a bump pointer space only collection, the non-moving
- // space is added to the immune space. The non-moving space
- // doesn't have a mod union table, but has a remembered
- // set. Its dirty cards will be scanned later in
- // MarkReachableObjects().
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
- << "Space " << space->GetName() << " "
- << "generational_=" << generational_ << " "
- << "whole_heap_collection_=" << whole_heap_collection_ << " ";
- } else {
- DCHECK(!kUseRememberedSet);
- // If a bump pointer space only collection, the non-moving
- // space is added to the immune space. But the non-moving
- // space doesn't have a mod union table. Instead, its live
- // bitmap will be scanned later in MarkReachableObjects().
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
- << "Space " << space->GetName() << " "
- << "generational_=" << generational_ << " "
- << "whole_heap_collection_=" << whole_heap_collection_ << " ";
- }
- }
- }
-}
-
class SemiSpaceScanObjectVisitor {
public:
explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
@@ -355,20 +317,30 @@
heap_->MarkAllocStackAsLive(live_stack);
live_stack->Reset();
}
- t.NewTiming("UpdateAndMarkRememberedSets");
for (auto& space : heap_->GetContinuousSpaces()) {
- // If the space is immune and has no mod union table (the
- // non-moving space when the bump pointer space only collection is
- // enabled,) then we need to scan its live bitmap or dirty cards as roots
- // (including the objects on the live stack which have just marked
- // in the live bitmap above in MarkAllocStackAsLive().)
- if (immune_region_.ContainsSpace(space) &&
- heap_->FindModUnionTableFromSpace(space) == nullptr) {
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
- accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
- if (kUseRememberedSet) {
- DCHECK(rem_set != nullptr);
+ // If the space is immune then we need to mark the references to other spaces.
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ if (table != nullptr) {
+ // TODO: Improve naming.
+ TimingLogger::ScopedTiming t2(
+ space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+ "UpdateAndMarkImageModUnionTable",
+ GetTimings());
+ table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+ DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
+ } else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) {
+ // If the space has no mod union table (the non-moving space and main spaces when the bump
+ // pointer space only collection is enabled), then we need to scan its live bitmap or dirty
+ // cards as roots (including the objects on the live stack, which have just been marked in the live
+ // bitmap above in MarkAllocStackAsLive().)
+ DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
+ << "Space " << space->GetName() << " "
+ << "generational_=" << generational_ << " "
+ << "collect_from_space_only_=" << collect_from_space_only_;
+ accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
+ CHECK_EQ(rem_set != nullptr, kUseRememberedSet);
+ if (rem_set != nullptr) {
+ TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
from_space_, this);
if (kIsDebugBuild) {
@@ -383,7 +355,7 @@
visitor);
}
} else {
- DCHECK(rem_set == nullptr);
+ TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
SemiSpaceScanObjectVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
@@ -393,9 +365,10 @@
}
}
+ CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
if (is_large_object_space_immune_) {
TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
- DCHECK(generational_ && !whole_heap_collection_);
+ DCHECK(collect_from_space_only_);
// Delay copying the live set to the marked set until here from
// BindBitmaps() as the large objects on the allocation stack may
// be newly added to the live set above in MarkAllocStackAsLive().
@@ -506,19 +479,20 @@
}
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
- size_t object_size = obj->SizeOf();
+ const size_t object_size = obj->SizeOf();
size_t bytes_allocated;
mirror::Object* forward_address = nullptr;
if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
// If it's allocated before the last GC (older), move
// (pseudo-promote) it to the main free list space (as sort
// of an old generation.)
- space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
- forward_address = promo_dest_space->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
- nullptr);
+ forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
+ nullptr);
if (UNLIKELY(forward_address == nullptr)) {
// If out of space, fall back to the to-space.
forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ // No logic for marking the bitmap, so it must be null.
+ DCHECK(to_space_->GetLiveBitmap() == nullptr);
} else {
bytes_promoted_ += bytes_allocated;
// Dirty the card at the destination as it may contain
@@ -526,12 +500,12 @@
// space.
GetHeap()->WriteBarrierEveryFieldOf(forward_address);
// Handle the bitmaps marking.
- accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
- accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK(!live_bitmap->Test(forward_address));
- if (!whole_heap_collection_) {
+ if (collect_from_space_only_) {
// If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
DCHECK_EQ(live_bitmap, mark_bitmap);
@@ -559,12 +533,23 @@
mark_bitmap->Set(forward_address);
}
}
- DCHECK(forward_address != nullptr);
} else {
// If it's allocated after the last GC (younger), copy it to the to-space.
forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
+ to_space_live_bitmap_->Set(forward_address);
+ }
}
- CHECK(forward_address != nullptr) << "Out of memory in the to-space.";
+ // If it's still null, attempt to use the fallback space.
+ if (UNLIKELY(forward_address == nullptr)) {
+ forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
+ nullptr);
+ CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
+ accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
+ if (bitmap != nullptr) {
+ bitmap->Set(forward_address);
+ }
+ }
++objects_moved_;
bytes_moved_ += bytes_allocated;
// Copy over the object and add it to the mark stack since we still need to update its
@@ -579,11 +564,10 @@
}
forward_address->AssertReadBarrierPointer();
}
- if (to_space_live_bitmap_ != nullptr) {
- to_space_live_bitmap_->Set(forward_address);
- }
DCHECK(to_space_->HasAddress(forward_address) ||
- (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
+ fallback_space_->HasAddress(forward_address) ||
+ (generational_ && promo_dest_space_->HasAddress(forward_address)))
+ << forward_address << "\n" << GetHeap()->DumpSpaces();
return forward_address;
}
@@ -648,7 +632,7 @@
}
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
- return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
+ return space != from_space_ && space != to_space_;
}
void SemiSpace::Sweep(bool swap_bitmaps) {
@@ -714,22 +698,20 @@
// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- space::MallocSpace* promo_dest_space = nullptr;
accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
- if (generational_ && !whole_heap_collection_) {
+ if (collect_from_space_only_) {
// If a bump pointer space only collection (and the promotion is
// enabled,) we delay the live-bitmap marking of promoted objects
// from MarkObject() until this function.
- promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
- live_bitmap = promo_dest_space->GetLiveBitmap();
+ live_bitmap = promo_dest_space_->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
- accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK_EQ(live_bitmap, mark_bitmap);
}
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
- if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
+ if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) {
// obj has just been promoted. Mark the live bitmap for it,
// which is delayed from MarkObject().
DCHECK(!live_bitmap->Test(obj));
@@ -742,16 +724,12 @@
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
// All immune objects are assumed marked.
- if (immune_region_.ContainsObject(obj)) {
- return obj;
- }
if (from_space_->HasAddress(obj)) {
// Returns either the forwarding address or nullptr.
return GetForwardingAddressInFromSpace(obj);
- } else if (to_space_->HasAddress(obj)) {
- // Should be unlikely.
- // Already forwarded, must be marked.
- return obj;
+ } else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) ||
+ to_space_->HasAddress(obj)) {
+ return obj; // Already forwarded, must be marked.
}
return mark_bitmap_->Test(obj) ? obj : nullptr;
}
@@ -777,9 +755,9 @@
if (generational_) {
// Decide whether to do a whole heap collection or a bump pointer
// only space collection at the next collection by updating
- // whole_heap_collection.
- if (!whole_heap_collection_) {
- // Enable whole_heap_collection if the bytes promoted since the
+ // collect_from_space_only_.
+ if (collect_from_space_only_) {
+ // Disable collect_from_space_only_ if the bytes promoted since the
// last whole heap collection or the large object bytes
// allocated exceeds a threshold.
bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
@@ -792,14 +770,14 @@
current_los_bytes_allocated >=
last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
- whole_heap_collection_ = true;
+ collect_from_space_only_ = false;
}
} else {
// Reset the counters.
bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
large_object_bytes_allocated_at_last_whole_heap_collection_ =
GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
- whole_heap_collection_ = false;
+ collect_from_space_only_ = true;
}
}
// Clear all of the spaces' mark bitmaps.
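The rename flips the flag's polarity but not the decision. Distilled from the branch above (kPromotedThreshold is a hypothetical stand-in for the constant behind bytes_promoted_threshold_exceeded, which this hunk does not show; kLargeObjectBytesAllocatedThreshold appears verbatim):

    // Stay in from-space-only mode unless either counter crossed its threshold
    // since the last whole-heap collection.
    const bool promoted_too_much =
        bytes_promoted_since_last_whole_heap_collection_ >= kPromotedThreshold;
    const bool los_grew_too_much = current_los_bytes_allocated >=
        last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
    collect_from_space_only_ = !(promoted_too_much || los_grew_too_much);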
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index bff0847..c5d25f3 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -243,9 +243,14 @@
// large objects were allocated at the last whole heap collection.
uint64_t large_object_bytes_allocated_at_last_whole_heap_collection_;
- // Used for the generational mode. When true, collect the whole
- // heap. When false, collect only the bump pointer spaces.
- bool whole_heap_collection_;
+ // Used for generational mode. When true, we only collect the from_space_.
+ bool collect_from_space_only_;
+
+ // The space we are promoting into; only used for GSS.
+ space::ContinuousMemMapAllocSpace* promo_dest_space_;
+
+ // The space which we copy to if the to_space_ is full.
+ space::ContinuousMemMapAllocSpace* fallback_space_;
// How many objects and bytes we moved, used so that we don't need to get the size of the
// to_space_ when calculating how many objects and bytes we freed.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4ec9bc2..696df32 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1126,7 +1126,13 @@
return false;
}
-void Heap::DumpSpaces(std::ostream& stream) {
+std::string Heap::DumpSpaces() const {
+ std::ostringstream oss;
+ DumpSpaces(oss);
+ return oss.str();
+}
+
+void Heap::DumpSpaces(std::ostream& stream) const {
for (const auto& space : continuous_spaces_) {
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
@@ -1159,10 +1165,7 @@
if (verify_object_mode_ > kVerifyObjectModeFast) {
// Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
- if (!IsLiveObjectLocked(obj)) {
- DumpSpaces();
- LOG(FATAL) << "Object is dead: " << obj;
- }
+ CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
}
}
@@ -2354,7 +2357,7 @@
accounting::RememberedSet* remembered_set = table_pair.second;
remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
}
- DumpSpaces();
+ DumpSpaces(LOG(ERROR));
}
return visitor.GetFailureCount();
}
@@ -2471,12 +2474,7 @@
visitor(*it);
}
}
-
- if (visitor.Failed()) {
- DumpSpaces();
- return false;
- }
- return true;
+ return !visitor.Failed();
}
void Heap::SwapStacks(Thread* self) {
@@ -2574,9 +2572,8 @@
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
SwapStacks(self);
// Sort the live stack so that we can quickly binary search it later.
- if (!VerifyMissingCardMarks()) {
- LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
- }
+ CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
+ << " missing card mark verification failed\n" << DumpSpaces();
SwapStacks(self);
}
if (verify_mod_union_table_) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b207953..0da113f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -539,7 +539,8 @@
}
}
- void DumpSpaces(std::ostream& stream = LOG(INFO));
+ std::string DumpSpaces() const WARN_UNUSED;
+ void DumpSpaces(std::ostream& stream) const;
// Dump object should only be used by the signal handler.
void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
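A usage sketch of the split API (illustrative; heap_ok is a hypothetical condition): the WARN_UNUSED string overload composes into CHECK/LOG streams, where it is only evaluated if the check fails, while the stream overload writes directly.

    gc::Heap* heap = Runtime::Current()->GetHeap();
    CHECK(heap_ok) << "Heap invariant violated\n" << heap->DumpSpaces();  // string form
    heap->DumpSpaces(LOG(ERROR));                                        // stream form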
diff --git a/runtime/gc/reference_processor-inl.h b/runtime/gc/reference_processor-inl.h
new file mode 100644
index 0000000..f619a15
--- /dev/null
+++ b/runtime/gc/reference_processor-inl.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_REFERENCE_PROCESSOR_INL_H_
+#define ART_RUNTIME_GC_REFERENCE_PROCESSOR_INL_H_
+
+#include "reference_processor.h"
+
+namespace art {
+namespace gc {
+
+inline bool ReferenceProcessor::SlowPathEnabled() {
+ return mirror::Reference::GetJavaLangRefReference()->GetSlowPathEnabled();
+}
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_REFERENCE_PROCESSOR_INL_H_
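With this change the flag lives in the java.lang.ref.Reference class object itself, so the GC and mutators share one source of truth and the processor needs no field of its own. A sketch of the round trip (illustrative only; the real entry points are EnableSlowPath(), DisableSlowPath() and SlowPathEnabled() below):

    // GC side: toggle the flag stored on the class object (mutators suspended).
    mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
    // Mutator side: GetReferent() consults the same storage, without a lock.
    bool slow = mirror::Reference::GetJavaLangRefReference()->GetSlowPathEnabled();
    // GC side again, once reference processing finishes.
    mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);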
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index e52bc1f..d3641d1 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -17,7 +17,9 @@
#include "reference_processor.h"
#include "mirror/object-inl.h"
+#include "mirror/reference.h"
#include "mirror/reference-inl.h"
+#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
@@ -27,18 +29,17 @@
namespace gc {
ReferenceProcessor::ReferenceProcessor()
- : process_references_args_(nullptr, nullptr, nullptr), slow_path_enabled_(false),
+ : process_references_args_(nullptr, nullptr, nullptr),
preserving_references_(false), lock_("reference processor lock", kReferenceProcessorLock),
condition_("reference processor condition", lock_) {
}
void ReferenceProcessor::EnableSlowPath() {
- Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- slow_path_enabled_ = true;
+ mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}
void ReferenceProcessor::DisableSlowPath(Thread* self) {
- slow_path_enabled_ = false;
+ mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
condition_.Broadcast(self);
}
@@ -46,11 +47,11 @@
mirror::Object* const referent = reference->GetReferent();
// If the referent is null then it is already cleared; we can just return null since there is no
// scenario where it becomes non-null during the reference processing phase.
- if (LIKELY(!slow_path_enabled_) || referent == nullptr) {
+ if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
return referent;
}
MutexLock mu(self, lock_);
- while (slow_path_enabled_) {
+ while (SlowPathEnabled()) {
mirror::HeapReference<mirror::Object>* const referent_addr =
reference->GetReferentReferenceAddr();
// If the referent became cleared, return it. Don't need barrier since thread roots can't get
@@ -117,7 +118,7 @@
process_references_args_.is_marked_callback_ = is_marked_callback;
process_references_args_.mark_callback_ = mark_object_callback;
process_references_args_.arg_ = arg;
- CHECK_EQ(slow_path_enabled_, concurrent) << "Slow path must be enabled iff concurrent";
+ CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
}
// Unless required to clear soft references with white references, preserve some white referents.
if (!clear_soft_references) {
@@ -182,7 +183,7 @@
void* arg) {
// klass can be the class of the old object if the visitor already updated the class of ref.
DCHECK(klass != nullptr);
- DCHECK(klass->IsReferenceClass());
+ DCHECK(klass->IsTypeOfReferenceClass());
mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
Thread* self = Thread::Current();
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 2771ea8..7274457 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -49,6 +49,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
LOCKS_EXCLUDED(lock_);
+ // The slow path bool is contained in the reference class object; it can only be set once.
// Only allow setting this with mutators suspended so that we can avoid using a lock in the
// GetReferent fast path as an optimization.
void EnableSlowPath() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -60,7 +61,7 @@
IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void UpdateRoots(IsMarkedCallback* callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
private:
class ProcessReferencesArgs {
@@ -75,8 +76,10 @@
MarkObjectCallback* mark_callback_;
void* arg_;
};
+ bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called by ProcessReferences.
- void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// If we are preserving references it means that some dead objects may become live. We use start
// and stop preserving to block mutators using GetReferent from getting access to these
// referents.
@@ -84,8 +87,6 @@
void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
// Process args, used by GetReferent to return referents which are already marked.
ProcessReferencesArgs process_references_args_ GUARDED_BY(lock_);
- // Boolean for whether or not we need to go slow path in GetReferent.
- volatile bool slow_path_enabled_;
// Boolean for whether or not we are preserving references (either soft references or finalizers).
// If this is true, then we cannot return a referent (see comment in GetReferent).
bool preserving_references_ GUARDED_BY(lock_);
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index fff4df1..71c8eb5 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -407,11 +407,11 @@
// Clear the space back to an empty space.
virtual void Clear() = 0;
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
return mark_bitmap_.get();
}
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 33339f8..243426c 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -634,8 +634,12 @@
// U1: NUL-terminated magic string.
fwrite(magic, 1, sizeof(magic), header_fp_);
- // U4: size of identifiers. We're using addresses as IDs, so make sure a pointer fits.
- U4_TO_BUF_BE(buf, 0, sizeof(void*));
+ // U4: size of identifiers. We're using addresses as IDs and our heap references are stored
+ // as uint32_t.
+ // Note of warning: hprof-conv hard-codes the size of identifiers to 4.
+ COMPILE_ASSERT(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(uint32_t),
+ UnexpectedHeapReferenceSize);
+ U4_TO_BUF_BE(buf, 0, sizeof(uint32_t));
fwrite(buf, 1, sizeof(uint32_t), header_fp_);
// The current time, in milliseconds since 0:00 GMT, 1/1/70.
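COMPILE_ASSERT is ART's pre-C++11 compile-time assertion macro; in modern C++ the same guard would read (hypothetical modernization, not part of this change):

    static_assert(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(uint32_t),
                  "hprof-conv hard-codes 4-byte identifiers");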
diff --git a/runtime/implicit_check_options.h b/runtime/implicit_check_options.h
deleted file mode 100644
index a6595b8..0000000
--- a/runtime/implicit_check_options.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_
-#define ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_
-
-#include "gc/heap.h"
-#include "gc/space/image_space.h"
-#include "instruction_set.h"
-#include "runtime.h"
-
-#include <string>
-
-namespace art {
-
-class ImplicitCheckOptions {
- public:
- static constexpr const char* kImplicitChecksOatHeaderKey = "implicit-checks";
-
- static std::string Serialize(bool explicit_null_checks, bool explicit_stack_overflow_checks,
- bool explicit_suspend_checks) {
- char tmp[4];
- tmp[0] = explicit_null_checks ? 'N' : 'n';
- tmp[1] = explicit_stack_overflow_checks ? 'O' : 'o';
- tmp[2] = explicit_suspend_checks ? 'S' : 's';
- tmp[3] = 0;
- return std::string(tmp);
- }
-
- static bool Parse(const char* str, bool* explicit_null_checks,
- bool* explicit_stack_overflow_checks, bool* explicit_suspend_checks) {
- if (str != nullptr && str[0] != 0 && str[1] != 0 && str[2] != 0 &&
- (str[0] == 'n' || str[0] == 'N') &&
- (str[1] == 'o' || str[1] == 'O') &&
- (str[2] == 's' || str[2] == 'S')) {
- *explicit_null_checks = str[0] == 'N';
- *explicit_stack_overflow_checks = str[1] == 'O';
- *explicit_suspend_checks = str[2] == 'S';
- return true;
- } else {
- return false;
- }
- }
-
- // Check whether the given flags are correct with respect to the current runtime and the given
- // executable flag.
- static bool CheckRuntimeSupport(bool executable, bool explicit_null_checks,
- bool explicit_stack_overflow_checks,
- bool explicit_suspend_checks, std::string* error_msg) {
- if (!executable) {
- // Not meant to be run, i.e., either we are compiling or dumping. Just accept.
- return true;
- }
-
- Runtime* runtime = Runtime::Current();
- // We really should have a runtime.
- DCHECK_NE(static_cast<Runtime*>(nullptr), runtime);
-
- if (runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
- // We are an interpret-only environment. Ignore the check value.
- return true;
- }
-
- if (runtime->ExplicitNullChecks() != explicit_null_checks ||
- runtime->ExplicitStackOverflowChecks() != explicit_stack_overflow_checks ||
- runtime->ExplicitSuspendChecks() != explicit_suspend_checks) {
- if (error_msg != nullptr) {
- // Create an error message.
-
- std::ostringstream os;
- os << "Explicit check options do not match runtime: ";
- os << runtime->ExplicitNullChecks() << " vs " << explicit_null_checks << " | ";
- os << runtime->ExplicitStackOverflowChecks() << " vs " << explicit_stack_overflow_checks
- << " | ";
- os << runtime->ExplicitSuspendChecks() << " vs " << explicit_suspend_checks;
-
- *error_msg = os.str();
- }
-
- // Currently we do not create correct images when pre-opting, so the emulator will fail with
- // this change. Once the change is in the tree, REMOVE.
- if (true) {
- // At least try to log it, though.
- if (error_msg != nullptr) {
- LOG(WARNING) << *error_msg;
- }
- return true;
- } else {
- return false;
- }
- }
-
- // Accepted.
- return true;
- }
-
- // Check (and override) the flags depending on current support in the ISA.
- // Right now will reset all flags to explicit except on ARM.
- static void CheckISASupport(InstructionSet isa, bool* explicit_null_checks,
- bool* explicit_stack_overflow_checks, bool* explicit_suspend_checks) {
- switch (isa) {
- case kArm:
- case kThumb2:
- break; // All checks implemented, leave as is.
-
- default: // No checks implemented, reset all to explicit checks.
- *explicit_null_checks = true;
- *explicit_stack_overflow_checks = true;
- *explicit_suspend_checks = true;
- }
- }
-
- static bool CheckForCompiling(InstructionSet host, InstructionSet target,
- bool* explicit_null_checks, bool* explicit_stack_overflow_checks,
- bool* explicit_suspend_checks) {
- // Check the boot image settings.
- Runtime* runtime = Runtime::Current();
- if (runtime != nullptr) {
- gc::space::ImageSpace* ispace = runtime->GetHeap()->GetImageSpace();
- if (ispace != nullptr) {
- const OatFile* oat_file = ispace->GetOatFile();
- if (oat_file != nullptr) {
- const char* v = oat_file->GetOatHeader().GetStoreValueByKey(kImplicitChecksOatHeaderKey);
- if (!Parse(v, explicit_null_checks, explicit_stack_overflow_checks,
- explicit_suspend_checks)) {
- LOG(FATAL) << "Should have been able to parse boot image implicit check values";
- }
- return true;
- }
- }
- }
-
- // Check the current runtime.
- bool cross_compiling = true;
- switch (host) {
- case kArm:
- case kThumb2:
- cross_compiling = target != kArm && target != kThumb2;
- break;
- default:
- cross_compiling = host != target;
- break;
- }
- if (!cross_compiling) {
- Runtime* runtime = Runtime::Current();
- *explicit_null_checks = runtime->ExplicitNullChecks();
- *explicit_stack_overflow_checks = runtime->ExplicitStackOverflowChecks();
- *explicit_suspend_checks = runtime->ExplicitSuspendChecks();
- return true;
- }
-
- // Give up.
- return false;
- }
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 5a03601..b552909 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -35,7 +35,6 @@
CHECK(self->IsExceptionPending());
return false;
}
- f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
Object* obj;
if (is_static) {
obj = f->GetDeclaringClass();
@@ -46,6 +45,7 @@
return false;
}
}
+ f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
// Report this field access to instrumentation if needed.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
@@ -211,7 +211,6 @@
CHECK(self->IsExceptionPending());
return false;
}
- f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
Object* obj;
if (is_static) {
obj = f->GetDeclaringClass();
@@ -223,6 +222,7 @@
return false;
}
}
+ f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
// Report this field access to instrumentation if needed. Since we only have the offset of
// the field from the base of the object, we need to look for it first.
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 349d4a3..329a984 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -30,6 +30,7 @@
#include "iftable.h"
#include "object_array-inl.h"
#include "read_barrier-inl.h"
+#include "reference-inl.h"
#include "runtime.h"
#include "string.h"
@@ -591,6 +592,11 @@
return this == ArtMethod::GetJavaLangReflectArtMethod<kReadBarrierOption>();
}
+template<ReadBarrierOption kReadBarrierOption>
+inline bool Class::IsReferenceClass() const {
+ return this == Reference::GetJavaLangRefReference<kReadBarrierOption>();
+}
+
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Class::IsClassClass() {
Class* java_lang_Class = GetClass<kVerifyFlags, kReadBarrierOption>()->
@@ -642,6 +648,30 @@
return GetFieldObject<ObjectArray<ObjectArray<Class>>>(field_offset);
}
+inline MemberOffset Class::GetDisableIntrinsicFlagOffset() {
+ CHECK(IsReferenceClass());
+ // First static field
+ DCHECK(GetSFields()->Get(0)->IsArtField());
+ DCHECK_STREQ(GetSFields()->Get(0)->GetName(), "disableIntrinsic");
+ return GetSFields()->Get(0)->GetOffset();
+}
+
+inline MemberOffset Class::GetSlowPathFlagOffset() {
+ CHECK(IsReferenceClass());
+ // Second static field
+ DCHECK(GetSFields()->Get(1)->IsArtField());
+ DCHECK_STREQ(GetSFields()->Get(1)->GetName(), "slowPathEnabled");
+ return GetSFields()->Get(1)->GetOffset();
+}
+
+inline bool Class::GetSlowPathEnabled() {
+ return GetField32(GetSlowPathFlagOffset());
+}
+
+inline void Class::SetSlowPath(bool enabled) {
+ SetField32<false>(GetSlowPathFlagOffset(), enabled);
+}
+
inline void Class::InitializeClassVisitor::operator()(
mirror::Object* obj, size_t usable_size) const {
DCHECK_LE(class_size_, usable_size);
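These getters rely on java.lang.ref.Reference declaring disableIntrinsic and slowPathEnabled as its first and second static fields, in that order; the DCHECK_STREQs verify the assumption in debug builds. A usage sketch (illustrative):

    mirror::Class* ref_class = mirror::Reference::GetJavaLangRefReference();
    // GetSlowPathFlagOffset() resolves the offset of the second static field;
    // SetSlowPath()/GetSlowPathEnabled() do a 32-bit write/read at that offset.
    ref_class->SetSlowPath(true);
    DCHECK(ref_class->GetSlowPathEnabled());
    ref_class->SetSlowPath(false);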
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 0f42044..648bdde 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -261,7 +261,7 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsTypeOfReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (GetAccessFlags<kVerifyFlags>() & kAccClassIsReference) != 0;
}
@@ -419,6 +419,9 @@
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsArtMethodClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ bool IsReferenceClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static MemberOffset ComponentTypeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
}
@@ -976,6 +979,12 @@
// For proxy class only.
ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // For reference class only.
+ MemberOffset GetDisableIntrinsicFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MemberOffset GetSlowPathFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool GetSlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetSlowPath(bool enabled) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
// fence.
class InitializeClassVisitor {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 3d45683..9dbfb56 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -249,7 +249,7 @@
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsReferenceInstance() {
- return GetClass<kVerifyFlags>()->IsReferenceClass();
+ return GetClass<kVerifyFlags>()->IsTypeOfReferenceClass();
}
template<VerifyObjectFlags kVerifyFlags>
@@ -806,7 +806,7 @@
} else {
DCHECK(!klass->IsVariableSize());
VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
- if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
+ if (UNLIKELY(klass->IsTypeOfReferenceClass<kVerifyNone>())) {
ref_visitor(klass, AsReference());
}
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 5f88d54f..bc5cbcb 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -57,7 +57,7 @@
ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Copy java.lang.ref.Reference.referent which isn't visited in
// Object::VisitReferences().
- DCHECK(klass->IsReferenceClass());
+ DCHECK(klass->IsTypeOfReferenceClass());
this->operator()(ref, mirror::Reference::ReferentOffset(), false);
}
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index 43767c8..b353402 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -22,6 +22,11 @@
namespace art {
namespace mirror {
+inline uint32_t Reference::ClassSize() {
+ uint32_t vtable_entries = Object::kVTableLength + 5;
+ return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0);
+}
+
inline bool Reference::IsEnqueuable() {
// Not using volatile reads as an optimization since this is only called with all the mutators
// suspended.
diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc
new file mode 100644
index 0000000..077cd4b
--- /dev/null
+++ b/runtime/mirror/reference.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reference.h"
+
+namespace art {
+namespace mirror {
+
+Class* Reference::java_lang_ref_Reference_ = nullptr;
+
+void Reference::SetClass(Class* java_lang_ref_Reference) {
+ CHECK(java_lang_ref_Reference_ == nullptr);
+ CHECK(java_lang_ref_Reference != nullptr);
+ java_lang_ref_Reference_ = java_lang_ref_Reference;
+}
+
+void Reference::ResetClass() {
+ CHECK(java_lang_ref_Reference_ != nullptr);
+ java_lang_ref_Reference_ = nullptr;
+}
+
+void Reference::VisitRoots(RootCallback* callback, void* arg) {
+ if (java_lang_ref_Reference_ != nullptr) {
+ callback(reinterpret_cast<mirror::Object**>(&java_lang_ref_Reference_),
+ arg, 0, kRootStickyClass);
+ }
+}
+
+} // namespace mirror
+} // namespace art
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 9c9d87b..07d47d3 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -17,7 +17,11 @@
#ifndef ART_RUNTIME_MIRROR_REFERENCE_H_
#define ART_RUNTIME_MIRROR_REFERENCE_H_
+#include "class.h"
#include "object.h"
+#include "object_callbacks.h"
+#include "read_barrier.h"
+#include "thread.h"
namespace art {
@@ -36,6 +40,14 @@
// C++ mirror of java.lang.ref.Reference
class MANAGED Reference : public Object {
public:
+ // Size of java.lang.ref.Reference.class.
+ static uint32_t ClassSize();
+
+ // Size of an instance of java.lang.ref.Reference.
+ static constexpr uint32_t InstanceSize() {
+ return sizeof(Reference);
+ }
+
static MemberOffset PendingNextOffset() {
return OFFSET_OF_OBJECT_MEMBER(Reference, pending_next_);
}
@@ -80,6 +92,16 @@
bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ static Class* GetJavaLangRefReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(java_lang_ref_Reference_ != nullptr);
+ return ReadBarrier::BarrierForRoot<mirror::Class, kReadBarrierOption>(
+ &java_lang_ref_Reference_);
+ }
+ static void SetClass(Class* klass);
+ static void ResetClass(void);
+ static void VisitRoots(RootCallback* callback, void* arg);
+
private:
// Note: This avoids a read barrier, it should only be used by the GC.
HeapReference<Object>* GetReferentReferenceAddr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -92,6 +114,8 @@
HeapReference<Reference> queue_next_; // Note this is Java volatile:
HeapReference<Object> referent_; // Note this is Java volatile:
+ static Class* java_lang_ref_Reference_;
+
friend struct art::ReferenceOffsets; // for verifying offset information
friend class gc::ReferenceProcessor;
friend class gc::ReferenceQueue;
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index da481e4..a6873ef 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -681,6 +681,8 @@
Thread* owner;
{
ScopedThreadStateChange tsc(self, kBlocked);
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
}
if (owner != nullptr) {
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index cf31064..5f718ba 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -35,7 +35,12 @@
// Suspend thread to build stack trace.
soa.Self()->TransitionFromRunnableToSuspended(kNative);
bool timed_out;
- Thread* thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ Thread* thread;
+ {
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
+ thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ }
if (thread != nullptr) {
// Must be runnable to create returned array.
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index bae67f2..8f83f96 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -116,18 +116,25 @@
static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) {
ScopedUtfChars name(env, java_name);
+ Thread* self;
{
ScopedObjectAccess soa(env);
if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
soa.Self()->SetThreadName(name.c_str());
return;
}
+ self = soa.Self();
}
// Suspend thread to prevent it from killing itself while we set its name. We don't just hold the
// thread list lock to avoid this, as setting the thread name causes the mutator lock to be
// acquired and released in the DDMS send code.
bool timed_out;
- Thread* thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ Thread* thread;
+ {
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
+ thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ }
if (thread != NULL) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index e17e60a..45ef9ae 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -61,7 +61,12 @@
}
// Suspend thread to build stack trace.
- Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
+ Thread* thread;
+ {
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
+ thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
+ }
if (thread != nullptr) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 9cefcb6..86c1bae 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -23,7 +23,6 @@
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "elf_file.h"
-#include "implicit_check_options.h"
#include "oat.h"
#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
@@ -80,36 +79,7 @@
}
ret.reset(OpenElfFile(file.get(), location, requested_base, false, executable, error_msg));
}
-
- if (ret.get() == nullptr) {
- return nullptr;
- }
-
- // Embedded options check. Right now only implicit checks.
- // TODO: Refactor to somewhere else?
- const char* implicit_checks_value = ret->GetOatHeader().
- GetStoreValueByKey(ImplicitCheckOptions::kImplicitChecksOatHeaderKey);
-
- if (implicit_checks_value == nullptr) {
- *error_msg = "Did not find implicit checks value.";
- return nullptr;
- }
-
- bool explicit_null_checks, explicit_so_checks, explicit_suspend_checks;
- if (ImplicitCheckOptions::Parse(implicit_checks_value, &explicit_null_checks,
- &explicit_so_checks, &explicit_suspend_checks)) {
- // Check whether the runtime agrees with the recorded checks.
- if (ImplicitCheckOptions::CheckRuntimeSupport(executable, explicit_null_checks,
- explicit_so_checks, explicit_suspend_checks,
- error_msg)) {
- return ret.release();
- } else {
- return nullptr;
- }
- } else {
- *error_msg = "Failed parsing implicit check options.";
- return nullptr;
- }
+ return ret.release();
}
OatFile* OatFile::OpenWritable(File* file, const std::string& location, std::string* error_msg) {
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 0e6f4d8..592deed 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -24,6 +24,8 @@
// For size_t.
#include <stdlib.h>
+#include "base/macros.h"
+
namespace art {
namespace mirror {
class Class;
@@ -57,8 +59,7 @@
// A callback for visiting an object in the heap.
typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
// A callback used for marking an object, returns the new address of the object if the object moved.
-typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg)
- __attribute__((warn_unused_result));
+typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED;
// A callback for verifying roots.
typedef void (VerifyRootCallback)(const mirror::Object* root, void* arg, size_t vreg,
const StackVisitor* visitor, RootType root_type);
@@ -68,13 +69,12 @@
// A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
// address the object (if the object didn't move, returns the object input parameter).
-typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg)
- __attribute__((warn_unused_result));
+typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED;
// Returns true if the object in the heap reference is marked, if it is marked and has moved the
// callback updates the heap reference contain the new value.
typedef bool (IsHeapReferenceMarkedCallback)(mirror::HeapReference<mirror::Object>* object,
- void* arg) __attribute__((warn_unused_result));
+ void* arg) WARN_UNUSED;
typedef void (ProcessMarkStackCallback)(void* arg);
} // namespace art
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index a016cc5..2761cd5 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -263,38 +263,6 @@
verify_ = true;
image_isa_ = kRuntimeISA;
- // Default to explicit checks. Switch off with -implicit-checks:.
- // or setprop dalvik.vm.implicit_checks check1,check2,...
-#ifdef HAVE_ANDROID_OS
- {
- char buf[PROP_VALUE_MAX];
- property_get("dalvik.vm.implicit_checks", buf, "null,stack");
- std::string checks(buf);
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else if (val == "null") {
- explicit_checks_ &= ~kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ &= ~kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ &= ~kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = 0;
- }
- }
- }
-#else
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
-#endif
-
for (size_t i = 0; i < options.size(); ++i) {
if (true && options[0].first == "-Xzygote") {
LOG(INFO) << "option[" << i << "]=" << options[i].first;
@@ -310,6 +278,7 @@
Exit(0);
} else if (StartsWith(option, "-Xbootclasspath:")) {
boot_class_path_string_ = option.substr(strlen("-Xbootclasspath:")).data();
+ LOG(INFO) << "setting boot class path to " << boot_class_path_string_;
} else if (option == "-classpath" || option == "-cp") {
// TODO: support -Djava.class.path
i++;
@@ -587,54 +556,6 @@
if (!ParseUnsignedInteger(option, ':', &profiler_options_.max_stack_depth_)) {
return false;
}
- } else if (StartsWith(option, "-implicit-checks:")) {
- std::string checks;
- if (!ParseStringAfterChar(option, ':', &checks)) {
- return false;
- }
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else if (val == "null") {
- explicit_checks_ &= ~kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ &= ~kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ &= ~kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = 0;
- } else {
- return false;
- }
- }
- } else if (StartsWith(option, "-explicit-checks:")) {
- std::string checks;
- if (!ParseStringAfterChar(option, ':', &checks)) {
- return false;
- }
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = 0;
- } else if (val == "null") {
- explicit_checks_ |= kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ |= kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ |= kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else {
- return false;
- }
- }
} else if (StartsWith(option, "-Xcompiler:")) {
if (!ParseStringAfterChar(option, ':', &compiler_executable_)) {
return false;
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 4c74be6..dab04d4 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -84,10 +84,6 @@
bool verify_;
InstructionSet image_isa_;
- static constexpr uint32_t kExplicitNullCheck = 1;
- static constexpr uint32_t kExplicitSuspendCheck = 2;
- static constexpr uint32_t kExplicitStackOverflowCheck = 4;
- uint32_t explicit_checks_;
// Whether or not we use homogeneous space compaction to avoid OOM errors. If enabled,
// the heap will attempt to create an extra space which enables compacting from a malloc space to
// another malloc space when we are about to throw OOM.
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 5128b19..982553d 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -48,6 +48,7 @@
kIntrinsicMinMaxFloat,
kIntrinsicMinMaxDouble,
kIntrinsicSqrt,
+ kIntrinsicGet,
kIntrinsicCharAt,
kIntrinsicCompareTo,
kIntrinsicIsEmptyOrLength,
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 94d6cdf..1fb7948 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -140,7 +140,10 @@
suspend_handler_(nullptr),
stack_overflow_handler_(nullptr),
verify_(false),
- target_sdk_version_(0) {
+ target_sdk_version_(0),
+ implicit_null_checks_(false),
+ implicit_so_checks_(false),
+ implicit_suspend_checks_(false) {
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
callee_save_methods_[i] = nullptr;
}
@@ -580,41 +583,6 @@
GetInstrumentation()->ForceInterpretOnly();
}
- bool implicit_checks_supported = false;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- implicit_checks_supported = true;
- break;
- default:
- break;
- }
-
- if (!options->interpreter_only_ && implicit_checks_supported &&
- (options->explicit_checks_ != (ParsedOptions::kExplicitSuspendCheck |
- ParsedOptions::kExplicitNullCheck |
- ParsedOptions::kExplicitStackOverflowCheck) || kEnableJavaStackTraceHandler)) {
- fault_manager.Init();
-
- // These need to be in a specific order. The null point check handler must be
- // after the suspend check and stack overflow check handlers.
- if ((options->explicit_checks_ & ParsedOptions::kExplicitSuspendCheck) == 0) {
- suspend_handler_ = new SuspensionHandler(&fault_manager);
- }
-
- if ((options->explicit_checks_ & ParsedOptions::kExplicitStackOverflowCheck) == 0) {
- stack_overflow_handler_ = new StackOverflowHandler(&fault_manager);
- }
-
- if ((options->explicit_checks_ & ParsedOptions::kExplicitNullCheck) == 0) {
- null_pointer_handler_ = new NullPointerHandler(&fault_manager);
- }
-
- if (kEnableJavaStackTraceHandler) {
- new JavaStackTraceHandler(&fault_manager);
- }
- }
-
heap_ = new gc::Heap(options->heap_initial_size_,
options->heap_growth_limit_,
options->heap_min_free_,
@@ -647,6 +615,42 @@
BlockSignals();
InitPlatformSignalHandlers();
+ // Change the implicit checks flags based on runtime architecture.
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ case kX86:
+ implicit_null_checks_ = true;
+ implicit_so_checks_ = true;
+ break;
+ default:
+ // Keep the defaults.
+ break;
+ }
+
+ if (!options->interpreter_only_ &&
+ (implicit_null_checks_ || implicit_so_checks_ || implicit_suspend_checks_)) {
+ fault_manager.Init();
+
+ // These need to be in a specific order. The null pointer check handler must be
+ // after the suspend check and stack overflow check handlers.
+ if (implicit_suspend_checks_) {
+ suspend_handler_ = new SuspensionHandler(&fault_manager);
+ }
+
+ if (implicit_so_checks_) {
+ stack_overflow_handler_ = new StackOverflowHandler(&fault_manager);
+ }
+
+ if (implicit_null_checks_) {
+ null_pointer_handler_ = new NullPointerHandler(&fault_manager);
+ }
+
+ if (kEnableJavaStackTraceHandler) {
+ new JavaStackTraceHandler(&fault_manager);
+ }
+ }
+
java_vm_ = new JavaVMExt(this, options.get());
Thread::Startup();
@@ -923,6 +927,7 @@
mirror::ArtField::VisitRoots(callback, arg);
mirror::ArtMethod::VisitRoots(callback, arg);
mirror::Class::VisitRoots(callback, arg);
+ mirror::Reference::VisitRoots(callback, arg);
mirror::StackTraceElement::VisitRoots(callback, arg);
mirror::String::VisitRoots(callback, arg);
mirror::Throwable::VisitRoots(callback, arg);
@@ -1218,37 +1223,6 @@
argv->push_back("--compiler-filter=interpret-only");
}
- argv->push_back("--runtime-arg");
- std::string checkstr = "-implicit-checks";
-
- int nchecks = 0;
- char checksep = ':';
-
- if (!ExplicitNullChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "null";
- ++nchecks;
- }
- if (!ExplicitSuspendChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "suspend";
- ++nchecks;
- }
-
- if (!ExplicitStackOverflowChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "stack";
- ++nchecks;
- }
-
- if (nchecks == 0) {
- checkstr += ":none";
- }
- argv->push_back(checkstr);
-
// Make the dex2oat instruction set match that of the launching runtime. If we have multiple
// architecture support, dex2oat may be compiled as a different instruction-set than that
// currently being executed.
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f839be1..28f1217 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -596,6 +596,11 @@
// Specifies target SDK version to allow workarounds for certain API levels.
int32_t target_sdk_version_;
+ // Implicit checks flags.
+ bool implicit_null_checks_; // NullPointer checks are implicit.
+ bool implicit_so_checks_; // StackOverflow checks are implicit.
+ bool implicit_suspend_checks_; // Thread suspension checks are implicit.
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 38f1307..a5caa07 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -57,26 +57,24 @@
}
inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
-#ifdef NDEBUG
- UNUSED(check_locks); // Keep GCC happy about unused parameters.
-#else
- CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
- if (check_locks) {
- bool bad_mutexes_held = false;
- for (int i = kLockLevelCount - 1; i >= 0; --i) {
- // We expect no locks except the mutator_lock_.
- if (i != kMutatorLock) {
- BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
- LOG(ERROR) << "holding \"" << held_mutex->GetName()
- << "\" at point where thread suspension is expected";
- bad_mutexes_held = true;
+ if (kIsDebugBuild) {
+ CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
+ if (check_locks) {
+ bool bad_mutexes_held = false;
+ for (int i = kLockLevelCount - 1; i >= 0; --i) {
+ // We expect no locks except the mutator_lock_ or thread list suspend thread lock.
+ if (i != kMutatorLock && i != kThreadListSuspendThreadLock) {
+ BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
+ if (held_mutex != NULL) {
+ LOG(ERROR) << "holding \"" << held_mutex->GetName()
+ << "\" at point where thread suspension is expected";
+ bad_mutexes_held = true;
+ }
}
}
+ CHECK(!bad_mutexes_held);
}
- CHECK(!bad_mutexes_held);
}
-#endif
}
inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7827dfb..5d15fd4 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -231,47 +231,95 @@
return stack_size;
}
+// Global variable to prevent the compiler optimizing away the page reads for the stack.
+byte dont_optimize_this;
+
// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
// overflow is detected. It is located right below the stack_end_. Just below that
// is the StackOverflow reserved region used when creating the StackOverflow
// exception.
+//
+// There is a little complexity here that deserves a special mention. When running on the
+// host (glibc), the process's main thread's stack is allocated with a special flag
+// to prevent memory being allocated when it's not needed. This flag makes the
+// kernel only allocate memory for the stack by growing down in memory. Because we
+// want to put an mprotected region far away from that at the stack top, we need
+// to make sure the pages for the stack are mapped in before we call mprotect. We do
+// this by reading every page from the stack bottom (highest address) to the stack top.
+// We then madvise this away.
void Thread::InstallImplicitProtection(bool is_main_stack) {
byte* pregion = tlsPtr_.stack_end;
+ byte* stack_lowmem = tlsPtr_.stack_begin;
+ byte* stack_top = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(&pregion) &
+ ~(kPageSize - 1)); // Page containing current top of stack.
+ const bool running_on_intel = (kRuntimeISA == kX86) || (kRuntimeISA == kX86_64);
+
+ if (running_on_intel) {
+ // On Intel, we need to map in the main stack. This must be done by reading from the
+ // current stack pointer downwards as the stack is mapped using VM_GROWSDOWN
+ // in the kernel. Any access more than a page below the current SP will cause
+ // a segv.
+ if (is_main_stack) {
+ // First we need to unprotect the protected region because this may
+ // be called more than once for a particular stack and we will crash
+ // if we try to read the protected page.
+ mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_READ);
+
+ // Read every page from the high address to the low.
+ for (byte* p = stack_top; p > stack_lowmem; p -= kPageSize) {
+ dont_optimize_this = *p;
+ }
+ }
+ }
+
+ // Check and place a marker word at the lowest usable address in the stack. This
+ // is used to prevent installing the protection twice.
constexpr uint32_t kMarker = 0xdadadada;
uintptr_t *marker = reinterpret_cast<uintptr_t*>(pregion);
if (*marker == kMarker) {
- // The region has already been set up.
+ // The region has already been set up. But on the main stack on the host we have
+ // removed the protected region in order to read the stack memory. We need to put
+ // this back again.
+ if (is_main_stack && running_on_intel) {
+ mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_NONE);
+ madvise(stack_lowmem, stack_top - stack_lowmem, MADV_DONTNEED);
+ }
return;
}
// Add marker so that we can detect a second attempt to do this.
*marker = kMarker;
- pregion -= kStackOverflowProtectedSize;
-
- // Touch the pages in the region to map them in. Otherwise mprotect fails. Only
- // need to do this on the main stack. We only need to touch one byte per page.
- if (is_main_stack) {
- byte* start = pregion;
- byte* end = pregion + kStackOverflowProtectedSize;
- while (start < end) {
- *start = static_cast<byte>(0);
- start += kPageSize;
+ if (!running_on_intel) {
+ // Running on !Intel, stacks are mapped cleanly. The protected region for the
+ // main stack just needs to be mapped in. We do this by writing one byte per page.
+ for (byte* p = pregion - kStackOverflowProtectedSize; p < pregion; p += kPageSize) {
+ *p = 0;
}
}
+ pregion -= kStackOverflowProtectedSize;
+
VLOG(threads) << "installing stack protected region at " << std::hex <<
static_cast<void*>(pregion) << " to " <<
static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
+
if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. Reason:"
<< strerror(errno);
}
// Tell the kernel that we won't be needing these pages any more.
+ // NB. madvise will probably write zeroes into the memory (on linux it does).
if (is_main_stack) {
- madvise(pregion, kStackOverflowProtectedSize, MADV_DONTNEED);
+ if (running_on_intel) {
+ // On the host, it's the whole stack (minus a page to prevent overwrite of stack top).
+ madvise(stack_lowmem, stack_top - stack_lowmem - kPageSize, MADV_DONTNEED);
+ } else {
+ // On Android, just the protected region.
+ madvise(pregion, kStackOverflowProtectedSize, MADV_DONTNEED);
+ }
}
}
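The host path above is order-sensitive; condensed into one place for reference (names come from the code above; sketch only, error handling elided):

    // 1. Unprotect any previously-installed guard so the read loop cannot fault.
    mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_READ);
    // 2. Touch every page from the current SP downwards; VM_GROWSDOWN maps them in.
    for (byte* p = stack_top; p > stack_lowmem; p -= kPageSize) {
      dont_optimize_this = *p;
    }
    // 3. Write the marker, then protect the guard region below stack_end_.
    *reinterpret_cast<uintptr_t*>(pregion) = kMarker;
    mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_NONE);
    // 4. Hand the touched pages back, keeping one page below SP mapped.
    madvise(stack_lowmem, stack_top - stack_lowmem - kPageSize, MADV_DONTNEED);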
@@ -532,13 +580,17 @@
// Install the protected region if we are doing implicit overflow checks.
if (implicit_stack_check) {
if (is_main_thread) {
- // The main thread has a 16K protected region at the bottom. We need
+ size_t guardsize;
+ pthread_attr_t attributes;
+ CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), "guard size query");
+ CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, &guardsize), "guard size query");
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), "guard size query");
+ // The main thread might have a protected region at the bottom. We need
// to install our own region so we need to move the limits
// of the stack to make room for it.
- constexpr uint32_t kDelta = 16 * KB;
- tlsPtr_.stack_begin += kDelta;
- tlsPtr_.stack_end += kDelta;
- tlsPtr_.stack_size -= kDelta;
+ tlsPtr_.stack_begin += guardsize;
+ tlsPtr_.stack_end += guardsize;
+ tlsPtr_.stack_size -= guardsize;
}
InstallImplicitProtection(is_main_thread);
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index b649b62..ff1a079 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -170,16 +170,7 @@
// individual thread requires polling. delay_us is the requested sleep and total_delay_us
// accumulates the total time spent sleeping for timeouts. The first sleep is just a yield,
// subsequent sleeps increase delay_us from 1ms to 500ms by doubling.
-static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us,
- bool holding_locks) {
- if (!holding_locks) {
- for (int i = kLockLevelCount - 1; i >= 0; --i) {
- BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
- LOG(FATAL) << "Holding " << held_mutex->GetName() << " while sleeping for thread suspension";
- }
- }
- }
+static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us) {
useconds_t new_delay_us = (*delay_us) * 2;
CHECK_GE(new_delay_us, *delay_us);
if (new_delay_us < 500000) { // Don't allow sleeping to be more than 0.5s.
@@ -244,7 +235,7 @@
useconds_t total_delay_us = 0;
do {
useconds_t delay_us = 100;
- ThreadSuspendSleep(self, &delay_us, &total_delay_us, true);
+ ThreadSuspendSleep(self, &delay_us, &total_delay_us);
} while (!thread->IsSuspended());
// Shouldn't need to wait for longer than 1000 microseconds.
constexpr useconds_t kLongWaitThresholdUS = 1000;
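For intuition, the backoff reaches its cap quickly. A standalone toy driver (hypothetical, not ART code) mirroring the update rule:

    #include <cstdio>

    int main() {
      unsigned delay_us = 100;  // First wait is just a yield; this models the sleeps after it.
      for (int i = 0; i < 16; ++i) {
        unsigned new_delay_us = delay_us * 2;
        if (new_delay_us < 500000) {  // Don't allow sleeping to be more than 0.5s.
          delay_us = new_delay_us;
        }
        std::printf("sleep %d: %u us\n", i, delay_us);  // 200, 400, ..., 409600, 409600, ...
      }
      return 0;
    }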
@@ -444,6 +435,11 @@
while (true) {
Thread* thread;
{
+ // Note: this will transition to runnable and potentially suspend. We ensure only one thread
+ // is requesting another suspend, to avoid deadlock, by requiring this function be called
+ // holding Locks::thread_list_suspend_thread_lock_. It's important that this thread suspend
+ // rather than request another suspension, to avoid potential cycles of threads requesting
+ // each other's suspension.
ScopedObjectAccess soa(self);
MutexLock mu(self, *Locks::thread_list_lock_);
thread = Thread::FromManagedThread(soa, peer);
@@ -483,7 +479,7 @@
}
// Release locks and come out of runnable state.
}
- ThreadSuspendSleep(self, &delay_us, &total_delay_us, false);
+ ThreadSuspendSleep(self, &delay_us, &total_delay_us);
}
}
@@ -502,9 +498,14 @@
CHECK_NE(thread_id, kInvalidThreadId);
while (true) {
{
- Thread* thread = NULL;
+ // Note: this will transition to runnable and potentially suspend. We ensure only one thread
+ // is requesting another suspend, to avoid deadlock, by requiring this function be called
+ // holding Locks::thread_list_suspend_thread_lock_. It's important that this thread suspend
+ // rather than request another suspension, to avoid potential cycles of threads requesting
+ // each other's suspension.
ScopedObjectAccess soa(self);
MutexLock mu(self, *Locks::thread_list_lock_);
+ Thread* thread = nullptr;
for (const auto& it : list_) {
if (it->GetThreadId() == thread_id) {
thread = it;
@@ -550,7 +551,7 @@
}
// Release locks and come out of runnable state.
}
- ThreadSuspendSleep(self, &delay_us, &total_delay_us, false);
+ ThreadSuspendSleep(self, &delay_us, &total_delay_us);
}
}
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index d46987a..1b67ac0 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -68,6 +68,7 @@
// is set to true.
static Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
bool* timed_out)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
@@ -77,6 +78,7 @@
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
// thread, which may be terminating. If the suspension times out then *timed_out is set to true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
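All four call sites updated in this change take the same shape, which these EXCLUSIVE_LOCKS_REQUIRED annotations now let the thread-safety analysis enforce. The canonical caller (sketch; resume and error handling elided):

    bool timed_out;
    Thread* thread;
    {
      // Serialize suspension requests so two threads cannot try to suspend each other.
      MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
      thread = Runtime::Current()->GetThreadList()->SuspendThreadByThreadId(
          thread_id, false /* debug_suspension */, &timed_out);
    }
    if (thread != nullptr) {
      // ... inspect the suspended thread, then resume it ...
    }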
diff --git a/runtime/utils.h b/runtime/utils.h
index b47de81..2ea4953 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -167,8 +167,7 @@
// For rounding integers.
template<typename T>
-static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n)
- __attribute__((warn_unused_result));
+static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) WARN_UNUSED;
template<typename T>
static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) {
@@ -178,8 +177,7 @@
}
template<typename T>
-static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n)
- __attribute__((warn_unused_result));
+static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) WARN_UNUSED;
template<typename T>
static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) {
@@ -188,7 +186,7 @@
// For aligning pointers.
template<typename T>
-static inline T* AlignDown(T* x, uintptr_t n) __attribute__((warn_unused_result));
+static inline T* AlignDown(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
static inline T* AlignDown(T* x, uintptr_t n) {
@@ -196,7 +194,7 @@
}
template<typename T>
-static inline T* AlignUp(T* x, uintptr_t n) __attribute__((warn_unused_result));
+static inline T* AlignUp(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
static inline T* AlignUp(T* x, uintptr_t n) {
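
The utils.h hunks are a pure spelling change: four occurrences of the raw GCC/Clang attribute are replaced by the WARN_UNUSED macro. A self-contained sketch of the semantics, assuming WARN_UNUSED expands to the same attribute as the spelling it replaces (the *Sketch helpers mirror the usual power-of-two formulation and are not ART's own bodies):

// Assumed expansion; the real macro lives in ART's shared headers.
#define WARN_UNUSED __attribute__((warn_unused_result))

// These helpers return the rounded value rather than mutating their argument,
// so a caller that discards the result almost certainly has a bug; the
// attribute turns that mistake into a compiler warning.
template <typename T>
static constexpr T RoundDownSketch(T x, T n) WARN_UNUSED;
template <typename T>
static constexpr T RoundDownSketch(T x, T n) {
  return x & -n;  // requires n to be a power of two
}

template <typename T>
static constexpr T RoundUpSketch(T x, T n) WARN_UNUSED;
template <typename T>
static constexpr T RoundUpSketch(T x, T n) {
  return RoundDownSketch(x + n - 1, n);
}

static_assert(RoundDownSketch(13, 8) == 8, "13 rounds down to 8");
static_assert(RoundUpSketch(13, 8) == 16, "13 rounds up to 16");
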
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index eabb993..c0e4ae7 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1727,6 +1727,15 @@
const uint32_t type_idx = (is_checkcast) ? inst->VRegB_21c() : inst->VRegC_22c();
const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) {
+ // If this is a primitive type, fail HARD.
+ mirror::Class* klass = (*dex_cache_)->GetResolvedType(type_idx);
+ if (klass != nullptr && klass->IsPrimitive()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "using primitive type "
+ << dex_file_->StringByTypeIdx(type_idx) << " in instanceof in "
+ << GetDeclaringClass();
+ break;
+ }
+
DCHECK_NE(failures_.size(), 0U);
if (!is_checkcast) {
work_line_->SetRegisterType(inst->VRegA_22c(), reg_types_.Boolean());
@@ -1971,6 +1980,7 @@
if (!orig_type.Equals(cast_type) &&
!cast_type.IsUnresolvedTypes() && !orig_type.IsUnresolvedTypes() &&
+ cast_type.HasClass() && // Could be conflict type, make sure it has a class.
!cast_type.GetClass()->IsInterface() &&
(orig_type.IsZero() ||
orig_type.IsStrictlyAssignableFrom(cast_type.Merge(orig_type, &reg_types_)))) {
@@ -2762,12 +2772,30 @@
* "try" block when they throw, control transfers out of the method.)
*/
if ((opcode_flags & Instruction::kThrow) != 0 && insn_flags_[work_insn_idx_].IsInTry()) {
- bool within_catch_all = false;
+ bool has_catch_all_handler = false;
CatchHandlerIterator iterator(*code_item_, work_insn_idx_);
+ // Need the linker to try to resolve the handled class to check if it's Throwable.
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+
for (; iterator.HasNext(); iterator.Next()) {
- if (iterator.GetHandlerTypeIndex() == DexFile::kDexNoIndex16) {
- within_catch_all = true;
+ uint16_t handler_type_idx = iterator.GetHandlerTypeIndex();
+ if (handler_type_idx == DexFile::kDexNoIndex16) {
+ has_catch_all_handler = true;
+ } else {
+ // It is also a catch-all if it is java.lang.Throwable.
+ mirror::Class* klass = linker->ResolveType(*dex_file_, handler_type_idx, *dex_cache_,
+ *class_loader_);
+ if (klass != nullptr) {
+ if (klass == mirror::Throwable::GetJavaLangThrowable()) {
+ has_catch_all_handler = true;
+ }
+ } else {
+ // Clear exception.
+ Thread* self = Thread::Current();
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ }
}
/*
* Merge registers into the "catch" block. We want to use the "savedRegs" rather than
@@ -2783,7 +2811,7 @@
* If the monitor stack depth is nonzero, there must be a "catch all" handler for this
* instruction. This does apply to monitor-exit because of async exception handling.
*/
- if (work_line_->MonitorStackDepth() > 0 && !within_catch_all) {
+ if (work_line_->MonitorStackDepth() > 0 && !has_catch_all_handler) {
/*
* The state in work_line reflects the post-execution state. If the current instruction is a
* monitor-enter and the monitor stack was empty, we don't need a catch-all (if it throws,
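
The method_verifier.cc hunks harden three paths: check-cast/instanceof on a primitive type now fails hard instead of limping on with a conflict type, the cast-type comparison guards against conflict types that have no class, and the monitor-stack check now recognizes a handler typed as java.lang.Throwable as a catch-all. That last rule distills to roughly the following (the helper name is hypothetical; in the patch the logic is inlined in the handler loop):

// Hypothetical distillation of the catch-all test added above.
static bool IsEffectivelyCatchAll(uint16_t handler_type_idx, mirror::Class* resolved_type) {
  if (handler_type_idx == DexFile::kDexNoIndex16) {
    return true;  // untyped handler: catches everything by construction
  }
  // A handler typed as java.lang.Throwable also catches everything, so the
  // "monitor stack nonzero => must have a catch-all" check must accept it too.
  return resolved_type != nullptr &&
         resolved_type == mirror::Throwable::GetJavaLangThrowable();
}
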
diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk
index 8e25339..d86735d 100644
--- a/sigchainlib/Android.mk
+++ b/sigchainlib/Android.mk
@@ -23,8 +23,23 @@
LOCAL_MODULE_TAGS := optional
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
LOCAL_SRC_FILES := sigchain.cc
+LOCAL_CLANG = $(ART_TARGET_CLANG)
LOCAL_MODULE:= libsigchain
LOCAL_SHARED_LIBRARIES := liblog libdl
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common_build.mk
include $(BUILD_SHARED_LIBRARY)
+
+# Build host library.
+include $(CLEAR_VARS)
+LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+LOCAL_MODULE_TAGS := optional
+LOCAL_IS_HOST_MODULE := true
+LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+LOCAL_CLANG = $(ART_HOST_CLANG)
+LOCAL_SRC_FILES := sigchain.cc
+LOCAL_MODULE:= libsigchain
+LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+LOCAL_LDLIBS = -ldl
+LOCAL_MULTILIB := both
+include $(BUILD_HOST_SHARED_LIBRARY)
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 5a5805f..6f93083 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -14,12 +14,22 @@
* limitations under the License.
*/
+#ifdef HAVE_ANDROID_OS
#include <android/log.h>
+#else
+#include <stdarg.h>
+#include <iostream>
+#endif
+
#include <dlfcn.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#if defined(__APPLE__)
+#define _NSIG NSIG
+#endif
+
namespace art {
class SignalAction {
@@ -67,7 +77,11 @@
va_list ap;
va_start(ap, format);
vsnprintf(buf, sizeof(buf), format, ap);
+#ifdef HAVE_ANDROID_OS
__android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf);
+#else
+ std::cout << buf << "\n";
+#endif
va_end(ap);
}
@@ -104,10 +118,16 @@
if ((action.sa_flags & SA_SIGINFO) == 0) {
if (action.sa_handler != NULL) {
action.sa_handler(sig);
+ } else {
+ signal(sig, SIG_DFL);
+ raise(sig);
}
} else {
if (action.sa_sigaction != NULL) {
action.sa_sigaction(sig, info, context);
+ } else {
+ signal(sig, SIG_DFL);
+ raise(sig);
}
}
}
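
The sigchain.cc hunks make the library buildable for the host (conditional Android logging, stdarg/iostream off-device, and _NSIG defined via NSIG on Mac OS, which lacks the underscore-prefixed name) and close a hole in handler dispatch: a claimed signal with no chained handler previously vanished. A standalone sketch of the new fallback, simplified to the non-SA_SIGINFO arm (the function name is illustrative):

#include <signal.h>
#include <stddef.h>

// If no chained handler exists for a claimed signal, restore the default
// disposition and re-raise, so e.g. an unhandled SIGSEGV still kills the
// process instead of being silently swallowed by the chain.
static void ForwardOrDefaultSketch(int sig, const struct sigaction& chained) {
  if (chained.sa_handler != NULL) {
    chained.sa_handler(sig);  // forward to the previously installed handler
  } else {
    signal(sig, SIG_DFL);     // fall back to the default disposition...
    raise(sig);               // ...and deliver the signal again
  }
}

The test-harness hunk that follows preloads this library via LD_PRELOAD. Preloading is what lets sigchain observe signal-handler registration in the first place, since it works by interposing the libc entry points; a hedged sketch of that idiom, not sigchain's exact code:

#include <dlfcn.h>   // glibc needs _GNU_SOURCE for RTLD_NEXT; g++ defines it by default
#include <signal.h>

// LD_PRELOAD puts this definition ahead of libc's in symbol lookup, so every
// sigaction() call in the process can be observed and chained. RTLD_NEXT then
// locates the real libc implementation to delegate to.
extern "C" int sigaction(int sig, const struct sigaction* act, struct sigaction* oact) {
  using RealSigAction = int (*)(int, const struct sigaction*, struct sigaction*);
  static RealSigAction real =
      reinterpret_cast<RealSigAction>(dlsym(RTLD_NEXT, "sigaction"));
  // ... a real interposer would record or redirect 'act' here ...
  return real(sig, act, oact);
}
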
diff --git a/test/Android.oat.mk b/test/Android.oat.mk
index 16300bb..2b142db 100644
--- a/test/Android.oat.mk
+++ b/test/Android.oat.mk
@@ -203,6 +203,7 @@
ANDROID_ROOT=$(HOST_OUT) \
ANDROID_LOG_TAGS='*:d' \
LD_LIBRARY_PATH=$$($(2)ART_HOST_OUT_SHARED_LIBRARIES) \
+ LD_PRELOAD=libsigchain$$(ART_HOST_SHLIB_EXTENSION) \
$(HOST_OUT_EXECUTABLES)/dalvikvm$$($(2)ART_PHONY_TEST_HOST_SUFFIX) $(DALVIKVM_FLAGS) $(5) \
-XXlib:libartd$(HOST_SHLIB_SUFFIX) -Ximage:$$(HOST_CORE_IMG_LOCATION) \
-classpath $(ART_HOST_TEST_DIR)/android-data-$$@/oat-test-dex-$(1).jar \