Merge "Increase test timeouts"
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 4a98342..951b075 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -606,13 +606,13 @@
INTRINSIC(SunMiscUnsafe, Get ## type, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
type_flags), \
INTRINSIC(SunMiscUnsafe, Get ## type ## Volatile, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- type_flags | kIntrinsicFlagIsVolatile), \
+ (type_flags) | kIntrinsicFlagIsVolatile), \
INTRINSIC(SunMiscUnsafe, Put ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
type_flags), \
INTRINSIC(SunMiscUnsafe, Put ## type ## Volatile, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
- type_flags | kIntrinsicFlagIsVolatile), \
+ (type_flags) | kIntrinsicFlagIsVolatile), \
INTRINSIC(SunMiscUnsafe, PutOrdered ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
- type_flags | kIntrinsicFlagIsOrdered)
+ (type_flags) | kIntrinsicFlagIsOrdered)
UNSAFE_GET_PUT(Int, I, kIntrinsicFlagNone),
UNSAFE_GET_PUT(Long, J, kIntrinsicFlagIsLong),
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 606302b..03c94a4 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -104,7 +104,7 @@
bool VerificationResults::IsCandidateForCompilation(MethodReference&,
const uint32_t access_flags) {
- if (!compiler_options_->IsCompilationEnabled()) {
+ if (!compiler_options_->IsBytecodeCompilationEnabled()) {
return false;
}
// Don't compile class initializers unless kEverything.
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 1ab1d31..d20f510 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -553,8 +553,8 @@
MethodReference method_ref(&dex_file, method_idx);
if ((access_flags & kAccNative) != 0) {
- // Are we interpreting only and have support for generic JNI down calls?
- if (!driver->GetCompilerOptions().IsCompilationEnabled() &&
+ // Are we extracting only and have support for generic JNI down calls?
+ if (!driver->GetCompilerOptions().IsJniCompilationEnabled() &&
InstructionSetHasGenericJniStub(driver->GetInstructionSet())) {
// Leaving this empty will trigger the generic JNI version
} else {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 6bbd3c5..60b700a 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -88,8 +88,12 @@
return compiler_filter_ == CompilerFilter::kVerifyAtRuntime;
}
- bool IsCompilationEnabled() const {
- return CompilerFilter::IsCompilationEnabled(compiler_filter_);
+ bool IsBytecodeCompilationEnabled() const {
+ return CompilerFilter::IsBytecodeCompilationEnabled(compiler_filter_);
+ }
+
+ bool IsJniCompilationEnabled() const {
+ return CompilerFilter::IsJniCompilationEnabled(compiler_filter_);
}
bool IsVerificationEnabled() const {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 8da9f06..140db0c 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1413,8 +1413,8 @@
offset = CompiledCode::AlignCode(offset, instruction_set); \
adjusted_offset = offset + CompiledCode::CodeDelta(instruction_set); \
oat_header_->Set ## fn_name ## Offset(adjusted_offset); \
- field = compiler_driver_->Create ## fn_name(); \
- offset += field->size();
+ (field) = compiler_driver_->Create ## fn_name(); \
+ offset += (field)->size();
DO_TRAMPOLINE(jni_dlsym_lookup_, JniDlsymLookup);
DO_TRAMPOLINE(quick_generic_jni_trampoline_, QuickGenericJniTrampoline);
@@ -1526,8 +1526,8 @@
if (kIsDebugBuild) {
uint32_t size_total = 0;
#define DO_STAT(x) \
- VLOG(compiler) << #x "=" << PrettySize(x) << " (" << x << "B)"; \
- size_total += x;
+ VLOG(compiler) << #x "=" << PrettySize(x) << " (" << (x) << "B)"; \
+ size_total += (x);
DO_STAT(size_dex_file_alignment_);
DO_STAT(size_executable_offset_alignment_);
@@ -1683,12 +1683,12 @@
uint32_t alignment_padding = aligned_offset - relative_offset; \
out->Seek(alignment_padding, kSeekCurrent); \
size_trampoline_alignment_ += alignment_padding; \
- if (!WriteData(out, field->data(), field->size())) { \
+ if (!WriteData(out, (field)->data(), (field)->size())) { \
PLOG(ERROR) << "Failed to write " # field " to " << out->GetLocation(); \
return false; \
} \
- size_ ## field += field->size(); \
- relative_offset += alignment_padding + field->size(); \
+ size_ ## field += (field)->size(); \
+ relative_offset += alignment_padding + (field)->size(); \
DCHECK_OFFSET(); \
} while (false)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e010662..7ddd677 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -59,7 +59,8 @@
static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
-#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
class NullCheckSlowPathARM : public SlowPathCode {
@@ -674,7 +675,8 @@
};
#undef __
-#define __ down_cast<ArmAssembler*>(GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<ArmAssembler*>(GetAssembler())-> // NOLINT
inline Condition ARMCondition(IfCondition cond) {
switch (cond) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 261c04f..362957b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -132,7 +132,8 @@
return ARM64ReturnLocation(return_type);
}
-#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
// Calculate memory accessing operand for save/restore live registers.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index fb50680..c3f425a 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -141,7 +141,8 @@
return MipsReturnLocation(type);
}
-#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
@@ -478,7 +479,8 @@
}
#undef __
-#define __ down_cast<MipsAssembler*>(GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index e67d8d0..bb6df50 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -102,7 +102,8 @@
return Mips64ReturnLocation(type);
}
-#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
@@ -424,7 +425,8 @@
}
#undef __
-#define __ down_cast<Mips64Assembler*>(GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<Mips64Assembler*>(GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()
void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index bdbafcd..b95c806 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -47,7 +47,8 @@
static constexpr int kFakeReturnRegister = Register(8);
-#define __ down_cast<X86Assembler*>(codegen->GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<X86Assembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86WordSize, x).Int32Value()
class NullCheckSlowPathX86 : public SlowPathCode {
@@ -691,7 +692,8 @@
};
#undef __
-#define __ down_cast<X86Assembler*>(GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<X86Assembler*>(GetAssembler())-> // NOLINT
inline Condition X86Condition(IfCondition cond) {
switch (cond) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 30eca2c..054891b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -51,7 +51,8 @@
static constexpr int kC2ConditionMask = 0x400;
-#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, x).Int32Value()
class NullCheckSlowPathX86_64 : public SlowPathCode {
@@ -710,7 +711,8 @@
};
#undef __
-#define __ down_cast<X86_64Assembler*>(GetAssembler())->
+// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
+#define __ down_cast<X86_64Assembler*>(GetAssembler())-> // NOLINT
inline Condition X86_64IntegerCondition(IfCondition cond) {
switch (cond) {
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 214250f..83a5127 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -165,7 +165,7 @@
void Set##name() { SetBit(k##name); } \
bool Get##name() const { return IsBitSet(k##name); } \
private: \
-static constexpr size_t k##name = bit + kNumberOfGenericOptimizations
+static constexpr size_t k##name = (bit) + kNumberOfGenericOptimizations
class StringEqualsOptimizations : public IntrinsicOptimizations {
public:
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index cb274dc..9f6f453 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1450,25 +1450,6 @@
class_linker->RegisterDexFile(*dex_file, Runtime::Current()->GetLinearAlloc())));
}
- /*
- * If we're not in interpret-only or verify-none or verify-at-runtime or verify-profile mode,
- * go ahead and compile small applications. Don't bother to check if we're doing the image.
- */
- if (!IsBootImage() &&
- compiler_options_->IsCompilationEnabled() &&
- compiler_kind_ == Compiler::kQuick) {
- size_t num_methods = 0;
- for (size_t i = 0; i != dex_files_.size(); ++i) {
- const DexFile* dex_file = dex_files_[i];
- CHECK(dex_file != nullptr);
- num_methods += dex_file->NumMethodIds();
- }
- if (num_methods <= compiler_options_->GetNumDexMethodsThreshold()) {
- compiler_options_->SetCompilerFilter(CompilerFilter::kSpeed);
- VLOG(compiler) << "Below method threshold, compiling anyways";
- }
- }
-
return true;
}
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index eddd172..48bec73 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -19,8 +19,8 @@
#include "asm_support.h"
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 64 + 4*8
-#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 64 + 4*8
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 176 + 4*8
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE (64 + 4*8)
+#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE (64 + 4*8)
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE (176 + 4*8)
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
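(Editor's note, not part of the patch: the parentheses added above matter because these frame-size macros expand into larger arithmetic expressions at their use sites, where operator precedence can silently change the value. A minimal, hypothetical C++ illustration of the failure mode:)

    #include <cstdio>

    #define FRAME_SIZE_UNPARENTHESIZED 64 + 4*8    // old form
    #define FRAME_SIZE_PARENTHESIZED   (64 + 4*8)  // new form

    int main() {
      // A use site that scales the frame size: the unparenthesized form
      // expands to 2 * 64 + 4*8 == 160 instead of the intended 2 * 96 == 192.
      std::printf("%d %d\n", 2 * FRAME_SIZE_UNPARENTHESIZED, 2 * FRAME_SIZE_PARENTHESIZED);
      return 0;
    }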
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index d911497..98d3345 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -122,21 +122,21 @@
#define FIELD_GET(object, type) \
DCHECK_EQ(Primitive::kPrim ## type, GetTypeAsPrimitiveType()) << PrettyField(this); \
- DCHECK(object != nullptr) << PrettyField(this); \
- DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \
+ DCHECK((object) != nullptr) << PrettyField(this); \
+ DCHECK(!IsStatic() || ((object) == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \
if (UNLIKELY(IsVolatile())) { \
- return object->GetField ## type ## Volatile(GetOffset()); \
+ return (object)->GetField ## type ## Volatile(GetOffset()); \
} \
- return object->GetField ## type(GetOffset());
+ return (object)->GetField ## type(GetOffset());
#define FIELD_SET(object, type, value) \
DCHECK_EQ(Primitive::kPrim ## type, GetTypeAsPrimitiveType()) << PrettyField(this); \
- DCHECK(object != nullptr) << PrettyField(this); \
- DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \
+ DCHECK((object) != nullptr) << PrettyField(this); \
+ DCHECK(!IsStatic() || ((object) == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \
if (UNLIKELY(IsVolatile())) { \
- object->SetField ## type ## Volatile<kTransactionActive>(GetOffset(), value); \
+ (object)->SetField ## type ## Volatile<kTransactionActive>(GetOffset(), value); \
} else { \
- object->SetField ## type<kTransactionActive>(GetOffset(), value); \
+ (object)->SetField ## type<kTransactionActive>(GetOffset(), value); \
}
inline uint8_t ArtField::GetBoolean(mirror::Object* object) {
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 21725d3..8eb3742 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -396,10 +396,10 @@
#define THREAD_CHECKPOINT_REQUEST 2
ADD_TEST_EQ(THREAD_CHECKPOINT_REQUEST, static_cast<int32_t>(art::kCheckpointRequest))
-#define JIT_CHECK_OSR -1
+#define JIT_CHECK_OSR (-1)
ADD_TEST_EQ(JIT_CHECK_OSR, static_cast<int32_t>(art::jit::kJitCheckForOSR))
-#define JIT_HOTNESS_DISABLE -2
+#define JIT_HOTNESS_DISABLE (-2)
ADD_TEST_EQ(JIT_HOTNESS_DISABLE, static_cast<int32_t>(art::jit::kJitHotnessDisabled))
#if defined(__cplusplus)
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index e9e97b8..6323eee 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -140,11 +140,11 @@
// Helper for CHECK_STRxx(s1,s2) macros.
#define CHECK_STROP(s1, s2, sense) \
- if (UNLIKELY((strcmp(s1, s2) == 0) != sense)) \
+ if (UNLIKELY((strcmp(s1, s2) == 0) != (sense))) \
LOG(::art::FATAL) << "Check failed: " \
- << "\"" << s1 << "\"" \
- << (sense ? " == " : " != ") \
- << "\"" << s2 << "\""
+ << "\"" << (s1) << "\"" \
+ << ((sense) ? " == " : " != ") \
+ << "\"" << (s2) << "\""
// Check for string (const char*) equality between s1 and s2, LOG(FATAL) if not.
#define CHECK_STREQ(s1, s2) CHECK_STROP(s1, s2, true)
@@ -156,7 +156,7 @@
int rc = call args; \
if (rc != 0) { \
errno = rc; \
- PLOG(::art::FATAL) << # call << " failed for " << what; \
+ PLOG(::art::FATAL) << # call << " failed for " << (what); \
} \
} while (false)
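(Editor's note, not part of the patch: the parentheses added around what and sense guard against low-precedence expressions at the call site, because operator<< binds tighter than ?:. A small, hypothetical sketch of what can go wrong without them:)

    #include <iostream>

    // Simplified stand-ins for the logging macros, for illustration only.
    #define REPORT_UNSAFE(what) std::cout << "failed for " << what
    #define REPORT_SAFE(what)   std::cout << "failed for " << (what)

    int main() {
      bool for_read = false;
      // Expands to: (std::cout << "failed for " << for_read) ? " read lock" : " write lock";
      // The stream becomes the ternary condition and the intended text is discarded,
      // so this prints "failed for 0".
      REPORT_UNSAFE(for_read ? " read lock" : " write lock");
      std::cout << "\n";
      // Expands as intended and prints "failed for  write lock".
      REPORT_SAFE(for_read ? " read lock" : " write lock");
      std::cout << "\n";
      return 0;
    }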
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 7a293c7..3c43253 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -75,7 +75,7 @@
ALWAYS_INLINE void* operator new(size_t, void* ptr) noexcept { return ptr; } \
ALWAYS_INLINE void operator delete(void*, void*) noexcept { } \
private: \
- void* operator new(size_t) = delete
+ void* operator new(size_t) = delete // NOLINT
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
@@ -135,7 +135,7 @@
#define ARRAYSIZE_UNSAFE(a) \
((sizeof(a) / sizeof(*(a))) / static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
-#define SIZEOF_MEMBER(t, f) sizeof((reinterpret_cast<t*>(4096))->f)
+#define SIZEOF_MEMBER(t, f) sizeof((reinterpret_cast<t*>(4096))->f) // NOLINT
#define OFFSETOF_MEMBER(t, f) \
(reinterpret_cast<uintptr_t>(&reinterpret_cast<t*>(16)->f) - static_cast<uintptr_t>(16u)) // NOLINT
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 620bf9c..71b238b 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -971,7 +971,7 @@
instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
#define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
- if (new_level >= current_lock_level) { \
+ if ((new_level) >= current_lock_level) { \
/* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
fprintf(stderr, "New local level %d is not less than current level %d\n", \
new_level, current_lock_level); \
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 639f913..96fa53c 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -2429,19 +2429,20 @@
Primitive::kPrimDouble));
}
+// NOLINT added to avoid wrong warning/fix from clang-tidy.
#define PRIMITIVE_ARRAY_FUNCTIONS(ctype, name, ptype) \
- static ctype* Get##name##ArrayElements(JNIEnv* env, ctype##Array array, jboolean* is_copy) { \
- return reinterpret_cast<ctype*>( \
+ static ctype* Get##name##ArrayElements(JNIEnv* env, ctype##Array array, jboolean* is_copy) { /* NOLINT */ \
+ return reinterpret_cast<ctype*>( /* NOLINT */ \
GetPrimitiveArrayElements(__FUNCTION__, ptype, env, array, is_copy)); \
} \
\
- static void Release##name##ArrayElements(JNIEnv* env, ctype##Array array, ctype* elems, \
+ static void Release##name##ArrayElements(JNIEnv* env, ctype##Array array, ctype* elems, /* NOLINT */ \
jint mode) { \
ReleasePrimitiveArrayElements(__FUNCTION__, ptype, env, array, elems, mode); \
} \
\
static void Get##name##ArrayRegion(JNIEnv* env, ctype##Array array, jsize start, jsize len, \
- ctype* buf) { \
+ ctype* buf) { /* NOLINT */ \
GetPrimitiveArrayRegion(__FUNCTION__, ptype, env, array, start, len, buf); \
} \
\
diff --git a/runtime/compiler_filter.cc b/runtime/compiler_filter.cc
index d617caf..dc197c10 100644
--- a/runtime/compiler_filter.cc
+++ b/runtime/compiler_filter.cc
@@ -20,7 +20,7 @@
namespace art {
-bool CompilerFilter::IsCompilationEnabled(Filter filter) {
+bool CompilerFilter::IsBytecodeCompilationEnabled(Filter filter) {
switch (filter) {
case CompilerFilter::kVerifyNone:
case CompilerFilter::kVerifyAtRuntime:
@@ -39,6 +39,25 @@
UNREACHABLE();
}
+bool CompilerFilter::IsJniCompilationEnabled(Filter filter) {
+ switch (filter) {
+ case CompilerFilter::kVerifyNone:
+ case CompilerFilter::kVerifyAtRuntime: return false;
+
+ case CompilerFilter::kVerifyProfile:
+ case CompilerFilter::kInterpretOnly:
+ case CompilerFilter::kSpaceProfile:
+ case CompilerFilter::kSpace:
+ case CompilerFilter::kBalanced:
+ case CompilerFilter::kTime:
+ case CompilerFilter::kSpeedProfile:
+ case CompilerFilter::kSpeed:
+ case CompilerFilter::kEverythingProfile:
+ case CompilerFilter::kEverything: return true;
+ }
+ UNREACHABLE();
+}
+
bool CompilerFilter::IsVerificationEnabled(Filter filter) {
switch (filter) {
case CompilerFilter::kVerifyNone:
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index e8d74dd..37631cc 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -30,10 +30,10 @@
// Note: Order here matters. Later filter choices are considered "as good
// as" earlier filter choices.
enum Filter {
- kVerifyNone, // Skip verification and compile nothing except JNI stubs.
- kVerifyAtRuntime, // Only compile JNI stubs and verify at runtime.
- kVerifyProfile, // Verify only the classes in the profile.
- kInterpretOnly, // Verify, and compile only JNI stubs.
+ kVerifyNone, // Skip verification but mark all classes as verified anyway.
+  kVerifyAtRuntime,     // Delay verification to runtime, do not compile anything.
+ kVerifyProfile, // Verify only the classes in the profile, compile only JNI stubs.
+ kInterpretOnly, // Verify everything, compile only JNI stubs.
kTime, // Compile methods, but minimize compilation time.
kSpaceProfile, // Maximize space savings based on profile.
kSpace, // Maximize space savings.
@@ -47,8 +47,12 @@
static const Filter kDefaultCompilerFilter = kSpeed;
// Returns true if an oat file with this compiler filter contains
- // compiled executable code.
- static bool IsCompilationEnabled(Filter filter);
+ // compiled executable code for bytecode.
+ static bool IsBytecodeCompilationEnabled(Filter filter);
+
+ // Returns true if an oat file with this compiler filter contains
+ // compiled executable code for JNI methods.
+ static bool IsJniCompilationEnabled(Filter filter);
// Returns true if this compiler filter requires running verification.
static bool IsVerificationEnabled(Filter filter);
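(Editor's note, not part of the patch: the split of IsCompilationEnabled() makes the behaviour of the intermediate filters explicit, e.g. interpret-only still compiles JNI stubs but no bytecode. Below is a reduced, self-contained model of the two predicates, mirroring the switch statements in the compiler_filter.cc hunk above; only four filters are modelled and the names are simplified, so this is a sketch rather than the real ART API:)

    #include <cassert>

    enum class Filter { kVerifyNone, kVerifyAtRuntime, kInterpretOnly, kSpeed };

    // Of the four modelled filters, only kSpeed compiles bytecode.
    bool IsBytecodeCompilationEnabled(Filter f) { return f == Filter::kSpeed; }

    // Everything except kVerifyNone and kVerifyAtRuntime compiles JNI stubs.
    bool IsJniCompilationEnabled(Filter f) {
      return f != Filter::kVerifyNone && f != Filter::kVerifyAtRuntime;
    }

    int main() {
      // interpret-only: no compiled bytecode, but JNI stubs are still compiled.
      // This is the distinction the old IsCompilationEnabled() blurred, e.g. in
      // the CompilerDriver generic-JNI-stub decision earlier in this patch.
      assert(!IsBytecodeCompilationEnabled(Filter::kInterpretOnly));
      assert(IsJniCompilationEnabled(Filter::kInterpretOnly));
      assert(!IsJniCompilationEnabled(Filter::kVerifyAtRuntime));
      assert(IsBytecodeCompilationEnabled(Filter::kSpeed));
      return 0;
    }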
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index ce7f62a..638821b 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -57,7 +57,11 @@
// TODO: move all of the macro functionality into the DexCache class.
class DexFile {
public:
+ // First Dex format version supporting default methods.
static const uint32_t kDefaultMethodsVersion = 37;
+ // First Dex format version enforcing class definition ordering rules.
+ static const uint32_t kClassDefinitionOrderEnforcedVersion = 37;
+
static const uint8_t kDexMagic[];
static constexpr size_t kNumDexVersions = 2;
static constexpr size_t kDexVersionLen = 4;
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index bbffbbb..1d24349 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -101,31 +101,31 @@
}
// Helper macro to load string and return false on error.
-#define LOAD_STRING(var, idx, error) \
- const char* var = CheckLoadStringByIdx(idx, error); \
- if (UNLIKELY(var == nullptr)) { \
- return false; \
+#define LOAD_STRING(var, idx, error) \
+ const char* (var) = CheckLoadStringByIdx(idx, error); \
+ if (UNLIKELY((var) == nullptr)) { \
+ return false; \
}
// Helper macro to load string by type idx and return false on error.
-#define LOAD_STRING_BY_TYPE(var, type_idx, error) \
- const char* var = CheckLoadStringByTypeIdx(type_idx, error); \
- if (UNLIKELY(var == nullptr)) { \
- return false; \
+#define LOAD_STRING_BY_TYPE(var, type_idx, error) \
+ const char* (var) = CheckLoadStringByTypeIdx(type_idx, error); \
+ if (UNLIKELY((var) == nullptr)) { \
+ return false; \
}
// Helper macro to load method id. Return last parameter on error.
-#define LOAD_METHOD(var, idx, error_string, error_stmt) \
- const DexFile::MethodId* var = CheckLoadMethodId(idx, error_string); \
- if (UNLIKELY(var == nullptr)) { \
- error_stmt; \
+#define LOAD_METHOD(var, idx, error_string, error_stmt) \
+ const DexFile::MethodId* (var) = CheckLoadMethodId(idx, error_string); \
+ if (UNLIKELY((var) == nullptr)) { \
+ error_stmt; \
}
// Helper macro to load method id. Return last parameter on error.
-#define LOAD_FIELD(var, idx, fmt, error_stmt) \
- const DexFile::FieldId* var = CheckLoadFieldId(idx, fmt); \
- if (UNLIKELY(var == nullptr)) { \
- error_stmt; \
+#define LOAD_FIELD(var, idx, fmt, error_stmt) \
+ const DexFile::FieldId* (var) = CheckLoadFieldId(idx, fmt); \
+ if (UNLIKELY((var) == nullptr)) { \
+ error_stmt; \
}
bool DexFileVerifier::Verify(const DexFile* dex_file, const uint8_t* begin, size_t size,
@@ -1956,6 +1956,31 @@
}
if (item->superclass_idx_ != DexFile::kDexNoIndex16) {
+ if (header_->GetVersion() >= DexFile::kClassDefinitionOrderEnforcedVersion) {
+ // Check that a class does not inherit from itself directly (by having
+ // the same type idx as its super class).
+ if (UNLIKELY(item->superclass_idx_ == item->class_idx_)) {
+ ErrorStringPrintf("Class with same type idx as its superclass: '%d'", item->class_idx_);
+ return false;
+ }
+
+ // Check that a class is defined after its super class (if the
+ // latter is defined in the same Dex file).
+ const DexFile::ClassDef* superclass_def = dex_file_->FindClassDef(item->superclass_idx_);
+ if (superclass_def != nullptr) {
+ // The superclass is defined in this Dex file.
+ if (superclass_def > item) {
+ // ClassDef item for super class appearing after the class' ClassDef item.
+ ErrorStringPrintf("Invalid class definition ordering:"
+ " class with type idx: '%d' defined before"
+ " superclass with type idx: '%d'",
+ item->class_idx_,
+ item->superclass_idx_);
+ return false;
+ }
+ }
+ }
+
LOAD_STRING_BY_TYPE(superclass_descriptor, item->superclass_idx_,
"inter_class_def_item superclass_idx")
if (UNLIKELY(!IsValidDescriptor(superclass_descriptor) || superclass_descriptor[0] != 'L')) {
@@ -1964,12 +1989,39 @@
}
}
+ // Check interfaces.
const DexFile::TypeList* interfaces = dex_file_->GetInterfacesList(*item);
if (interfaces != nullptr) {
uint32_t size = interfaces->Size();
-
- // Ensure that all interfaces refer to classes (not arrays or primitives).
for (uint32_t i = 0; i < size; i++) {
+ if (header_->GetVersion() >= DexFile::kClassDefinitionOrderEnforcedVersion) {
+ // Check that a class does not implement itself directly (by having the
+ // same type idx as one of its immediate implemented interfaces).
+ if (UNLIKELY(interfaces->GetTypeItem(i).type_idx_ == item->class_idx_)) {
+ ErrorStringPrintf("Class with same type idx as implemented interface: '%d'",
+ item->class_idx_);
+ return false;
+ }
+
+ // Check that a class is defined after the interfaces it implements
+ // (if they are defined in the same Dex file).
+ const DexFile::ClassDef* interface_def =
+ dex_file_->FindClassDef(interfaces->GetTypeItem(i).type_idx_);
+ if (interface_def != nullptr) {
+ // The interface is defined in this Dex file.
+ if (interface_def > item) {
+ // ClassDef item for interface appearing after the class' ClassDef item.
+ ErrorStringPrintf("Invalid class definition ordering:"
+ " class with type idx: '%d' defined before"
+ " implemented interface with type idx: '%d'",
+ item->class_idx_,
+ interfaces->GetTypeItem(i).type_idx_);
+ return false;
+ }
+ }
+ }
+
+ // Ensure that the interface refers to a class (not an array nor a primitive type).
LOAD_STRING_BY_TYPE(inf_descriptor, interfaces->GetTypeItem(i).type_idx_,
"inter_class_def_item interface type_idx")
if (UNLIKELY(!IsValidDescriptor(inf_descriptor) || inf_descriptor[0] != 'L')) {
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 3741c1e..4e53914 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -184,6 +184,12 @@
return dex_file;
}
+// To generate a base64 encoded Dex file (such as kGoodTestDex, below)
+// from Smali files, use:
+//
+// smali -o classes.dex class1.smali [class2.smali ...]
+// base64 classes.dex >classes.dex.base64
+
// For reference.
static const char kGoodTestDex[] =
"ZGV4CjAzNQDrVbyVkxX1HljTznNf95AglkUAhQuFtmKkAgAAcAAAAHhWNBIAAAAAAAAAAAQCAAAN"
@@ -1521,4 +1527,174 @@
}
}
+// To generate a base64 encoded Dex file version 037 from Smali files, use:
+//
+// smali --api-level 24 -o classes.dex class1.smali [class2.smali ...]
+// base64 classes.dex >classes.dex.base64
+
+// Dex file version 037 generated from:
+//
+// .class public LB28685551;
+// .super LB28685551;
+
+static const char kClassExtendsItselfTestDex[] =
+ "ZGV4CjAzNwDeGbgRg1kb6swszpcTWrrOAALB++F4OPT0AAAAcAAAAHhWNBIAAAAAAAAAAKgAAAAB"
+ "AAAAcAAAAAEAAAB0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAHgAAABcAAAAmAAAAJgA"
+ "AAAAAAAAAAAAAAEAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAALTEIyODY4NTU1MTsAAAAABgAA"
+ "AAAAAAABAAAAAAAAAAEAAAABAAAAcAAAAAIAAAABAAAAdAAAAAYAAAABAAAAeAAAAAIgAAABAAAA"
+ "mAAAAAAQAAABAAAAqAAAAA==";
+
+TEST_F(DexFileVerifierTest, ClassExtendsItself) {
+ VerifyModification(
+ kClassExtendsItselfTestDex,
+ "class_extends_itself",
+ [](DexFile* dex_file ATTRIBUTE_UNUSED) { /* empty */ },
+ "Class with same type idx as its superclass: '0'");
+}
+
+// Dex file version 037 generated from:
+//
+// .class public LFoo;
+// .super LBar;
+//
+// and:
+//
+// .class public LBar;
+// .super LFoo;
+
+static const char kClassesExtendOneAnotherTestDex[] =
+ "ZGV4CjAzNwBXHSrwpDMwRBkg+L+JeQCuFNRLhQ86duEcAQAAcAAAAHhWNBIAAAAAAAAAANAAAAAC"
+ "AAAAcAAAAAIAAAB4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAIAAAABcAAAAwAAAAMAA"
+ "AADHAAAAAAAAAAEAAAABAAAAAQAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAAAAAABAAAAAQAA"
+ "AAAAAAD/////AAAAAAAAAAAAAAAABUxCYXI7AAVMRm9vOwAAAAYAAAAAAAAAAQAAAAAAAAABAAAA"
+ "AgAAAHAAAAACAAAAAgAAAHgAAAAGAAAAAgAAAIAAAAACIAAAAgAAAMAAAAAAEAAAAQAAANAAAAA=";
+
+TEST_F(DexFileVerifierTest, ClassesExtendOneAnother) {
+ VerifyModification(
+ kClassesExtendOneAnotherTestDex,
+ "classes_extend_one_another",
+ [](DexFile* dex_file ATTRIBUTE_UNUSED) { /* empty */ },
+ "Invalid class definition ordering: class with type idx: '1' defined before"
+ " superclass with type idx: '0'");
+}
+
+// Dex file version 037 generated from:
+//
+// .class public LAll;
+// .super LYour;
+//
+// and:
+//
+// .class public LYour;
+// .super LBase;
+//
+// and:
+//
+// .class public LBase;
+// .super LAll;
+
+static const char kCircularClassInheritanceTestDex[] =
+ "ZGV4CjAzNwBMJxgP0SJz6oLXnKfl+J7lSEORLRwF5LNMAQAAcAAAAHhWNBIAAAAAAAAAAAABAAAD"
+ "AAAAcAAAAAMAAAB8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAIgAAABkAAAA6AAAAOgA"
+ "AADvAAAA9wAAAAAAAAABAAAAAgAAAAEAAAABAAAAAAAAAAAAAAD/////AAAAAAAAAAAAAAAAAgAA"
+ "AAEAAAABAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAAAAAAA/////wAAAAAAAAAA"
+ "AAAAAAVMQWxsOwAGTEJhc2U7AAZMWW91cjsAAAYAAAAAAAAAAQAAAAAAAAABAAAAAwAAAHAAAAAC"
+ "AAAAAwAAAHwAAAAGAAAAAwAAAIgAAAACIAAAAwAAAOgAAAAAEAAAAQAAAAABAAA=";
+
+TEST_F(DexFileVerifierTest, CircularClassInheritance) {
+ VerifyModification(
+ kCircularClassInheritanceTestDex,
+ "circular_class_inheritance",
+ [](DexFile* dex_file ATTRIBUTE_UNUSED) { /* empty */ },
+ "Invalid class definition ordering: class with type idx: '1' defined before"
+ " superclass with type idx: '0'");
+}
+
+// Dex file version 037 generated from:
+//
+// .class public abstract interface LInterfaceImplementsItself;
+// .super Ljava/lang/Object;
+// .implements LInterfaceImplementsItself;
+
+static const char kInterfaceImplementsItselfTestDex[] =
+ "ZGV4CjAzNwCKKrjatp8XbXl5S/bEVJnqaBhjZkQY4440AQAAcAAAAHhWNBIAAAAAAAAAANwAAAAC"
+ "AAAAcAAAAAIAAAB4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAIAAAACUAAAAoAAAAKAA"
+ "AAC9AAAAAAAAAAEAAAAAAAAAAQYAAAEAAADUAAAA/////wAAAAAAAAAAAAAAABtMSW50ZXJmYWNl"
+ "SW1wbGVtZW50c0l0c2VsZjsAEkxqYXZhL2xhbmcvT2JqZWN0OwAAAAABAAAAAAAAAAcAAAAAAAAA"
+ "AQAAAAAAAAABAAAAAgAAAHAAAAACAAAAAgAAAHgAAAAGAAAAAQAAAIAAAAACIAAAAgAAAKAAAAAB"
+ "EAAAAQAAANQAAAAAEAAAAQAAANwAAAA=";
+
+TEST_F(DexFileVerifierTest, InterfaceImplementsItself) {
+ VerifyModification(
+ kInterfaceImplementsItselfTestDex,
+ "interface_implements_itself",
+ [](DexFile* dex_file ATTRIBUTE_UNUSED) { /* empty */ },
+ "Class with same type idx as implemented interface: '0'");
+}
+
+// Dex file version 037 generated from:
+//
+// .class public abstract interface LPing;
+// .super Ljava/lang/Object;
+// .implements LPong;
+//
+// and:
+//
+// .class public abstract interface LPong;
+// .super Ljava/lang/Object;
+// .implements LPing;
+
+static const char kInterfacesImplementOneAnotherTestDex[] =
+ "ZGV4CjAzNwD0Kk9sxlYdg3Dy1Cff0gQCuJAQfEP6ohZUAQAAcAAAAHhWNBIAAAAAAAAAAPwAAAAD"
+ "AAAAcAAAAAMAAAB8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAIgAAACMAAAAyAAAAMgA"
+ "AADQAAAA2AAAAAAAAAABAAAAAgAAAAEAAAABBgAAAgAAAOwAAAD/////AAAAAAAAAAAAAAAAAAAA"
+ "AAEGAAACAAAA9AAAAP////8AAAAAAAAAAAAAAAAGTFBpbmc7AAZMUG9uZzsAEkxqYXZhL2xhbmcv"
+ "T2JqZWN0OwABAAAAAAAAAAEAAAABAAAABwAAAAAAAAABAAAAAAAAAAEAAAADAAAAcAAAAAIAAAAD"
+ "AAAAfAAAAAYAAAACAAAAiAAAAAIgAAADAAAAyAAAAAEQAAACAAAA7AAAAAAQAAABAAAA/AAAAA==";
+
+TEST_F(DexFileVerifierTest, InterfacesImplementOneAnother) {
+ VerifyModification(
+ kInterfacesImplementOneAnotherTestDex,
+ "interfaces_implement_one_another",
+ [](DexFile* dex_file ATTRIBUTE_UNUSED) { /* empty */ },
+ "Invalid class definition ordering: class with type idx: '1' defined before"
+ " implemented interface with type idx: '0'");
+}
+
+// Dex file version 037 generated from:
+//
+// .class public abstract interface LA;
+// .super Ljava/lang/Object;
+// .implements LB;
+//
+// and:
+//
+// .class public abstract interface LB;
+// .super Ljava/lang/Object;
+// .implements LC;
+//
+// and:
+//
+// .class public abstract interface LC;
+// .super Ljava/lang/Object;
+// .implements LA;
+
+static const char kCircularInterfaceImplementationTestDex[] =
+ "ZGV4CjAzNwCzKmD5Fol6XAU6ichYHcUTIP7Z7MdTcEmEAQAAcAAAAHhWNBIAAAAAAAAAACwBAAAE"
+ "AAAAcAAAAAQAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAJAAAACUAAAA8AAAAPAA"
+ "AAD1AAAA+gAAAP8AAAAAAAAAAQAAAAIAAAADAAAAAgAAAAEGAAADAAAAHAEAAP////8AAAAAAAAA"
+ "AAAAAAABAAAAAQYAAAMAAAAUAQAA/////wAAAAAAAAAAAAAAAAAAAAABBgAAAwAAACQBAAD/////"
+ "AAAAAAAAAAAAAAAAA0xBOwADTEI7AANMQzsAEkxqYXZhL2xhbmcvT2JqZWN0OwAAAQAAAAIAAAAB"
+ "AAAAAAAAAAEAAAABAAAABwAAAAAAAAABAAAAAAAAAAEAAAAEAAAAcAAAAAIAAAAEAAAAgAAAAAYA"
+ "AAADAAAAkAAAAAIgAAAEAAAA8AAAAAEQAAADAAAAFAEAAAAQAAABAAAALAEAAA==";
+
+TEST_F(DexFileVerifierTest, CircularInterfaceImplementation) {
+ VerifyModification(
+ kCircularInterfaceImplementationTestDex,
+ "circular_interface_implementation",
+ [](DexFile* dex_file ATTRIBUTE_UNUSED) { /* empty */ },
+ "Invalid class definition ordering: class with type idx: '2' defined before"
+ " implemented interface with type idx: '0'");
+}
+
} // namespace art
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 3f62124..300e618 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -69,11 +69,11 @@
int const Instruction::kInstructionSizeInCodeUnits[] = {
#define INSTRUCTION_SIZE(opcode, c, p, format, r, i, a, v) \
- ((opcode == NOP) ? -1 : \
- ((format >= k10x) && (format <= k10t)) ? 1 : \
- ((format >= k20t) && (format <= k25x)) ? 2 : \
- ((format >= k32x) && (format <= k3rc)) ? 3 : \
- (format == k51l) ? 5 : -1),
+ (((opcode) == NOP) ? -1 : \
+ (((format) >= k10x) && ((format) <= k10t)) ? 1 : \
+ (((format) >= k20t) && ((format) <= k25x)) ? 2 : \
+ (((format) >= k32x) && ((format) <= k3rc)) ? 3 : \
+ ((format) == k51l) ? 5 : -1),
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_SIZE)
#undef DEX_INSTRUCTION_LIST
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 035230e..89c3db6 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -80,7 +80,7 @@
};
enum Code { // private marker to avoid generate-operator-out.py from processing.
-#define INSTRUCTION_ENUM(opcode, cname, p, f, r, i, a, v) cname = opcode,
+#define INSTRUCTION_ENUM(opcode, cname, p, f, r, i, a, v) cname = (opcode),
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_ENUM)
#undef DEX_INSTRUCTION_LIST
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 4e4f851..c3b3ac0 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -32,7 +32,7 @@
uint32_t type_idx, ArtMethod* method, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+ if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, sizeof(void*)); \
if (LIKELY(klass != nullptr && klass->IsInitialized() && !klass->IsFinalizable())) { \
size_t byte_count = klass->GetObjectSize(); \
@@ -59,7 +59,7 @@
mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+ if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
size_t byte_count = klass->GetObjectSize(); \
byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
@@ -85,7 +85,7 @@
mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+ if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
mirror::Object* obj; \
@@ -136,7 +136,7 @@
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
- if (!instrumented_bool) { \
+ if (!(instrumented_bool)) { \
return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, false, allocator_type); \
} else { \
return CheckAndAllocArrayFromCodeInstrumented(type_idx, component_count, method, self, false, allocator_type); \
@@ -146,7 +146,7 @@
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
- if (!instrumented_bool) { \
+ if (!(instrumented_bool)) { \
return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, true, allocator_type); \
} else { \
return CheckAndAllocArrayFromCodeInstrumented(type_idx, component_count, method, self, true, allocator_type); \
@@ -170,7 +170,7 @@
return mirror::String::AllocFromCharArray<instrumented_bool>(self, char_count, handle_array, \
offset, allocator_type); \
} \
-extern "C" mirror::String* artAllocStringFromStringFromCode##suffix##suffix2( \
+extern "C" mirror::String* artAllocStringFromStringFromCode##suffix##suffix2( /* NOLINT */ \
mirror::String* string, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
StackHandleScope<1> hs(self); \
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 26f5ad3..64fa434 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -28,6 +28,47 @@
namespace gc {
namespace collector {
+inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegionOrImmuneSpace(
+ mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
+ // For the Baker-style RB, in a rare case, we could incorrectly change the object from white
+ // to gray even though the object has already been marked through. This happens if a mutator
+ // thread gets preempted before the AtomicSetReadBarrierPointer below, GC marks through the
+ // object (changes it from white to gray and back to white), and the thread runs and
+ // incorrectly changes it from white to gray. We need to detect such "false gray" cases and
+ // change the objects back to white at the end of marking.
+ if (kUseBakerReadBarrier) {
+ // Test the bitmap first to reduce the chance of false gray cases.
+ if (bitmap->Test(ref)) {
+ return ref;
+ }
+ }
+ // This may or may not succeed, which is ok because the object may already be gray.
+ bool cas_success = false;
+ if (kUseBakerReadBarrier) {
+ cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
+ ReadBarrier::GrayPtr());
+ }
+ if (bitmap->AtomicTestAndSet(ref)) {
+ // Already marked.
+ if (kUseBakerReadBarrier &&
+ cas_success &&
+ // The object could be white here if a thread gets preempted after a success at the
+ // above AtomicSetReadBarrierPointer, GC has marked through it, and the thread runs up
+ // to this point.
+ ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ // Register a "false-gray" object to change it from gray to white at the end of marking.
+ PushOntoFalseGrayStack(ref);
+ }
+ } else {
+ // Newly marked.
+ if (kUseBakerReadBarrier) {
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
+ }
+ PushOntoMarkStack(ref);
+ }
+ return ref;
+}
+
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
if (from_ref == nullptr) {
return nullptr;
@@ -68,21 +109,7 @@
return to_ref;
}
case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
- // This may or may not succeed, which is ok.
- if (kUseBakerReadBarrier) {
- from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
- }
- mirror::Object* to_ref = from_ref;
- if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
- // Already marked.
- } else {
- // Newly marked.
- if (kUseBakerReadBarrier) {
- DCHECK_EQ(to_ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
- }
- PushOntoMarkStack(to_ref);
- }
- return to_ref;
+ return MarkUnevacFromSpaceRegionOrImmuneSpace(from_ref, region_space_bitmap_);
}
case space::RegionSpace::RegionType::kRegionTypeNone:
return MarkNonMoving(from_ref);
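(Editor's note, not part of the patch: the comment in MarkUnevacFromSpaceRegionOrImmuneSpace above describes a race between a mutator's read-barrier slow path and the GC. A single-threaded replay of that interleaving, with plain variables standing in for the atomic read-barrier pointer and the mark bitmap, shows why the extra bookkeeping is needed:)

    #include <cassert>

    enum class RbPtr { kWhite, kGray };

    int main() {
      RbPtr rb_ptr = RbPtr::kWhite;  // object's read barrier pointer
      bool bitmap_bit = false;       // object's bit in the unevac-from-space bitmap
      bool false_gray_recorded = false;

      // Mutator thread, in the read barrier slow path: the bitmap test sees the
      // object as unmarked, then the thread is preempted right before its
      // AtomicSetReadBarrierPointer.
      assert(!bitmap_bit);

      // GC thread: marks the object through (white -> gray, scan, gray -> white).
      rb_ptr = RbPtr::kGray;
      bitmap_bit = true;
      rb_ptr = RbPtr::kWhite;

      // Mutator thread resumes: its CAS from white to gray succeeds even though
      // the object has already been marked through -- the "false gray".
      bool cas_success = (rb_ptr == RbPtr::kWhite);
      if (cas_success) rb_ptr = RbPtr::kGray;

      // AtomicTestAndSet reports "already marked", so the object is registered to
      // be turned back to white at the end of marking
      // (PushOntoFalseGrayStack / ProcessFalseGrayStack in this patch).
      if (bitmap_bit && cas_success && rb_ptr == RbPtr::kGray) {
        false_gray_recorded = true;
      }
      assert(false_gray_recorded);

      // End of marking: ProcessFalseGrayStack() flips it back to white.
      rb_ptr = RbPtr::kWhite;
      assert(rb_ptr == RbPtr::kWhite);
      return 0;
    }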
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index d393f0b..3f8f628 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -165,6 +165,10 @@
<< reinterpret_cast<void*>(region_space_->Limit());
}
CheckEmptyMarkStack();
+ if (kIsDebugBuild) {
+ MutexLock mu(Thread::Current(), mark_stack_lock_);
+ CHECK(false_gray_stack_.empty());
+ }
immune_spaces_.Reset();
bytes_moved_.StoreRelaxed(0);
objects_moved_.StoreRelaxed(0);
@@ -247,6 +251,9 @@
}
cc->is_marking_ = true;
cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
+ if (kIsDebugBuild) {
+ cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
+ }
if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
CHECK(Runtime::Current()->IsAotCompiler());
TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
@@ -314,17 +321,7 @@
DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
<< "Immune space object must be already marked";
}
- // This may or may not succeed, which is ok.
- if (kUseBakerReadBarrier) {
- obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
- }
- if (cc_bitmap->AtomicTestAndSet(obj)) {
- // Already marked. Do nothing.
- } else {
- // Newly marked. Set the gray bit and push it onto the mark stack.
- CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
- collector_->PushOntoMarkStack(obj);
- }
+ collector_->MarkUnevacFromSpaceRegionOrImmuneSpace(obj, cc_bitmap);
}
private:
@@ -459,6 +456,9 @@
Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
// Marking is done. Disable marking.
DisableMarking();
+ if (kUseBakerReadBarrier) {
+ ProcessFalseGrayStack();
+ }
CheckEmptyMarkStack();
}
@@ -548,6 +548,32 @@
mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}
+void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
+ CHECK(kUseBakerReadBarrier);
+ DCHECK(ref != nullptr);
+ MutexLock mu(Thread::Current(), mark_stack_lock_);
+ false_gray_stack_.push_back(ref);
+}
+
+void ConcurrentCopying::ProcessFalseGrayStack() {
+ CHECK(kUseBakerReadBarrier);
+ // Change the objects on the false gray stack from gray to white.
+ MutexLock mu(Thread::Current(), mark_stack_lock_);
+ for (mirror::Object* obj : false_gray_stack_) {
+ DCHECK(IsMarked(obj));
+ // The object could be white here if a thread got preempted after a success at the
+ // AtomicSetReadBarrierPointer in Mark(), GC started marking through it (but not finished so
+ // still gray), and the thread ran to register it onto the false gray stack.
+ if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
+ ReadBarrier::WhitePtr());
+ DCHECK(success);
+ }
+ }
+ false_gray_stack_.clear();
+}
+
+
void ConcurrentCopying::IssueEmptyCheckpoint() {
Thread* self = Thread::Current();
EmptyCheckpoint check_point(this);
@@ -655,8 +681,8 @@
return heap_->live_stack_.get();
}
-// The following visitors are that used to verify that there's no
-// references to the from-space left after marking.
+// The following visitors are used to verify that there are no references to the from-space left
+// after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
public:
explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
@@ -670,20 +696,9 @@
}
collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
if (kUseBakerReadBarrier) {
- if (collector_->RegionSpace()->IsInToSpace(ref)) {
- CHECK(ref->GetReadBarrierPointer() == nullptr)
- << "To-space ref " << ref << " " << PrettyTypeOf(ref)
- << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
- } else {
- CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
- (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
- collector_->IsOnAllocStack(ref)))
- << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
- << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
- << " but isn't on the alloc stack (and has white rb_ptr)."
- << " Is it in the non-moving space="
- << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
- }
+ CHECK(ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
+ << "Ref " << ref << " " << PrettyTypeOf(ref)
+ << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
}
}
@@ -749,18 +764,8 @@
ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
obj->VisitReferences(visitor, visitor);
if (kUseBakerReadBarrier) {
- if (collector->RegionSpace()->IsInToSpace(obj)) {
- CHECK(obj->GetReadBarrierPointer() == nullptr)
- << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
- } else {
- CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
- (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
- collector->IsOnAllocStack(obj)))
- << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
- << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
- << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
- << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
- }
+ CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
+ << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
}
}
@@ -1069,7 +1074,6 @@
}
// Scan ref fields.
Scan(to_ref);
- // Mark the gray ref as white or black.
if (kUseBakerReadBarrier) {
DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
<< " " << to_ref << " " << to_ref->GetReadBarrierPointer()
@@ -1079,41 +1083,34 @@
if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
!IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
- // Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier. We
- // will change it to black or white later in ReferenceQueue::DequeuePendingReference().
+ // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
+ // will change it to white later in ReferenceQueue::DequeuePendingReference().
DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
} else {
- // We may occasionally leave a Reference black or white in the queue if its referent happens to
- // be concurrently marked after the Scan() call above has enqueued the Reference, in which case
- // the above IsInToSpace() evaluates to true and we change the color from gray to black or white
- // here in this else block.
+ // We may occasionally leave a reference white in the queue if its referent happens to be
+ // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
+ // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
+ // else block.
if (kUseBakerReadBarrier) {
- if (region_space_->IsInToSpace(to_ref)) {
- // If to-space, change from gray to white.
- bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
- ReadBarrier::GrayPtr(),
- ReadBarrier::WhitePtr());
- DCHECK(success) << "Must succeed as we won the race.";
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
- } else {
- // If non-moving space/unevac from space, change from gray
- // to black. We can't change gray to white because it's not
- // safe to use CAS if two threads change values in opposite
- // directions (A->B and B->A). So, we change it to black to
- // indicate non-moving objects that have been marked
- // through. Note we'd need to change from black to white
- // later (concurrently).
- bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
- ReadBarrier::GrayPtr(),
- ReadBarrier::BlackPtr());
- DCHECK(success) << "Must succeed as we won the race.";
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
- }
+ bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
+ ReadBarrier::GrayPtr(),
+ ReadBarrier::WhitePtr());
+ DCHECK(success) << "Must succeed as we won the race.";
}
}
#else
DCHECK(!kUseBakerReadBarrier);
#endif
+
+ if (region_space_->IsInUnevacFromSpace(to_ref)) {
+ // Add to the live bytes per unevacuated from space. Note this code is always run by the
+ // GC-running thread (no synchronization required).
+ DCHECK(region_space_bitmap_->Test(to_ref));
+ // Disable the read barrier in SizeOf for performance, which is safe.
+ size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
+ size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
+ region_space_->AddLiveBytes(to_ref, alloc_size);
+ }
if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
visitor(to_ref);
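(Editor's note, not part of the patch: the live-bytes accounting added above rounds each surviving object's size up to the region space allocation alignment before adding it, so per-region live-byte counts match allocation granularity. A tiny sketch, assuming an 8-byte alignment since the value of space::RegionSpace::kAlignment is not shown in this hunk:)

    #include <cassert>
    #include <cstddef>

    constexpr size_t kAlignment = 8;  // assumed alignment, for illustration only

    constexpr size_t RoundUp(size_t x, size_t n) {
      return (x + n - 1) & ~(n - 1);  // n must be a power of two
    }

    int main() {
      size_t live_bytes = 0;
      // Two objects surviving in an unevacuated region: 27 and 24 bytes.
      live_bytes += RoundUp(27, kAlignment);  // accounted as 32
      live_bytes += RoundUp(24, kAlignment);  // accounted as 24
      assert(live_bytes == 56);
      return 0;
    }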
@@ -1226,61 +1223,6 @@
RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}
-class ConcurrentCopyingClearBlackPtrsVisitor {
- public:
- explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
- : collector_(cc) {}
- void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
- DCHECK(obj != nullptr);
- DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
- DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
- obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
- DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
- }
-
- private:
- ConcurrentCopying* const collector_;
-};
-
-// Clear the black ptrs in non-moving objects back to white.
-void ConcurrentCopying::ClearBlackPtrs() {
- CHECK(kUseBakerReadBarrier);
- TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
- ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
- for (auto& space : heap_->GetContinuousSpaces()) {
- if (space == region_space_) {
- continue;
- }
- accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
- if (kVerboseMode) {
- LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
- }
- mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
- reinterpret_cast<uintptr_t>(space->Limit()),
- visitor);
- }
- space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
- large_object_space->GetMarkBitmap()->VisitMarkedRange(
- reinterpret_cast<uintptr_t>(large_object_space->Begin()),
- reinterpret_cast<uintptr_t>(large_object_space->End()),
- visitor);
- // Objects on the allocation stack?
- if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
- size_t count = GetAllocationStack()->Size();
- auto* it = GetAllocationStack()->Begin();
- auto* end = GetAllocationStack()->End();
- for (size_t i = 0; i < count; ++i, ++it) {
- CHECK_LT(it, end);
- mirror::Object* obj = it->AsMirrorPtr();
- if (obj != nullptr) {
- // Must have been cleared above.
- CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
- }
- }
- }
-}
-
void ConcurrentCopying::ReclaimPhase() {
TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
if (kVerboseMode) {
@@ -1338,20 +1280,12 @@
}
{
- TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
- ComputeUnevacFromSpaceLiveRatio();
- }
-
- {
TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
region_space_->ClearFromSpace();
}
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- if (kUseBakerReadBarrier) {
- ClearBlackPtrs();
- }
Sweep(false);
SwapBitmaps();
heap_->UnBindBitmaps();
@@ -1373,39 +1307,6 @@
}
}
-class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
- public:
- explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
- : collector_(cc) {}
- void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
- DCHECK(ref != nullptr);
- DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
- DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
- if (kUseBakerReadBarrier) {
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
- // Clear the black ptr.
- ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
- }
- size_t obj_size = ref->SizeOf();
- size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
- collector_->region_space_->AddLiveBytes(ref, alloc_size);
- }
-
- private:
- ConcurrentCopying* const collector_;
-};
-
-// Compute how much live objects are left in regions.
-void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
- region_space_->AssertAllRegionLiveBytesZeroOrCleared();
- ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
- region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
- reinterpret_cast<uintptr_t>(region_space_->Limit()),
- visitor);
-}
-
// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
mirror::Object* ref) {
@@ -1999,19 +1900,7 @@
DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref))
<< "Immune space object must be already marked";
}
- // This may or may not succeed, which is ok.
- if (kUseBakerReadBarrier) {
- ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
- }
- if (cc_bitmap->AtomicTestAndSet(ref)) {
- // Already marked.
- } else {
- // Newly marked.
- if (kUseBakerReadBarrier) {
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
- }
- PushOntoMarkStack(ref);
- }
+ MarkUnevacFromSpaceRegionOrImmuneSpace(ref, cc_bitmap);
} else {
// Use the mark bitmap.
accounting::ContinuousSpaceBitmap* mark_bitmap =
@@ -2024,13 +1913,13 @@
// Already marked.
if (kUseBakerReadBarrier) {
DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
+ ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
}
} else if (is_los && los_bitmap->Test(ref)) {
// Already marked in LOS.
if (kUseBakerReadBarrier) {
DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
+ ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
}
} else {
// Not marked.
@@ -2046,15 +1935,34 @@
DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
}
} else {
+ // For the baker-style RB, we need to handle 'false-gray' cases. See the
+ // kRegionTypeUnevacFromSpace-case comment in Mark().
+ if (kUseBakerReadBarrier) {
+ // Test the bitmap first to reduce the chance of false gray cases.
+ if ((!is_los && mark_bitmap->Test(ref)) ||
+ (is_los && los_bitmap->Test(ref))) {
+ return ref;
+ }
+ }
// Not marked or on the allocation stack. Try to mark it.
// This may or may not succeed, which is ok.
+ bool cas_success = false;
if (kUseBakerReadBarrier) {
- ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+ cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
+ ReadBarrier::GrayPtr());
}
if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
// Already marked.
+ if (kUseBakerReadBarrier && cas_success &&
+ ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ PushOntoFalseGrayStack(ref);
+ }
} else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
// Already marked in LOS.
+ if (kUseBakerReadBarrier && cas_success &&
+ ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ PushOntoFalseGrayStack(ref);
+ }
} else {
// Newly marked.
if (kUseBakerReadBarrier) {
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 76315fe..e9ff618 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -160,8 +160,6 @@
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
void SweepLargeObjects(bool swap_bitmaps)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
- void ClearBlackPtrs()
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
@@ -185,10 +183,19 @@
void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* MarkNonMoving(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegionOrImmuneSpace(mirror::Object* from_ref,
+ accounting::SpaceBitmap<kObjectAlignment>* bitmap)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ void PushOntoFalseGrayStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+ void ProcessFalseGrayStack() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
+ std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::vector<accounting::ObjectStack*> revoked_mark_stacks_
GUARDED_BY(mark_stack_lock_);
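Aside (not part of the patch): the false-gray handling added above can be sketched, under simplified assumptions, as a white-to-gray CAS followed by an atomic test-and-set of the mark bit; when the mark bit turns out to be already set, the object was grayed needlessly and is recorded so its color can be reverted later. All names below (Obj, Mark, false_gray_stack) are hypothetical and only mimic the shape of the ART code.

// Hypothetical sketch of the false-gray marking pattern, not ART code.
#include <atomic>
#include <mutex>
#include <vector>

enum Color { kWhite, kGray };

struct Obj {
  std::atomic<Color> color{kWhite};
  std::atomic<bool> mark_bit{false};
};

std::mutex false_gray_lock;          // stands in for mark_stack_lock_
std::vector<Obj*> false_gray_stack;  // stands in for false_gray_stack_

void Mark(Obj* obj, std::vector<Obj*>* mark_stack) {
  // Cheap pre-test: already-marked objects skip the color CAS entirely,
  // which reduces the chance of producing a false-gray object at all.
  if (obj->mark_bit.load(std::memory_order_acquire)) {
    return;
  }
  // Try to turn the object gray; at most one thread wins this CAS.
  Color expected = kWhite;
  bool cas_success = obj->color.compare_exchange_strong(
      expected, kGray, std::memory_order_acq_rel);
  if (obj->mark_bit.exchange(true, std::memory_order_acq_rel)) {
    // Lost the race: another thread marked it first. If we grayed it anyway,
    // remember it so the color can be set back to white after marking.
    if (cas_success && obj->color.load(std::memory_order_acquire) == kGray) {
      std::lock_guard<std::mutex> lock(false_gray_lock);
      false_gray_stack.push_back(obj);
    }
  } else {
    // Newly marked: queue the object for scanning.
    mark_stack->push_back(obj);
  }
}

ProcessFalseGrayStack() in the patch presumably walks the recorded objects and flips each one back from gray to white once marking is finished.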
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 03ab9a1..6088a43 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -68,31 +68,19 @@
Heap* heap = Runtime::Current()->GetHeap();
if (kUseBakerOrBrooksReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
heap->ConcurrentCopyingCollector()->IsActive()) {
- // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to black or white.
+ // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to white.
// We check IsActive() above because we don't want to do this when the zygote compaction
// collector (SemiSpace) is running.
CHECK(ref != nullptr);
collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
- const bool is_moving = concurrent_copying->RegionSpace()->IsInToSpace(ref);
- if (ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
- if (is_moving) {
- ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::WhitePtr());
- CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
- } else {
- ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::BlackPtr());
- CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr());
- }
+ mirror::Object* rb_ptr = ref->GetReadBarrierPointer();
+ if (rb_ptr == ReadBarrier::GrayPtr()) {
+ ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::WhitePtr());
+ CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
} else {
- // In ConcurrentCopying::ProcessMarkStackRef() we may leave a black or white Reference in the
- // queue and find it here, which is OK. Check that the color makes sense depending on whether
- // the Reference is moving or not and that the referent has been marked.
- if (is_moving) {
- CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
- << "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer();
- } else {
- CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr())
- << "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer();
- }
+ // In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue and
+ // find it here, which is OK.
+ CHECK_EQ(rb_ptr, ReadBarrier::WhitePtr()) << "ref=" << ref << " rb_ptr=" << rb_ptr;
mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
// The referent could be null if it's cleared by a mutator (Reference.clear()).
if (referent != nullptr) {
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 4e56c4a..c6b2870 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -39,7 +39,7 @@
int rc = call args; \
if (UNLIKELY(rc != 0)) { \
errno = rc; \
- PLOG(FATAL) << # call << " failed for " << what; \
+ PLOG(FATAL) << # call << " failed for " << (what); \
} \
} while (false)
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 9a2d0c6..5d710bf 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -216,17 +216,6 @@
evac_region_ = nullptr;
}
-void RegionSpace::AssertAllRegionLiveBytesZeroOrCleared() {
- if (kIsDebugBuild) {
- MutexLock mu(Thread::Current(), region_lock_);
- for (size_t i = 0; i < num_regions_; ++i) {
- Region* r = &regions_[i];
- size_t live_bytes = r->LiveBytes();
- CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
- }
- }
-}
-
void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
size_t /* failed_alloc_bytes */) {
size_t max_contiguous_allocation = 0;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 14e8005..4e8dfe8 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -215,7 +215,16 @@
reg->AddLiveBytes(alloc_size);
}
- void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_);
+ void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_) {
+ if (kIsDebugBuild) {
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+ Region* r = &regions_[i];
+ size_t live_bytes = r->LiveBytes();
+ CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
+ }
+ }
+ }
void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
bool AllocNewTlab(Thread* self) REQUIRES(!region_lock_);
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 7bd85ec..8cdf96d 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -301,13 +301,13 @@
CHECK_NON_NULL_ARGUMENT_FN_NAME(__FUNCTION__, value, return_val)
#define CHECK_NON_NULL_ARGUMENT_FN_NAME(name, value, return_val) \
- if (UNLIKELY(value == nullptr)) { \
+ if (UNLIKELY((value) == nullptr)) { \
JavaVmExtFromEnv(env)->JniAbortF(name, #value " == null"); \
return return_val; \
}
#define CHECK_NON_NULL_MEMCPY_ARGUMENT(length, value) \
- if (UNLIKELY(length != 0 && value == nullptr)) { \
+ if (UNLIKELY((length) != 0 && (value) == nullptr)) { \
JavaVmExtFromEnv(env)->JniAbortF(__FUNCTION__, #value " == null"); \
return; \
}
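Aside (not part of the patch): several hunks in this change (dex_file_method_inliner.cc, malloc_space.h, jni_internal.cc, thread.cc, runtime_options.h) only wrap macro parameters in parentheses. A standalone example, with hypothetical macro names, of why an unparenthesized parameter mis-expands when the argument is an expression:

// Illustration only, not ART code.
#include <iostream>

#define SQUARE_BAD(x)  (x * x)      // SQUARE_BAD(a + 1)  expands to (a + 1 * a + 1)
#define SQUARE_GOOD(x) ((x) * (x))  // SQUARE_GOOD(a + 1) expands to ((a + 1) * (a + 1))

int main() {
  int a = 3;
  std::cout << SQUARE_BAD(a + 1) << "\n";   // prints 7, not 16
  std::cout << SQUARE_GOOD(a + 1) << "\n";  // prints 16
  return 0;
}

The same operator-precedence hazard is what the (value), (length), (what), (x), and (y) parenthesizations in this change guard against.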
diff --git a/runtime/lambda/shorty_field_type.h b/runtime/lambda/shorty_field_type.h
index 46ddaa9..c314fd2 100644
--- a/runtime/lambda/shorty_field_type.h
+++ b/runtime/lambda/shorty_field_type.h
@@ -391,7 +391,7 @@
private:
#define IS_VALID_TYPE_SPECIALIZATION(type, name) \
- static inline constexpr bool Is ## name ## TypeImpl(type* const = 0) { \
+ static inline constexpr bool Is ## name ## TypeImpl(type* const = 0) { /*NOLINT*/ \
return true; \
} \
\
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index fba10ca..64b40b7 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -153,7 +153,7 @@
}
OatFileAssistant::DexOptNeeded OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target) {
- bool compilation_desired = CompilerFilter::IsCompilationEnabled(target);
+ bool compilation_desired = CompilerFilter::IsBytecodeCompilationEnabled(target);
// See if the oat file is in good shape as is.
bool oat_okay = OatFileCompilerFilterIsOkay(target);
@@ -600,7 +600,7 @@
CompilerFilter::Filter current_compiler_filter = file.GetCompilerFilter();
- if (CompilerFilter::IsCompilationEnabled(current_compiler_filter)) {
+ if (CompilerFilter::IsBytecodeCompilationEnabled(current_compiler_filter)) {
if (!file.IsPic()) {
const ImageInfo* image_info = GetImageInfo();
if (image_info == nullptr) {
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 15a1aa4..c79a9a6 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -233,7 +233,7 @@
EXPECT_TRUE(odex_file->HasPatchInfo());
EXPECT_EQ(filter, odex_file->GetCompilerFilter());
- if (CompilerFilter::IsCompilationEnabled(filter)) {
+ if (CompilerFilter::IsBytecodeCompilationEnabled(filter)) {
const std::vector<gc::space::ImageSpace*> image_spaces =
runtime->GetHeap()->GetBootImageSpaces();
ASSERT_TRUE(!image_spaces.empty() && image_spaces[0] != nullptr);
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
index ca5efe5..ba71dc3 100644
--- a/runtime/openjdkjvm/OpenjdkJvm.cc
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -58,11 +58,6 @@
#include <sys/socket.h>
#include <sys/ioctl.h>
-#ifdef ART_TARGET_ANDROID
-// This function is provided by android linker.
-extern "C" void android_update_LD_LIBRARY_PATH(const char* ld_library_path);
-#endif // ART_TARGET_ANDROID
-
#undef LOG_TAG
#define LOG_TAG "artopenjdk"
@@ -324,22 +319,6 @@
exit(status);
}
-static void SetLdLibraryPath(JNIEnv* env, jstring javaLdLibraryPath) {
-#ifdef ART_TARGET_ANDROID
- if (javaLdLibraryPath != nullptr) {
- ScopedUtfChars ldLibraryPath(env, javaLdLibraryPath);
- if (ldLibraryPath.c_str() != nullptr) {
- android_update_LD_LIBRARY_PATH(ldLibraryPath.c_str());
- }
- }
-
-#else
- LOG(WARNING) << "android_update_LD_LIBRARY_PATH not found; .so dependencies will not work!";
- UNUSED(javaLdLibraryPath, env);
-#endif
-}
-
-
JNIEXPORT jstring JVM_NativeLoad(JNIEnv* env,
jstring javaFilename,
jobject javaLoader,
@@ -349,17 +328,6 @@
return NULL;
}
- int32_t target_sdk_version = art::Runtime::Current()->GetTargetSdkVersion();
-
- // Starting with N nativeLoad uses classloader local
- // linker namespace instead of global LD_LIBRARY_PATH
- // (23 is Marshmallow). This call is here to preserve
- // backwards compatibility for the apps targeting sdk
- // version <= 23
- if (target_sdk_version == 0) {
- SetLdLibraryPath(env, javaLibrarySearchPath);
- }
-
std::string error_msg;
{
art::JavaVMExt* vm = art::Runtime::Current()->GetJavaVM();
diff --git a/runtime/runtime_options.h b/runtime/runtime_options.h
index 4610f6f..ab69d4f 100644
--- a/runtime/runtime_options.h
+++ b/runtime/runtime_options.h
@@ -73,7 +73,7 @@
using Key = RuntimeArgumentMapKey<TValue>;
// List of key declarations, shorthand for 'static const Key<T> Name'
-#define RUNTIME_OPTIONS_KEY(Type, Name, ...) static const Key<Type> Name;
+#define RUNTIME_OPTIONS_KEY(Type, Name, ...) static const Key<Type> (Name);
#include "runtime_options.def"
};
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 4248944..1d7e065 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2411,8 +2411,8 @@
template<size_t ptr_size>
void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
#define DO_THREAD_OFFSET(x, y) \
- if (offset == x.Uint32Value()) { \
- os << y; \
+ if (offset == (x).Uint32Value()) { \
+ os << (y); \
return; \
}
DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 2b96328..b2be770 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -4652,7 +4652,7 @@
if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
<< " from other class " << GetDeclaringClass();
- return;
+ // Keep hunting for possible hard fails.
}
}
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
index 0dab400..c6a2e9a 100644
--- a/test/117-nopatchoat/nopatchoat.cc
+++ b/test/117-nopatchoat/nopatchoat.cc
@@ -55,7 +55,7 @@
const OatFile* oat_file = oat_dex_file->GetOatFile();
return !oat_file->IsPic()
- && CompilerFilter::IsCompilationEnabled(oat_file->GetCompilerFilter());
+ && CompilerFilter::IsBytecodeCompilationEnabled(oat_file->GetCompilerFilter());
}
};
diff --git a/test/600-verifier-fails/expected.txt b/test/600-verifier-fails/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/600-verifier-fails/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/600-verifier-fails/info.txt b/test/600-verifier-fails/info.txt
new file mode 100644
index 0000000..478dd9b
--- /dev/null
+++ b/test/600-verifier-fails/info.txt
@@ -0,0 +1,4 @@
+The situation in this test was discovered by running dexfuzz on
+another randomly generated Java test. The soft verification
+failure (on the final field modification) should not hide the hard
+verification failure (on the type mismatch), to avoid a crash later on.
diff --git a/test/600-verifier-fails/smali/sput.smali b/test/600-verifier-fails/smali/sput.smali
new file mode 100644
index 0000000..87f3799
--- /dev/null
+++ b/test/600-verifier-fails/smali/sput.smali
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LA;
+.super Ljava/lang/Object;
+
+.method public foo(I)V
+.registers 2
+ sput v1, LMain;->staticField:Ljava/lang/String;
+ return-void
+.end method
diff --git a/test/600-verifier-fails/src/Main.java b/test/600-verifier-fails/src/Main.java
new file mode 100644
index 0000000..ba4cc31
--- /dev/null
+++ b/test/600-verifier-fails/src/Main.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+ public static final String staticField = null;
+
+ public static void main(String[] args) throws Exception {
+ try {
+ Class<?> a = Class.forName("A");
+ } catch (java.lang.VerifyError e) {
+ System.out.println("passed");
+ }
+ }
+}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Device.java b/tools/dexfuzz/src/dexfuzz/executors/Device.java
index 4a53957..45538fe 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Device.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Device.java
@@ -68,7 +68,13 @@
return envVars.get(key);
}
- private String getHostCoreImagePath() {
+ private String getHostCoreImagePathWithArch() {
+ // TODO: Using host currently implies x86 (see Options.java), change this when generalized.
+ assert(Options.useArchX86);
+ return androidHostOut + "/framework/x86/core.art";
+ }
+
+ private String getHostCoreImagePathNoArch() {
return androidHostOut + "/framework/core.art";
}
@@ -80,7 +86,7 @@
androidHostOut = checkForEnvVar(envVars, "ANDROID_HOST_OUT");
if (Options.executeOnHost) {
- File coreImage = new File(getHostCoreImagePath());
+ File coreImage = new File(getHostCoreImagePathWithArch());
if (!coreImage.exists()) {
Log.errorAndQuit("Host core image not found at " + coreImage.getPath()
+ ". Did you forget to build it?");
@@ -156,7 +162,7 @@
* Get any extra flags required to execute ART on the host.
*/
public String getHostExecutionFlags() {
- return String.format("-Xnorelocate -Ximage:%s", getHostCoreImagePath());
+ return String.format("-Xnorelocate -Ximage:%s", getHostCoreImagePathNoArch());
}
public String getAndroidHostOut() {