Merge "Revert "Ignore libcore.java.lang.OldSystemTest#test_gc failures with CC.""
diff --git a/Android.mk b/Android.mk
index a518d2f..25796a0 100644
--- a/Android.mk
+++ b/Android.mk
@@ -558,3 +558,10 @@
TEST_ART_TARGET_SYNC_DEPS :=
include $(art_path)/runtime/openjdkjvm/Android.mk
+
+# Helper target that depends on boot image creation.
+#
+# Can be used, for example, to dump initialization failures:
+# m art-boot-image ART_BOOT_IMAGE_EXTRA_ARGS=--dump-init-failures=fails.txt
+.PHONY: art-boot-image
+art-boot-image: $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME)
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index c747ffa..8bb462c 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -576,7 +576,16 @@
}
} else if (object->GetClass<kVerifyNone>()->IsStringClass()) {
bin = kBinString; // Strings are almost always immutable (except for object header).
- } // else bin = kBinRegular
+ } else if (object->GetClass<kVerifyNone>() ==
+ Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangObject)) {
+ // Instance of java.lang.Object, probably a lock object. This means it will be dirty when we
+ // synchronize on it.
+ bin = kBinMiscDirty;
+ } else if (object->IsDexCache()) {
+ // The dex file field becomes dirty when the image is loaded.
+ bin = kBinMiscDirty;
+ }
+ // else bin = kBinRegular
}
size_t oat_index = GetOatIndex(object);
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index f204b28..0cb6aea 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -149,16 +149,17 @@
void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);
// Classify different kinds of bins that objects end up getting packed into during image writing.
+ // Ordered from dirtiest to cleanest (until ArtMethods).
enum Bin {
- // Likely-clean:
- kBinString, // [String] Almost always immutable (except for obj header).
+ kBinMiscDirty, // Dex caches, object locks, etc...
+ kBinClassVerified, // Class verified, but initializers haven't been run
// Unknown mix of clean/dirty:
kBinRegular,
- // Likely-dirty:
+ kBinClassInitialized, // Class initializers have been run
// All classes get their own bins since their fields are often dirty.
kBinClassInitializedFinalStatics, // Class initializers have been run, no non-final statics
- kBinClassInitialized, // Class initializers have been run
- kBinClassVerified, // Class verified, but initializers haven't been run
+ // Likely-clean:
+ kBinString, // [String] Almost always immutable (except for obj header).
// Add more bins here if we add more segregation code.
// Non mirror fields must be below.
// ArtFields should always be clean.
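
Note: the dirtiest-to-cleanest ordering matters because the image writer packs objects bin by bin, so objects with similar dirtiness share pages and fork-time copy-on-write keeps the clean pages shared across processes. A minimal sketch of bin-ordered packing, with illustrative names rather than ART's actual layout code:

    #include <cstddef>
    #include <map>
    #include <vector>

    // Sketch: lay objects out bin by bin so objects with similar dirtiness
    // land on the same pages. Bin values mirror the enum above, ordered from
    // dirtiest to cleanest.
    enum Bin { kBinMiscDirty, kBinClassVerified, kBinRegular, kBinClassInitialized,
               kBinClassInitializedFinalStatics, kBinString };

    struct Obj { size_t size; Bin bin; };

    std::map<const Obj*, size_t> LayOutByBin(const std::vector<Obj>& objects) {
      std::map<const Obj*, size_t> offsets;
      size_t cursor = 0;
      for (int bin = kBinMiscDirty; bin <= kBinString; ++bin) {
        for (const Obj& o : objects) {
          if (o.bin == bin) {
            offsets[&o] = cursor;  // All kBinMiscDirty objects first, then the next bin, ...
            cursor += o.size;      // Real code would also round up for object alignment.
          }
        }
      }
      return offsets;
    }
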
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 3da8285..863dd1c 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -124,11 +124,12 @@
void Set##name() { SetBit(k##name); } \
bool Get##name() const { return IsBitSet(k##name); } \
private: \
-static constexpr int k##name = bit
+static constexpr size_t k##name = bit
class IntrinsicOptimizations : public ValueObject {
public:
- explicit IntrinsicOptimizations(HInvoke* invoke) : value_(invoke->GetIntrinsicOptimizations()) {}
+ explicit IntrinsicOptimizations(HInvoke* invoke)
+ : value_(invoke->GetIntrinsicOptimizations()) {}
explicit IntrinsicOptimizations(const HInvoke& invoke)
: value_(invoke.GetIntrinsicOptimizations()) {}
@@ -138,15 +139,17 @@
protected:
bool IsBitSet(uint32_t bit) const {
+ DCHECK_LT(bit, sizeof(uint32_t) * kBitsPerByte);
return (*value_ & (1 << bit)) != 0u;
}
void SetBit(uint32_t bit) {
- *(const_cast<uint32_t*>(value_)) |= (1 << bit);
+ DCHECK_LT(bit, sizeof(uint32_t) * kBitsPerByte);
+ *(const_cast<uint32_t* const>(value_)) |= (1 << bit);
}
private:
- const uint32_t *value_;
+ const uint32_t* const value_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicOptimizations);
};
@@ -158,7 +161,7 @@
void Set##name() { SetBit(k##name); } \
bool Get##name() const { return IsBitSet(k##name); } \
private: \
-static constexpr int k##name = bit + kNumberOfGenericOptimizations
+static constexpr size_t k##name = bit + kNumberOfGenericOptimizations
class StringEqualsOptimizations : public IntrinsicOptimizations {
public:
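
Note: the macro above expands to per-flag accessors over a single uint32_t, so a flag index must stay below 32; the added DCHECK_LT calls make an out-of-range bit fail fast instead of silently shifting past the word. A self-contained sketch of the same pattern, with assert standing in for DCHECK_LT:

    #include <cassert>
    #include <cstdint>

    // Sketch of the IntrinsicOptimizations bit-flag pattern: one 32-bit word,
    // one bit per optimization, bounds-checked on every access.
    class Flags {
     public:
      explicit Flags(uint32_t* storage) : value_(storage) {}

      bool IsBitSet(uint32_t bit) const {
        assert(bit < sizeof(uint32_t) * 8);  // Mirrors DCHECK_LT(bit, sizeof(uint32_t) * kBitsPerByte).
        return (*value_ & (1u << bit)) != 0u;
      }

      void SetBit(uint32_t bit) {
        assert(bit < sizeof(uint32_t) * 8);
        *value_ |= (1u << bit);
      }

     private:
      uint32_t* const value_;  // Const pointer, mutable word; ART const-casts instead.
    };
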
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 4b94c94..86b7bc1 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1394,15 +1394,13 @@
SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
codegen_->AddSlowPath(slow_path);
- Label ok;
+ Label conditions_on_positions_validated;
SystemArrayCopyOptimizations optimizations(invoke);
- if (!optimizations.GetDestinationIsSource()) {
- if (!src_pos.IsConstant() || !dest_pos.IsConstant()) {
- __ cmp(src, ShifterOperand(dest));
- }
+ if (!optimizations.GetDestinationIsSource() &&
+ (!src_pos.IsConstant() || !dest_pos.IsConstant())) {
+ __ cmp(src, ShifterOperand(dest));
}
-
// If source and destination are the same, we go to slow path if we need to do
// forward copying.
if (src_pos.IsConstant()) {
@@ -1413,14 +1411,14 @@
|| (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
} else {
if (!optimizations.GetDestinationIsSource()) {
- __ b(&ok, NE);
+ __ b(&conditions_on_positions_validated, NE);
}
__ cmp(dest_pos.AsRegister<Register>(), ShifterOperand(src_pos_constant));
__ b(slow_path->GetEntryLabel(), GT);
}
} else {
if (!optimizations.GetDestinationIsSource()) {
- __ b(&ok, NE);
+ __ b(&conditions_on_positions_validated, NE);
}
if (dest_pos.IsConstant()) {
int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
@@ -1431,7 +1429,7 @@
__ b(slow_path->GetEntryLabel(), LT);
}
- __ Bind(&ok);
+ __ Bind(&conditions_on_positions_validated);
if (!optimizations.GetSourceIsNotNull()) {
// Bail out if the source is null.
@@ -1482,7 +1480,7 @@
bool did_unpoison = false;
if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
!optimizations.GetSourceIsNonPrimitiveArray()) {
- // One or two of the references need to be unpoisoned. Unpoisoned them
+ // One or two of the references need to be unpoisoned. Unpoison them
// both to make the identity check valid.
__ MaybeUnpoisonHeapReference(temp1);
__ MaybeUnpoisonHeapReference(temp2);
@@ -1491,6 +1489,7 @@
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non primitive array.
+ // /* HeapReference<Class> */ temp3 = temp1->component_type_
__ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
__ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
__ MaybeUnpoisonHeapReference(temp3);
@@ -1501,7 +1500,7 @@
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// Bail out if the source is not a non primitive array.
- // Bail out if the destination is not a non primitive array.
+ // /* HeapReference<Class> */ temp3 = temp2->component_type_
__ LoadFromOffset(kLoadWord, temp3, temp2, component_offset);
__ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
__ MaybeUnpoisonHeapReference(temp3);
@@ -1518,8 +1517,10 @@
if (!did_unpoison) {
__ MaybeUnpoisonHeapReference(temp1);
}
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
__ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
__ MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
__ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
// No need to unpoison the result, we're comparing against null.
__ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
@@ -1530,8 +1531,10 @@
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non primitive array.
+ // /* HeapReference<Class> */ temp1 = src->klass_
__ LoadFromOffset(kLoadWord, temp1, src, class_offset);
__ MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ temp3 = temp1->component_type_
__ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
__ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
__ MaybeUnpoisonHeapReference(temp3);
@@ -1585,7 +1588,7 @@
temp2,
dest,
Register(kNoRegister),
- /* can_be_null */ false);
+ /* value_can_be_null */ false);
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 927e2ec..04ae3a6 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1819,39 +1819,32 @@
const Register& dst_base,
const Register& src_end) {
DCHECK(type == Primitive::kPrimNot || type == Primitive::kPrimChar)
- << "Unexpected element type: "
- << type;
- const int32_t char_size = Primitive::ComponentSize(type);
- const int32_t char_size_shift = Primitive::ComponentSizeShift(type);
+ << "Unexpected element type: " << type;
+ const int32_t element_size = Primitive::ComponentSize(type);
+ const int32_t element_size_shift = Primitive::ComponentSizeShift(type);
- uint32_t offset = mirror::Array::DataOffset(char_size).Uint32Value();
+ uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value();
if (src_pos.IsConstant()) {
int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
- __ Add(src_base, src, char_size * constant + offset);
+ __ Add(src_base, src, element_size * constant + data_offset);
} else {
- __ Add(src_base, src, offset);
- __ Add(src_base,
- src_base,
- Operand(XRegisterFrom(src_pos), LSL, char_size_shift));
+ __ Add(src_base, src, data_offset);
+ __ Add(src_base, src_base, Operand(XRegisterFrom(src_pos), LSL, element_size_shift));
}
if (dst_pos.IsConstant()) {
int32_t constant = dst_pos.GetConstant()->AsIntConstant()->GetValue();
- __ Add(dst_base, dst, char_size * constant + offset);
+ __ Add(dst_base, dst, element_size * constant + data_offset);
} else {
- __ Add(dst_base, dst, offset);
- __ Add(dst_base,
- dst_base,
- Operand(XRegisterFrom(dst_pos), LSL, char_size_shift));
+ __ Add(dst_base, dst, data_offset);
+ __ Add(dst_base, dst_base, Operand(XRegisterFrom(dst_pos), LSL, element_size_shift));
}
if (copy_length.IsConstant()) {
int32_t constant = copy_length.GetConstant()->AsIntConstant()->GetValue();
- __ Add(src_end, src_base, char_size * constant);
+ __ Add(src_end, src_base, element_size * constant);
} else {
- __ Add(src_end,
- src_base,
- Operand(XRegisterFrom(copy_length), LSL, char_size_shift));
+ __ Add(src_end, src_base, Operand(XRegisterFrom(copy_length), LSL, element_size_shift));
}
}
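
Note: the renamed element_size / element_size_shift values feed a standard array-element address computation: base = array + data_offset + pos * element_size, where the multiply becomes a left shift when the position is in a register. The arithmetic the emitted Add instructions perform, in C++ form with illustrative names:

    #include <cstdint>

    // element_size == (1 << element_size_shift) is guaranteed by
    // Primitive::ComponentSize / Primitive::ComponentSizeShift.
    uintptr_t ElementAddress(uintptr_t array,
                             uint32_t data_offset,  // mirror::Array::DataOffset().
                             int32_t element_size_shift,
                             int32_t pos) {
      return array + data_offset + (static_cast<uintptr_t>(pos) << element_size_shift);
    }

    // src_end, the copy-loop bound, is one past the last element to copy.
    uintptr_t EndAddress(uintptr_t src_base, int32_t element_size_shift, int32_t length) {
      return src_base + (static_cast<uintptr_t>(length) << element_size_shift);
    }
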
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 9ca4ef0..1d32dc7 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1150,15 +1150,13 @@
SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(slow_path);
- NearLabel ok;
+ NearLabel conditions_on_positions_validated;
SystemArrayCopyOptimizations optimizations(invoke);
- if (!optimizations.GetDestinationIsSource()) {
- if (!src_pos.IsConstant() || !dest_pos.IsConstant()) {
- __ cmpl(src, dest);
- }
+ if (!optimizations.GetDestinationIsSource() &&
+ (!src_pos.IsConstant() || !dest_pos.IsConstant())) {
+ __ cmpl(src, dest);
}
-
// If source and destination are the same, we go to slow path if we need to do
// forward copying.
if (src_pos.IsConstant()) {
@@ -1169,14 +1167,14 @@
|| (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
} else {
if (!optimizations.GetDestinationIsSource()) {
- __ j(kNotEqual, &ok);
+ __ j(kNotEqual, &conditions_on_positions_validated);
}
__ cmpl(dest_pos.AsRegister<CpuRegister>(), Immediate(src_pos_constant));
__ j(kGreater, slow_path->GetEntryLabel());
}
} else {
if (!optimizations.GetDestinationIsSource()) {
- __ j(kNotEqual, &ok);
+ __ j(kNotEqual, &conditions_on_positions_validated);
}
if (dest_pos.IsConstant()) {
int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
@@ -1188,7 +1186,7 @@
}
}
- __ Bind(&ok);
+ __ Bind(&conditions_on_positions_validated);
if (!optimizations.GetSourceIsNotNull()) {
// Bail out if the source is null.
@@ -1241,7 +1239,7 @@
bool did_unpoison = false;
if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
!optimizations.GetSourceIsNonPrimitiveArray()) {
- // One or two of the references need to be unpoisoned. Unpoisoned them
+ // One or two of the references need to be unpoisoned. Unpoison them
// both to make the identity check valid.
__ MaybeUnpoisonHeapReference(temp1);
__ MaybeUnpoisonHeapReference(temp2);
@@ -1250,6 +1248,7 @@
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non primitive array.
+ // /* HeapReference<Class> */ TMP = temp1->component_type_
__ movl(CpuRegister(TMP), Address(temp1, component_offset));
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, slow_path->GetEntryLabel());
@@ -1260,6 +1259,7 @@
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// Bail out if the source is not a non primitive array.
+ // /* HeapReference<Class> */ TMP = temp2->component_type_
__ movl(CpuRegister(TMP), Address(temp2, component_offset));
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, slow_path->GetEntryLabel());
@@ -1276,8 +1276,10 @@
if (!did_unpoison) {
__ MaybeUnpoisonHeapReference(temp1);
}
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
__ movl(temp1, Address(temp1, component_offset));
__ MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
__ movl(temp1, Address(temp1, super_offset));
// No need to unpoison the result, we're comparing against null.
__ testl(temp1, temp1);
@@ -1289,8 +1291,10 @@
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non primitive array.
+ // /* HeapReference<Class> */ temp1 = src->klass_
__ movl(temp1, Address(src, class_offset));
__ MaybeUnpoisonHeapReference(temp1);
+ // /* HeapReference<Class> */ TMP = temp1->component_type_
__ movl(CpuRegister(TMP), Address(temp1, component_offset));
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, slow_path->GetEntryLabel());
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 1086cbf..1afa36a 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -535,11 +535,16 @@
}
}
-void HLoopInformation::PopulateIrreducibleRecursive(HBasicBlock* block) {
- if (blocks_.IsBitSet(block->GetBlockId())) {
+void HLoopInformation::PopulateIrreducibleRecursive(HBasicBlock* block, ArenaBitVector* finalized) {
+ size_t block_id = block->GetBlockId();
+
+ // If `block` is in `finalized`, we know its membership in the loop has been
+ // decided and it does not need to be revisited.
+ if (finalized->IsBitSet(block_id)) {
return;
}
+ bool is_finalized = false;
if (block->IsLoopHeader()) {
// If we hit a loop header in an irreducible loop, we first check if the
// pre header of that loop belongs to the currently analyzed loop. If it does,
@@ -547,26 +552,36 @@
// Note that we cannot use GetPreHeader, as the loop may have not been populated
// yet.
HBasicBlock* pre_header = block->GetPredecessors()[0];
- PopulateIrreducibleRecursive(pre_header);
+ PopulateIrreducibleRecursive(pre_header, finalized);
if (blocks_.IsBitSet(pre_header->GetBlockId())) {
- blocks_.SetBit(block->GetBlockId());
block->SetInLoop(this);
+ blocks_.SetBit(block_id);
+ finalized->SetBit(block_id);
+ is_finalized = true;
+
HLoopInformation* info = block->GetLoopInformation();
for (HBasicBlock* back_edge : info->GetBackEdges()) {
- PopulateIrreducibleRecursive(back_edge);
+ PopulateIrreducibleRecursive(back_edge, finalized);
}
}
} else {
// Visit all predecessors. If one predecessor is part of the loop, this
// block is also part of this loop.
for (HBasicBlock* predecessor : block->GetPredecessors()) {
- PopulateIrreducibleRecursive(predecessor);
- if (blocks_.IsBitSet(predecessor->GetBlockId())) {
- blocks_.SetBit(block->GetBlockId());
+ PopulateIrreducibleRecursive(predecessor, finalized);
+ if (!is_finalized && blocks_.IsBitSet(predecessor->GetBlockId())) {
block->SetInLoop(this);
+ blocks_.SetBit(block_id);
+ finalized->SetBit(block_id);
+ is_finalized = true;
}
}
}
+
+ // All predecessors have been recursively visited. Mark finalized if not marked yet.
+ if (!is_finalized) {
+ finalized->SetBit(block_id);
+ }
}
void HLoopInformation::Populate() {
@@ -576,22 +591,37 @@
// to end the recursion.
// This is a recursive implementation of the algorithm described in
// "Advanced Compiler Design & Implementation" (Muchnick) p192.
+ HGraph* graph = header_->GetGraph();
blocks_.SetBit(header_->GetBlockId());
header_->SetInLoop(this);
+
+ bool is_irreducible_loop = false;
for (HBasicBlock* back_edge : GetBackEdges()) {
DCHECK(back_edge->GetDominator() != nullptr);
if (!header_->Dominates(back_edge)) {
- irreducible_ = true;
- header_->GetGraph()->SetHasIrreducibleLoops(true);
- PopulateIrreducibleRecursive(back_edge);
- } else {
- if (header_->GetGraph()->IsCompilingOsr()) {
- irreducible_ = true;
- header_->GetGraph()->SetHasIrreducibleLoops(true);
- }
+ is_irreducible_loop = true;
+ break;
+ }
+ }
+
+ if (is_irreducible_loop) {
+ ArenaBitVector visited(graph->GetArena(),
+ graph->GetBlocks().size(),
+ /* expandable */ false,
+ kArenaAllocGraphBuilder);
+ for (HBasicBlock* back_edge : GetBackEdges()) {
+ PopulateIrreducibleRecursive(back_edge, &visited);
+ }
+ } else {
+ for (HBasicBlock* back_edge : GetBackEdges()) {
PopulateRecursive(back_edge);
}
}
+
+ if (is_irreducible_loop || graph->IsCompilingOsr()) {
+ irreducible_ = true;
+ graph->SetHasIrreducibleLoops(true);
+ }
}
HBasicBlock* HLoopInformation::GetPreHeader() const {
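
Note: the new finalized bit vector turns the population pass into a memoized depth-first search: once a block's loop membership is decided it is never expanded again, which bounds the work and keeps the recursion from revisiting blocks in irreducible control flow. A simplified model of that bookkeeping, using plain STL containers in place of ArenaBitVector:

    #include <vector>

    struct Block {
      int id;
      std::vector<Block*> predecessors;
    };

    // Simplified model of PopulateIrreducibleRecursive: decide membership for
    // each block at most once. The full algorithm additionally special-cases
    // loop headers (finalizing a header before recursing into its back edges),
    // which is what breaks cycles in a real CFG; this sketch assumes acyclic
    // predecessor chains.
    void Populate(Block* block,
                  std::vector<bool>& in_loop,    // Corresponds to blocks_.
                  std::vector<bool>& finalized) {
      if (finalized[block->id]) {
        return;  // Membership already decided; do not revisit.
      }
      bool is_finalized = false;
      for (Block* pred : block->predecessors) {
        Populate(pred, in_loop, finalized);
        if (!is_finalized && in_loop[pred->id]) {
          in_loop[block->id] = true;   // One in-loop predecessor suffices.
          finalized[block->id] = true;
          is_finalized = true;
        }
      }
      if (!is_finalized) {
        finalized[block->id] = true;   // All predecessors seen; not in the loop.
      }
    }
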
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 0088fed..dc5a8fa 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -727,7 +727,7 @@
private:
// Internal recursive implementation of `Populate`.
void PopulateRecursive(HBasicBlock* block);
- void PopulateIrreducibleRecursive(HBasicBlock* block);
+ void PopulateIrreducibleRecursive(HBasicBlock* block, ArenaBitVector* finalized);
HBasicBlock* header_;
HSuspendCheck* suspend_check_;
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index c2a812e..cbd0c40 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -416,7 +416,8 @@
// Look up local classes by their descriptor
std::map<std::string, mirror::Class*> local_class_map;
- std::unordered_set<mirror::Object*> dirty_objects;
+ // Use a std::set to get sorted output.
+ std::set<mirror::Object*> dirty_objects;
size_t dirty_object_bytes = 0;
const uint8_t* begin_image_ptr = image_begin_unaligned;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 506316e..1cdda2d 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1638,23 +1638,17 @@
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_rosalloc
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
-ENTRY art_quick_alloc_object_tlab
- // Fast path tlab allocation.
- // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
- // x2-x7: free.
-#if defined(USE_READ_BARRIER)
- mvn x0, xzr // Read barrier not supported here.
- ret // Return -1.
-#endif
- ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
- // Load the class (x2)
- ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- cbz x2, .Lart_quick_alloc_object_tlab_slow_path // Check null class
+// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+//
+// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current
+// x3-x7: free.
+// x0 and x1 must be preserved for the slow path.
+.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
+ cbz x2, \slowPathLabel // Check null class
// Check class status.
ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
cmp x3, #MIRROR_CLASS_STATUS_INITIALIZED
- bne .Lart_quick_alloc_object_tlab_slow_path
+ bne \slowPathLabel
// Add a fake dependence from the
// following access flag and size
// loads to the status load.
@@ -1668,7 +1662,7 @@
// Check access flags has
// kAccClassIsFinalizable.
ldr w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
- tbnz x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT, .Lart_quick_alloc_object_tlab_slow_path
+ tbnz x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT, \slowPathLabel
// Load thread_local_pos (x4) and
// thread_local_end (x5).
ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
@@ -1678,7 +1672,7 @@
cmp x7, x6 // Check if it fits. OK to do this
// before rounding up the object size
// assuming the buf size alignment.
- bhi .Lart_quick_alloc_object_tlab_slow_path
+ bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
// Round up the object size by the
// object alignment. (addr + 7) & ~7.
@@ -1703,6 +1697,21 @@
// class status load.)
dmb ish
ret
+.endm
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
+ENTRY art_quick_alloc_object_tlab
+ // Fast path tlab allocation.
+ // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
+ // x2-x7: free.
+#if defined(USE_READ_BARRIER)
+ mvn x0, xzr // Read barrier not supported here.
+ ret // Return -1.
+#endif
+ ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
+ // Load the class (x2)
+ ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+ ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
.Lart_quick_alloc_object_tlab_slow_path:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // Save callee saves in case of GC.
mov x2, xSELF // Pass Thread::Current.
@@ -1711,7 +1720,42 @@
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_tlab
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+ENTRY art_quick_alloc_object_region_tlab
+ // Fast path region tlab allocation.
+ // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
+ // x2-x7: free.
+#if !defined(USE_READ_BARRIER)
+ mvn x0, xzr // Read barrier must be enabled here.
+ ret // Return -1.
+#endif
+ ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
+ // Load the class (x2)
+ ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+ // Read barrier for class load.
+ ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
+ cbnz x3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
+.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
+ ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
+.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
+ // The read barrier slow path. Mark
+ // the class.
+ stp x0, x1, [sp, #-32]! // Save x0, x1; 32-byte frame keeps sp 16-byte aligned.
+ str xLR, [sp, #16] // Save lr.
+ mov x0, x2 // Pass the class as the first param.
+ bl artReadBarrierMark
+ mov x2, x0 // Get the (marked) class back.
+ ldp x0, x1, [sp, #0] // Restore registers.
+ ldr xLR, [sp, #16]
+ add sp, sp, #32
+ b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+.Lart_quick_alloc_object_region_tlab_slow_path:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // Save callee saves in case of GC.
+ mov x2, xSELF // Pass Thread::Current.
+ bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_alloc_object_region_tlab
/*
* Called by managed code when the thread has been asked to suspend.
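
Note: ALLOC_OBJECT_TLAB_FAST_PATH encodes a conventional thread-local bump-pointer allocator: verify the class is initialized and non-finalizable, then carve the object out of the thread's TLAB by advancing thread_local_pos, branching to the runtime slow path when the buffer cannot fit the object. A C++ sketch of the same fast path, assuming 8-byte object alignment as on arm64:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct Tlab { uint8_t* pos; uint8_t* end; };  // thread_local_pos / thread_local_end.

    // Returns nullptr where the assembly branches to the slow-path label.
    void* AllocObjectFastPath(Tlab* tlab, void* klass, size_t object_size,
                              bool initialized, bool finalizable) {
      if (klass == nullptr || !initialized || finalizable) {
        return nullptr;                                 // cbz / bne / tbnz -> slow path.
      }
      size_t aligned = (object_size + 7) & ~size_t{7};  // (addr + 7) & ~7.
      if (tlab->end - tlab->pos < static_cast<ptrdiff_t>(aligned)) {
        return nullptr;                                 // bhi -> slow path: TLAB exhausted.
      }
      void* result = tlab->pos;
      tlab->pos += aligned;                             // Bump thread_local_pos.
      std::memcpy(result, &klass, sizeof(klass));       // Install the class pointer header.
      return result;  // The real code also issues a dmb ish barrier before returning.
    }
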
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 18def2d..1e7ee65 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -7760,12 +7760,11 @@
void ClassLinker::InsertDexFileInToClassLoader(mirror::Object* dex_file,
mirror::ClassLoader* class_loader) {
DCHECK(dex_file != nullptr);
- DCHECK(class_loader != nullptr);
Thread* const self = Thread::Current();
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* const table = class_loader->GetClassTable();
+ ClassTable* const table = ClassTableForClassLoader(class_loader);
DCHECK(table != nullptr);
- if (table->InsertDexFile(dex_file)) {
+ if (table->InsertDexFile(dex_file) && class_loader != nullptr) {
// It was not already inserted, perform the write barrier to let the GC know the class loader's
// class table was modified.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
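
Note: routing through ClassTableForClassLoader is what makes the null class_loader case work: a null loader denotes the boot class path, whose table is owned by the class linker itself, so only a real loader object needs the write barrier when its table changes. A hedged sketch of that dispatch, with stand-in types since the helper's body is not shown here:

    // Illustrative stand-ins for the ART types involved.
    struct ClassTable {};
    struct ClassLoader {
      ClassTable* table;
      ClassTable* GetClassTable() { return table; }
    };

    // Assumed behavior of ClassTableForClassLoader: null means the boot class
    // path, whose table needs no per-loader write barrier.
    ClassTable* ClassTableForClassLoader(ClassTable* boot_class_table,
                                         ClassLoader* class_loader) {
      return class_loader == nullptr ? boot_class_table
                                     : class_loader->GetClassTable();
    }
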
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index b4b7f34..5de502b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -595,6 +595,7 @@
REQUIRES(!Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // May be called with null class_loader due to legacy code. b/27954959
void InsertDexFileInToClassLoader(mirror::Object* dex_file, mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index fc186b1..d386c74 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -703,6 +703,11 @@
return src;
}
+ // Must be called on pointers that already have been relocated to the destination relocation.
+ ALWAYS_INLINE bool IsInAppImage(mirror::Object* object) const {
+ return app_image_.InDest(reinterpret_cast<uintptr_t>(object));
+ }
+
protected:
// Source section.
const RelocationRange boot_image_;
@@ -717,36 +722,12 @@
template<typename... Args>
explicit FixupObjectAdapter(Args... args) : FixupVisitor(args...) {}
- // Must be called on pointers that already have been relocated to the destination relocation.
- ALWAYS_INLINE bool IsInAppImage(mirror::Object* object) const {
- return app_image_.InDest(reinterpret_cast<uintptr_t>(object));
- }
-
template <typename T>
T* operator()(T* obj) const {
return ForwardObject(obj);
}
};
-class FixupClassVisitor : public FixupVisitor {
- public:
- template<typename... Args>
- explicit FixupClassVisitor(Args... args) : FixupVisitor(args...) {}
-
- // The image space is contained so the GC doesn't need to know about it. Avoid requiring mutator
- // lock to prevent possible pauses.
- ALWAYS_INLINE void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
- mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
- DCHECK(klass != nullptr) << "Null class in image";
- // No AsClass since our fields aren't quite fixed up yet.
- mirror::Class* new_klass = down_cast<mirror::Class*>(ForwardObject(klass));
- // Keep clean if possible.
- if (klass != new_klass) {
- obj->SetClass<kVerifyNone>(new_klass);
- }
- }
-};
-
class FixupRootVisitor : public FixupVisitor {
public:
template<typename... Args>
@@ -772,12 +753,12 @@
class FixupObjectVisitor : public FixupVisitor {
public:
template<typename... Args>
- explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* pointer_array_visited,
+ explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited,
const size_t pointer_size,
Args... args)
: FixupVisitor(args...),
pointer_size_(pointer_size),
- pointer_array_visited_(pointer_array_visited) {}
+ visited_(visited) {}
// Fix up separately since we also need to fix up method entrypoints.
ALWAYS_INLINE void VisitRootIfNonNull(
@@ -805,13 +786,20 @@
// Visit a pointer array and forward corresponding native data. Ignores pointer arrays in the
// boot image. Uses the bitmap to ensure the same array is not visited multiple times.
template <typename Visitor>
- void VisitPointerArray(mirror::PointerArray* array, const Visitor& visitor) const
+ void UpdatePointerArrayContents(mirror::PointerArray* array, const Visitor& visitor) const
NO_THREAD_SAFETY_ANALYSIS {
- if (array != nullptr &&
- visitor.IsInAppImage(array) &&
- !pointer_array_visited_->Test(array)) {
+ DCHECK(array != nullptr);
+ DCHECK(visitor.IsInAppImage(array));
+ // The bit for the array contents is different from the bit for the array itself, since we
+ // may have already visited the array as a long / int array while walking the bitmap, without
+ // knowing it was a pointer array.
+ static_assert(kObjectAlignment == 8u, "array bit may be in another object");
+ mirror::Object* const contents_bit = reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uintptr_t>(array) + kObjectAlignment);
+ // If the bit is not set then the contents have not yet been updated.
+ if (!visited_->Test(contents_bit)) {
array->Fixup<kVerifyNone, kWithoutReadBarrier>(array, pointer_size_, visitor);
- pointer_array_visited_->Set(array);
+ visited_->Set(contents_bit);
}
}
@@ -824,26 +812,61 @@
ForwardObject(obj));
}
- ALWAYS_INLINE void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ if (visited_->Test(obj)) {
+ // Already visited.
+ return;
+ }
+ visited_->Set(obj);
+
+ // Handle class specially first since we need it to be updated to properly visit the rest of
+ // the instance fields.
+ {
+ mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ DCHECK(klass != nullptr) << "Null class in image";
+ // No AsClass since our fields aren't quite fixed up yet.
+ mirror::Class* new_klass = down_cast<mirror::Class*>(ForwardObject(klass));
+ if (klass != new_klass) {
+ obj->SetClass<kVerifyNone>(new_klass);
+ }
+ if (new_klass != klass && IsInAppImage(new_klass)) {
+ // Make sure the klass contents are fixed up since we depend on it to walk the fields.
+ operator()(new_klass);
+ }
+ }
+
obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
*this,
*this);
+ // Note that this code relies on no circular dependencies.
// We want to use our own class loader and not the one in the image.
if (obj->IsClass<kVerifyNone, kWithoutReadBarrier>()) {
- mirror::Class* klass = obj->AsClass<kVerifyNone, kWithoutReadBarrier>();
+ mirror::Class* as_klass = obj->AsClass<kVerifyNone, kWithoutReadBarrier>();
FixupObjectAdapter visitor(boot_image_, boot_oat_, app_image_, app_oat_);
- klass->FixupNativePointers<kVerifyNone, kWithoutReadBarrier>(klass, pointer_size_, visitor);
+ as_klass->FixupNativePointers<kVerifyNone, kWithoutReadBarrier>(as_klass,
+ pointer_size_,
+ visitor);
// Deal with the pointer arrays. Use the helper function since multiple classes can reference
// the same arrays.
- VisitPointerArray(klass->GetVTable<kVerifyNone, kWithoutReadBarrier>(), visitor);
- mirror::IfTable* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
- if (iftable != nullptr) {
+ mirror::PointerArray* const vtable = as_klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
+ if (vtable != nullptr && IsInAppImage(vtable)) {
+ operator()(vtable);
+ UpdatePointerArrayContents(vtable, visitor);
+ }
+ mirror::IfTable* iftable = as_klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
+ // Ensure iftable arrays are fixed up since we need GetMethodArray to return the valid
+ // contents.
+ if (iftable != nullptr && IsInAppImage(iftable)) {
+ operator()(iftable);
for (int32_t i = 0, count = iftable->Count(); i < count; ++i) {
if (iftable->GetMethodArrayCount<kVerifyNone, kWithoutReadBarrier>(i) > 0) {
mirror::PointerArray* methods =
iftable->GetMethodArray<kVerifyNone, kWithoutReadBarrier>(i);
- DCHECK(methods != nullptr);
- VisitPointerArray(methods, visitor);
+ if (visitor.IsInAppImage(methods)) {
+ operator()(methods);
+ DCHECK(methods != nullptr);
+ UpdatePointerArrayContents(methods, visitor);
+ }
}
}
}
@@ -852,7 +875,7 @@
private:
const size_t pointer_size_;
- gc::accounting::ContinuousSpaceBitmap* const pointer_array_visited_;
+ gc::accounting::ContinuousSpaceBitmap* const visited_;
};
class ForwardObjectAdapter {
@@ -999,7 +1022,7 @@
// Two pass approach, fix up all classes first, then fix up non class-objects.
// The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> visited_bitmap(
- gc::accounting::ContinuousSpaceBitmap::Create("Pointer array bitmap",
+ gc::accounting::ContinuousSpaceBitmap::Create("Relocate bitmap",
target_base,
image_header.GetImageSize()));
FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(),
@@ -1009,10 +1032,6 @@
app_image,
app_oat);
TimingLogger::ScopedTiming timing("Fixup classes", &logger);
- // Fixup class only touches app image classes, don't need the mutator lock since the space is
- // not yet visible to the GC.
- FixupClassVisitor fixup_class_visitor(boot_image, boot_oat, app_image, app_oat);
- bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_class_visitor);
// Fixup objects may read fields in the boot image, use the mutator lock here for sanity. Though
// it's probably not required.
ScopedObjectAccess soa(Thread::Current());
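
Note: the contents bit leans on the visited bitmap having one bit per 8-byte alignment slot: the array's base-address bit records that the object was visited, while the bit one slot in (base + kObjectAlignment) still lies inside the array, so it can be repurposed to record that the array contents were forwarded. A sketch of that double-duty bookkeeping with an illustrative bitmap type:

    #include <cstdint>
    #include <unordered_set>

    // Illustrative stand-in for ContinuousSpaceBitmap: one conceptual bit per
    // 8-byte-aligned address.
    struct Bitmap {
      std::unordered_set<uintptr_t> bits;
      bool Test(uintptr_t addr) const { return bits.count(addr) != 0; }
      void Set(uintptr_t addr) { bits.insert(addr); }
    };

    constexpr uintptr_t kObjectAlignment = 8;

    // The base bit says "object visited"; the bit one alignment slot in says
    // "array contents updated". The distinction matters because the object may
    // first be visited as a plain long/int array, before anyone knows it is a
    // pointer array whose contents need forwarding.
    bool TryMarkContentsVisited(Bitmap* visited, uintptr_t array) {
      uintptr_t contents_bit = array + kObjectAlignment;  // Still inside the array.
      if (visited->Test(contents_bit)) {
        return false;  // Contents already updated.
      }
      visited->Set(contents_bit);
      return true;     // Caller should now fix up the array contents.
    }
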
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 4615ec9..02e05c5 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -526,17 +526,12 @@
void UnstartedRuntime::UnstartedMathCeil(
Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
- double in = shadow_frame->GetVRegDouble(arg_offset);
- double out;
- // Special cases:
- // 1) NaN, infinity, +0, -0 -> out := in. All are guaranteed by cmath.
- // -1 < in < 0 -> out := -0.
- if (-1.0 < in && in < 0) {
- out = -0.0;
- } else {
- out = ceil(in);
- }
- result->SetD(out);
+ result->SetD(ceil(shadow_frame->GetVRegDouble(arg_offset)));
+}
+
+void UnstartedRuntime::UnstartedMathFloor(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ result->SetD(floor(shadow_frame->GetVRegDouble(arg_offset)));
}
void UnstartedRuntime::UnstartedObjectHashCode(
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index a3ed558..3312701 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -36,6 +36,7 @@
V(SystemGetSecurityManager, "java.lang.SecurityManager java.lang.System.getSecurityManager()") \
V(ThreadLocalGet, "java.lang.Object java.lang.ThreadLocal.get()") \
V(MathCeil, "double java.lang.Math.ceil(double)") \
+ V(MathFloor, "double java.lang.Math.floor(double)") \
V(ObjectHashCode, "int java.lang.Object.hashCode()") \
V(DoubleDoubleToRawLongBits, "long java.lang.Double.doubleToRawLongBits(double)") \
V(DexCacheGetDexNative, "com.android.dex.Dex java.lang.DexCache.getDexNative()") \
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index f40e4e3..1b5b665 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -16,6 +16,9 @@
#include "unstarted_runtime.h"
+#include <limits>
+
+#include "base/casts.h"
#include "class_linker.h"
#include "common_runtime_test.h"
#include "dex_instruction.h"
@@ -154,6 +157,31 @@
length);
CheckObjectArray(dst_handle.Get(), expected_result);
}
+
+ void TestCeilFloor(bool ceil,
+ Thread* self,
+ ShadowFrame* tmp,
+ double const test_pairs[][2],
+ size_t num_pairs)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ for (size_t i = 0; i < num_pairs; ++i) {
+ tmp->SetVRegDouble(0, test_pairs[i][0]);
+
+ JValue result;
+ if (ceil) {
+ UnstartedMathCeil(self, tmp, &result, 0);
+ } else {
+ UnstartedMathFloor(self, tmp, &result, 0);
+ }
+
+ ASSERT_FALSE(self->IsExceptionPending());
+
+ // We want precise results.
+ int64_t result_int64t = bit_cast<int64_t, double>(result.GetD());
+ int64_t expect_int64t = bit_cast<int64_t, double>(test_pairs[i][1]);
+ EXPECT_EQ(expect_int64t, result_int64t) << result.GetD() << " vs " << test_pairs[i][1];
+ }
+ }
};
TEST_F(UnstartedRuntimeTest, MemoryPeekByte) {
@@ -603,5 +631,63 @@
ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
+TEST_F(UnstartedRuntimeTest, Ceil) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ constexpr double nan = std::numeric_limits<double>::quiet_NaN();
+ constexpr double inf = std::numeric_limits<double>::infinity();
+ constexpr double ld1 = static_cast<double>((UINT64_C(1) << 53) - 1);
+ constexpr double ld2 = static_cast<double>(UINT64_C(1) << 55);
+ constexpr double test_pairs[][2] = {
+ { -0.0, -0.0 },
+ { 0.0, 0.0 },
+ { -0.5, -0.0 },
+ { -1.0, -1.0 },
+ { 0.5, 1.0 },
+ { 1.0, 1.0 },
+ { nan, nan },
+ { inf, inf },
+ { -inf, -inf },
+ { ld1, ld1 },
+ { ld2, ld2 }
+ };
+
+ TestCeilFloor(true /* ceil */, self, tmp, test_pairs, arraysize(test_pairs));
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
+TEST_F(UnstartedRuntimeTest, Floor) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ constexpr double nan = std::numeric_limits<double>::quiet_NaN();
+ constexpr double inf = std::numeric_limits<double>::infinity();
+ constexpr double ld1 = static_cast<double>((UINT64_C(1) << 53) - 1);
+ constexpr double ld2 = static_cast<double>(UINT64_C(1) << 55);
+ constexpr double test_pairs[][2] = {
+ { -0.0, -0.0 },
+ { 0.0, 0.0 },
+ { -0.5, -1.0 },
+ { -1.0, -1.0 },
+ { 0.5, 0.0 },
+ { 1.0, 1.0 },
+ { nan, nan },
+ { inf, inf },
+ { -inf, -inf },
+ { ld1, ld1 },
+ { ld2, ld2 }
+ };
+
+ TestCeilFloor(false /* floor */, self, tmp, test_pairs, arraysize(test_pairs));
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
} // namespace interpreter
} // namespace art
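
Note: comparing bit patterns via bit_cast makes these tests strict in two ways a plain double comparison cannot be: -0.0 and 0.0 compare different, and a quiet NaN compares equal to the expected NaN. A standalone illustration with a memcpy-based stand-in for ART's bit_cast:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // memcpy-based stand-in for bit_cast<int64_t, double> from base/casts.h.
    int64_t DoubleBits(double d) {
      int64_t bits;
      static_assert(sizeof(bits) == sizeof(d), "size mismatch");
      std::memcpy(&bits, &d, sizeof(bits));
      return bits;
    }

    int main() {
      assert(-0.0 == 0.0);                          // Equal as doubles...
      assert(DoubleBits(-0.0) != DoubleBits(0.0));  // ...distinct as bit patterns.

      double nan = std::nan("");
      assert(nan != nan);                           // NaN never == NaN...
      assert(DoubleBits(nan) == DoubleBits(nan));   // ...but its bits equal themselves.
      return 0;
    }
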
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 887eee0..1aa789f 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -209,9 +209,21 @@
}
}
+static void ZygoteHooks_startZygoteNoThreadCreation(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ Runtime::Current()->SetZygoteNoThreadSection(true);
+}
+
+static void ZygoteHooks_stopZygoteNoThreadCreation(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ Runtime::Current()->SetZygoteNoThreadSection(false);
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(ZygoteHooks, nativePreFork, "()J"),
NATIVE_METHOD(ZygoteHooks, nativePostForkChild, "(JIZLjava/lang/String;)V"),
+ NATIVE_METHOD(ZygoteHooks, startZygoteNoThreadCreation, "()V"),
+ NATIVE_METHOD(ZygoteHooks, stopZygoteNoThreadCreation, "()V"),
};
void register_dalvik_system_ZygoteHooks(JNIEnv* env) {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 13edd67..a742e81 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -47,6 +47,15 @@
static void Thread_nativeCreate(JNIEnv* env, jclass, jobject java_thread, jlong stack_size,
jboolean daemon) {
+ // There are sections in the zygote that forbid thread creation.
+ Runtime* runtime = Runtime::Current();
+ if (runtime->IsZygote() && runtime->IsZygoteNoThreadSection()) {
+ jclass internal_error = env->FindClass("java/lang/InternalError");
+ CHECK(internal_error != nullptr);
+ env->ThrowNew(internal_error, "Cannot create threads in zygote");
+ return;
+ }
+
Thread::CreateNativeThread(env, java_thread, stack_size, daemon == JNI_TRUE);
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index d3454e8..0a65b6b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -204,6 +204,7 @@
implicit_so_checks_(false),
implicit_suspend_checks_(false),
no_sig_chain_(false),
+ force_native_bridge_(false),
is_native_bridge_loaded_(false),
is_native_debuggable_(false),
zygote_max_failed_boots_(0),
@@ -211,9 +212,11 @@
oat_file_manager_(nullptr),
is_low_memory_mode_(false),
safe_mode_(false),
+ dump_native_stack_on_sig_quit_(true),
pruned_dalvik_cache_(false),
// Initially assume we perceive jank in case the process state is never updated.
- process_state_(kProcessStateJankPerceptible) {
+ process_state_(kProcessStateJankPerceptible),
+ zygote_no_threads_(false) {
CheckAsmSupportOffsetsAndSizes();
std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
interpreter::CheckInterpreterAsmConstants();
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 6a6fdb7..ae25dd1 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -635,6 +635,14 @@
return process_state_ == kProcessStateJankPerceptible;
}
+ void SetZygoteNoThreadSection(bool val) {
+ zygote_no_threads_ = val;
+ }
+
+ bool IsZygoteNoThreadSection() const {
+ return zygote_no_threads_;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -856,6 +864,9 @@
// Whether or not we currently care about pause times.
ProcessState process_state_;
+ // Whether zygote code is in a section that should not start threads.
+ bool zygote_no_threads_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
diff --git a/test/031-class-attributes/expected.txt b/test/031-class-attributes/expected.txt
index 72656ae..de99872 100644
--- a/test/031-class-attributes/expected.txt
+++ b/test/031-class-attributes/expected.txt
@@ -84,7 +84,7 @@
enclosingCon: null
enclosingMeth: null
modifiers: 1
- package: package otherpackage, Unknown, version 0.0
+ package: package otherpackage
declaredClasses: [0]
member classes: [0]
isAnnotation: false
diff --git a/test/068-classloader/expected.txt b/test/068-classloader/expected.txt
index 8725799..ae937e0 100644
--- a/test/068-classloader/expected.txt
+++ b/test/068-classloader/expected.txt
@@ -13,3 +13,4 @@
Got LinkageError on IDI (early)
class Main
Got expected ClassNotFoundException
+Loaded class into null class loader
diff --git a/test/068-classloader/src/Main.java b/test/068-classloader/src/Main.java
index 361e293..b2d843b 100644
--- a/test/068-classloader/src/Main.java
+++ b/test/068-classloader/src/Main.java
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
/**
* Class loader test.
*/
@@ -62,6 +65,28 @@
testSeparation();
testClassForName();
+
+ testNullClassLoader();
+ }
+
+ static void testNullClassLoader() {
+ try {
+ /* this is the "alternate" DEX/Jar file */
+ String DEX_FILE = System.getenv("DEX_LOCATION") + "/068-classloader-ex.jar";
+ /* on Dalvik, this is a DexFile; otherwise, it's null */
+ Class mDexClass = Class.forName("dalvik.system.DexFile");
+ Constructor ctor = mDexClass.getConstructor(new Class[] {String.class});
+ Object mDexFile = ctor.newInstance(DEX_FILE);
+ Method meth = mDexClass.getMethod("loadClass",
+ new Class[] { String.class, ClassLoader.class });
+ Object klass = meth.invoke(mDexFile, "Mutator", null);
+ if (klass == null) {
+ throw new AssertionError("loadClass with null class loader failed");
+ }
+ } catch (Exception e) {
+ System.out.println(e);
+ }
+ System.out.println("Loaded class into null class loader");
}
static void testSeparation() {
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 278f540..28a99de 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -381,7 +381,8 @@
dex2oat_cmdline="true"
mkdir_cmdline="mkdir -p ${DEX_LOCATION}/dalvik-cache/$ISA"
-app_image="--app-image-file=$DEX_LOCATION/oat/$ISA/$TEST_NAME.art"
+# Pick a base that will force the app image to get relocated.
+app_image="--base=0x4000 --app-image-file=$DEX_LOCATION/oat/$ISA/$TEST_NAME.art"
if [ "$PREBUILD" = "y" ]; then
mkdir_cmdline="${mkdir_cmdline} && mkdir -p ${DEX_LOCATION}/oat/$ISA"