Revert "Hash-based DexCache field array."
Reverting to allow rebasing the revert
(https://android-review.googlesource.com/351689)
without causing too many conflicts.
Bug: 30627598
This reverts commit 1aea3510b8dd0c512cec61c91c5ef1f1e5d53d64.
Change-Id: I4af65e9f41c8bad8106c028947eca7c5a9534c53
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 35aa1ee..562f97b 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -133,10 +133,9 @@
<< " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) << " "
<< dex.GetMethodName(dex.GetMethodId(i));
}
- EXPECT_TRUE(dex_cache->StaticArtFieldSize() == dex_cache->NumResolvedFields()
- || dex.NumFieldIds() == dex_cache->NumResolvedFields());
+ EXPECT_EQ(dex.NumFieldIds(), dex_cache->NumResolvedFields());
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
- ArtField* field = dex_cache->GetResolvedField(i, cl->GetImagePointerSize());
+ ArtField* field = cl->GetResolvedField(i, dex_cache);
EXPECT_TRUE(field != nullptr) << "field_idx=" << i
<< " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i))
<< " " << dex.GetFieldName(dex.GetFieldId(i));
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a4a1fd3..65d82ed 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -968,12 +968,11 @@
<< Class::PrettyClass(declaring_class) << " not in class linker table";
}
}
- mirror::FieldDexCacheType* resolved_fields = dex_cache->GetResolvedFields();
+ ArtField** resolved_fields = dex_cache->GetResolvedFields();
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
- auto pair = mirror::DexCache::GetNativePairPtrSize(resolved_fields, i, target_ptr_size_);
- ArtField* field = pair.object;
+ ArtField* field = mirror::DexCache::GetElementPtrSize(resolved_fields, i, target_ptr_size_);
if (field != nullptr && !KeepClass(field->GetDeclaringClass().Ptr())) {
- dex_cache->ClearResolvedField(pair.index, target_ptr_size_);
+ dex_cache->SetResolvedField(i, nullptr, target_ptr_size_);
}
}
// Clean the dex field. It might have been populated during the initialization phase, but
@@ -1597,7 +1596,7 @@
break;
}
case kBinDexCacheArray:
- bin_offset = RoundUp(bin_offset, DexCacheArraysLayout::Alignment(target_ptr_size_));
+ bin_offset = RoundUp(bin_offset, DexCacheArraysLayout::Alignment());
break;
case kBinImTable:
case kBinIMTConflictTable: {
@@ -2237,17 +2236,16 @@
mirror::DexCache::SetElementPtrSize(copy_methods, i, copy, target_ptr_size_);
}
}
- mirror::FieldDexCacheType* orig_fields = orig_dex_cache->GetResolvedFields();
+ ArtField** orig_fields = orig_dex_cache->GetResolvedFields();
if (orig_fields != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedFieldsOffset(),
NativeLocationInImage(orig_fields),
PointerSize::k64);
- mirror::FieldDexCacheType* copy_fields = NativeCopyLocation(orig_fields, orig_dex_cache);
+ ArtField** copy_fields = NativeCopyLocation(orig_fields, orig_dex_cache);
for (size_t i = 0, num = orig_dex_cache->NumResolvedFields(); i != num; ++i) {
- mirror::FieldDexCachePair orig =
- mirror::DexCache::GetNativePairPtrSize(orig_fields, i, target_ptr_size_);
- mirror::FieldDexCachePair copy(NativeLocationInImage(orig.object), orig.index);
- mirror::DexCache::SetNativePairPtrSize(copy_fields, i, copy, target_ptr_size_);
+ ArtField* orig = mirror::DexCache::GetElementPtrSize(orig_fields, i, target_ptr_size_);
+ ArtField* copy = NativeLocationInImage(orig);
+ mirror::DexCache::SetElementPtrSize(copy_fields, i, copy, target_ptr_size_);
}
}
mirror::MethodTypeDexCacheType* orig_method_types = orig_dex_cache->GetResolvedMethodTypes();
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 583008b..664b95a 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1209,8 +1209,9 @@
// TODO: Needs null check.
return false;
}
+ Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, data.object_arg);
- HInstanceFieldGet* iget = CreateInstanceFieldGet(data.field_idx, resolved_method, obj);
+ HInstanceFieldGet* iget = CreateInstanceFieldGet(dex_cache, data.field_idx, obj);
DCHECK_EQ(iget->GetFieldOffset().Uint32Value(), data.field_offset);
DCHECK_EQ(iget->IsVolatile() ? 1u : 0u, data.is_volatile);
invoke_instruction->GetBlock()->InsertInstructionBefore(iget, invoke_instruction);
@@ -1223,9 +1224,10 @@
// TODO: Needs null check.
return false;
}
+ Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, data.object_arg);
HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, data.src_arg);
- HInstanceFieldSet* iput = CreateInstanceFieldSet(data.field_idx, resolved_method, obj, value);
+ HInstanceFieldSet* iput = CreateInstanceFieldSet(dex_cache, data.field_idx, obj, value);
DCHECK_EQ(iput->GetFieldOffset().Uint32Value(), data.field_offset);
DCHECK_EQ(iput->IsVolatile() ? 1u : 0u, data.is_volatile);
invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
@@ -1259,19 +1261,24 @@
[](uint16_t index) { return index != DexFile::kDexNoIndex16; }));
// Create HInstanceFieldSet for each IPUT that stores non-zero data.
+ Handle<mirror::DexCache> dex_cache;
HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, /* this */ 0u);
bool needs_constructor_barrier = false;
for (size_t i = 0; i != number_of_iputs; ++i) {
HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, iput_args[i]);
if (!value->IsConstant() || !value->AsConstant()->IsZeroBitPattern()) {
+ if (dex_cache.GetReference() == nullptr) {
+ dex_cache = handles_->NewHandle(resolved_method->GetDexCache());
+ }
uint16_t field_index = iput_field_indexes[i];
- bool is_final;
- HInstanceFieldSet* iput =
- CreateInstanceFieldSet(field_index, resolved_method, obj, value, &is_final);
+ HInstanceFieldSet* iput = CreateInstanceFieldSet(dex_cache, field_index, obj, value);
invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
// Check whether the field is final. If it is, we need to add a barrier.
- if (is_final) {
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
+ DCHECK(resolved_field != nullptr);
+ if (resolved_field->IsFinal()) {
needs_constructor_barrier = true;
}
}
@@ -1290,13 +1297,12 @@
return true;
}
-HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index,
- ArtMethod* referrer,
+HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
+ uint32_t field_index,
HInstruction* obj)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ArtField* resolved_field =
- class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet(
obj,
@@ -1306,13 +1312,12 @@
resolved_field->IsVolatile(),
field_index,
resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
- *referrer->GetDexFile(),
+ *dex_cache->GetDexFile(),
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
/* dex_pc */ 0);
if (iget->GetType() == Primitive::kPrimNot) {
// Use the same dex_cache that we used for field lookup as the hint_dex_cache.
- Handle<mirror::DexCache> dex_cache = handles_->NewHandle(referrer->GetDexCache());
ReferenceTypePropagation rtp(graph_,
outer_compilation_unit_.GetClassLoader(),
dex_cache,
@@ -1323,21 +1328,14 @@
return iget;
}
-HInstanceFieldSet* HInliner::CreateInstanceFieldSet(uint32_t field_index,
- ArtMethod* referrer,
+HInstanceFieldSet* HInliner::CreateInstanceFieldSet(Handle<mirror::DexCache> dex_cache,
+ uint32_t field_index,
HInstruction* obj,
- HInstruction* value,
- bool* is_final)
+ HInstruction* value)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ArtField* resolved_field =
- class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
- if (is_final != nullptr) {
- // This information is needed only for constructors.
- DCHECK(referrer->IsConstructor());
- *is_final = resolved_field->IsFinal();
- }
HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet(
obj,
value,
@@ -1347,7 +1345,7 @@
resolved_field->IsVolatile(),
field_index,
resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
- *referrer->GetDexFile(),
+ *dex_cache->GetDexFile(),
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
/* dex_pc */ 0);
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index a032042..8f8b268 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -107,15 +107,14 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// Create a new HInstanceFieldGet.
- HInstanceFieldGet* CreateInstanceFieldGet(uint32_t field_index,
- ArtMethod* referrer,
+ HInstanceFieldGet* CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
+ uint32_t field_index,
HInstruction* obj);
// Create a new HInstanceFieldSet.
- HInstanceFieldSet* CreateInstanceFieldSet(uint32_t field_index,
- ArtMethod* referrer,
+ HInstanceFieldSet* CreateInstanceFieldSet(Handle<mirror::DexCache> dex_cache,
+ uint32_t field_index,
HInstruction* obj,
- HInstruction* value,
- bool* is_final = nullptr);
+ HInstruction* value);
// Try inlining the invoke instruction using inline caches.
bool TryInlineFromInlineCache(
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index e767023..becb827 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -2210,13 +2210,13 @@
ScopedIndentation indent2(&state->vios_);
auto* resolved_fields = dex_cache->GetResolvedFields();
for (size_t i = 0, length = dex_cache->NumResolvedFields(); i < length; ++i) {
- auto* elem = mirror::DexCache::GetNativePairPtrSize(
- resolved_fields, i, image_pointer_size).object;
+ auto* elem = mirror::DexCache::GetElementPtrSize(
+ resolved_fields, i, image_pointer_size);
size_t run = 0;
for (size_t j = i + 1;
- j != length &&
- elem == mirror::DexCache::GetNativePairPtrSize(
- resolved_fields, j, image_pointer_size).object;
+ j != length && elem == mirror::DexCache::GetElementPtrSize(resolved_fields,
+ j,
+ image_pointer_size);
++j) {
++run;
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index dfaae7d..18a6670 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -534,18 +534,17 @@
mirror::DexCache::SetElementPtrSize(copy_methods, j, copy, pointer_size);
}
}
- mirror::FieldDexCacheType* orig_fields = orig_dex_cache->GetResolvedFields();
- mirror::FieldDexCacheType* relocated_fields = RelocatedAddressOfPointer(orig_fields);
+ ArtField** orig_fields = orig_dex_cache->GetResolvedFields();
+ ArtField** relocated_fields = RelocatedAddressOfPointer(orig_fields);
copy_dex_cache->SetField64<false>(
mirror::DexCache::ResolvedFieldsOffset(),
static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_fields)));
if (orig_fields != nullptr) {
- mirror::FieldDexCacheType* copy_fields = RelocatedCopyOf(orig_fields);
+ ArtField** copy_fields = RelocatedCopyOf(orig_fields);
for (size_t j = 0, num = orig_dex_cache->NumResolvedFields(); j != num; ++j) {
- mirror::FieldDexCachePair orig =
- mirror::DexCache::GetNativePairPtrSize(orig_fields, j, pointer_size);
- mirror::FieldDexCachePair copy(RelocatedAddressOfPointer(orig.object), orig.index);
- mirror::DexCache::SetNativePairPtrSize(copy_fields, j, copy, pointer_size);
+ ArtField* orig = mirror::DexCache::GetElementPtrSize(orig_fields, j, pointer_size);
+ ArtField* copy = RelocatedAddressOfPointer(orig);
+ mirror::DexCache::SetElementPtrSize(copy_fields, j, copy, pointer_size);
}
}
mirror::MethodTypeDexCacheType* orig_method_types = orig_dex_cache->GetResolvedMethodTypes();
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 5aede38..db43319 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -15,7 +15,6 @@
*/
#include <algorithm>
-#include <cstddef>
#include <iomanip>
#include <numeric>
@@ -28,7 +27,7 @@
namespace art {
-constexpr size_t kMemoryToolRedZoneBytes = 8;
+static constexpr size_t kMemoryToolRedZoneBytes = 8;
constexpr size_t Arena::kDefaultSize;
template <bool kCount>
@@ -169,75 +168,23 @@
Arena::Arena() : bytes_allocated_(0), next_(nullptr) {
}
-class MallocArena FINAL : public Arena {
- public:
- explicit MallocArena(size_t size = Arena::kDefaultSize);
- virtual ~MallocArena();
- private:
- static constexpr size_t RequiredOverallocation() {
- return (alignof(std::max_align_t) < ArenaAllocator::kArenaAlignment)
- ? ArenaAllocator::kArenaAlignment - alignof(std::max_align_t)
- : 0u;
- }
-
- uint8_t* unaligned_memory_;
-};
-
MallocArena::MallocArena(size_t size) {
- // We need to guarantee kArenaAlignment aligned allocation for the new arena.
- // TODO: Use std::aligned_alloc() when it becomes available with C++17.
- constexpr size_t overallocation = RequiredOverallocation();
- unaligned_memory_ = reinterpret_cast<uint8_t*>(calloc(1, size + overallocation));
- CHECK(unaligned_memory_ != nullptr); // Abort on OOM.
- DCHECK_ALIGNED(unaligned_memory_, alignof(std::max_align_t));
- if (overallocation == 0u) {
- memory_ = unaligned_memory_;
- } else {
- memory_ = AlignUp(unaligned_memory_, ArenaAllocator::kArenaAlignment);
- if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
- size_t head = memory_ - unaligned_memory_;
- size_t tail = overallocation - head;
- MEMORY_TOOL_MAKE_NOACCESS(unaligned_memory_, head);
- MEMORY_TOOL_MAKE_NOACCESS(memory_ + size, tail);
- }
- }
- DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
+ memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
+ CHECK(memory_ != nullptr); // Abort on OOM.
+ DCHECK_ALIGNED(memory_, ArenaAllocator::kAlignment);
size_ = size;
}
MallocArena::~MallocArena() {
- constexpr size_t overallocation = RequiredOverallocation();
- if (overallocation != 0u && UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
- size_t head = memory_ - unaligned_memory_;
- size_t tail = overallocation - head;
- MEMORY_TOOL_MAKE_UNDEFINED(unaligned_memory_, head);
- MEMORY_TOOL_MAKE_UNDEFINED(memory_ + size_, tail);
- }
- free(reinterpret_cast<void*>(unaligned_memory_));
+ free(reinterpret_cast<void*>(memory_));
}
-class MemMapArena FINAL : public Arena {
- public:
- MemMapArena(size_t size, bool low_4gb, const char* name);
- virtual ~MemMapArena();
- void Release() OVERRIDE;
-
- private:
- std::unique_ptr<MemMap> map_;
-};
-
MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
- // Round up to a full page as that's the smallest unit of allocation for mmap()
- // and we want to be able to use all memory that we actually allocate.
- size = RoundUp(size, kPageSize);
std::string error_msg;
map_.reset(MemMap::MapAnonymous(
name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
CHECK(map_.get() != nullptr) << error_msg;
memory_ = map_->Begin();
- static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
- "Arena should not need stronger alignment than kPageSize.");
- DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
size_ = map_->Size();
}
@@ -385,7 +332,20 @@
ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
uint8_t* ret;
if (UNLIKELY(rounded_bytes > static_cast<size_t>(end_ - ptr_))) {
- ret = AllocFromNewArenaWithMemoryTool(rounded_bytes);
+ ret = AllocFromNewArena(rounded_bytes);
+ uint8_t* noaccess_begin = ret + bytes;
+ uint8_t* noaccess_end;
+ if (ret == arena_head_->Begin()) {
+ DCHECK(ptr_ - rounded_bytes == ret);
+ noaccess_end = end_;
+ } else {
+ // We're still using the old arena but `ret` comes from a new one just after it.
+ DCHECK(arena_head_->next_ != nullptr);
+ DCHECK(ret == arena_head_->next_->Begin());
+ DCHECK_EQ(rounded_bytes, arena_head_->next_->GetBytesAllocated());
+ noaccess_end = arena_head_->next_->End();
+ }
+ MEMORY_TOOL_MAKE_NOACCESS(noaccess_begin, noaccess_end - noaccess_begin);
} else {
ret = ptr_;
ptr_ += rounded_bytes;
@@ -396,30 +356,6 @@
return ret;
}
-void* ArenaAllocator::AllocWithMemoryToolAlign16(size_t bytes, ArenaAllocKind kind) {
- // We mark all memory for a newly retrieved arena as inaccessible and then
- // mark only the actually allocated memory as defined. That leaves red zones
- // and padding between allocations marked as inaccessible.
- size_t rounded_bytes = bytes + kMemoryToolRedZoneBytes;
- DCHECK_ALIGNED(rounded_bytes, 8); // `bytes` is 16-byte aligned, red zone is 8-byte aligned.
- uintptr_t padding =
- ((reinterpret_cast<uintptr_t>(ptr_) + 15u) & 15u) - reinterpret_cast<uintptr_t>(ptr_);
- ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
- uint8_t* ret;
- if (UNLIKELY(padding + rounded_bytes > static_cast<size_t>(end_ - ptr_))) {
- static_assert(kArenaAlignment >= 16, "Expecting sufficient alignment for new Arena.");
- ret = AllocFromNewArenaWithMemoryTool(rounded_bytes);
- } else {
- ptr_ += padding; // Leave padding inaccessible.
- ret = ptr_;
- ptr_ += rounded_bytes;
- }
- MEMORY_TOOL_MAKE_DEFINED(ret, bytes);
- // Check that the memory is already zeroed out.
- DCHECK(std::all_of(ret, ret + bytes, [](uint8_t val) { return val == 0u; }));
- return ret;
-}
-
ArenaAllocator::~ArenaAllocator() {
// Reclaim all the arenas by giving them back to the thread pool.
UpdateBytesAllocated();
@@ -450,24 +386,6 @@
return new_arena->Begin();
}
-uint8_t* ArenaAllocator::AllocFromNewArenaWithMemoryTool(size_t bytes) {
- uint8_t* ret = AllocFromNewArena(bytes);
- uint8_t* noaccess_begin = ret + bytes;
- uint8_t* noaccess_end;
- if (ret == arena_head_->Begin()) {
- DCHECK(ptr_ - bytes == ret);
- noaccess_end = end_;
- } else {
- // We're still using the old arena but `ret` comes from a new one just after it.
- DCHECK(arena_head_->next_ != nullptr);
- DCHECK(ret == arena_head_->next_->Begin());
- DCHECK_EQ(bytes, arena_head_->next_->GetBytesAllocated());
- noaccess_end = arena_head_->next_->End();
- }
- MEMORY_TOOL_MAKE_NOACCESS(noaccess_begin, noaccess_end - noaccess_begin);
- return ret;
-}
-
bool ArenaAllocator::Contains(const void* ptr) const {
if (ptr >= begin_ && ptr < end_) {
return true;
@@ -480,9 +398,7 @@
return false;
}
-MemStats::MemStats(const char* name,
- const ArenaAllocatorStats* stats,
- const Arena* first_arena,
+MemStats::MemStats(const char* name, const ArenaAllocatorStats* stats, const Arena* first_arena,
ssize_t lost_bytes_adjustment)
: name_(name),
stats_(stats),
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index c39429c..f92fbea 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -34,6 +34,7 @@
class ArenaAllocator;
class ArenaStack;
class ScopedArenaAllocator;
+class MemMap;
class MemStats;
template <typename T>
@@ -243,6 +244,22 @@
DISALLOW_COPY_AND_ASSIGN(Arena);
};
+class MallocArena FINAL : public Arena {
+ public:
+ explicit MallocArena(size_t size = Arena::kDefaultSize);
+ virtual ~MallocArena();
+};
+
+class MemMapArena FINAL : public Arena {
+ public:
+ MemMapArena(size_t size, bool low_4gb, const char* name);
+ virtual ~MemMapArena();
+ void Release() OVERRIDE;
+
+ private:
+ std::unique_ptr<MemMap> map_;
+};
+
class ArenaPool {
public:
explicit ArenaPool(bool use_malloc = true,
@@ -302,31 +319,8 @@
return ret;
}
- // Returns zeroed memory.
- void* AllocAlign16(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
- // It is an error to request 16-byte aligned allocation of unaligned size.
- DCHECK_ALIGNED(bytes, 16);
- if (UNLIKELY(IsRunningOnMemoryTool())) {
- return AllocWithMemoryToolAlign16(bytes, kind);
- }
- uintptr_t padding =
- ((reinterpret_cast<uintptr_t>(ptr_) + 15u) & 15u) - reinterpret_cast<uintptr_t>(ptr_);
- ArenaAllocatorStats::RecordAlloc(bytes, kind);
- if (UNLIKELY(padding + bytes > static_cast<size_t>(end_ - ptr_))) {
- static_assert(kArenaAlignment >= 16, "Expecting sufficient alignment for new Arena.");
- return AllocFromNewArena(bytes);
- }
- ptr_ += padding;
- uint8_t* ret = ptr_;
- DCHECK_ALIGNED(ret, 16);
- ptr_ += bytes;
- return ret;
- }
-
// Realloc never frees the input pointer, it is the caller's job to do this if necessary.
- void* Realloc(void* ptr,
- size_t ptr_size,
- size_t new_size,
+ void* Realloc(void* ptr, size_t ptr_size, size_t new_size,
ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
DCHECK_GE(new_size, ptr_size);
DCHECK_EQ(ptr == nullptr, ptr_size == 0u);
@@ -377,17 +371,12 @@
bool Contains(const void* ptr) const;
- // The alignment guaranteed for individual allocations.
- static constexpr size_t kAlignment = 8u;
-
- // The alignment required for the whole Arena rather than individual allocations.
- static constexpr size_t kArenaAlignment = 16u;
+ static constexpr size_t kAlignment = 8;
private:
void* AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind);
- void* AllocWithMemoryToolAlign16(size_t bytes, ArenaAllocKind kind);
uint8_t* AllocFromNewArena(size_t bytes);
- uint8_t* AllocFromNewArenaWithMemoryTool(size_t bytes);
+
void UpdateBytesAllocated();
@@ -407,9 +396,7 @@
class MemStats {
public:
- MemStats(const char* name,
- const ArenaAllocatorStats* stats,
- const Arena* first_arena,
+ MemStats(const char* name, const ArenaAllocatorStats* stats, const Arena* first_arena,
ssize_t lost_bytes_adjustment = 0);
void Dump(std::ostream& os) const;
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index 1a0eb5e..55044b3 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -39,6 +39,8 @@
kFree,
};
+static constexpr size_t kArenaAlignment = 8;
+
// Holds a list of Arenas for use by ScopedArenaAllocator stack.
// The memory is returned to the ArenaPool when the ArenaStack is destroyed.
class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryTool {
@@ -65,9 +67,6 @@
return *(reinterpret_cast<ArenaFreeTag*>(ptr) - 1);
}
- // The alignment guaranteed for individual allocations.
- static constexpr size_t kAlignment = 8u;
-
private:
struct Peak;
struct Current;
@@ -90,8 +89,8 @@
if (UNLIKELY(IsRunningOnMemoryTool())) {
return AllocWithMemoryTool(bytes, kind);
}
- // Add kAlignment for the free or used tag. Required to preserve alignment.
- size_t rounded_bytes = RoundUp(bytes + (kIsDebugBuild ? kAlignment : 0u), kAlignment);
+ // Add kArenaAlignment for the free or used tag. Required to preserve alignment.
+ size_t rounded_bytes = RoundUp(bytes + (kIsDebugBuild ? kArenaAlignment : 0u), kArenaAlignment);
uint8_t* ptr = top_ptr_;
if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
ptr = AllocateFromNextArena(rounded_bytes);
@@ -99,7 +98,7 @@
CurrentStats()->RecordAlloc(bytes, kind);
top_ptr_ = ptr + rounded_bytes;
if (kIsDebugBuild) {
- ptr += kAlignment;
+ ptr += kArenaAlignment;
ArenaTagForAllocation(ptr) = ArenaFreeTag::kUsed;
}
return ptr;
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 9ddc6cf..bd510ca 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -161,15 +161,9 @@
return resolved_method;
}
-inline ArtField* ClassLinker::LookupResolvedField(uint32_t field_idx,
- ArtMethod* referrer,
- bool is_static) {
- ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
- ArtField* field = dex_cache->GetResolvedField(field_idx, image_pointer_size_);
- if (field == nullptr) {
- field = LookupResolvedField(field_idx, dex_cache, referrer->GetClassLoader(), is_static);
- }
- return field;
+inline ArtField* ClassLinker::GetResolvedField(uint32_t field_idx,
+ ObjPtr<mirror::DexCache> dex_cache) {
+ return dex_cache->GetResolvedField(field_idx, image_pointer_size_);
}
inline ArtField* ClassLinker::ResolveField(uint32_t field_idx,
@@ -177,8 +171,7 @@
bool is_static) {
Thread::PoisonObjectPointersIfDebug();
ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- ArtField* resolved_field =
- referrer->GetDexCache()->GetResolvedField(field_idx, image_pointer_size_);
+ ArtField* resolved_field = GetResolvedField(field_idx, referrer->GetDexCache());
if (UNLIKELY(resolved_field == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index ab2b395..b611aa2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1282,10 +1282,7 @@
num_types = dex_file->NumTypeIds();
}
const size_t num_methods = dex_file->NumMethodIds();
- size_t num_fields = mirror::DexCache::kDexCacheFieldCacheSize;
- if (dex_file->NumFieldIds() < num_fields) {
- num_fields = dex_file->NumFieldIds();
- }
+ const size_t num_fields = dex_file->NumFieldIds();
size_t num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
if (dex_file->NumProtoIds() < num_method_types) {
num_method_types = dex_file->NumProtoIds();
@@ -1329,22 +1326,17 @@
dex_cache->SetResolvedMethods(methods);
}
if (num_fields != 0u) {
- mirror::FieldDexCacheType* const image_resolved_fields = dex_cache->GetResolvedFields();
- mirror::FieldDexCacheType* const fields =
- reinterpret_cast<mirror::FieldDexCacheType*>(raw_arrays + layout.FieldsOffset());
- for (size_t j = 0; j < num_fields; ++j) {
- DCHECK_EQ(mirror::DexCache::GetNativePairPtrSize(fields, j, image_pointer_size_).index,
- 0u);
- DCHECK(mirror::DexCache::GetNativePairPtrSize(fields, j, image_pointer_size_).object ==
- nullptr);
- mirror::DexCache::SetNativePairPtrSize(
- fields,
- j,
- mirror::DexCache::GetNativePairPtrSize(image_resolved_fields,
- j,
- image_pointer_size_),
- image_pointer_size_);
+ ArtField** const fields =
+ reinterpret_cast<ArtField**>(raw_arrays + layout.FieldsOffset());
+ for (size_t j = 0; kIsDebugBuild && j < num_fields; ++j) {
+ DCHECK(fields[j] == nullptr);
}
+ CopyNonNull(dex_cache->GetResolvedFields(),
+ num_fields,
+ fields,
+ [] (const ArtField* field) {
+ return field == nullptr;
+ });
dex_cache->SetResolvedFields(fields);
}
if (num_method_types != 0u) {
@@ -8268,43 +8260,6 @@
return resolved;
}
-ArtField* ClassLinker::LookupResolvedField(uint32_t field_idx,
- ObjPtr<mirror::DexCache> dex_cache,
- ObjPtr<mirror::ClassLoader> class_loader,
- bool is_static) {
- const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
- ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(field_id.class_idx_);
- if (klass == nullptr) {
- klass = LookupResolvedType(dex_file, field_id.class_idx_, dex_cache, class_loader);
- }
- if (klass == nullptr) {
- // The class has not been resolved yet, so the field is also unresolved.
- return nullptr;
- }
- DCHECK(klass->IsResolved());
- Thread* self = is_static ? Thread::Current() : nullptr;
-
- // First try to find a field declared directly by `klass` by the field index.
- ArtField* resolved_field = is_static
- ? mirror::Class::FindStaticField(self, klass, dex_cache, field_idx)
- : klass->FindInstanceField(dex_cache, field_idx);
-
- if (resolved_field == nullptr) {
- // If not found in `klass` by field index, search the class hierarchy using the name and type.
- const char* name = dex_file.GetFieldName(field_id);
- const char* type = dex_file.GetFieldTypeDescriptor(field_id);
- resolved_field = is_static
- ? mirror::Class::FindStaticField(self, klass, name, type)
- : klass->FindInstanceField(name, type);
- }
-
- if (resolved_field != nullptr) {
- dex_cache->SetResolvedField(field_idx, resolved_field, image_pointer_size_);
- }
- return resolved_field;
-}
-
ArtField* ClassLinker::ResolveField(const DexFile& dex_file,
uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
@@ -8365,8 +8320,9 @@
return nullptr;
}
- StringPiece name(dex_file.GetFieldName(field_id));
- StringPiece type(dex_file.GetFieldTypeDescriptor(field_id));
+ StringPiece name(dex_file.StringDataByIdx(field_id.name_idx_));
+ StringPiece type(dex_file.StringDataByIdx(
+ dex_file.GetTypeId(field_id.type_idx_).descriptor_idx_));
resolved = mirror::Class::FindField(self, klass, name, type);
if (resolved != nullptr) {
dex_cache->SetResolvedField(field_idx, resolved, image_pointer_size_);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 6254acb..a5d26c7 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -333,7 +333,7 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
- ArtField* LookupResolvedField(uint32_t field_idx, ArtMethod* referrer, bool is_static)
+ ArtField* GetResolvedField(uint32_t field_idx, ObjPtr<mirror::DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -842,13 +842,6 @@
REQUIRES(!Locks::classlinker_classes_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Find a field by its field index.
- ArtField* LookupResolvedField(uint32_t field_idx,
- ObjPtr<mirror::DexCache> dex_cache,
- ObjPtr<mirror::ClassLoader> class_loader,
- bool is_static)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
void RegisterDexFileLocked(const DexFile& dex_file,
ObjPtr<mirror::DexCache> dex_cache,
ObjPtr<mirror::ClassLoader> class_loader)
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 568f8d6..010ef11 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1259,18 +1259,17 @@
}
}
}
- mirror::FieldDexCacheType* fields = dex_cache->GetResolvedFields();
+ ArtField** fields = dex_cache->GetResolvedFields();
if (fields != nullptr) {
- mirror::FieldDexCacheType* new_fields = fixup_adapter.ForwardObject(fields);
+ ArtField** new_fields = fixup_adapter.ForwardObject(fields);
if (fields != new_fields) {
dex_cache->SetResolvedFields(new_fields);
}
for (size_t j = 0, num = dex_cache->NumResolvedFields(); j != num; ++j) {
- mirror::FieldDexCachePair orig =
- mirror::DexCache::GetNativePairPtrSize(new_fields, j, pointer_size);
- mirror::FieldDexCachePair copy(fixup_adapter.ForwardObject(orig.object), orig.index);
- if (orig.object != copy.object) {
- mirror::DexCache::SetNativePairPtrSize(new_fields, j, copy, pointer_size);
+ ArtField* orig = mirror::DexCache::GetElementPtrSize(new_fields, j, pointer_size);
+ ArtField* copy = fixup_adapter.ForwardObject(orig);
+ if (orig != copy) {
+ mirror::DexCache::SetElementPtrSize(new_fields, j, copy, pointer_size);
}
}
}
diff --git a/runtime/image.cc b/runtime/image.cc
index 5fbb7a6..88f28f3 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -25,7 +25,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '1', '\0' }; // hash-based DexCache fields
+const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '0', '\0' }; // Integer.valueOf intrinsic
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/linear_alloc.cc b/runtime/linear_alloc.cc
index e9db9b8..f91b0ed 100644
--- a/runtime/linear_alloc.cc
+++ b/runtime/linear_alloc.cc
@@ -33,11 +33,6 @@
return allocator_.Alloc(size);
}
-void* LinearAlloc::AllocAlign16(Thread* self, size_t size) {
- MutexLock mu(self, lock_);
- return allocator_.AllocAlign16(size);
-}
-
size_t LinearAlloc::GetUsedMemory() const {
MutexLock mu(Thread::Current(), lock_);
return allocator_.BytesUsed();
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index 384b2e3..df7f17d 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -29,7 +29,6 @@
explicit LinearAlloc(ArenaPool* pool);
void* Alloc(Thread* self, size_t size) REQUIRES(!lock_);
- void* AllocAlign16(Thread* self, size_t size) REQUIRES(!lock_);
// Realloc never frees the input pointer, it is the caller's job to do this if necessary.
void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) REQUIRES(!lock_);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 582ecb2..29bf6a0 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -24,7 +24,6 @@
#include "base/casts.h"
#include "base/enums.h"
#include "base/logging.h"
-#include "dex_file.h"
#include "gc_root.h"
#include "mirror/class.h"
#include "mirror/call_site.h"
@@ -37,15 +36,6 @@
namespace art {
namespace mirror {
-template <typename T>
-inline void NativeDexCachePair<T>::Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache,
- PointerSize pointer_size) {
- NativeDexCachePair<T> first_elem;
- first_elem.object = nullptr;
- first_elem.index = InvalidIndexForSlot(0);
- DexCache::SetNativePairPtrSize(dex_cache, 0, first_elem, pointer_size);
-}
-
inline uint32_t DexCache::ClassSize(PointerSize pointer_size) {
uint32_t vtable_entries = Object::kVTableLength + 5;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
@@ -174,36 +164,20 @@
}
}
-inline uint32_t DexCache::FieldSlotIndex(uint32_t field_idx) {
- DCHECK_LT(field_idx, GetDexFile()->NumFieldIds());
- const uint32_t slot_idx = field_idx % kDexCacheFieldCacheSize;
- DCHECK_LT(slot_idx, NumResolvedFields());
- return slot_idx;
-}
-
inline ArtField* DexCache::GetResolvedField(uint32_t field_idx, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
- auto pair = GetNativePairPtrSize(GetResolvedFields(), FieldSlotIndex(field_idx), ptr_size);
- return pair.GetObjectForIndex(field_idx);
+ DCHECK_LT(field_idx, NumResolvedFields()); // NOTE: Unchecked, i.e. not throwing AIOOB.
+ ArtField* field = GetElementPtrSize(GetResolvedFields(), field_idx, ptr_size);
+ if (field == nullptr || field->GetDeclaringClass()->IsErroneous()) {
+ return nullptr;
+ }
+ return field;
}
inline void DexCache::SetResolvedField(uint32_t field_idx, ArtField* field, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
- DCHECK(field != nullptr);
- FieldDexCachePair pair(field, field_idx);
- SetNativePairPtrSize(GetResolvedFields(), FieldSlotIndex(field_idx), pair, ptr_size);
-}
-
-inline void DexCache::ClearResolvedField(uint32_t field_idx, PointerSize ptr_size) {
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
- uint32_t slot_idx = FieldSlotIndex(field_idx);
- auto* resolved_fields = GetResolvedFields();
- // This is racy but should only be called from the single-threaded ImageWriter.
- DCHECK(Runtime::Current()->IsAotCompiler());
- if (GetNativePairPtrSize(resolved_fields, slot_idx, ptr_size).index == field_idx) {
- FieldDexCachePair cleared(nullptr, FieldDexCachePair::InvalidIndexForSlot(slot_idx));
- SetNativePairPtrSize(resolved_fields, slot_idx, cleared, ptr_size);
- }
+ DCHECK_LT(field_idx, NumResolvedFields()); // NOTE: Unchecked, i.e. not throwing AIOOB.
+ SetElementPtrSize(GetResolvedFields(), field_idx, field, ptr_size);
}
inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size) {
@@ -251,40 +225,6 @@
}
}
-template <typename T>
-NativeDexCachePair<T> DexCache::GetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
- size_t idx,
- PointerSize ptr_size) {
- if (ptr_size == PointerSize::k64) {
- auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
- ConversionPair64 value = AtomicLoadRelaxed16B(&array[idx]);
- return NativeDexCachePair<T>(reinterpret_cast64<T*>(value.first),
- dchecked_integral_cast<size_t>(value.second));
- } else {
- auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
- ConversionPair32 value = array[idx].load(std::memory_order_relaxed);
- return NativeDexCachePair<T>(reinterpret_cast<T*>(value.first), value.second);
- }
-}
-
-template <typename T>
-void DexCache::SetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
- size_t idx,
- NativeDexCachePair<T> pair,
- PointerSize ptr_size) {
- if (ptr_size == PointerSize::k64) {
- auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
- ConversionPair64 v(reinterpret_cast64<uint64_t>(pair.object), pair.index);
- AtomicStoreRelease16B(&array[idx], v);
- } else {
- auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
- ConversionPair32 v(
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(pair.object)),
- dchecked_integral_cast<uint32_t>(pair.index));
- array[idx].store(v, std::memory_order_release);
- }
-}
-
template <typename T,
ReadBarrierOption kReadBarrierOption,
typename Visitor>
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index c95d92e..1b8b391 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -52,12 +52,8 @@
dex_file->NumTypeIds() != 0u ||
dex_file->NumMethodIds() != 0u ||
dex_file->NumFieldIds() != 0u) {
- static_assert(ArenaAllocator::kAlignment == 8, "Expecting arena alignment of 8.");
- DCHECK(layout.Alignment() == 8u || layout.Alignment() == 16u);
// Zero-initialized.
- raw_arrays = (layout.Alignment() == 16u)
- ? reinterpret_cast<uint8_t*>(linear_alloc->AllocAlign16(self, layout.Size()))
- : reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
+ raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
}
mirror::StringDexCacheType* strings = (dex_file->NumStringIds() == 0u) ? nullptr :
@@ -66,21 +62,17 @@
reinterpret_cast<mirror::TypeDexCacheType*>(raw_arrays + layout.TypesOffset());
ArtMethod** methods = (dex_file->NumMethodIds() == 0u) ? nullptr :
reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset());
- mirror::FieldDexCacheType* fields = (dex_file->NumFieldIds() == 0u) ? nullptr :
- reinterpret_cast<mirror::FieldDexCacheType*>(raw_arrays + layout.FieldsOffset());
+ ArtField** fields = (dex_file->NumFieldIds() == 0u) ? nullptr :
+ reinterpret_cast<ArtField**>(raw_arrays + layout.FieldsOffset());
- size_t num_strings = kDexCacheStringCacheSize;
+ size_t num_strings = mirror::DexCache::kDexCacheStringCacheSize;
if (dex_file->NumStringIds() < num_strings) {
num_strings = dex_file->NumStringIds();
}
- size_t num_types = kDexCacheTypeCacheSize;
+ size_t num_types = mirror::DexCache::kDexCacheTypeCacheSize;
if (dex_file->NumTypeIds() < num_types) {
num_types = dex_file->NumTypeIds();
}
- size_t num_fields = kDexCacheFieldCacheSize;
- if (dex_file->NumFieldIds() < num_fields) {
- num_fields = dex_file->NumFieldIds();
- }
// Note that we allocate the method type dex caches regardless of this flag,
// and we make sure here that they're not used by the runtime. This is in the
@@ -88,17 +80,17 @@
//
// If this needs to be mitigated in a production system running this code,
// DexCache::kDexCacheMethodTypeCacheSize can be set to zero.
- MethodTypeDexCacheType* method_types = nullptr;
+ mirror::MethodTypeDexCacheType* method_types = nullptr;
size_t num_method_types = 0;
- if (dex_file->NumProtoIds() < kDexCacheMethodTypeCacheSize) {
+ if (dex_file->NumProtoIds() < mirror::DexCache::kDexCacheMethodTypeCacheSize) {
num_method_types = dex_file->NumProtoIds();
} else {
- num_method_types = kDexCacheMethodTypeCacheSize;
+ num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
}
if (num_method_types > 0) {
- method_types = reinterpret_cast<MethodTypeDexCacheType*>(
+ method_types = reinterpret_cast<mirror::MethodTypeDexCacheType*>(
raw_arrays + layout.MethodTypesOffset());
}
@@ -106,13 +98,13 @@
? nullptr
: reinterpret_cast<GcRoot<mirror::CallSite>*>(raw_arrays + layout.CallSitesOffset());
- DCHECK_ALIGNED(raw_arrays, alignof(StringDexCacheType)) <<
+ DCHECK_ALIGNED(raw_arrays, alignof(mirror::StringDexCacheType)) <<
"Expected raw_arrays to align to StringDexCacheType.";
- DCHECK_ALIGNED(layout.StringsOffset(), alignof(StringDexCacheType)) <<
+ DCHECK_ALIGNED(layout.StringsOffset(), alignof(mirror::StringDexCacheType)) <<
"Expected StringsOffset() to align to StringDexCacheType.";
- DCHECK_ALIGNED(strings, alignof(StringDexCacheType)) <<
+ DCHECK_ALIGNED(strings, alignof(mirror::StringDexCacheType)) <<
"Expected strings to align to StringDexCacheType.";
- static_assert(alignof(StringDexCacheType) == 8u,
+ static_assert(alignof(mirror::StringDexCacheType) == 8u,
"Expected StringDexCacheType to have align of 8.");
if (kIsDebugBuild) {
// Sanity check to make sure all the dex cache arrays are empty. b/28992179
@@ -125,11 +117,10 @@
CHECK(types[i].load(std::memory_order_relaxed).object.IsNull());
}
for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
- CHECK(GetElementPtrSize(methods, i, image_pointer_size) == nullptr);
+ CHECK(mirror::DexCache::GetElementPtrSize(methods, i, image_pointer_size) == nullptr);
}
- for (size_t i = 0; i < num_fields; ++i) {
- CHECK_EQ(GetNativePairPtrSize(fields, i, image_pointer_size).index, 0u);
- CHECK(GetNativePairPtrSize(fields, i, image_pointer_size).object == nullptr);
+ for (size_t i = 0; i < dex_file->NumFieldIds(); ++i) {
+ CHECK(mirror::DexCache::GetElementPtrSize(fields, i, image_pointer_size) == nullptr);
}
for (size_t i = 0; i < num_method_types; ++i) {
CHECK_EQ(method_types[i].load(std::memory_order_relaxed).index, 0u);
@@ -145,9 +136,6 @@
if (types != nullptr) {
mirror::TypeDexCachePair::Initialize(types);
}
- if (fields != nullptr) {
- mirror::FieldDexCachePair::Initialize(fields, image_pointer_size);
- }
if (method_types != nullptr) {
mirror::MethodTypeDexCachePair::Initialize(method_types);
}
@@ -160,7 +148,7 @@
methods,
dex_file->NumMethodIds(),
fields,
- num_fields,
+ dex_file->NumFieldIds(),
method_types,
num_method_types,
call_sites,
@@ -176,7 +164,7 @@
uint32_t num_resolved_types,
ArtMethod** resolved_methods,
uint32_t num_resolved_methods,
- FieldDexCacheType* resolved_fields,
+ ArtField** resolved_fields,
uint32_t num_resolved_fields,
MethodTypeDexCacheType* resolved_method_types,
uint32_t num_resolved_method_types,
@@ -230,23 +218,5 @@
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location);
}
-#if !defined(__aarch64__) && !defined(__x86_64__)
-static pthread_mutex_t dex_cache_slow_atomic_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-DexCache::ConversionPair64 DexCache::AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target) {
- pthread_mutex_lock(&dex_cache_slow_atomic_mutex);
- DexCache::ConversionPair64 value = *reinterpret_cast<ConversionPair64*>(target);
- pthread_mutex_unlock(&dex_cache_slow_atomic_mutex);
- return value;
-}
-
-void DexCache::AtomicStoreRelease16B(std::atomic<ConversionPair64>* target,
- ConversionPair64 value) {
- pthread_mutex_lock(&dex_cache_slow_atomic_mutex);
- *reinterpret_cast<ConversionPair64*>(target) = value;
- pthread_mutex_unlock(&dex_cache_slow_atomic_mutex);
-}
-#endif
-
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 35707ef..0579198 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -91,44 +91,12 @@
}
};
-template <typename T> struct PACKED(2 * __SIZEOF_POINTER__) NativeDexCachePair {
- T* object;
- size_t index;
- // This is similar to DexCachePair except that we're storing a native pointer
- // instead of a GC root. See DexCachePair for the details.
- NativeDexCachePair(T* object, uint32_t index)
- : object(object),
- index(index) {}
- NativeDexCachePair() : object(nullptr), index(0u) { }
- NativeDexCachePair(const NativeDexCachePair<T>&) = default;
- NativeDexCachePair& operator=(const NativeDexCachePair<T>&) = default;
-
- static void Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache, PointerSize pointer_size);
-
- static uint32_t InvalidIndexForSlot(uint32_t slot) {
- // Since the cache size is a power of two, 0 will always map to slot 0.
- // Use 1 for slot 0 and 0 for all other slots.
- return (slot == 0) ? 1u : 0u;
- }
-
- T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (idx != index) {
- return nullptr;
- }
- DCHECK(object != nullptr);
- return object;
- }
-};
-
using TypeDexCachePair = DexCachePair<Class>;
using TypeDexCacheType = std::atomic<TypeDexCachePair>;
using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
-using FieldDexCachePair = NativeDexCachePair<ArtField>;
-using FieldDexCacheType = std::atomic<FieldDexCachePair>;
-
using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
@@ -148,11 +116,6 @@
static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
"String dex cache size is not a power of 2.");
- // Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
- static constexpr size_t kDexCacheFieldCacheSize = 1024;
- static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
- "Field dex cache size is not a power of 2.");
-
// Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
// to hold.
static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
@@ -167,10 +130,6 @@
return kDexCacheStringCacheSize;
}
- static constexpr size_t StaticArtFieldSize() {
- return kDexCacheFieldCacheSize;
- }
-
static constexpr size_t StaticMethodTypeSize() {
return kDexCacheMethodTypeCacheSize;
}
@@ -296,8 +255,6 @@
// Pointer sized variant, used for patching.
ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void ClearResolvedField(uint32_t idx, PointerSize ptr_size)
- REQUIRES_SHARED(Locks::mutator_lock_);
MethodType* GetResolvedMethodType(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -342,11 +299,11 @@
SetFieldPtr<false>(ResolvedMethodsOffset(), resolved_methods);
}
- FieldDexCacheType* GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtr<FieldDexCacheType*>(ResolvedFieldsOffset());
+ ArtField** GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldPtr<ArtField**>(ResolvedFieldsOffset());
}
- void SetResolvedFields(FieldDexCacheType* resolved_fields)
+ void SetResolvedFields(ArtField** resolved_fields)
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
@@ -419,17 +376,6 @@
template <typename PtrType>
static void SetElementPtrSize(PtrType* ptr_array, size_t idx, PtrType ptr, PointerSize ptr_size);
- template <typename T>
- static NativeDexCachePair<T> GetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
- size_t idx,
- PointerSize ptr_size);
-
- template <typename T>
- static void SetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
- size_t idx,
- NativeDexCachePair<T> pair,
- PointerSize ptr_size);
-
private:
void Init(const DexFile* dex_file,
ObjPtr<String> location,
@@ -439,7 +385,7 @@
uint32_t num_resolved_types,
ArtMethod** resolved_methods,
uint32_t num_resolved_methods,
- FieldDexCacheType* resolved_fields,
+ ArtField** resolved_fields,
uint32_t num_resolved_fields,
MethodTypeDexCacheType* resolved_method_types,
uint32_t num_resolved_method_types,
@@ -448,22 +394,8 @@
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- // std::pair<> is not trivially copyable and as such it is unsuitable for atomic operations,
- // so we use a custom pair class for loading and storing the NativeDexCachePair<>.
- template <typename IntType>
- struct PACKED(2 * sizeof(IntType)) ConversionPair {
- ConversionPair(IntType f, IntType s) : first(f), second(s) { }
- ConversionPair(const ConversionPair&) = default;
- ConversionPair& operator=(const ConversionPair&) = default;
- IntType first;
- IntType second;
- };
- using ConversionPair32 = ConversionPair<uint32_t>;
- using ConversionPair64 = ConversionPair<uint64_t>;
-
uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
// Visit instance fields of the dex cache as well as its associated arrays.
@@ -474,55 +406,12 @@
void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
- // Due to lack of 16-byte atomics support, we use hand-crafted routines.
-#if defined(__aarch64__)
- // 16-byte atomics are supported on aarch64.
- ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
- std::atomic<ConversionPair64>* target) {
- return target->load(std::memory_order_relaxed);
- }
-
- ALWAYS_INLINE static void AtomicStoreRelease16B(
- std::atomic<ConversionPair64>* target, ConversionPair64 value) {
- target->store(value, std::memory_order_release);
- }
-#elif defined(__x86_64__)
- ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
- std::atomic<ConversionPair64>* target) {
- uint64_t first, second;
- __asm__ __volatile__(
- "lock cmpxchg16b (%2)"
- : "=&a"(first), "=&d"(second)
- : "r"(target), "a"(0), "d"(0), "b"(0), "c"(0)
- : "cc");
- return ConversionPair64(first, second);
- }
-
- ALWAYS_INLINE static void AtomicStoreRelease16B(
- std::atomic<ConversionPair64>* target, ConversionPair64 value) {
- uint64_t first, second;
- __asm__ __volatile__ (
- "movq (%2), %%rax\n\t"
- "movq 8(%2), %%rdx\n\t"
- "1:\n\t"
- "lock cmpxchg16b (%2)\n\t"
- "jnz 1b"
- : "=&a"(first), "=&d"(second)
- : "r"(target), "b"(value.first), "c"(value.second)
- : "cc");
- }
-#else
- static ConversionPair64 AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target);
- static void AtomicStoreRelease16B(std::atomic<ConversionPair64>* target, ConversionPair64 value);
-#endif
-
HeapReference<Object> dex_;
HeapReference<String> location_;
uint64_t dex_file_; // const DexFile*
uint64_t resolved_call_sites_; // GcRoot<CallSite>* array with num_resolved_call_sites_
// elements.
- uint64_t resolved_fields_; // std::atomic<FieldDexCachePair>*, array with
- // num_resolved_fields_ elements.
+ uint64_t resolved_fields_; // ArtField*, array with num_resolved_fields_ elements.
uint64_t resolved_method_types_; // std::atomic<MethodTypeDexCachePair>* array with
// num_resolved_method_types_ elements.
uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements.
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 71a47f6..ef0aaaa 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -54,8 +54,7 @@
EXPECT_TRUE(dex_cache->StaticTypeSize() == dex_cache->NumResolvedTypes()
|| java_lang_dex_file_->NumTypeIds() == dex_cache->NumResolvedTypes());
EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods());
- EXPECT_TRUE(dex_cache->StaticArtFieldSize() == dex_cache->NumResolvedFields()
- || java_lang_dex_file_->NumFieldIds() == dex_cache->NumResolvedFields());
+ EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
EXPECT_TRUE(dex_cache->StaticMethodTypeSize() == dex_cache->NumResolvedMethodTypes()
|| java_lang_dex_file_->NumProtoIds() == dex_cache->NumResolvedMethodTypes());
}
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index 54034c2..f6b6489 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -68,16 +68,8 @@
}
}
mirror::DexCache* const dex_cache = declaring_class->GetDexCache();
- ArtField* art_field = dex_cache->GetResolvedField(GetDexFieldIndex(), kRuntimePointerSize);
- if (UNLIKELY(art_field == nullptr)) {
- if (IsStatic()) {
- art_field = declaring_class->FindDeclaredStaticField(dex_cache, GetDexFieldIndex());
- } else {
- art_field = declaring_class->FindInstanceField(dex_cache, GetDexFieldIndex());
- }
- CHECK(art_field != nullptr);
- dex_cache->SetResolvedField(GetDexFieldIndex(), art_field, kRuntimePointerSize);
- }
+ ArtField* const art_field = dex_cache->GetResolvedField(GetDexFieldIndex(), kRuntimePointerSize);
+ CHECK(art_field != nullptr);
CHECK_EQ(declaring_class, art_field->GetDeclaringClass());
return art_field;
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index d81c13d..9b707f8 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -444,7 +444,6 @@
if (!kPreloadDexCachesCollectStats) {
return;
}
- // TODO: Update for hash-based DexCache arrays.
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
Thread* const self = Thread::Current();
for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
@@ -464,7 +463,7 @@
}
}
for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
- ArtField* field = dex_cache->GetResolvedField(j, class_linker->GetImagePointerSize());
+ ArtField* field = class_linker->GetResolvedField(j, dex_cache);
if (field != nullptr) {
filled->num_fields++;
}
diff --git a/runtime/oat.h b/runtime/oat.h
index df43107..1544121 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '1', '1', '5', '\0' }; // hash-based DexCache fields
+ static constexpr uint8_t kOatVersion[] = { '1', '1', '4', '\0' }; // hash-based DexCache types.
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 3347070..b009b47 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -215,8 +215,9 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsInstructionIPut(new_iput->Opcode()));
uint32_t field_index = new_iput->VRegC_22c();
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static */ false);
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ mirror::DexCache* dex_cache = method->GetDexCache();
+ ArtField* field = dex_cache->GetResolvedField(field_index, pointer_size);
if (UNLIKELY(field == nullptr)) {
return false;
}
@@ -226,9 +227,7 @@
if (iputs[old_pos].field_index == DexFile::kDexNoIndex16) {
break;
}
- ArtField* f = class_linker->LookupResolvedField(iputs[old_pos].field_index,
- method,
- /* is_static */ false);
+ ArtField* f = dex_cache->GetResolvedField(iputs[old_pos].field_index, pointer_size);
DCHECK(f != nullptr);
if (f == field) {
auto back_it = std::copy(iputs + old_pos + 1, iputs + arraysize(iputs), iputs + old_pos);
@@ -733,9 +732,9 @@
if (method == nullptr) {
return false;
}
- ObjPtr<mirror::DexCache> dex_cache = method->GetDexCache();
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static */ false);
+ mirror::DexCache* dex_cache = method->GetDexCache();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ ArtField* field = dex_cache->GetResolvedField(field_idx, pointer_size);
if (field == nullptr || field->IsStatic()) {
return false;
}
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 95904af..f9a1405 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -51,11 +51,7 @@
: DexCacheArraysLayout(pointer_size, dex_file->GetHeader(), dex_file->NumCallSiteIds()) {
}
-inline size_t DexCacheArraysLayout::Alignment() const {
- return Alignment(pointer_size_);
-}
-
-inline constexpr size_t DexCacheArraysLayout::Alignment(PointerSize pointer_size) {
+constexpr size_t DexCacheArraysLayout::Alignment() {
// mirror::Type/String/MethodTypeDexCacheType alignment is 8,
// i.e. higher than or equal to the pointer alignment.
static_assert(alignof(mirror::TypeDexCacheType) == 8,
@@ -64,8 +60,8 @@
"Expecting alignof(StringDexCacheType) == 8");
static_assert(alignof(mirror::MethodTypeDexCacheType) == 8,
"Expecting alignof(MethodTypeDexCacheType) == 8");
- // This is the same as alignof(FieldDexCacheType) for the given pointer size.
- return 2u * static_cast<size_t>(pointer_size);
+ // This is the same as alignof(MethodTypeDexCacheType).
+ return alignof(mirror::StringDexCacheType);
}
template <typename T>
@@ -104,8 +100,8 @@
}
inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const {
- uint32_t string_hash = string_idx % mirror::DexCache::kDexCacheStringCacheSize;
- return strings_offset_ + ElementOffset(PointerSize::k64, string_hash);
+ return strings_offset_ + ElementOffset(PointerSize::k64,
+ string_idx % mirror::DexCache::kDexCacheStringCacheSize);
}
inline size_t DexCacheArraysLayout::StringsSize(size_t num_elements) const {
@@ -123,20 +119,15 @@
}
inline size_t DexCacheArraysLayout::FieldOffset(uint32_t field_idx) const {
- uint32_t field_hash = field_idx % mirror::DexCache::kDexCacheFieldCacheSize;
- return fields_offset_ + 2u * static_cast<size_t>(pointer_size_) * field_hash;
+ return fields_offset_ + ElementOffset(pointer_size_, field_idx);
}
inline size_t DexCacheArraysLayout::FieldsSize(size_t num_elements) const {
- size_t cache_size = mirror::DexCache::kDexCacheFieldCacheSize;
- if (num_elements < cache_size) {
- cache_size = num_elements;
- }
- return 2u * static_cast<size_t>(pointer_size_) * num_elements;
+ return ArraySize(pointer_size_, num_elements);
}
inline size_t DexCacheArraysLayout::FieldsAlignment() const {
- return 2u * static_cast<size_t>(pointer_size_);
+ return static_cast<size_t>(pointer_size_);
}
inline size_t DexCacheArraysLayout::MethodTypesSize(size_t num_elements) const {
diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h
index 377a374..ed677ed 100644
--- a/runtime/utils/dex_cache_arrays_layout.h
+++ b/runtime/utils/dex_cache_arrays_layout.h
@@ -57,9 +57,7 @@
return size_;
}
- size_t Alignment() const;
-
- static constexpr size_t Alignment(PointerSize pointer_size);
+ static constexpr size_t Alignment();
size_t TypesOffset() const {
return types_offset_;
@@ -127,6 +125,8 @@
const size_t call_sites_offset_;
const size_t size_;
+ static size_t Alignment(PointerSize pointer_size);
+
static size_t ElementOffset(PointerSize element_size, uint32_t idx);
static size_t ArraySize(PointerSize element_size, uint32_t num_elements);