ART: Compress LengthPrefixedArray on 32-bit targets.
Previously, the LengthPrefixedArray<ArtMethod> on 32-bit
targets contained a 64-bit length field followed by the
ArtMethod elements with size only a multiple of 4, not 8.
Consequently, an odd-length array broke the alignment for
the following array which would have the 64-bit length
placed at an unaligned address.
To fix that, we make the length field 32-bit and explicitly
pass the alignment information to the LengthPrefixedArray.
This also makes the 32-bit boot image a bit smaller.
On Nexus 5, AOSP, ToT, the field section is 11528B smaller
and the method section is 21036B smaller. 64-bit targets
should see the same savings for the field section but no
difference for the methods section.
Change-Id: I3e03e7b94129025c8a1c117c27645a34dec516d2
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a03ff75..c4fe2cb 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -244,8 +244,8 @@
DCHECK(object != nullptr);
DCHECK_NE(image_objects_offset_begin_, 0u);
- size_t previous_bin_sizes = bin_slot_previous_sizes_[bin_slot.GetBin()];
- size_t new_offset = image_objects_offset_begin_ + previous_bin_sizes + bin_slot.GetIndex();
+ size_t bin_slot_offset = bin_slot_offsets_[bin_slot.GetBin()];
+ size_t new_offset = bin_slot_offset + bin_slot.GetIndex();
DCHECK_ALIGNED(new_offset, kObjectAlignment);
SetImageOffset(object, new_offset);
@@ -866,8 +866,10 @@
}
bool any_dirty = false;
size_t count = 0;
+ const size_t method_alignment = ArtMethod::ObjectAlignment(target_ptr_size_);
const size_t method_size = ArtMethod::ObjectSize(target_ptr_size_);
- auto iteration_range = MakeIterationRangeFromLengthPrefixedArray(array, method_size);
+ auto iteration_range =
+ MakeIterationRangeFromLengthPrefixedArray(array, method_size, method_alignment);
for (auto& m : iteration_range) {
any_dirty = any_dirty || WillMethodBeDirty(&m);
++count;
@@ -876,7 +878,9 @@
kNativeObjectRelocationTypeArtMethodClean;
Bin bin_type = BinTypeForNativeRelocationType(type);
// Forward the entire array at once, but header first.
- const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0, method_size);
+ const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0,
+ method_size,
+ method_alignment);
auto it = native_object_relocations_.find(array);
CHECK(it == native_object_relocations_.end()) << "Method array " << array
<< " already forwarded";
@@ -972,9 +976,10 @@
size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
native_object_relocations_.emplace(&image_method_array_,
NativeObjectRelocation { offset, image_method_type });
+ size_t method_alignment = ArtMethod::ObjectAlignment(target_ptr_size_);
const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize(
- 0, ArtMethod::ObjectSize(target_ptr_size_));
- CHECK_ALIGNED(array_size, 8u);
+ 0, ArtMethod::ObjectSize(target_ptr_size_), method_alignment);
+ CHECK_ALIGNED_PARAM(array_size, method_alignment);
offset += array_size;
for (auto* m : image_methods_) {
CHECK(m != nullptr);
@@ -982,13 +987,21 @@
AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean);
}
- // Calculate cumulative bin slot sizes.
- size_t previous_sizes = 0u;
+ // Calculate bin slot offsets.
+ size_t bin_offset = image_objects_offset_begin_;
for (size_t i = 0; i != kBinSize; ++i) {
- bin_slot_previous_sizes_[i] = previous_sizes;
- previous_sizes += bin_slot_sizes_[i];
+ bin_slot_offsets_[i] = bin_offset;
+ bin_offset += bin_slot_sizes_[i];
+ if (i == kBinArtField) {
+ static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields.");
+ static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4.");
+ DCHECK_ALIGNED(bin_offset, 4u);
+ DCHECK(method_alignment == 4u || method_alignment == 8u);
+ bin_offset = RoundUp(bin_offset, method_alignment);
+ }
}
- DCHECK_EQ(previous_sizes, GetBinSizeSum());
+ // NOTE: There may be additional padding between the bin slots and the intern table.
+
DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
// Transform each object's bin slot into an offset which will be used to do the final copy.
@@ -1002,7 +1015,7 @@
for (auto& pair : native_object_relocations_) {
NativeObjectRelocation& relocation = pair.second;
Bin bin_type = BinTypeForNativeRelocationType(relocation.type);
- relocation.offset += image_objects_offset_begin_ + bin_slot_previous_sizes_[bin_type];
+ relocation.offset += bin_slot_offsets_[bin_type];
}
// Calculate how big the intern table will be after being serialized.
@@ -1029,15 +1042,15 @@
// Add field section.
auto* field_section = &sections[ImageHeader::kSectionArtFields];
*field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]);
- CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtField],
- field_section->Offset());
+ CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset());
cur_pos = field_section->End();
+ // Round up to the alignment required by the method section.
+ cur_pos = RoundUp(cur_pos, ArtMethod::ObjectAlignment(target_ptr_size_));
// Add method section.
auto* methods_section = &sections[ImageHeader::kSectionArtMethods];
*methods_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtMethodClean] +
bin_slot_sizes_[kBinArtMethodDirty]);
- CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtMethodClean],
- methods_section->Offset());
+ CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
cur_pos = methods_section->End();
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
@@ -1135,7 +1148,10 @@
}
case kNativeObjectRelocationTypeArtMethodArrayClean:
case kNativeObjectRelocationTypeArtMethodArrayDirty: {
- memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(0));
+ memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(
+ 0,
+ ArtMethod::ObjectSize(target_ptr_size_),
+ ArtMethod::ObjectAlignment(target_ptr_size_)));
break;
}
}
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index f4e10cc..c8aa82d 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -54,7 +54,7 @@
quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0),
quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
- bin_slot_sizes_(), bin_slot_previous_sizes_(), bin_slot_count_(),
+ bin_slot_sizes_(), bin_slot_offsets_(), bin_slot_count_(),
intern_table_bytes_(0u), image_method_array_(ImageHeader::kImageMethodsCount),
dirty_methods_(0u), clean_methods_(0u) {
CHECK_NE(image_begin, 0U);
@@ -359,7 +359,7 @@
// Bin slot tracking for dirty object packing
size_t bin_slot_sizes_[kBinSize]; // Number of bytes in a bin
- size_t bin_slot_previous_sizes_[kBinSize]; // Number of bytes in previous bins.
+ size_t bin_slot_offsets_[kBinSize]; // Number of bytes in previous bins plus the image begin offset.
size_t bin_slot_count_[kBinSize]; // Number of objects in a bin
// Cached size of the intern table for when we allocate memory.
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 5e29ca7..c8ecea0 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1618,9 +1618,7 @@
const size_t pointer_size =
InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet());
DumpArtMethodVisitor visitor(this);
- methods_section.VisitPackedArtMethods(&visitor,
- image_space->Begin(),
- ArtMethod::ObjectSize(pointer_size));
+ methods_section.VisitPackedArtMethods(&visitor, image_space->Begin(), pointer_size);
}
}
// Dump the large objects separately.
@@ -1642,13 +1640,19 @@
const auto& intern_section = image_header_.GetImageSection(
ImageHeader::kSectionInternedStrings);
stats_.header_bytes = header_bytes;
- size_t alignment_bytes = RoundUp(header_bytes, kObjectAlignment) - header_bytes;
- stats_.alignment_bytes += alignment_bytes;
+ stats_.alignment_bytes += RoundUp(header_bytes, kObjectAlignment) - header_bytes;
+ // Add padding between the field and method section.
+ // (Field section is 4-byte aligned, method section is 8-byte aligned on 64-bit targets.)
+ stats_.alignment_bytes +=
+ method_section.Offset() - (field_section.Offset() + field_section.Size());
+ // Add padding between the method section and the intern table.
+ // (Method section is 4-byte aligned on 32-bit targets, intern table is 8-byte aligned.)
+ stats_.alignment_bytes +=
+ intern_section.Offset() - (method_section.Offset() + method_section.Size());
stats_.alignment_bytes += bitmap_section.Offset() - image_header_.GetImageSize();
stats_.bitmap_bytes += bitmap_section.Size();
stats_.art_field_bytes += field_section.Size();
- // RoundUp to 8 bytes to match the intern table alignment expectation.
- stats_.art_method_bytes += RoundUp(method_section.Size(), sizeof(uint64_t));
+ stats_.art_method_bytes += method_section.Size();
stats_.interned_strings_bytes += intern_section.Size();
stats_.Dump(os, indent_os);
os << "\n";
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 283eea9..d54e1b2 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -454,9 +454,8 @@
void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
const auto& section = image_header->GetMethodsSection();
const size_t pointer_size = InstructionSetPointerSize(isa_);
- const size_t method_size = ArtMethod::ObjectSize(pointer_size);
PatchOatArtMethodVisitor visitor(this);
- section.VisitPackedArtMethods(&visitor, heap_->Begin(), method_size);
+ section.VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
}
class FixupRootVisitor : public RootVisitor {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 85c03ed..8221fe2 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -513,6 +513,13 @@
(sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
}
+ // Alignment of an instance of this object.
+ static size_t ObjectAlignment(size_t pointer_size) {
+ // The ArtMethod alignment is the same as image pointer size. This differs from
+ // alignof(ArtMethod) if cross-compiling with image_pointer_size_ != sizeof(void*).
+ return pointer_size;
+ }
+
void CopyFrom(const ArtMethod* src, size_t image_pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index 6f45dc8..1da6750 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -158,6 +158,9 @@
#define DCHECK_ALIGNED(value, alignment) \
DCHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<const void*>(value)
+#define CHECK_ALIGNED_PARAM(value, alignment) \
+ CHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value)
+
#define DCHECK_ALIGNED_PARAM(value, alignment) \
DCHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f19263d..ef48710 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1213,9 +1213,8 @@
if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
const ImageHeader& header = space->GetImageHeader();
const ImageSection& methods = header.GetMethodsSection();
- const size_t art_method_size = ArtMethod::ObjectSize(image_pointer_size_);
SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_);
- methods.VisitPackedArtMethods(&visitor, space->Begin(), art_method_size);
+ methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
}
// reinit class_roots_
@@ -2294,9 +2293,11 @@
if (length == 0) {
return nullptr;
}
- auto* ret = new(Runtime::Current()->GetLinearAlloc()->Alloc(
- self, LengthPrefixedArray<ArtField>::ComputeSize(length))) LengthPrefixedArray<ArtField>(
- length);
+ // If the ArtField alignment changes, review all uses of LengthPrefixedArray<ArtField>.
+ static_assert(alignof(ArtField) == 4, "ArtField alignment is expected to be 4.");
+ size_t storage_size = LengthPrefixedArray<ArtField>::ComputeSize(length);
+ void* array_storage = Runtime::Current()->GetLinearAlloc()->Alloc(self, storage_size);
+ auto* ret = new(array_storage) LengthPrefixedArray<ArtField>(length);
CHECK(ret != nullptr);
std::uninitialized_fill_n(&ret->At(0), length, ArtField());
return ret;
@@ -2306,13 +2307,15 @@
if (length == 0) {
return nullptr;
}
+ const size_t method_alignment = ArtMethod::ObjectAlignment(image_pointer_size_);
const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_);
- auto* ret = new (Runtime::Current()->GetLinearAlloc()->Alloc(
- self, LengthPrefixedArray<ArtMethod>::ComputeSize(length, method_size)))
- LengthPrefixedArray<ArtMethod>(length);
+ const size_t storage_size =
+ LengthPrefixedArray<ArtMethod>::ComputeSize(length, method_size, method_alignment);
+ void* array_storage = Runtime::Current()->GetLinearAlloc()->Alloc(self, storage_size);
+ auto* ret = new (array_storage) LengthPrefixedArray<ArtMethod>(length);
CHECK(ret != nullptr);
for (size_t i = 0; i < length; ++i) {
- new(reinterpret_cast<void*>(&ret->At(i, method_size))) ArtMethod;
+ new(reinterpret_cast<void*>(&ret->At(i, method_size, method_alignment))) ArtMethod;
}
return ret;
}
@@ -4689,6 +4692,7 @@
const bool have_interfaces = interfaces.Get() != nullptr;
const size_t num_interfaces =
have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces();
+ const size_t method_alignment = ArtMethod::ObjectAlignment(image_pointer_size_);
const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_);
if (num_interfaces == 0) {
if (super_ifcount == 0) {
@@ -4914,7 +4918,7 @@
// matter which direction we go. We walk it backward anyway.)
for (k = input_array_length - 1; k >= 0; --k) {
ArtMethod* vtable_method = input_virtual_methods != nullptr ?
- &input_virtual_methods->At(k, method_size) :
+ &input_virtual_methods->At(k, method_size, method_alignment) :
input_vtable_array->GetElementPtrSize<ArtMethod*>(k, image_pointer_size_);
ArtMethod* vtable_method_for_name_comparison =
vtable_method->GetInterfaceMethodIfProxy(image_pointer_size_);
@@ -4975,10 +4979,14 @@
// where GCs could attempt to mark stale pointers due to memcpy. And since we overwrite the
// realloced memory with out->CopyFrom, we are guaranteed to have objects in the to space since
// CopyFrom has internal read barriers.
- const size_t old_size = old_virtuals != nullptr ?
- LengthPrefixedArray<ArtMethod>::ComputeSize(old_method_count, method_size) : 0u;
+ const size_t old_size = old_virtuals != nullptr
+ ? LengthPrefixedArray<ArtMethod>::ComputeSize(old_method_count,
+ method_size,
+ method_alignment)
+ : 0u;
const size_t new_size = LengthPrefixedArray<ArtMethod>::ComputeSize(new_method_count,
- method_size);
+ method_size,
+ method_alignment);
auto* virtuals = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
runtime->GetLinearAlloc()->Realloc(self, old_virtuals, old_size, new_size));
if (UNLIKELY(virtuals == nullptr)) {
@@ -4989,7 +4997,7 @@
ScopedArenaUnorderedMap<ArtMethod*, ArtMethod*> move_table(allocator.Adapter());
if (virtuals != old_virtuals) {
// Maps from heap allocated miranda method to linear alloc miranda method.
- StrideIterator<ArtMethod> out = virtuals->Begin(method_size);
+ StrideIterator<ArtMethod> out = virtuals->Begin(method_size, method_alignment);
// Copy over the old methods + miranda methods.
for (auto& m : klass->GetVirtualMethods(image_pointer_size_)) {
move_table.emplace(&m, &*out);
@@ -4999,7 +5007,7 @@
++out;
}
}
- StrideIterator<ArtMethod> out(virtuals->Begin(method_size) + old_method_count);
+ StrideIterator<ArtMethod> out(virtuals->Begin(method_size, method_alignment) + old_method_count);
// Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
// we want the roots of the miranda methods to get visited.
for (ArtMethod* mir_method : miranda_methods) {
@@ -5022,7 +5030,7 @@
self->AssertPendingOOMException();
return false;
}
- out = StrideIterator<ArtMethod>(virtuals->Begin(method_size) + old_method_count);
+ out = virtuals->Begin(method_size, method_alignment) + old_method_count;
size_t vtable_pos = old_vtable_count;
for (size_t i = old_method_count; i < new_method_count; ++i) {
// Leave the declaring class alone as type indices are relative to it
@@ -5893,8 +5901,10 @@
}
ArtMethod* ClassLinker::CreateRuntimeMethod() {
+ const size_t method_alignment = ArtMethod::ObjectAlignment(image_pointer_size_);
const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_);
- ArtMethod* method = &AllocArtMethodArray(Thread::Current(), 1)->At(0, method_size);
+ LengthPrefixedArray<ArtMethod>* method_array = AllocArtMethodArray(Thread::Current(), 1);
+ ArtMethod* method = &method_array->At(0, method_size, method_alignment);
CHECK(method != nullptr);
method->SetDexMethodIndex(DexFile::kDexNoIndex);
CHECK(method->IsRuntimeMethod());
diff --git a/runtime/image.cc b/runtime/image.cc
index ba1e58b..5890947b 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '8', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '9', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
@@ -153,19 +153,21 @@
for (size_t i = 0; i < array->Length(); ++i) {
visitor->Visit(&array->At(i, sizeof(ArtField)));
}
- pos += array->ComputeSize(array->Length(), sizeof(ArtField));
+ pos += array->ComputeSize(array->Length());
}
}
void ImageSection::VisitPackedArtMethods(ArtMethodVisitor* visitor,
uint8_t* base,
- size_t method_size) const {
+ size_t pointer_size) const {
+ const size_t method_alignment = ArtMethod::ObjectAlignment(pointer_size);
+ const size_t method_size = ArtMethod::ObjectSize(pointer_size);
for (size_t pos = 0; pos < Size(); ) {
auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + Offset() + pos);
for (size_t i = 0; i < array->Length(); ++i) {
- visitor->Visit(&array->At(i, method_size));
+ visitor->Visit(&array->At(i, method_size, method_alignment));
}
- pos += array->ComputeSize(array->Length(), method_size);
+ pos += array->ComputeSize(array->Length(), method_size, method_alignment);
}
}
diff --git a/runtime/image.h b/runtime/image.h
index eb26f7f..1a0d8fd 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -65,7 +65,7 @@
}
// Visit ArtMethods in the section starting at base.
- void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t method_size) const;
+ void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const;
// Visit ArtFields in the section starting at base.
void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;
diff --git a/runtime/length_prefixed_array.h b/runtime/length_prefixed_array.h
index 2b2e8d3..d9bc656 100644
--- a/runtime/length_prefixed_array.h
+++ b/runtime/length_prefixed_array.h
@@ -21,6 +21,8 @@
#include "linear_alloc.h"
#include "stride_iterator.h"
+#include "base/bit_utils.h"
+#include "base/casts.h"
#include "base/iteration_range.h"
namespace art {
@@ -28,29 +30,35 @@
template<typename T>
class LengthPrefixedArray {
public:
- explicit LengthPrefixedArray(uint64_t length) : length_(length) {}
+ explicit LengthPrefixedArray(size_t length)
+ : length_(dchecked_integral_cast<uint32_t>(length)) {}
- T& At(size_t index, size_t element_size = sizeof(T)) {
+ T& At(size_t index, size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
DCHECK_LT(index, length_);
- return *reinterpret_cast<T*>(&data_[0] + index * element_size);
+ return AtUnchecked(index, element_size, alignment);
}
- StrideIterator<T> Begin(size_t element_size = sizeof(T)) {
- return StrideIterator<T>(reinterpret_cast<T*>(&data_[0]), element_size);
+ StrideIterator<T> Begin(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
+ return StrideIterator<T>(&AtUnchecked(0, element_size, alignment), element_size);
}
- StrideIterator<T> End(size_t element_size = sizeof(T)) {
- return StrideIterator<T>(reinterpret_cast<T*>(&data_[0] + element_size * length_),
- element_size);
+ StrideIterator<T> End(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
+ return StrideIterator<T>(&AtUnchecked(length_, element_size, alignment), element_size);
}
- static size_t OffsetOfElement(size_t index, size_t element_size = sizeof(T)) {
- return offsetof(LengthPrefixedArray<T>, data_) + index * element_size;
+ static size_t OffsetOfElement(size_t index,
+ size_t element_size = sizeof(T),
+ size_t alignment = alignof(T)) {
+ DCHECK_ALIGNED_PARAM(element_size, alignment);
+ return RoundUp(offsetof(LengthPrefixedArray<T>, data), alignment) + index * element_size;
}
- // Alignment is the caller's responsibility.
- static size_t ComputeSize(size_t num_elements, size_t element_size = sizeof(T)) {
- return OffsetOfElement(num_elements, element_size);
+ static size_t ComputeSize(size_t num_elements,
+ size_t element_size = sizeof(T),
+ size_t alignment = alignof(T)) {
+ size_t result = OffsetOfElement(num_elements, element_size, alignment);
+ DCHECK_ALIGNED_PARAM(result, alignment);
+ return result;
}
uint64_t Length() const {
@@ -58,21 +66,26 @@
}
// Update the length but does not reallocate storage.
- void SetLength(uint64_t length) {
- length_ = length;
+ void SetLength(size_t length) {
+ length_ = dchecked_integral_cast<uint32_t>(length);
}
private:
- uint64_t length_; // 64 bits for 8 byte alignment of data_.
- uint8_t data_[0];
+ T& AtUnchecked(size_t index, size_t element_size, size_t alignment) {
+ return *reinterpret_cast<T*>(
+ reinterpret_cast<uintptr_t>(this) + OffsetOfElement(index, element_size, alignment));
+ }
+
+ uint32_t length_;
+ uint8_t data[0];
};
// Returns empty iteration range if the array is null.
template<typename T>
IterationRange<StrideIterator<T>> MakeIterationRangeFromLengthPrefixedArray(
- LengthPrefixedArray<T>* arr, size_t element_size) {
+ LengthPrefixedArray<T>* arr, size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
return arr != nullptr ?
- MakeIterationRange(arr->Begin(element_size), arr->End(element_size)) :
+ MakeIterationRange(arr->Begin(element_size, alignment), arr->End(element_size, alignment)) :
MakeEmptyIterationRange(StrideIterator<T>(nullptr, 0));
}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 887e204..b15747f 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -92,14 +92,18 @@
CheckPointerSize(pointer_size);
auto* methods = GetDirectMethodsPtrUnchecked();
DCHECK(methods != nullptr);
- return &methods->At(i, ArtMethod::ObjectSize(pointer_size));
+ return &methods->At(i,
+ ArtMethod::ObjectSize(pointer_size),
+ ArtMethod::ObjectAlignment(pointer_size));
}
inline ArtMethod* Class::GetDirectMethod(size_t i, size_t pointer_size) {
CheckPointerSize(pointer_size);
auto* methods = GetDirectMethodsPtr();
DCHECK(methods != nullptr);
- return &methods->At(i, ArtMethod::ObjectSize(pointer_size));
+ return &methods->At(i,
+ ArtMethod::ObjectSize(pointer_size),
+ ArtMethod::ObjectAlignment(pointer_size));
}
template<VerifyObjectFlags kVerifyFlags>
@@ -133,7 +137,9 @@
CheckPointerSize(pointer_size);
LengthPrefixedArray<ArtMethod>* methods = GetVirtualMethodsPtrUnchecked();
DCHECK(methods != nullptr);
- return &methods->At(i, ArtMethod::ObjectSize(pointer_size));
+ return &methods->At(i,
+ ArtMethod::ObjectSize(pointer_size),
+ ArtMethod::ObjectAlignment(pointer_size));
}
inline PointerArray* Class::GetVTable() {
@@ -837,29 +843,31 @@
inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(size_t pointer_size) {
CheckPointerSize(pointer_size);
return MakeIterationRangeFromLengthPrefixedArray(GetDirectMethodsPtrUnchecked(),
- ArtMethod::ObjectSize(pointer_size));
+ ArtMethod::ObjectSize(pointer_size),
+ ArtMethod::ObjectAlignment(pointer_size));
}
inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(size_t pointer_size) {
CheckPointerSize(pointer_size);
return MakeIterationRangeFromLengthPrefixedArray(GetVirtualMethodsPtrUnchecked(),
- ArtMethod::ObjectSize(pointer_size));
+ ArtMethod::ObjectSize(pointer_size),
+ ArtMethod::ObjectAlignment(pointer_size));
}
inline IterationRange<StrideIterator<ArtField>> Class::GetIFields() {
- return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtr(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtr());
}
inline IterationRange<StrideIterator<ArtField>> Class::GetSFields() {
- return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtr(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtr());
}
inline IterationRange<StrideIterator<ArtField>> Class::GetIFieldsUnchecked() {
- return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtrUnchecked(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtrUnchecked());
}
inline IterationRange<StrideIterator<ArtField>> Class::GetSFieldsUnchecked() {
- return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked(), sizeof(ArtField));
+ return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked());
}
inline MemberOffset Class::EmbeddedImTableOffset(size_t pointer_size) {
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 1ca98e5..c337e91 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -208,7 +208,7 @@
}
}
if (kIsDebugBuild) {
- for (ArtField& field : MakeIterationRangeFromLengthPrefixedArray(fields, sizeof(ArtField))) {
+ for (ArtField& field : MakeIterationRangeFromLengthPrefixedArray(fields)) {
CHECK_NE(field.GetName(), name->ToModifiedUtf8());
}
}